repo_name | path | copies | size | content | license
---|---|---|---|---|---
moijes12/oh-mainline | vendor/packages/mechanize/test/test_pullparser.py | 22 | 13277 |
#!/usr/bin/env python
from unittest import TestCase
def peek_token(p):
tok = p.get_token()
p.unget_token(tok)
return tok
class PullParserTests(TestCase):
from mechanize._pullparser import PullParser, TolerantPullParser
PARSERS = [(PullParser, False), (TolerantPullParser, True)]
def data_and_file(self):
from StringIO import StringIO
data = """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title an=attr>Title</title>
</head>
<body>
<p>This is a data <img alt="blah & a"> &amp; that was an entityref and this &#097; is
a charref. <blah foo="bing" blam="wallop">.
<!-- comment blah blah
still a comment , blah and a space at the end
-->
<!rheum>
<?rhaponicum>
<randomtag spam="eggs"/>
</body>
</html>
""" #"
f = StringIO(data)
return data, f
def test_encoding(self):
#from mechanize import _pullparser
#for pc, tolerant in [(_pullparser.PullParser, False)]:#PullParserTests.PARSERS:
for pc, tolerant in PullParserTests.PARSERS:
self._test_encoding(pc, tolerant)
def _test_encoding(self, parser_class, tolerant):
from StringIO import StringIO
datas = ["<a>&#1092;</a>", "<a>&#x444;</a>"]
def get_text(data, encoding):
p = _get_parser(data, encoding)
p.get_tag("a")
return p.get_text()
def get_attr(data, encoding, et_name, attr_name):
p = _get_parser(data, encoding)
while True:
tag = p.get_tag(et_name)
attrs = tag.attrs
if attrs is not None:
break
return dict(attrs)[attr_name]
def _get_parser(data, encoding):
f = StringIO(data)
p = parser_class(f, encoding=encoding)
#print 'p._entitydefs>>%s<<' % p._entitydefs['&mdash;']
return p
for data in datas:
self.assertEqual(get_text(data, "KOI8-R"), "\xc6")
self.assertEqual(get_text(data, "UTF-8"), "\xd1\x84")
self.assertEqual(get_text("<a>&mdash;</a>", "UTF-8"),
u"\u2014".encode('utf8'))
self.assertEqual(
get_attr('<a name="&mdash;">blah</a>', "UTF-8", "a", "name"),
u"\u2014".encode('utf8'))
self.assertEqual(get_text("<a>&mdash;</a>", "ascii"), "&mdash;")
# response = urllib.addinfourl(f, {"content-type": "text/html; charset=XXX"}, req.get_full_url())
def test_get_token(self):
for pc, tolerant in PullParserTests.PARSERS:
self._test_get_token(pc, tolerant)
def _test_get_token(self, parser_class, tolerant):
data, f = self.data_and_file()
p = parser_class(f)
from mechanize._pullparser import NoMoreTokensError
self.assertEqual(
p.get_token(), ("decl",
'''DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd"''', None))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("starttag", "html", []))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("starttag", "head", []))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("starttag", "title", [("an", "attr")]))
self.assertEqual(p.get_token(), ("data", "Title", None))
self.assertEqual(p.get_token(), ("endtag", "title", None))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("endtag", "head", None))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("starttag", "body", []))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("starttag", "p", []))
self.assertEqual(p.get_token(), ("data", "This is a data ", None))
self.assertEqual(p.get_token(), ("starttag", "img", [("alt", "blah & a")]))
self.assertEqual(p.get_token(), ("data", " ", None))
self.assertEqual(p.get_token(), ("entityref", "amp", None))
self.assertEqual(p.get_token(), ("data",
" that was an entityref and this ",
None))
self.assertEqual(p.get_token(), ("charref", "097", None))
self.assertEqual(p.get_token(), ("data", " is\na charref. ", None))
self.assertEqual(p.get_token(), ("starttag", "blah",
[("foo", "bing"), ("blam", "wallop")]))
self.assertEqual(p.get_token(), ("data", ".\n", None))
self.assertEqual(p.get_token(), (
"comment", " comment blah blah\n"
"still a comment , blah and a space at the end \n", None))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("decl", "rheum", None))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("pi", "rhaponicum", None))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), (
(tolerant and "starttag" or "startendtag"), "randomtag",
[("spam", "eggs")]))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("endtag", "body", None))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertEqual(p.get_token(), ("endtag", "html", None))
self.assertEqual(p.get_token(), ("data", "\n", None))
self.assertRaises(NoMoreTokensError, p.get_token)
# print "token", p.get_token()
# sys.exit()
def test_unget_token(self):
for pc, tolerant in PullParserTests.PARSERS:
self._test_unget_token(pc, tolerant)
def _test_unget_token(self, parser_class, tolerant):
data, f = self.data_and_file()
p = parser_class(f)
p.get_token()
tok = p.get_token()
self.assertEqual(tok, ("data", "\n", None))
p.unget_token(tok)
self.assertEqual(p.get_token(), ("data", "\n", None))
tok = p.get_token()
self.assertEqual(tok, ("starttag", "html", []))
p.unget_token(tok)
self.assertEqual(tok, ("starttag", "html", []))
def test_get_tag(self):
for pc, tolerant in PullParserTests.PARSERS:
self._test_get_tag(pc, tolerant)
def _test_get_tag(self, parser_class, tolerant):
from mechanize._pullparser import NoMoreTokensError
data, f = self.data_and_file()
p = parser_class(f)
self.assertEqual(p.get_tag(), ("starttag", "html", []))
self.assertEqual(p.get_tag("blah", "body", "title"),
("starttag", "title", [("an", "attr")]))
self.assertEqual(p.get_tag(), ("endtag", "title", None))
self.assertEqual(p.get_tag("randomtag"),
((tolerant and "starttag" or "startendtag"), "randomtag",
[("spam", "eggs")]))
self.assertEqual(p.get_tag(), ("endtag", "body", None))
self.assertEqual(p.get_tag(), ("endtag", "html", None))
self.assertRaises(NoMoreTokensError, p.get_tag)
# print "tag", p.get_tag()
# sys.exit()
def test_get_text(self):
for pc, tolerant in PullParserTests.PARSERS:
self._test_get_text(pc, tolerant)
def _test_get_text(self, parser_class, tolerant):
from mechanize._pullparser import NoMoreTokensError
data, f = self.data_and_file()
p = parser_class(f)
self.assertEqual(p.get_text(), "\n")
self.assertEqual(peek_token(p).data, "html")
self.assertEqual(p.get_text(), "")
self.assertEqual(peek_token(p).data, "html"); p.get_token()
self.assertEqual(p.get_text(), "\n"); p.get_token()
self.assertEqual(p.get_text(), "\n"); p.get_token()
self.assertEqual(p.get_text(), "Title"); p.get_token()
self.assertEqual(p.get_text(), "\n"); p.get_token()
self.assertEqual(p.get_text(), "\n"); p.get_token()
self.assertEqual(p.get_text(), "\n"); p.get_token()
self.assertEqual(p.get_text(),
"This is a data blah & a[IMG]"); p.get_token()
self.assertEqual(p.get_text(), " & that was an entityref "
"and this a is\na charref. "); p.get_token()
self.assertEqual(p.get_text(), ".\n\n\n\n"); p.get_token()
self.assertEqual(p.get_text(), "\n"); p.get_token()
self.assertEqual(p.get_text(), "\n"); p.get_token()
self.assertEqual(p.get_text(), "\n"); p.get_token()
# no more tokens, so we just get empty string
self.assertEqual(p.get_text(), "")
self.assertEqual(p.get_text(), "")
self.assertRaises(NoMoreTokensError, p.get_token)
#print "text", `p.get_text()`
#sys.exit()
def test_get_text_2(self):
for pc, tolerant in PullParserTests.PARSERS:
self._test_get_text_2(pc, tolerant)
def _test_get_text_2(self, parser_class, tolerant):
# more complicated stuff
# endat
data, f = self.data_and_file()
p = parser_class(f)
self.assertEqual(p.get_text(endat=("endtag", "html")),
u"\n\n\nTitle\n\n\nThis is a data blah & a[IMG]"
" & that was an entityref and this a is\na charref. ."
"\n\n\n\n\n\n")
f.close()
data, f = self.data_and_file()
p = parser_class(f)
self.assertEqual(p.get_text(endat=("endtag", "title")),
"\n\n\nTitle")
self.assertEqual(p.get_text(endat=("starttag", "img")),
"\n\n\nThis is a data blah & a[IMG]")
f.close()
# textify arg
data, f = self.data_and_file()
p = parser_class(f, textify={"title": "an", "img": lambda x: "YYY"})
self.assertEqual(p.get_text(endat=("endtag", "title")),
"\n\n\nattr[TITLE]Title")
self.assertEqual(p.get_text(endat=("starttag", "img")),
"\n\n\nThis is a data YYY")
f.close()
# get_compressed_text
data, f = self.data_and_file()
p = parser_class(f)
self.assertEqual(p.get_compressed_text(endat=("endtag", "html")),
u"Title This is a data blah & a[IMG]"
" & that was an entityref and this a is a charref. .")
f.close()
def test_tags(self):
for pc, tolerant in PullParserTests.PARSERS:
self._test_tags(pc, tolerant)
def _test_tags(self, parser_class, tolerant):
# no args
data, f = self.data_and_file()
p = parser_class(f)
expected_tag_names = [
"html", "head", "title", "title", "head", "body", "p", "img",
"blah", "randomtag", "body", "html"
]
for i, token in enumerate(p.tags()):
self.assertEquals(token.data, expected_tag_names[i])
f.close()
# tag name args
data, f = self.data_and_file()
p = parser_class(f)
expected_tokens = [
("starttag", "head", []),
("endtag", "head", None),
("starttag", "p", []),
]
for i, token in enumerate(p.tags("head", "p")):
self.assertEquals(token, expected_tokens[i])
f.close()
def test_tokens(self):
for pc, tolerant in PullParserTests.PARSERS:
self._test_tokens(pc, tolerant)
def _test_tokens(self, parser_class, tolerant):
# no args
data, f = self.data_and_file()
p = parser_class(f)
expected_token_types = [
"decl", "data", "starttag", "data", "starttag", "data", "starttag",
"data", "endtag", "data", "endtag", "data", "starttag", "data",
"starttag", "data", "starttag", "data", "entityref", "data",
"charref", "data", "starttag", "data", "comment", "data", "decl",
"data", "pi", "data", (tolerant and "starttag" or "startendtag"),
"data", "endtag", "data", "endtag", "data"
]
for i, token in enumerate(p.tokens()):
self.assertEquals(token.type, expected_token_types[i])
f.close()
# token type args
data, f = self.data_and_file()
p = parser_class(f)
expected_tokens = [
("entityref", "amp", None),
("charref", "097", None),
]
for i, token in enumerate(p.tokens("charref", "entityref")):
self.assertEquals(token, expected_tokens[i])
f.close()
def test_token_eq(self):
from mechanize._pullparser import Token
for (a, b) in [
(Token('endtag', 'html', None),
('endtag', 'html', None)),
(Token('endtag', 'html', {'woof': 'bark'}),
('endtag', 'html', {'woof': 'bark'})),
]:
self.assertEquals(a, a)
self.assertEquals(a, b)
self.assertEquals(b, a)
if __name__ == "__main__":
import unittest
unittest.main()
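# Illustrative usage sketch (not part of the original test module): the tests
# above exercise the pull-parser API roughly as follows --
#
#     from StringIO import StringIO
#     from mechanize._pullparser import PullParser
#     p = PullParser(StringIO("<html><head><title>Hi</title></head></html>"))
#     p.get_tag("title")   # advance to the <title> start tag
#     print p.get_text()   # text up to the next tag, i.e. "Hi"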
| agpl-3.0 |
philsch/ansible | lib/ansible/modules/storage/netapp/sf_volume_access_group_manager.py | 69 | 9488 |
#!/usr/bin/python
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_volume_access_group_manager
short_description: Manage SolidFire Volume Access Groups
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update volume access groups on SolidFire
options:
state:
description:
- Whether the specified volume access group should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Name of the volume access group. It is not required to be unique, but recommended.
required: true
initiators:
description:
- List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
required: false
default: None
volumes:
description:
- List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
required: false
default: None
virtual_network_id:
description:
- The ID of the SolidFire Virtual Network ID to associate the volume access group with.
required: false
default: None
virtual_network_tags:
description:
- The ID of the VLAN Virtual Network Tag to associate the volume access group with.
required: false
default: None
attributes:
description: List of Name/Value pairs in JSON object format.
required: false
default: None
volume_access_group_id:
description:
- The ID of the volume access group to modify or delete.
required: false
default: None
'''
EXAMPLES = """
- name: Create Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVolumeAccessGroup
volumes: [7,8]
- name: Modify Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
volume_access_group_id: 1
name: AnsibleVolumeAccessGroup-Renamed
attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
- name: Delete Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
volume_access_group_id: 1
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireVolumeAccessGroup(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
volume_access_group_id=dict(required=False, type='int', default=None),
initiators=dict(required=False, type='list', default=None),
volumes=dict(required=False, type='list', default=None),
virtual_network_id=dict(required=False, type='list', default=None),
virtual_network_tags=dict(required=False, type='list', default=None),
attributes=dict(required=False, type='dict', default=None),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.volume_access_group_id = p['volume_access_group_id']
self.initiators = p['initiators']
self.volumes = p['volumes']
self.virtual_network_id = p['virtual_network_id']
self.virtual_network_tags = p['virtual_network_tags']
self.attributes = p['attributes']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_volume_access_group(self):
access_groups_list = self.sfe.list_volume_access_groups()
for group in access_groups_list.volume_access_groups:
if group.name == self.name:
# Update self.volume_access_group_id:
if self.volume_access_group_id is not None:
if group.volume_access_group_id == self.volume_access_group_id:
return group
else:
self.volume_access_group_id = group.volume_access_group_id
return group
return None
def create_volume_access_group(self):
try:
self.sfe.create_volume_access_group(name=self.name,
initiators=self.initiators,
volumes=self.volumes,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
attributes=self.attributes)
except:
err = get_exception()
self.module.fail_json(msg="Error creating volume access group %s" % self.name,
exception=str(err))
def delete_volume_access_group(self):
try:
self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
except:
err = get_exception()
self.module.fail_json(msg="Error deleting volume access group %s" % self.volume_access_group_id,
exception=str(err))
def update_volume_access_group(self):
try:
self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
name=self.name,
initiators=self.initiators,
volumes=self.volumes,
attributes=self.attributes)
except:
err = get_exception()
self.module.fail_json(msg="Error updating volume access group %s" % self.volume_access_group_id,
exception=str(err))
def apply(self):
changed = False
group_exists = False
update_group = False
group_detail = self.get_volume_access_group()
if group_detail:
group_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the group
if self.volumes is not None and group_detail.volumes != self.volumes:
update_group = True
changed = True
elif self.initiators is not None and group_detail.initiators != self.initiators:
update_group = True
changed = True
elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
self.attributes is not None:
update_group = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not group_exists:
self.create_volume_access_group()
elif update_group:
self.update_volume_access_group()
elif self.state == 'absent':
self.delete_volume_access_group()
self.module.exit_json(changed=changed)
def main():
v = SolidFireVolumeAccessGroup()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/Scripts/webkitpy/tool/grammar.py | 217 | 2313 |
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
def plural(noun):
# This is a dumb plural() implementation that is just enough for our uses.
if re.search("h$", noun):
return noun + "es"
else:
return noun + "s"
def pluralize(noun, count):
if count != 1:
noun = plural(noun)
return "%d %s" % (count, noun)
def join_with_separators(list_of_strings, separator=', ', only_two_separator=" and ", last_separator=', and '):
if not list_of_strings:
return ""
if len(list_of_strings) == 1:
return list_of_strings[0]
if len(list_of_strings) == 2:
return only_two_separator.join(list_of_strings)
return "%s%s%s" % (separator.join(list_of_strings[:-1]), last_separator, list_of_strings[-1])
| gpl-3.0 |
alkadis/vcv | src/adhocracy/migration/versions/009_Make_final_polling_optional.py | 4 | 1117 |
from datetime import datetime
from sqlalchemy import *
from migrate import *
import migrate.changeset
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
instance_table = Table('instance', meta,
Column('id', Integer, primary_key=True),
Column('key', Unicode(20), nullable=False, unique=True),
Column('label', Unicode(255), nullable=False),
Column('description', UnicodeText(), nullable=True),
Column('required_majority', Float, nullable=False),
Column('activation_delay', Integer, nullable=False),
Column('create_time', DateTime, default=func.now()),
Column('access_time', DateTime, default=func.now(), onupdate=func.now()),
Column('delete_time', DateTime, nullable=True),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
Column('default_group_id', Integer, ForeignKey('group.id'), nullable=True)
)
allow_adopt = Column('allow_adopt', Boolean, default=True)
allow_adopt.create(instance_table)
def downgrade(migrate_engine):
raise NotImplementedError()
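# Note (illustrative sketch, not part of the original migration): if a
# downgrade were needed, sqlalchemy-migrate could drop the added column,
# assuming the same 'instance' table as in upgrade():
#
#     def downgrade(migrate_engine):
#         meta.bind = migrate_engine
#         instance_table = Table('instance', meta, autoload=True)
#         instance_table.c.allow_adopt.drop()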
| agpl-3.0 |
SlateScience/MozillaJS | js/src/testing/mozbase/mozdevice/mozdevice/dmcli.py | 3 | 16149 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Command-line client to control a device
"""
import errno
import os
import posixpath
import StringIO
import sys
import mozdevice
import mozlog
import argparse
class DMCli(object):
def __init__(self):
self.commands = { 'install': { 'function': self.install,
'args': [ { 'name': 'file' } ],
'help': 'push this package file to the device and install it' },
'uninstall': { 'function': self.uninstall,
'args': [ { 'name': 'packagename' } ],
'help': 'uninstall the named app from the device' },
'killapp': { 'function': self.kill,
'args': [ { 'name': 'process_name', 'nargs': '*' } ],
'help': 'kills any processes with name(s) on device' },
'launchapp': { 'function': self.launchapp,
'args': [ { 'name': 'appname' },
{ 'name': 'activity_name' },
{ 'name': '--intent',
'action': 'store',
'default': 'android.intent.action.VIEW' },
{ 'name': '--url',
'action': 'store' },
{ 'name': '--no-fail-if-running',
'action': 'store_true',
'help': 'Don\'t fail if application is already running' }
],
'help': 'launches application on device' },
'push': { 'function': self.push,
'args': [ { 'name': 'local_file' },
{ 'name': 'remote_file' }
],
'help': 'copy file/dir to device' },
'pull': { 'function': self.pull,
'args': [ { 'name': 'local_file' },
{ 'name': 'remote_file', 'nargs': '?' } ],
'help': 'copy file/dir from device' },
'shell': { 'function': self.shell,
'args': [ { 'name': 'command', 'nargs': argparse.REMAINDER },
{ 'name': '--root', 'action': 'store_true',
'help': 'Run command as root' }],
'help': 'run shell command on device' },
'info': { 'function': self.getinfo,
'args': [ { 'name': 'directive', 'nargs': '?' } ],
'help': 'get information on specified '
'aspect of the device (if no argument '
'given, print all available information)'
},
'ps': { 'function': self.processlist,
'help': 'get information on running processes on device'
},
'logcat' : { 'function': self.logcat,
'help': 'get logcat from device'
},
'ls': { 'function': self.listfiles,
'args': [ { 'name': 'remote_dir' } ],
'help': 'list files on device'
},
'rm': { 'function': self.removefile,
'args': [ { 'name': 'remote_file' } ],
'help': 'remove file from device'
},
'isdir': { 'function': self.isdir,
'args': [ { 'name': 'remote_dir' } ],
'help': 'print if remote file is a directory'
},
'mkdir': { 'function': self.mkdir,
'args': [ { 'name': 'remote_dir' } ],
'help': 'makes a directory on device'
},
'rmdir': { 'function': self.rmdir,
'args': [ { 'name': 'remote_dir' } ],
'help': 'recursively remove directory from device'
},
'screencap': { 'function': self.screencap,
'args': [ { 'name': 'png_file' } ],
'help': 'capture screenshot of device in action'
},
'sutver': { 'function': self.sutver,
'help': 'SUTAgent\'s product name and version (SUT only)'
},
'clearlogcat': { 'function': self.clearlogcat,
'help': 'clear the logcat'
},
'reboot': { 'function': self.reboot,
'help': 'reboot the device'
},
'isfile': { 'function': self.isfile,
'args': [ { 'name': 'remote_file' } ],
'help': 'check whether a file exists on the device'
},
'launchfennec': { 'function': self.launchfennec,
'args': [ { 'name': 'appname' },
{ 'name': '--intent', 'action': 'store',
'default': 'android.intent.action.VIEW' },
{ 'name': '--url', 'action': 'store' },
{ 'name': '--extra-args', 'action': 'store' },
{ 'name': '--mozenv', 'action': 'store',
'help': 'Gecko environment variables to set in "KEY1=VAL1 KEY2=VAL2" format' },
{ 'name': '--no-fail-if-running',
'action': 'store_true',
'help': 'Don\'t fail if application is already running' }
],
'help': 'launch fennec'
},
'getip': { 'function': self.getip,
'args': [ { 'name': 'interface', 'nargs': '*' } ],
'help': 'get the ip address of the device'
}
}
self.parser = argparse.ArgumentParser()
self.add_options(self.parser)
self.add_commands(self.parser)
def run(self, args=sys.argv[1:]):
args = self.parser.parse_args()
if args.dmtype == "sut" and not args.host and not args.hwid:
self.parser.error("Must specify device ip in TEST_DEVICE or "
"with --host option with SUT")
self.dm = self.getDevice(dmtype=args.dmtype, hwid=args.hwid,
host=args.host, port=args.port,
verbose=args.verbose)
ret = args.func(args)
if ret is None:
ret = 0
sys.exit(ret)
def add_options(self, parser):
parser.add_argument("-v", "--verbose", action="store_true",
help="Verbose output from DeviceManager",
default=False)
parser.add_argument("--host", action="store",
help="Device hostname (only if using TCP/IP)",
default=os.environ.get('TEST_DEVICE'))
parser.add_argument("-p", "--port", action="store",
type=int,
help="Custom device port (if using SUTAgent or "
"adb-over-tcp)", default=None)
parser.add_argument("-m", "--dmtype", action="store",
help="DeviceManager type (adb or sut, defaults " \
"to adb)", default=os.environ.get('DM_TRANS',
'adb'))
parser.add_argument("-d", "--hwid", action="store",
help="HWID", default=None)
parser.add_argument("--package-name", action="store",
help="Packagename (if using DeviceManagerADB)",
default=None)
def add_commands(self, parser):
subparsers = parser.add_subparsers(title="Commands", metavar="<command>")
for (commandname, commandprops) in sorted(self.commands.iteritems()):
subparser = subparsers.add_parser(commandname, help=commandprops['help'])
if commandprops.get('args'):
for arg in commandprops['args']:
# this is more elegant but doesn't work in python 2.6
# (which we still use on tbpl @ mozilla where we install
# this package)
# kwargs = { k: v for k,v in arg.items() if k is not 'name' }
kwargs = {}
for (k, v) in arg.items():
if k != 'name':
kwargs[k] = v
subparser.add_argument(arg['name'], **kwargs)
subparser.set_defaults(func=commandprops['function'])
def getDevice(self, dmtype="adb", hwid=None, host=None, port=None,
packagename=None, verbose=False):
'''
Returns a device with the specified parameters
'''
logLevel = mozlog.ERROR
if verbose:
logLevel = mozlog.DEBUG
if hwid:
return mozdevice.DroidConnectByHWID(hwid, logLevel=logLevel)
if dmtype == "adb":
if host and not port:
port = 5555
return mozdevice.DroidADB(packageName=packagename,
host=host, port=port,
logLevel=logLevel)
elif dmtype == "sut":
if not host:
self.parser.error("Must specify host with SUT!")
if not port:
port = 20701
return mozdevice.DroidSUT(host=host, port=port,
logLevel=logLevel)
else:
self.parser.error("Unknown device manager type: %s" % type)
def push(self, args):
(src, dest) = (args.local_file, args.remote_file)
if os.path.isdir(src):
self.dm.pushDir(src, dest)
else:
dest_is_dir = dest[-1] == '/' or self.dm.dirExists(dest)
dest = posixpath.normpath(dest)
if dest_is_dir:
dest = posixpath.join(dest, os.path.basename(src))
self.dm.pushFile(src, dest)
def pull(self, args):
(src, dest) = (args.local_file, args.remote_file)
if not self.dm.fileExists(src):
print 'No such file or directory'
return
if not dest:
dest = posixpath.basename(src)
if self.dm.dirExists(src):
self.dm.getDirectory(src, dest)
else:
self.dm.getFile(src, dest)
def install(self, args):
basename = os.path.basename(args.file)
app_path_on_device = posixpath.join(self.dm.getDeviceRoot(),
basename)
self.dm.pushFile(args.file, app_path_on_device)
self.dm.installApp(app_path_on_device)
def uninstall(self, args):
self.dm.uninstallApp(args.packagename)
def launchapp(self, args):
self.dm.launchApplication(args.appname, args.activity_name,
args.intent, url=args.url,
failIfRunning=(not args.no_fail_if_running))
def kill(self, args):
for name in args.process_name:
self.dm.killProcess(name)
def shell(self, args):
buf = StringIO.StringIO()
self.dm.shell(args.command, buf, root=args.root)
print str(buf.getvalue()[0:-1]).rstrip()
def getinfo(self, args):
info = self.dm.getInfo(directive=args.directive)
for (infokey, infoitem) in sorted(info.iteritems()):
if infokey == "process":
pass # skip process list: get that through ps
elif not args.directive and not infoitem:
print "%s:" % infokey.upper()
elif not args.directive:
for line in infoitem:
print "%s: %s" % (infokey.upper(), line)
else:
print "%s" % "\n".join(infoitem)
def logcat(self, args):
print ''.join(self.dm.getLogcat())
def clearlogcat(self, args):
self.dm.recordLogcat()
def reboot(self, args):
self.dm.reboot()
def processlist(self, args):
pslist = self.dm.getProcessList()
for ps in pslist:
print " ".join(str(i) for i in ps)
def listfiles(self, args):
filelist = self.dm.listFiles(args.remote_dir)
for file in filelist:
print file
def removefile(self, args):
self.dm.removeFile(args.remote_file)
def isdir(self, args):
if self.dm.dirExists(args.remote_dir):
print "TRUE"
return
print "FALSE"
return errno.ENOTDIR
def mkdir(self, args):
self.dm.mkDir(args.remote_dir)
def rmdir(self, args):
self.dm.removeDir(args.remote_dir)
def screencap(self, args):
self.dm.saveScreenshot(args.png_file)
def sutver(self, args):
if args.dmtype == 'sut':
print '%s Version %s' % (self.dm.agentProductName,
self.dm.agentVersion)
else:
print 'Must use SUT transport to get SUT version.'
def isfile(self, args):
if self.dm.fileExists(args.remote_file):
print "TRUE"
return
print "FALSE"
return errno.ENOENT
def launchfennec(self, args):
mozEnv = None
if args.mozenv:
mozEnv = {}
keyvals = args.mozenv.split()
for keyval in keyvals:
(key, _, val) = keyval.partition("=")
mozEnv[key] = val
self.dm.launchFennec(args.appname, intent=args.intent,
mozEnv=mozEnv,
extraArgs=args.extra_args, url=args.url,
failIfRunning=(not args.no_fail_if_running))
def getip(self, args):
if args.interface:
print(self.dm.getIP(args.interface))
else:
print(self.dm.getIP())
def cli(args=sys.argv[1:]):
# process the command line
cli = DMCli()
cli.run(args)
if __name__ == '__main__':
cli()
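# Illustrative command lines (not part of the original module), matching the
# argument parser defined above:
#
#     python dmcli.py --dmtype adb push local_file.txt /mnt/sdcard/tests/
#     python dmcli.py --dmtype adb shell ls /data/local
#     python dmcli.py --dmtype sut --host 192.168.1.5 info os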
| mpl-2.0 |
tkarna/cofs | thetis/forcing.py | 1 | 35873 |
"""
Routines for interpolating forcing fields for the 3D solver.
"""
from firedrake import *
import numpy as np
import scipy.spatial.qhull as qhull
import thetis.timezone as timezone
import thetis.interpolation as interpolation
import thetis.coordsys as coordsys
from .log import *
import netCDF4
import thetis.physical_constants as physical_constants
import uptide
import uptide.tidal_netcdf
from abc import ABCMeta, abstractmethod, abstractproperty
import os
def compute_wind_stress(wind_u, wind_v, method='LargePond1981'):
r"""
Compute wind stress from atmospheric 10 m wind.
Wind stress is defined as
.. math::
\tau_w = C_D \rho_{air} \|U_{10}\| U_{10}
where :math:`C_D` is the drag coefficient, :math:`\rho_{air}` is the density of
air, and :math:`U_{10}` is wind speed 10 m above the sea surface.
In practice `C_D` depends on the wind speed.
Two formulations are currently implemented:
- "LargePond1981":
Wind stress formulation by [1]
- "SmithBanke1975":
Wind stress formulation by [2]
[1] Large and Pond (1981). Open Ocean Momentum Flux Measurements in
Moderate to Strong Winds. Journal of Physical Oceanography,
11(3):324-336.
https://doi.org/10.1175/1520-0485(1981)011%3C0324:OOMFMI%3E2.0.CO;2
[2] Smith and Banke (1975). Variation of the sea surface drag coefficient with
wind speed. Q J R Meteorol Soc., 101(429):665-673.
https://doi.org/10.1002/qj.49710142920
:arg wind_u, wind_v: Wind u and v components as numpy arrays
:kwarg method: Choose the stress formulation. Currently supports:
'LargePond1981' (default) or 'SmithBanke1975'.
:returns: (tau_x, tau_y) wind stress x and y components as numpy arrays
"""
rho_air = float(physical_constants['rho_air'])
wind_mag = np.hypot(wind_u, wind_v)
if method == 'LargePond1981':
CD_LOW = 1.2e-3
C_D = np.ones_like(wind_u)*CD_LOW
high_wind = wind_mag > 11.0
C_D[high_wind] = 1.0e-3*(0.49 + 0.065*wind_mag[high_wind])
elif method == 'SmithBanke1975':
C_D = (0.63 + 0.066 * wind_mag)/1000.
tau = C_D*rho_air*wind_mag
tau_x = tau*wind_u
tau_y = tau*wind_v
return tau_x, tau_y
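# Illustrative check of the 'LargePond1981' branch above (not part of the
# original module): a 10 m/s wind along x stays below the 11 m/s threshold,
# so C_D = 1.2e-3 and, for rho_air of roughly 1.2 kg/m3,
#
#     tau_x, tau_y = compute_wind_stress(np.array([10.0]), np.array([0.0]))
#     # tau_x ~ 1.2e-3 * 1.2 * 10.0 * 10.0 ~ 0.14 N/m2, tau_y = 0.0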
class ATMNetCDFTime(interpolation.NetCDFTimeParser):
"""
A TimeParser class for reading WRF/NAM atmospheric forecast files.
"""
def __init__(self, filename, max_duration=24.*3600., verbose=False):
"""
:arg filename:
:kwarg max_duration: Time span to read from each file (in seconds,
default one day). Forecast files are usually daily files that
contain forecasts for more than one day.
:kwarg bool verbose: Set True to print debug information.
"""
super(ATMNetCDFTime, self).__init__(filename, time_variable_name='time')
# NOTE these are daily forecast files, limit time steps to one day
self.start_time = timezone.epoch_to_datetime(float(self.time_array[0]))
self.end_time_raw = timezone.epoch_to_datetime(float(self.time_array[-1]))
self.time_step = np.mean(np.diff(self.time_array))
self.max_steps = int(max_duration / self.time_step)
self.time_array = self.time_array[:self.max_steps]
self.end_time = timezone.epoch_to_datetime(float(self.time_array[-1]))
if verbose:
print_output('Parsed file {:}'.format(filename))
print_output(' Raw time span: {:} -> {:}'.format(self.start_time, self.end_time_raw))
print_output(' Time step: {:} h'.format(self.time_step/3600.))
print_output(' Restricting duration to {:} h -> keeping {:} steps'.format(max_duration/3600., self.max_steps))
print_output(' New time span: {:} -> {:}'.format(self.start_time, self.end_time))
class ATMInterpolator(object):
"""
Interpolates WRF/NAM atmospheric model data on 2D fields.
"""
def __init__(self, function_space, wind_stress_field,
atm_pressure_field, to_latlon,
ncfile_pattern, init_date, target_coordsys, verbose=False):
"""
:arg function_space: Target (scalar) :class:`FunctionSpace` object onto
which data will be interpolated.
:arg wind_stress_field: A 2D vector :class:`Function` where the output
wind stress will be stored.
:arg atm_pressure_field: A 2D scalar :class:`Function` where the output
atmospheric pressure will be stored.
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
:arg ncfile_pattern: A file name pattern for reading the atmospheric
model output files. E.g. 'forcings/nam_air.local.2006_*.nc'
:arg init_date: A :class:`datetime` object that indicates the start
date/time of the Thetis simulation. Must contain time zone. E.g.
'datetime(2006, 5, 1, tzinfo=pytz.utc)'
:arg target_coordsys: coordinate system in which the model grid is
defined. This is used to rotate vectors to local coordinates.
:kwarg bool verbose: Set True to print debug information.
"""
self.function_space = function_space
self.wind_stress_field = wind_stress_field
self.atm_pressure_field = atm_pressure_field
# construct interpolators
self.grid_interpolator = interpolation.NetCDFLatLonInterpolator2d(self.function_space, to_latlon)
self.reader = interpolation.NetCDFSpatialInterpolator(self.grid_interpolator, ['uwind', 'vwind', 'prmsl'])
self.timesearch_obj = interpolation.NetCDFTimeSearch(ncfile_pattern, init_date, ATMNetCDFTime, verbose=verbose)
self.time_interpolator = interpolation.LinearTimeInterpolator(self.timesearch_obj, self.reader)
lon = self.grid_interpolator.mesh_lonlat[:, 0]
lat = self.grid_interpolator.mesh_lonlat[:, 1]
self.vect_rotator = coordsys.VectorCoordSysRotation(
coordsys.LL_WGS84, target_coordsys, lon, lat)
def set_fields(self, time):
"""
Evaluates forcing fields at the given time.
Performs interpolation and updates the output wind stress and
atmospheric pressure fields in place.
:arg float time: Thetis simulation time in seconds.
"""
lon_wind, lat_wind, prmsl = self.time_interpolator(time)
u_wind, v_wind = self.vect_rotator(lon_wind, lat_wind)
u_stress, v_stress = compute_wind_stress(u_wind, v_wind)
self.wind_stress_field.dat.data_with_halos[:, 0] = u_stress
self.wind_stress_field.dat.data_with_halos[:, 1] = v_stress
self.atm_pressure_field.dat.data_with_halos[:] = prmsl
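# Illustrative usage sketch (not part of the original module); the function
# space, output fields, to_latlon function and init_date are user-supplied
# (hypothetical names), and the file pattern is the one from the docstring:
#
#     atm = ATMInterpolator(function_space_2d, wind_stress_2d, atm_pressure_2d,
#                           to_latlon, 'forcings/nam_air.local.2006_*.nc',
#                           init_date, target_coordsys)
#     atm.set_fields(0.0)   # evaluate forcings at simulation time t = 0 s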
class SpatialInterpolatorNCOMBase(interpolation.SpatialInterpolator):
"""
Base class for 2D and 3D NCOM spatial interpolators.
"""
def __init__(self, function_space, to_latlon, grid_path):
"""
:arg function_space: Target (scalar) :class:`FunctionSpace` object onto
which data will be interpolated.
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
:arg grid_path: File path where the NCOM model grid files
('model_lat.nc', 'model_lon.nc', 'model_zm.nc') are located.
"""
self.function_space = function_space
self.grid_path = grid_path
self._initialized = False
def _create_2d_mapping(self, ncfile):
"""
Create map for 2D nodes.
"""
# read source lat lon grid
lat_full = self._get_forcing_grid('model_lat.nc', 'Lat')
lon_full = self._get_forcing_grid('model_lon.nc', 'Long')
x_ind = ncfile['X_Index'][:].astype(int)
y_ind = ncfile['Y_Index'][:].astype(int)
lon = lon_full[y_ind, :][:, x_ind]
lat = lat_full[y_ind, :][:, x_ind]
# find where data values are not defined
varkey = None
for k in ncfile.variables.keys():
if k not in ['X_Index', 'Y_Index', 'level']:
varkey = k
break
assert varkey is not None, 'Could not find variable in file'
vals = ncfile[varkey][:] # shape (nz, lat, lon) or (lat, lon)
is3d = len(vals.shape) == 3
land_mask = np.all(vals.mask, axis=0) if is3d else vals.mask
# build 2d mask
mask_good_values = ~land_mask
# neighborhood mask with bounding box
mask_cover = np.zeros_like(mask_good_values)
buffer = 0.2
lat_min = self.latlonz_array[:, 0].min() - buffer
lat_max = self.latlonz_array[:, 0].max() + buffer
lon_min = self.latlonz_array[:, 1].min() - buffer
lon_max = self.latlonz_array[:, 1].max() + buffer
mask_cover[(lat >= lat_min)
* (lat <= lat_max)
* (lon >= lon_min)
* (lon <= lon_max)] = True
mask_cover *= mask_good_values
# include nearest valid neighbors
# needed for nearest neighbor filling
from scipy.spatial import cKDTree
good_lat = lat[mask_good_values]
good_lon = lon[mask_good_values]
ll = np.vstack([good_lat.ravel(), good_lon.ravel()]).T
dist, ix = cKDTree(ll).query(self.latlonz_array[:, :2])
ix = np.unique(ix)
ix = np.nonzero(mask_good_values.ravel())[0][ix]
a, b = np.unravel_index(ix, lat.shape)
mask_nn = np.zeros_like(mask_good_values)
mask_nn[a, b] = True
# final mask
mask = mask_cover + mask_nn
self.nodes = np.nonzero(mask.ravel())[0]
self.ind_lat, self.ind_lon = np.unravel_index(self.nodes, lat.shape)
lat_subset = lat[self.ind_lat, self.ind_lon]
lon_subset = lon[self.ind_lat, self.ind_lon]
assert len(lat_subset) > 0, 'rank {:} has no source lat points'
assert len(lon_subset) > 0, 'rank {:} has no source lon points'
return lon_subset, lat_subset, x_ind, y_ind, vals
def _get_forcing_grid(self, filename, varname):
"""
Helper function to load NCOM grid files.
"""
v = None
with netCDF4.Dataset(os.path.join(self.grid_path, filename), 'r') as ncfile:
v = ncfile[varname][:]
return v
class SpatialInterpolatorNCOM3d(SpatialInterpolatorNCOMBase):
"""
Spatial interpolator class for interpolating NCOM ocean model 3D fields.
"""
def __init__(self, function_space, to_latlon, grid_path):
"""
:arg function_space: Target (scalar) :class:`FunctionSpace` object onto
which data will be interpolated.
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
:arg grid_path: File path where the NCOM model grid files
('model_lat.nc', 'model_lon.nc', 'model_zm.nc') are located.
"""
super().__init__(function_space, to_latlon, grid_path)
# construct local coordinates
xyz = SpatialCoordinate(self.function_space.mesh())
tmp_func = self.function_space.get_work_function()
xyz_array = np.zeros((tmp_func.dat.data_with_halos.shape[0], 3))
for i in range(3):
tmp_func.interpolate(xyz[i])
xyz_array[:, i] = tmp_func.dat.data_with_halos[:]
self.function_space.restore_work_function(tmp_func)
self.latlonz_array = np.zeros_like(xyz_array)
lat, lon = to_latlon(xyz_array[:, 0], xyz_array[:, 1], positive_lon=True)
self.latlonz_array[:, 0] = lat
self.latlonz_array[:, 1] = lon
self.latlonz_array[:, 2] = xyz_array[:, 2]
def _create_interpolator(self, ncfile):
"""
Create a compact interpolator by finding the minimal necessary support
"""
lon_subset, lat_subset, x_ind, y_ind, vals = self._create_2d_mapping(ncfile)
# find 3d mask where data is not defined
vals = vals[:, self.ind_lat, self.ind_lon]
self.good_mask_3d = ~vals.mask
# construct vertical grid
zm = self._get_forcing_grid('model_zm.nc', 'zm')
zm = zm[:, y_ind, :][:, :, x_ind]
grid_z = zm[:, self.ind_lat, self.ind_lon] # shape (nz, nlatlon)
grid_z = grid_z.filled(-5000.)
# nudge water surface higher for interpolation
grid_z[0, :] = 1.5
nz = grid_z.shape[0]
# data shape is [nz, neta*nxi]
grid_lat = np.tile(lat_subset, (nz, 1))[self.good_mask_3d]
grid_lon = np.tile(lon_subset, (nz, 1))[self.good_mask_3d]
grid_z = grid_z[self.good_mask_3d]
if np.ma.isMaskedArray(grid_lat):
grid_lat = grid_lat.filled(0.0)
if np.ma.isMaskedArray(grid_lon):
grid_lon = grid_lon.filled(0.0)
if np.ma.isMaskedArray(grid_z):
grid_z = grid_z.filled(0.0)
grid_latlonz = np.vstack((grid_lat, grid_lon, grid_z)).T
# building 3D interpolator, this can take a long time (minutes)
print_output('Constructing 3D GridInterpolator...')
self.interpolator = interpolation.GridInterpolator(
grid_latlonz, self.latlonz_array,
normalize=True, fill_mode='nearest', dont_raise=True
)
print_output('done.')
self._initialized = True
def interpolate(self, nc_filename, variable_list, itime):
"""
Calls the interpolator object
"""
with netCDF4.Dataset(nc_filename, 'r') as ncfile:
if not self._initialized:
self._create_interpolator(ncfile)
output = []
for var in variable_list:
assert var in ncfile.variables
grid_data = ncfile[var][:][:, self.ind_lat, self.ind_lon][self.good_mask_3d]
data = self.interpolator(grid_data)
output.append(data)
return output
class SpatialInterpolatorNCOM2d(SpatialInterpolatorNCOMBase):
"""
Spatial interpolator class for interpolating NCOM ocean model 2D fields.
"""
def __init__(self, function_space, to_latlon, grid_path):
"""
:arg function_space: Target (scalar) :class:`FunctionSpace` object onto
which data will be interpolated.
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
:arg grid_path: File path where the NCOM model grid files
('model_lat.nc', 'model_lon.nc', 'model_zm.nc') are located.
"""
super().__init__(function_space, to_latlon, grid_path)
# construct local coordinates
xyz = SpatialCoordinate(self.function_space.mesh())
tmp_func = self.function_space.get_work_function()
xy_array = np.zeros((tmp_func.dat.data_with_halos.shape[0], 2))
for i in range(2):
tmp_func.interpolate(xyz[i])
xy_array[:, i] = tmp_func.dat.data_with_halos[:]
self.function_space.restore_work_function(tmp_func)
self.latlonz_array = np.zeros_like(xy_array)
lat, lon = to_latlon(xy_array[:, 0], xy_array[:, 1], positive_lon=True)
self.latlonz_array[:, 0] = lat
self.latlonz_array[:, 1] = lon
def _create_interpolator(self, ncfile):
"""
Create a compact interpolator by finding the minimal necessary support
"""
lon_subset, lat_subset, x_ind, y_ind, vals = self._create_2d_mapping(ncfile)
grid_lat = lat_subset
grid_lon = lon_subset
if np.ma.isMaskedArray(grid_lat):
grid_lat = grid_lat.filled(0.0)
if np.ma.isMaskedArray(grid_lon):
grid_lon = grid_lon.filled(0.0)
grid_latlon = np.vstack((grid_lat, grid_lon)).T
# building 2D interpolator, this can take a long time (minutes)
self.interpolator = interpolation.GridInterpolator(
grid_latlon, self.latlonz_array,
normalize=False, fill_mode='nearest', dont_raise=True
)
self._initialized = True
def interpolate(self, nc_filename, variable_list, itime):
"""
Calls the interpolator object
"""
with netCDF4.Dataset(nc_filename, 'r') as ncfile:
if not self._initialized:
self._create_interpolator(ncfile)
output = []
for var in variable_list:
assert var in ncfile.variables
grid_data = ncfile[var][:][self.ind_lat, self.ind_lon]
data = self.interpolator(grid_data)
output.append(data)
return output
class NCOMInterpolator(object):
"""
Interpolates NCOM model data on 3D fields.
.. note::
The following NCOM output files must be present:
./forcings/ncom/model_h.nc
./forcings/ncom/model_lat.nc
./forcings/ncom/model_ang.nc
./forcings/ncom/model_lon.nc
./forcings/ncom/model_zm.nc
./forcings/ncom/2006/s3d/s3d.glb8_2f_2006041900.nc
./forcings/ncom/2006/s3d/s3d.glb8_2f_2006042000.nc
./forcings/ncom/2006/t3d/t3d.glb8_2f_2006041900.nc
./forcings/ncom/2006/t3d/t3d.glb8_2f_2006042000.nc
./forcings/ncom/2006/u3d/u3d.glb8_2f_2006041900.nc
./forcings/ncom/2006/u3d/u3d.glb8_2f_2006042000.nc
./forcings/ncom/2006/v3d/v3d.glb8_2f_2006041900.nc
./forcings/ncom/2006/v3d/v3d.glb8_2f_2006042000.nc
./forcings/ncom/2006/ssh/ssh.glb8_2f_2006041900.nc
./forcings/ncom/2006/ssh/ssh.glb8_2f_2006042000.nc
"""
def __init__(self, function_space_2d, function_space_3d, fields, field_names, field_fnstr,
to_latlon, basedir,
file_pattern, init_date, target_coordsys, verbose=False):
"""
:arg function_space_2d: Target (scalar) :class:`FunctionSpace` object onto
which 2D data will be interpolated.
:arg function_space_3d: Target (scalar) :class:`FunctionSpace` object onto
which 3D data will be interpolated.
:arg fields: list of :class:`Function` objects where data will be
stored.
:arg field_names: List of netCDF variable names for the fields. E.g.
['Salinity', 'Temperature'].
:arg field_fnstr: List of variables in netCDF file names. E.g.
['s3d', 't3d'].
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
:arg basedir: Root dir where NCOM files are stored.
E.g. '/forcings/ncom'.
:arg file_pattern: A file name pattern for reading the NCOM output
files (excluding the basedir). E.g.
{year:04d}/{fieldstr:}/{fieldstr:}.glb8_2f_{year:04d}{month:02d}{day:02d}00.nc'.
:arg init_date: A :class:`datetime` object that indicates the start
date/time of the Thetis simulation. Must contain time zone. E.g.
'datetime(2006, 5, 1, tzinfo=pytz.utc)'
:arg target_coordsys: coordinate system in which the model grid is
defined. This is used to rotate vectors to local coordinates.
:kwarg bool verbose: Set True to print debug information.
"""
self.function_space_2d = function_space_2d
self.function_space_3d = function_space_3d
for f in fields:
assert f.function_space() in [self.function_space_2d, self.function_space_3d], 'field \'{:}\' does not belong to given function space.'.format(f.name())
assert len(fields) == len(field_names)
assert len(fields) == len(field_fnstr)
self.field_names = field_names
self.fields = dict(zip(self.field_names, fields))
# construct interpolators
self.grid_interpolator_2d = SpatialInterpolatorNCOM2d(self.function_space_2d, to_latlon, basedir)
self.grid_interpolator_3d = SpatialInterpolatorNCOM3d(self.function_space_3d, to_latlon, basedir)
# each field is in different file
# construct time search and interp objects separately for each
self.time_interpolator = {}
for ncvarname, fnstr in zip(field_names, field_fnstr):
gi = self.grid_interpolator_2d if fnstr == 'ssh' else self.grid_interpolator_3d
r = interpolation.NetCDFSpatialInterpolator(gi, [ncvarname])
pat = file_pattern.replace('{fieldstr:}', fnstr)
pat = os.path.join(basedir, pat)
ts = interpolation.DailyFileTimeSearch(pat, init_date, verbose=verbose)
ti = interpolation.LinearTimeInterpolator(ts, r)
self.time_interpolator[ncvarname] = ti
# construct velocity rotation object
self.rotate_velocity = ('U_Velocity' in field_names
and 'V_Velocity' in field_names)
self.scalar_field_names = list(self.field_names)
if self.rotate_velocity:
self.scalar_field_names.remove('U_Velocity')
self.scalar_field_names.remove('V_Velocity')
lat = self.grid_interpolator_3d.latlonz_array[:, 0]
lon = self.grid_interpolator_3d.latlonz_array[:, 1]
self.vect_rotator = coordsys.VectorCoordSysRotation(
coordsys.LL_WGS84, target_coordsys, lon, lat)
def set_fields(self, time):
"""
Evaluates forcing fields at the given time
"""
if self.rotate_velocity:
# water_u (meter/sec) = Eastward Water Velocity
# water_v (meter/sec) = Northward Water Velocity
lon_vel = self.time_interpolator['U_Velocity'](time)[0]
lat_vel = self.time_interpolator['V_Velocity'](time)[0]
u, v = self.vect_rotator(lon_vel, lat_vel)
self.fields['U_Velocity'].dat.data_with_halos[:] = u
self.fields['V_Velocity'].dat.data_with_halos[:] = v
for fname in self.scalar_field_names:
vals = self.time_interpolator[fname](time)[0]
self.fields[fname].dat.data_with_halos[:] = vals
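# Illustrative usage sketch (not part of the original module); the argument
# values mirror the docstring above and the field/function-space names are
# hypothetical:
#
#     ncom = NCOMInterpolator(
#         fs_2d, fs_3d, [salt_3d, temp_3d], ['Salinity', 'Temperature'],
#         ['s3d', 't3d'], to_latlon, 'forcings/ncom',
#         '{year:04d}/{fieldstr:}/{fieldstr:}.glb8_2f_{year:04d}{month:02d}{day:02d}00.nc',
#         init_date, target_coordsys)
#     ncom.set_fields(0.0)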
class SpatialInterpolatorROMS3d(interpolation.SpatialInterpolator):
"""
Abstract spatial interpolator class that can interpolate onto a Function
"""
def __init__(self, function_space, to_latlon):
"""
:arg function_space: target Firedrake FunctionSpace
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
"""
self.function_space = function_space
# construct local coordinates
xyz = SpatialCoordinate(self.function_space.mesh())
tmp_func = self.function_space.get_work_function()
xyz_array = np.zeros((tmp_func.dat.data_with_halos.shape[0], 3))
for i in range(3):
tmp_func.interpolate(xyz[i])
xyz_array[:, i] = tmp_func.dat.data_with_halos[:]
self.function_space.restore_work_function(tmp_func)
self.latlonz_array = np.zeros_like(xyz_array)
lat, lon = to_latlon(xyz_array[:, 0], xyz_array[:, 1])
self.latlonz_array[:, 0] = lat
self.latlonz_array[:, 1] = lon
self.latlonz_array[:, 2] = xyz_array[:, 2]
self._initialized = False
def _get_subset_nodes(self, grid_x, grid_y, target_x, target_y):
"""
Returns grid nodes that are necessary for interpolating onto target_x,y
"""
orig_shape = grid_x.shape
grid_xy = np.array((grid_x.ravel(), grid_y.ravel())).T
target_xy = np.array((target_x.ravel(), target_y.ravel())).T
tri = qhull.Delaunay(grid_xy)
simplex = tri.find_simplex(target_xy)
vertices = np.take(tri.simplices, simplex, axis=0)
nodes = np.unique(vertices.ravel())
nodes_x, nodes_y = np.unravel_index(nodes, orig_shape)
return nodes, nodes_x, nodes_y
def _compute_roms_z_coord(self, ncfile, constant_zeta=None):
zeta = ncfile['zeta'][0, :, :]
bath = ncfile['h'][:]
# NOTE compute z coordinates for full levels (w)
cs = ncfile['Cs_w'][:]
s = ncfile['s_w'][:]
hc = ncfile['hc'][:]
# ROMS transformation ver. 2:
# z(x, y, sigma, t) = zeta(x, y, t) + (zeta(x, y, t) + h(x, y))*S(x, y, sigma)
zeta = zeta[self.ind_lat, self.ind_lon][self.mask].filled(0.0)
bath = bath[self.ind_lat, self.ind_lon][self.mask]
if constant_zeta:
zeta = np.ones_like(bath)*constant_zeta
ss = (hc*s[:, np.newaxis] + bath[np.newaxis, :]*cs[:, np.newaxis])/(hc + bath[np.newaxis, :])
grid_z_w = zeta[np.newaxis, :]*(1 + ss) + bath[np.newaxis, :]*ss
grid_z = 0.5*(grid_z_w[1:, :] + grid_z_w[:-1, :])
grid_z[0, :] = grid_z_w[0, :]
grid_z[-1, :] = grid_z_w[-1, :]
return grid_z
def _create_interpolator(self, ncfile):
"""
Create compact interpolator by finding the minimal necessary support
"""
lat = ncfile['lat_rho'][:]
lon = ncfile['lon_rho'][:]
self.mask = ncfile['mask_rho'][:].astype(bool)
self.nodes, self.ind_lat, self.ind_lon = self._get_subset_nodes(lat, lon, self.latlonz_array[:, 0], self.latlonz_array[:, 1])
lat_subset = lat[self.ind_lat, self.ind_lon]
lon_subset = lon[self.ind_lat, self.ind_lon]
self.mask = self.mask[self.ind_lat, self.ind_lon]
# COMPUTE z coords for constant elevation=0.1
grid_z = self._compute_roms_z_coord(ncfile, constant_zeta=0.1)
# omit land mask
lat_subset = lat_subset[self.mask]
lon_subset = lon_subset[self.mask]
nz = grid_z.shape[0]
# data shape is [nz, neta, nxi]
grid_lat = np.tile(lat_subset, (nz, 1, 1)).ravel()
grid_lon = np.tile(lon_subset, (nz, 1, 1)).ravel()
grid_z = grid_z.ravel()
if np.ma.isMaskedArray(grid_lat):
grid_lat = grid_lat.filled(0.0)
if np.ma.isMaskedArray(grid_lon):
grid_lon = grid_lon.filled(0.0)
if np.ma.isMaskedArray(grid_z):
grid_z = grid_z.filled(0.0)
grid_latlonz = np.vstack((grid_lat, grid_lon, grid_z)).T
# building 3D interpolator, this can take a long time (minutes)
print_output('Constructing 3D GridInterpolator...')
self.interpolator = interpolation.GridInterpolator(
grid_latlonz, self.latlonz_array, normalize=True,
fill_mode='nearest'
)
print_output('done.')
self._initialized = True
def interpolate(self, nc_filename, variable_list, itime):
"""
Calls the interpolator object
"""
with netCDF4.Dataset(nc_filename, 'r') as ncfile:
if not self._initialized:
self._create_interpolator(ncfile)
output = []
for var in variable_list:
assert var in ncfile.variables
grid_data = ncfile[var][itime, :, :, :][:, self.ind_lat, self.ind_lon][:, self.mask].filled(np.nan).ravel()
data = self.interpolator(grid_data)
output.append(data)
return output
class LiveOceanInterpolator(object):
"""
Interpolates LiveOcean (ROMS) model data on 3D fields
"""
def __init__(self, function_space, fields, field_names, ncfile_pattern, init_date, to_latlon):
self.function_space = function_space
for f in fields:
assert f.function_space() == self.function_space, 'field \'{:}\' does not belong to given function space {:}.'.format(f.name(), self.function_space.name)
assert len(fields) == len(field_names)
self.fields = fields
self.field_names = field_names
# construct interpolators
self.grid_interpolator = SpatialInterpolatorROMS3d(self.function_space, to_latlon)
self.reader = interpolation.NetCDFSpatialInterpolator(self.grid_interpolator, field_names)
self.timesearch_obj = interpolation.NetCDFTimeSearch(ncfile_pattern, init_date, interpolation.NetCDFTimeParser, time_variable_name='ocean_time', verbose=False)
self.time_interpolator = interpolation.LinearTimeInterpolator(self.timesearch_obj, self.reader)
def set_fields(self, time):
"""
Evaluates forcing fields at the given time
"""
vals = self.time_interpolator(time)
for i in range(len(self.fields)):
self.fields[i].dat.data_with_halos[:] = vals[i]
class TidalBoundaryForcing(object):
"""Base class for tidal boundary interpolators."""
__metaclass__ = ABCMeta
@abstractproperty
def coord_layout():
"""
Data layout in the netcdf files.
Either 'lon,lat' or 'lat,lon'.
"""
return 'lon,lat'
@abstractproperty
def compute_velocity():
"""If True, compute tidal currents as well."""
return False
@abstractproperty
def elev_nc_file():
"""Tidal elavation NetCDF file name."""
return None
@abstractproperty
def uv_nc_file():
"""Tidal velocity NetCDF file name."""
return None
@abstractproperty
def grid_nc_file():
"""Grid NetCDF file name."""
return None
def __init__(self, elev_field, init_date, to_latlon, target_coordsys,
uv_field=None, constituents=None, boundary_ids=None,
data_dir=None):
"""
:arg elev_field: Function where tidal elevation will be interpolated.
:arg init_date: Datetime object defining the simulation init time.
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
:arg target_coordsys: coordinate system in which the model grid is
defined. This is used to rotate vectors to local coordinates.
:kwarg uv_field: Function where tidal transport will be interpolated.
:kwarg constituents: list of tidal constituents, e.g. ['M2', 'K1']
:kwarg boundary_ids: list of boundary_ids where tidal data will be
evaluated. If not defined, tides will be evaluated in the entire
domain.
:kwarg data_dir: path to directory where tidal model netCDF files are
located.
"""
assert init_date.tzinfo is not None, 'init_date must have time zone information'
if constituents is None:
constituents = ['Q1', 'O1', 'P1', 'K1', 'N2', 'M2', 'S2', 'K2']
self.data_dir = data_dir if data_dir is not None else ''
if not self.compute_velocity and uv_field is not None:
warning('{:}: uv_field is defined but velocity computation is not supported. uv_field will be ignored.'.format(__class__.__name__))
self.compute_velocity = self.compute_velocity and uv_field is not None
# determine nodes at the boundary
self.elev_field = elev_field
self.uv_field = uv_field
fs = elev_field.function_space()
if boundary_ids is None:
# interpolate in the whole domain
self.nodes = np.arange(self.elev_field.dat.data_with_halos.shape[0])
else:
bc = DirichletBC(fs, 0., boundary_ids, method='geometric')
self.nodes = bc.nodes
self._empty_set = self.nodes.size == 0
xy = SpatialCoordinate(fs.mesh())
fsx = Function(fs).interpolate(xy[0]).dat.data_ro_with_halos
fsy = Function(fs).interpolate(xy[1]).dat.data_ro_with_halos
if not self._empty_set:
latlon = []
for node in self.nodes:
x, y = fsx[node], fsy[node]
lat, lon = to_latlon(x, y, positive_lon=True)
latlon.append((lat, lon))
self.latlon = np.array(latlon)
# compute bounding box
bounds_lat = [self.latlon[:, 0].min(), self.latlon[:, 0].max()]
bounds_lon = [self.latlon[:, 1].min(), self.latlon[:, 1].max()]
if self.coord_layout == 'lon,lat':
self.ranges = (bounds_lon, bounds_lat)
else:
self.ranges = (bounds_lat, bounds_lon)
self.tide = uptide.Tides(constituents)
self.tide.set_initial_time(init_date)
self._create_readers()
if self.compute_velocity:
lat = self.latlon[:, 0]
lon = self.latlon[:, 1]
self.vect_rotator = coordsys.VectorCoordSysRotation(
coordsys.LL_WGS84, target_coordsys, lon, lat)
@abstractmethod
def _create_readers(self, ):
"""Create uptide netcdf reader objects."""
pass
def set_tidal_field(self, t):
if not self._empty_set:
self.tnci.set_time(t)
if self.compute_velocity:
self.tnciu.set_time(t)
self.tnciv.set_time(t)
elev_data = self.elev_field.dat.data_with_halos
if self.compute_velocity:
uv_data = self.uv_field.dat.data_with_halos
for i, node in enumerate(self.nodes):
lat, lon = self.latlon[i, :]
point = (lon, lat) if self.coord_layout == 'lon,lat' else (lat, lon)
try:
elev = self.tnci.get_val(point, allow_extrapolation=True)
elev_data[node] = elev
except uptide.netcdf_reader.CoordinateError:
elev_data[node] = 0.
if self.compute_velocity:
try:
lon_vel = self.tnciu.get_val(point, allow_extrapolation=True)
lat_vel = self.tnciv.get_val(point, allow_extrapolation=True)
u, v = self.vect_rotator(lon_vel, lat_vel, i_node=i)
uv_data[node, :] = (u, v)
except uptide.netcdf_reader.CoordinateError:
uv_data[node, :] = (0, 0)
class TPXOTidalBoundaryForcing(TidalBoundaryForcing):
"""Tidal boundary interpolator for TPXO tidal model."""
elev_nc_file = 'h_tpxo9.v1.nc'
uv_nc_file = 'u_tpxo9.v1.nc'
grid_nc_file = 'grid_tpxo9.nc'
coord_layout = 'lon,lat'
compute_velocity = True
def _create_readers(self, ):
"""Create uptide netcdf reader objects."""
msg = 'File {:} not found, download it from \nftp://ftp.oce.orst.edu/dist/tides/Global/tpxo9_netcdf.tar.gz'
f_grid = os.path.join(self.data_dir, self.grid_nc_file)
assert os.path.exists(f_grid), msg.format(f_grid)
f_elev = os.path.join(self.data_dir, self.elev_nc_file)
assert os.path.exists(f_elev), msg.format(f_elev)
self.tnci = uptide.tidal_netcdf.OTPSncTidalInterpolator(self.tide, f_grid, f_elev, ranges=self.ranges)
if self.uv_field is not None:
f_uv = os.path.join(self.data_dir, self.uv_nc_file)
assert os.path.exists(f_uv), msg.format(f_uv)
self.tnciu = uptide.tidal_netcdf.OTPSncTidalComponentInterpolator(self.tide, f_grid, f_uv, 'u', 'u', ranges=self.ranges)
self.tnciv = uptide.tidal_netcdf.OTPSncTidalComponentInterpolator(self.tide, f_grid, f_uv, 'v', 'v', ranges=self.ranges)
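# Example usage of TPXOTidalBoundaryForcing (a minimal sketch; the function
# spaces, coordinate conversion, target coordinate system and boundary ids
# are illustrative assumptions):
#
#   elev_tide_2d = Function(fs_2d, name='Tidal elevation')
#   uv_tide_2d = Function(fs_uv_2d, name='Tidal transport')
#   tbnd = TPXOTidalBoundaryForcing(
#       elev_tide_2d, init_date, to_latlon, target_coordsys,
#       uv_field=uv_tide_2d, constituents=['M2', 'S2', 'K1', 'O1'],
#       boundary_ids=[1], data_dir='forcings/tpxo')
#   tbnd.set_tidal_field(0.0)  # evaluate elevation and currents at t=0 s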
class FES2004TidalBoundaryForcing(TidalBoundaryForcing):
"""Tidal boundary interpolator for FES2004 tidal model."""
elev_nc_file = 'tide.fes2004.nc'
uv_nc_file = None
grid_nc_file = None
coord_layout = 'lat,lon'
compute_velocity = False
def _create_readers(self, ):
"""Create uptide netcdf reader objects."""
f_elev = os.path.join(self.data_dir, self.elev_nc_file)
msg = 'File {:} not found, download it from \nftp://ftp.legos.obs-mip.fr/pub/soa/maree/tide_model/global_solution/fes2004/'.format(f_elev)
assert os.path.exists(f_elev), msg
self.tnci = uptide.tidal_netcdf.FESTidalInterpolator(self.tide, f_elev, ranges=self.ranges)
|
mit
|
syci/ingadhoc-odoo-addons
|
product_catalog_aeroo_report/report/product_catalog_parser.py
|
1
|
3894
|
# -*- coding: utf-8 -*-
import time
from openerp.tools.translate import _
from openerp.report import report_sxw
from openerp.report.report_sxw import rml_parse
class Parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
lang = context.get('lang', 'es_ES')
self.print_product_uom = context.get('print_product_uom', False)
self.product_type = context.get('product_type', 'product.product')
self.prod_display_type = context.get('prod_display_type', False)
pricelist_ids = context.get('pricelist_ids', [])
pricelists = self.pool['product.pricelist'].browse(
cr, uid, pricelist_ids, context=context)
categories_order = context.get('categories_order', '')
# Get categories ordered
category_type = context.get('category_type', False)
if category_type == 'public_category':
categories = self.pool['product.public.category']
else:
categories = self.pool['product.category']
category_ids = context.get('category_ids', [])
category_ids = categories.search(
cr, uid, [('id', 'in', category_ids)],
order=categories_order, context=context)
categories = categories.browse(
cr, uid, category_ids, context=context)
products = self.get_products(category_ids, context=context)
company_id = self.pool['res.users'].browse(
cr, uid, [uid])[0].company_id
self.localcontext.update({
'lang': lang,
'categories': categories,
'products': products,
'print_product_uom': self.print_product_uom,
'product_type': self.product_type,
'prod_display_type': self.prod_display_type,
'company_logo': company_id.logo,
'pricelists': pricelists,
'today': time.localtime(),
'get_price': self.get_price,
'get_products': self.get_products,
'context': context,
'field_value_get': self.field_value_get,
})
def field_value_get(self, product, field, context=None):
        # TODO: make this work in the ods report. The problem is that we
        # should use export_data instead of read so that we can choose what
        # to show from the parent, for example "categ_id/name"
if not context:
context = {}
product_obj = self.pool.get(self.product_type)
field_value = product_obj.read(
self.cr, self.uid, [product.id], [field], context=context)
return field_value[0].get(field, '')
def get_price(self, product, pricelist, context=None):
if not context:
context = {}
context['pricelist'] = pricelist.id
product_obj = self.pool[self.product_type]
price = product_obj.browse(
self.cr, self.uid, [product.id], context=context).price
return price
def get_products(self, category_ids, context=None):
if not isinstance(category_ids, list):
category_ids = [category_ids]
if not context:
context = {}
order = context.get('products_order', '')
only_with_stock = context.get('only_with_stock', False)
category_type = context.get('category_type', False)
if category_type == 'public_category':
domain = [('public_categ_ids', 'in', category_ids)]
else:
domain = [('categ_id', 'in', category_ids)]
if only_with_stock:
domain.append(('qty_available', '>', 0))
product_ids = self.pool[self.product_type].search(
self.cr, self.uid, domain, order=order, context=context)
products = self.pool[self.product_type].browse(
self.cr, self.uid, product_ids, context=context)
return products
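# Illustrative wizard context consumed by this parser (a sketch; the key
# names are the ones read above, the values are assumptions):
#
#   context = {
#       'lang': 'es_ES',
#       'print_product_uom': True,
#       'product_type': 'product.product',
#       'prod_display_type': 'both',
#       'category_type': 'public_category',
#       'category_ids': [1, 2, 3],
#       'categories_order': 'sequence',
#       'pricelist_ids': [1],
#       'products_order': 'default_code',
#       'only_with_stock': False,
#   }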
|
agpl-3.0
|
MartinHjelmare/home-assistant
|
homeassistant/components/arest/sensor.py
|
7
|
6406
|
"""Support for an exposed aREST RESTful API of a device."""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE, CONF_RESOURCE,
CONF_MONITORED_VARIABLES, CONF_NAME)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
CONF_FUNCTIONS = 'functions'
CONF_PINS = 'pins'
DEFAULT_NAME = 'aREST sensor'
PIN_VARIABLE_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PINS, default={}):
vol.Schema({cv.string: PIN_VARIABLE_SCHEMA}),
vol.Optional(CONF_MONITORED_VARIABLES, default={}):
vol.Schema({cv.string: PIN_VARIABLE_SCHEMA}),
})
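# Illustrative configuration.yaml entry accepted by the schema above (a
# sketch; the resource URL, variable and pin names are assumptions):
#
#   sensor:
#     - platform: arest
#       resource: http://192.168.1.10
#       monitored_variables:
#         temperature:
#           unit_of_measurement: degC
#       pins:
#         A0:
#           name: Light level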
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the aREST sensor."""
resource = config.get(CONF_RESOURCE)
var_conf = config.get(CONF_MONITORED_VARIABLES)
pins = config.get(CONF_PINS)
try:
response = requests.get(resource, timeout=10).json()
except requests.exceptions.MissingSchema:
_LOGGER.error("Missing resource or schema in configuration. "
"Add http:// to your URL")
return False
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to device at %s", resource)
return False
arest = ArestData(resource)
def make_renderer(value_template):
"""Create a renderer based on variable_template value."""
if value_template is None:
return lambda value: value
value_template.hass = hass
def _render(value):
try:
return value_template.async_render({'value': value})
except TemplateError:
_LOGGER.exception("Error parsing value")
return value
return _render
dev = []
if var_conf is not None:
for variable, var_data in var_conf.items():
if variable not in response['variables']:
_LOGGER.error("Variable: %s does not exist", variable)
continue
renderer = make_renderer(var_data.get(CONF_VALUE_TEMPLATE))
dev.append(ArestSensor(
arest, resource, config.get(CONF_NAME, response[CONF_NAME]),
var_data.get(CONF_NAME, variable), variable=variable,
unit_of_measurement=var_data.get(CONF_UNIT_OF_MEASUREMENT),
renderer=renderer))
if pins is not None:
for pinnum, pin in pins.items():
renderer = make_renderer(pin.get(CONF_VALUE_TEMPLATE))
dev.append(ArestSensor(
ArestData(resource, pinnum), resource,
config.get(CONF_NAME, response[CONF_NAME]), pin.get(CONF_NAME),
pin=pinnum, unit_of_measurement=pin.get(
CONF_UNIT_OF_MEASUREMENT), renderer=renderer))
add_entities(dev, True)
class ArestSensor(Entity):
"""Implementation of an aREST sensor for exposed variables."""
def __init__(self, arest, resource, location, name, variable=None,
pin=None, unit_of_measurement=None, renderer=None):
"""Initialize the sensor."""
self.arest = arest
self._resource = resource
self._name = '{} {}'.format(location.title(), name.title())
self._variable = variable
self._pin = pin
self._state = None
self._unit_of_measurement = unit_of_measurement
self._renderer = renderer
if self._pin is not None:
request = requests.get(
'{}/mode/{}/i'.format(self._resource, self._pin), timeout=10)
if request.status_code != 200:
_LOGGER.error("Can't set mode of %s", self._resource)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
values = self.arest.data
if 'error' in values:
return values['error']
value = self._renderer(
values.get('value', values.get(self._variable, None)))
return value
def update(self):
"""Get the latest data from aREST API."""
self.arest.update()
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self.arest.available
class ArestData:
"""The Class for handling the data retrieval for variables."""
def __init__(self, resource, pin=None):
"""Initialize the data object."""
self._resource = resource
self._pin = pin
self.data = {}
self.available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from aREST device."""
try:
if self._pin is None:
response = requests.get(self._resource, timeout=10)
self.data = response.json()['variables']
else:
try:
if str(self._pin[0]) == 'A':
response = requests.get('{}/analog/{}'.format(
self._resource, self._pin[1:]), timeout=10)
self.data = {'value': response.json()['return_value']}
except TypeError:
response = requests.get('{}/digital/{}'.format(
self._resource, self._pin), timeout=10)
self.data = {'value': response.json()['return_value']}
self.available = True
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to device %s", self._resource)
self.available = False
|
apache-2.0
|
nchammas/spark
|
python/pyspark/mllib/__init__.py
|
11
|
1372
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
RDD-based machine learning APIs for Python (in maintenance mode).
The `pyspark.mllib` package is in maintenance mode as of the Spark 2.0.0 release to encourage
migration to the DataFrame-based APIs under the `pyspark.ml` package.
"""
# MLlib currently needs NumPy 1.4+, so complain if lower
import numpy
ver = [int(x) for x in numpy.version.version.split('.')[:2]]
if ver < [1, 4]:
raise Exception("MLlib requires NumPy 1.4+")
__all__ = ['classification', 'clustering', 'feature', 'fpm', 'linalg', 'random',
'recommendation', 'regression', 'stat', 'tree', 'util']
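# Example usage (a minimal sketch; these RDD-based APIs remain importable in
# maintenance mode):
#
#   from pyspark.mllib.linalg import Vectors
#   v = Vectors.dense([1.0, 2.0, 3.0])
#   sv = Vectors.sparse(3, {0: 1.0, 2: 5.5})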
|
apache-2.0
|
peterlauri/django
|
django/contrib/admin/checks.py
|
13
|
39299
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.contrib.admin.utils import (
NotRelationField, flatten, get_fields_from_path,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.forms.models import (
BaseModelForm, BaseModelFormSet, _get_foreign_key,
)
from django.template.engine import Engine
def check_admin_app(**kwargs):
from django.contrib.admin.sites import system_check_errors
return system_check_errors
def check_dependencies(**kwargs):
"""
Check that the admin's dependencies are correctly installed.
"""
errors = []
# contrib.contenttypes must be installed.
if not apps.is_installed('django.contrib.contenttypes'):
missing_app = checks.Error(
"'django.contrib.contenttypes' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E401",
)
errors.append(missing_app)
# The auth context processor must be installed if using the default
# authentication backend.
try:
default_template_engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors and
'django.contrib.auth.backends.ModelBackend' in settings.AUTHENTICATION_BACKENDS):
missing_template = checks.Error(
"'django.contrib.auth.context_processors.auth' must be in "
"TEMPLATES in order to use the admin application.",
id="admin.E402"
)
errors.append(missing_template)
return errors
class BaseModelAdminChecks(object):
def check(self, admin_obj, **kwargs):
errors = []
errors.extend(self._check_raw_id_fields(admin_obj))
errors.extend(self._check_fields(admin_obj))
errors.extend(self._check_fieldsets(admin_obj))
errors.extend(self._check_exclude(admin_obj))
errors.extend(self._check_form(admin_obj))
errors.extend(self._check_filter_vertical(admin_obj))
errors.extend(self._check_filter_horizontal(admin_obj))
errors.extend(self._check_radio_fields(admin_obj))
errors.extend(self._check_prepopulated_fields(admin_obj))
errors.extend(self._check_view_on_site_url(admin_obj))
errors.extend(self._check_ordering(admin_obj))
errors.extend(self._check_readonly_fields(admin_obj))
return errors
def _check_raw_id_fields(self, obj):
""" Check that `raw_id_fields` only contains field names that are listed
on the model. """
if not isinstance(obj.raw_id_fields, (list, tuple)):
return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001')
else:
return list(chain(*[
self._check_raw_id_fields_item(obj, obj.model, field_name, 'raw_id_fields[%d]' % index)
for index, field_name in enumerate(obj.raw_id_fields)
]))
def _check_raw_id_fields_item(self, obj, model, field_name, label):
""" Check an item of `raw_id_fields`, i.e. check that field named
`field_name` exists in model `model` and is a ForeignKey or a
ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E002')
else:
if not field.many_to_many and not isinstance(field, models.ForeignKey):
return must_be('a foreign key or a many-to-many field',
option=label, obj=obj, id='admin.E003')
else:
return []
def _check_fields(self, obj):
""" Check that `fields` only refer to existing fields, doesn't contain
duplicates. Check if at most one of `fields` and `fieldsets` is defined.
"""
if obj.fields is None:
return []
elif not isinstance(obj.fields, (list, tuple)):
return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004')
elif obj.fieldsets:
return [
checks.Error(
"Both 'fieldsets' and 'fields' are specified.",
obj=obj.__class__,
id='admin.E005',
)
]
fields = flatten(obj.fields)
if len(fields) != len(set(fields)):
return [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
obj=obj.__class__,
id='admin.E006',
)
]
return list(chain(*[
self._check_field_spec(obj, obj.model, field_name, 'fields')
for field_name in obj.fields
]))
def _check_fieldsets(self, obj):
""" Check that fieldsets is properly formatted and doesn't contain
duplicates. """
if obj.fieldsets is None:
return []
elif not isinstance(obj.fieldsets, (list, tuple)):
return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007')
else:
return list(chain(*[
self._check_fieldsets_item(obj, obj.model, fieldset, 'fieldsets[%d]' % index)
for index, fieldset in enumerate(obj.fieldsets)
]))
def _check_fieldsets_item(self, obj, model, fieldset, label):
""" Check an item of `fieldsets`, i.e. check that this is a pair of a
set name and a dictionary containing "fields" key. """
if not isinstance(fieldset, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E008')
elif len(fieldset) != 2:
return must_be('of length 2', option=label, obj=obj, id='admin.E009')
elif not isinstance(fieldset[1], dict):
return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010')
elif 'fields' not in fieldset[1]:
return [
checks.Error(
"The value of '%s[1]' must contain the key 'fields'." % label,
obj=obj.__class__,
id='admin.E011',
)
]
elif not isinstance(fieldset[1]['fields'], (list, tuple)):
return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008')
fields = flatten(fieldset[1]['fields'])
if len(fields) != len(set(fields)):
return [
checks.Error(
"There are duplicate field(s) in '%s[1]'." % label,
obj=obj.__class__,
id='admin.E012',
)
]
return list(chain(*[
self._check_field_spec(obj, model, fieldset_fields, '%s[1]["fields"]' % label)
for fieldset_fields in fieldset[1]['fields']
]))
def _check_field_spec(self, obj, model, fields, label):
""" `fields` should be an item of `fields` or an item of
fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a
field name or a tuple of field names. """
if isinstance(fields, tuple):
return list(chain(*[
self._check_field_spec_item(obj, model, field_name, "%s[%d]" % (label, index))
for index, field_name in enumerate(fields)
]))
else:
return self._check_field_spec_item(obj, model, fields, label)
def _check_field_spec_item(self, obj, model, field_name, label):
if field_name in obj.readonly_fields:
# Stuff can be put in fields that isn't actually a model field if
# it's in readonly_fields, readonly_fields will handle the
# validation of such things.
return []
else:
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# If we can't find a field on the model that matches, it could
# be an extra field on the form.
return []
else:
if (isinstance(field, models.ManyToManyField) and
not field.remote_field.through._meta.auto_created):
return [
checks.Error(
"The value of '%s' cannot include the ManyToManyField '%s', "
"because that field manually specifies a relationship model."
% (label, field_name),
obj=obj.__class__,
id='admin.E013',
)
]
else:
return []
def _check_exclude(self, obj):
""" Check that exclude is a sequence without duplicates. """
if obj.exclude is None: # default value is None
return []
elif not isinstance(obj.exclude, (list, tuple)):
return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014')
elif len(obj.exclude) > len(set(obj.exclude)):
return [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
obj=obj.__class__,
id='admin.E015',
)
]
else:
return []
def _check_form(self, obj):
""" Check that form subclasses BaseModelForm. """
if hasattr(obj, 'form') and not issubclass(obj.form, BaseModelForm):
return must_inherit_from(parent='BaseModelForm', option='form',
obj=obj, id='admin.E016')
else:
return []
def _check_filter_vertical(self, obj):
""" Check that filter_vertical is a sequence of field names. """
if not hasattr(obj, 'filter_vertical'):
return []
elif not isinstance(obj.filter_vertical, (list, tuple)):
return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017')
else:
return list(chain(*[
self._check_filter_item(obj, obj.model, field_name, "filter_vertical[%d]" % index)
for index, field_name in enumerate(obj.filter_vertical)
]))
def _check_filter_horizontal(self, obj):
""" Check that filter_horizontal is a sequence of field names. """
if not hasattr(obj, 'filter_horizontal'):
return []
elif not isinstance(obj.filter_horizontal, (list, tuple)):
return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018')
else:
return list(chain(*[
self._check_filter_item(obj, obj.model, field_name, "filter_horizontal[%d]" % index)
for index, field_name in enumerate(obj.filter_horizontal)
]))
def _check_filter_item(self, obj, model, field_name, label):
""" Check one item of `filter_vertical` or `filter_horizontal`, i.e.
check that given field exists and is a ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E019')
else:
if not field.many_to_many:
return must_be('a many-to-many field', option=label, obj=obj, id='admin.E020')
else:
return []
def _check_radio_fields(self, obj):
""" Check that `radio_fields` is a dictionary. """
if not hasattr(obj, 'radio_fields'):
return []
elif not isinstance(obj.radio_fields, dict):
return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021')
else:
return list(chain(*[
self._check_radio_fields_key(obj, obj.model, field_name, 'radio_fields') +
self._check_radio_fields_value(obj, val, 'radio_fields["%s"]' % field_name)
for field_name, val in obj.radio_fields.items()
]))
def _check_radio_fields_key(self, obj, model, field_name, label):
""" Check that a key of `radio_fields` dictionary is name of existing
field and that the field is a ForeignKey or has `choices` defined. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E022')
else:
if not (isinstance(field, models.ForeignKey) or field.choices):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an "
"instance of ForeignKey, and does not have a 'choices' definition." % (
label, field_name
),
obj=obj.__class__,
id='admin.E023',
)
]
else:
return []
def _check_radio_fields_value(self, obj, val, label):
""" Check type of a value of `radio_fields` dictionary. """
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if val not in (HORIZONTAL, VERTICAL):
return [
checks.Error(
"The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label,
obj=obj.__class__,
id='admin.E024',
)
]
else:
return []
def _check_view_on_site_url(self, obj):
if hasattr(obj, 'view_on_site'):
if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool):
return [
checks.Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
obj=obj.__class__,
id='admin.E025',
)
]
else:
return []
else:
return []
def _check_prepopulated_fields(self, obj):
""" Check that `prepopulated_fields` is a dictionary containing allowed
field types. """
if not hasattr(obj, 'prepopulated_fields'):
return []
elif not isinstance(obj.prepopulated_fields, dict):
return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026')
else:
return list(chain(*[
self._check_prepopulated_fields_key(obj, obj.model, field_name, 'prepopulated_fields') +
self._check_prepopulated_fields_value(obj, obj.model, val, 'prepopulated_fields["%s"]' % field_name)
for field_name, val in obj.prepopulated_fields.items()
]))
def _check_prepopulated_fields_key(self, obj, model, field_name, label):
""" Check a key of `prepopulated_fields` dictionary, i.e. check that it
is a name of existing field and the field is one of the allowed types.
"""
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E027')
else:
if isinstance(field, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)):
return [
checks.Error(
"The value of '%s' refers to '%s', which must not be a DateTimeField, "
"a ForeignKey, or a ManyToManyField." % (label, field_name),
obj=obj.__class__,
id='admin.E028',
)
]
else:
return []
def _check_prepopulated_fields_value(self, obj, model, val, label):
""" Check a value of `prepopulated_fields` dictionary, i.e. it's an
iterable of existing fields. """
if not isinstance(val, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E029')
else:
return list(chain(*[
self._check_prepopulated_fields_value_item(obj, model, subfield_name, "%s[%r]" % (label, index))
for index, subfield_name in enumerate(val)
]))
def _check_prepopulated_fields_value_item(self, obj, model, field_name, label):
""" For `prepopulated_fields` equal to {"slug": ("title",)},
`field_name` is "title". """
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E030')
else:
return []
def _check_ordering(self, obj):
""" Check that ordering refers to existing fields or is random. """
# ordering = None
if obj.ordering is None: # The default value is None
return []
elif not isinstance(obj.ordering, (list, tuple)):
return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031')
else:
return list(chain(*[
self._check_ordering_item(obj, obj.model, field_name, 'ordering[%d]' % index)
for index, field_name in enumerate(obj.ordering)
]))
def _check_ordering_item(self, obj, model, field_name, label):
""" Check that `ordering` refers to existing fields. """
if field_name == '?' and len(obj.ordering) != 1:
return [
checks.Error(
"The value of 'ordering' has the random ordering marker '?', "
"but contains other fields as well.",
hint='Either remove the "?", or remove the other fields.',
obj=obj.__class__,
id='admin.E032',
)
]
elif field_name == '?':
return []
elif LOOKUP_SEP in field_name:
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
return []
else:
if field_name.startswith('-'):
field_name = field_name[1:]
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E033')
else:
return []
def _check_readonly_fields(self, obj):
""" Check that readonly_fields refers to proper attribute or field. """
if obj.readonly_fields == ():
return []
elif not isinstance(obj.readonly_fields, (list, tuple)):
return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034')
else:
return list(chain(*[
self._check_readonly_fields_item(obj, obj.model, field_name, "readonly_fields[%d]" % index)
for index, field_name in enumerate(obj.readonly_fields)
]))
def _check_readonly_fields_item(self, obj, model, field_name, label):
if callable(field_name):
return []
elif hasattr(obj, field_name):
return []
elif hasattr(model, field_name):
return []
else:
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % (
label, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
obj=obj.__class__,
id='admin.E035',
)
]
else:
return []
class ModelAdminChecks(BaseModelAdminChecks):
def check(self, admin_obj, **kwargs):
errors = super(ModelAdminChecks, self).check(admin_obj)
errors.extend(self._check_save_as(admin_obj))
errors.extend(self._check_save_on_top(admin_obj))
errors.extend(self._check_inlines(admin_obj))
errors.extend(self._check_list_display(admin_obj))
errors.extend(self._check_list_display_links(admin_obj))
errors.extend(self._check_list_filter(admin_obj))
errors.extend(self._check_list_select_related(admin_obj))
errors.extend(self._check_list_per_page(admin_obj))
errors.extend(self._check_list_max_show_all(admin_obj))
errors.extend(self._check_list_editable(admin_obj))
errors.extend(self._check_search_fields(admin_obj))
errors.extend(self._check_date_hierarchy(admin_obj))
return errors
def _check_save_as(self, obj):
""" Check save_as is a boolean. """
if not isinstance(obj.save_as, bool):
return must_be('a boolean', option='save_as',
obj=obj, id='admin.E101')
else:
return []
def _check_save_on_top(self, obj):
""" Check save_on_top is a boolean. """
if not isinstance(obj.save_on_top, bool):
return must_be('a boolean', option='save_on_top',
obj=obj, id='admin.E102')
else:
return []
def _check_inlines(self, obj):
""" Check all inline model admin classes. """
if not isinstance(obj.inlines, (list, tuple)):
return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103')
else:
return list(chain(*[
self._check_inlines_item(obj, obj.model, item, "inlines[%d]" % index)
for index, item in enumerate(obj.inlines)
]))
def _check_inlines_item(self, obj, model, inline, label):
""" Check one inline model admin. """
inline_label = '.'.join([inline.__module__, inline.__name__])
from django.contrib.admin.options import InlineModelAdmin
if not issubclass(inline, InlineModelAdmin):
return [
checks.Error(
"'%s' must inherit from 'InlineModelAdmin'." % inline_label,
obj=obj.__class__,
id='admin.E104',
)
]
elif not inline.model:
return [
checks.Error(
"'%s' must have a 'model' attribute." % inline_label,
obj=obj.__class__,
id='admin.E105',
)
]
elif not issubclass(inline.model, models.Model):
return must_be('a Model', option='%s.model' % inline_label, obj=obj, id='admin.E106')
else:
return inline(model, obj.admin_site).check()
def _check_list_display(self, obj):
""" Check that list_display only contains fields or usable attributes.
"""
if not isinstance(obj.list_display, (list, tuple)):
return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107')
else:
return list(chain(*[
self._check_list_display_item(obj, obj.model, item, "list_display[%d]" % index)
for index, item in enumerate(obj.list_display)
]))
def _check_list_display_item(self, obj, model, item, label):
if callable(item):
return []
elif hasattr(obj, item):
return []
elif hasattr(model, item):
# getattr(model, item) could be an X_RelatedObjectsDescriptor
try:
field = model._meta.get_field(item)
except FieldDoesNotExist:
try:
field = getattr(model, item)
except AttributeError:
field = None
if field is None:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not a "
"callable, an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
obj=obj.__class__,
id='admin.E108',
)
]
elif isinstance(field, models.ManyToManyField):
return [
checks.Error(
"The value of '%s' must not be a ManyToManyField." % label,
obj=obj.__class__,
id='admin.E109',
)
]
else:
return []
else:
try:
model._meta.get_field(item)
except FieldDoesNotExist:
return [
# This is a deliberate repeat of E108; there's more than one path
# required to test this condition.
checks.Error(
"The value of '%s' refers to '%s', which is not a callable, "
"an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
obj=obj.__class__,
id='admin.E108',
)
]
else:
return []
def _check_list_display_links(self, obj):
""" Check that list_display_links is a unique subset of list_display.
"""
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110')
else:
return list(chain(*[
self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index)
for index, field_name in enumerate(obj.list_display_links)
]))
def _check_list_display_links_item(self, obj, field_name, label):
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not defined in 'list_display'." % (
label, field_name
),
obj=obj.__class__,
id='admin.E111',
)
]
else:
return []
def _check_list_filter(self, obj):
if not isinstance(obj.list_filter, (list, tuple)):
return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112')
else:
return list(chain(*[
self._check_list_filter_item(obj, obj.model, item, "list_filter[%d]" % index)
for index, item in enumerate(obj.list_filter)
]))
def _check_list_filter_item(self, obj, model, item, label):
"""
Check one item of `list_filter`, i.e. check if it is one of three options:
1. 'field' -- a basic field filter, possibly w/ relationships (e.g.
'field__rel')
2. ('field', SomeFieldListFilter) - a field-based list filter class
3. SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import ListFilter, FieldListFilter
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not issubclass(item, ListFilter):
return must_inherit_from(parent='ListFilter', option=label,
obj=obj, id='admin.E113')
# ... but not a FieldListFilter.
elif issubclass(item, FieldListFilter):
return [
checks.Error(
"The value of '%s' must not inherit from 'FieldListFilter'." % label,
obj=obj.__class__,
id='admin.E114',
)
]
else:
return []
elif isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not issubclass(list_filter_class, FieldListFilter):
return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label, obj=obj, id='admin.E115')
else:
return []
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(model, field)
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of '%s' refers to '%s', which does not refer to a Field." % (label, field),
obj=obj.__class__,
id='admin.E116',
)
]
else:
return []
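    # Illustrative list_filter values accepted by the check above (a sketch;
    # the model field names and the custom filter class are assumptions):
    #
    #   class BookAdmin(admin.ModelAdmin):
    #       list_filter = (
    #           'author__name',                           # option 1
    #           ('pub_date', admin.DateFieldListFilter),  # option 2
    #           MyCustomListFilter,                       # option 3
    #       )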
def _check_list_select_related(self, obj):
""" Check that list_select_related is a boolean, a list or a tuple. """
if not isinstance(obj.list_select_related, (bool, list, tuple)):
return must_be('a boolean, tuple or list', option='list_select_related', obj=obj, id='admin.E117')
else:
return []
def _check_list_per_page(self, obj):
""" Check that list_per_page is an integer. """
if not isinstance(obj.list_per_page, int):
return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118')
else:
return []
def _check_list_max_show_all(self, obj):
""" Check that list_max_show_all is an integer. """
if not isinstance(obj.list_max_show_all, int):
return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119')
else:
return []
def _check_list_editable(self, obj):
""" Check that list_editable is a sequence of editable fields from
        list_display, excluding the first element. """
if not isinstance(obj.list_editable, (list, tuple)):
return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120')
else:
return list(chain(*[
self._check_list_editable_item(obj, obj.model, item, "list_editable[%d]" % index)
for index, item in enumerate(obj.list_editable)
]))
def _check_list_editable_item(self, obj, model, field_name, label):
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E121')
else:
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not "
"contained in 'list_display'." % (label, field_name),
obj=obj.__class__,
id='admin.E122',
)
]
elif obj.list_display_links and field_name in obj.list_display_links:
return [
checks.Error(
"The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name,
obj=obj.__class__,
id='admin.E123',
)
]
# If list_display[0] is in list_editable, check that
# list_display_links is set. See #22792 and #26229 for use cases.
elif (obj.list_display[0] == field_name and not obj.list_display_links and
obj.list_display_links is not None):
return [
checks.Error(
"The value of '%s' refers to the first field in 'list_display' ('%s'), "
"which cannot be used unless 'list_display_links' is set." % (
label, obj.list_display[0]
),
obj=obj.__class__,
id='admin.E124',
)
]
elif not field.editable:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not editable through the admin." % (
label, field_name
),
obj=obj.__class__,
id='admin.E125',
)
]
else:
return []
def _check_search_fields(self, obj):
""" Check search_fields is a sequence. """
if not isinstance(obj.search_fields, (list, tuple)):
return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126')
else:
return []
def _check_date_hierarchy(self, obj):
""" Check that date_hierarchy refers to DateField or DateTimeField. """
if obj.date_hierarchy is None:
return []
else:
try:
field = get_fields_from_path(obj.model, obj.date_hierarchy)[-1]
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of 'date_hierarchy' refers to '%s', which "
"does not refer to a Field." % obj.date_hierarchy,
obj=obj.__class__,
id='admin.E127',
)
]
else:
if not isinstance(field, (models.DateField, models.DateTimeField)):
return must_be('a DateField or DateTimeField', option='date_hierarchy', obj=obj, id='admin.E128')
else:
return []
class InlineModelAdminChecks(BaseModelAdminChecks):
def check(self, inline_obj, **kwargs):
errors = super(InlineModelAdminChecks, self).check(inline_obj)
parent_model = inline_obj.parent_model
errors.extend(self._check_relation(inline_obj, parent_model))
errors.extend(self._check_exclude_of_parent_model(inline_obj, parent_model))
errors.extend(self._check_extra(inline_obj))
errors.extend(self._check_max_num(inline_obj))
errors.extend(self._check_min_num(inline_obj))
errors.extend(self._check_formset(inline_obj))
return errors
def _check_exclude_of_parent_model(self, obj, parent_model):
# Do not perform more specific checks if the base checks result in an
# error.
errors = super(InlineModelAdminChecks, self)._check_exclude(obj)
if errors:
return []
# Skip if `fk_name` is invalid.
if self._check_relation(obj, parent_model):
return []
if obj.exclude is None:
return []
fk = _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
if fk.name in obj.exclude:
return [
checks.Error(
"Cannot exclude the field '%s', because it is the foreign key "
"to the parent model '%s.%s'." % (
fk.name, parent_model._meta.app_label, parent_model._meta.object_name
),
obj=obj.__class__,
id='admin.E201',
)
]
else:
return []
def _check_relation(self, obj, parent_model):
try:
_get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
except ValueError as e:
return [checks.Error(e.args[0], obj=obj.__class__, id='admin.E202')]
else:
return []
def _check_extra(self, obj):
""" Check that extra is an integer. """
if not isinstance(obj.extra, int):
return must_be('an integer', option='extra', obj=obj, id='admin.E203')
else:
return []
def _check_max_num(self, obj):
""" Check that max_num is an integer. """
if obj.max_num is None:
return []
elif not isinstance(obj.max_num, int):
return must_be('an integer', option='max_num', obj=obj, id='admin.E204')
else:
return []
def _check_min_num(self, obj):
""" Check that min_num is an integer. """
if obj.min_num is None:
return []
elif not isinstance(obj.min_num, int):
return must_be('an integer', option='min_num', obj=obj, id='admin.E205')
else:
return []
def _check_formset(self, obj):
""" Check formset is a subclass of BaseModelFormSet. """
if not issubclass(obj.formset, BaseModelFormSet):
return must_inherit_from(parent='BaseModelFormSet', option='formset', obj=obj, id='admin.E206')
else:
return []
def must_be(type, option, obj, id):
return [
checks.Error(
"The value of '%s' must be %s." % (option, type),
obj=obj.__class__,
id=id,
),
]
def must_inherit_from(parent, option, obj, id):
return [
checks.Error(
"The value of '%s' must inherit from '%s'." % (option, parent),
obj=obj.__class__,
id=id,
),
]
def refer_to_missing_field(field, option, model, obj, id):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % (
option, field, model._meta.app_label, model._meta.object_name
),
obj=obj.__class__,
id=id,
),
]
|
bsd-3-clause
|
leilihh/nova
|
nova/tests/objects/test_external_event.py
|
28
|
1751
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.objects import external_event as external_event_obj
from nova.tests.objects import test_objects
class _TestInstanceExternalEventObject(object):
def test_make_key(self):
key = external_event_obj.InstanceExternalEvent.make_key('foo', 'bar')
self.assertEqual('foo-bar', key)
def test_make_key_no_tag(self):
key = external_event_obj.InstanceExternalEvent.make_key('foo')
self.assertEqual('foo', key)
def test_key(self):
event = external_event_obj.InstanceExternalEvent(name='foo',
tag='bar')
with mock.patch.object(event, 'make_key') as make_key:
make_key.return_value = 'key'
self.assertEqual('key', event.key)
make_key.assert_called_once_with('foo', 'bar')
class TestInstanceExternalEventObject(test_objects._LocalTest,
_TestInstanceExternalEventObject):
pass
class TestRemoteInstanceExternalEventObject(test_objects._RemoteTest,
_TestInstanceExternalEventObject):
pass
|
apache-2.0
|
linmajia/dlbench
|
tools/mxnet/symbols/lenet.py
|
12
|
1385
|
"""
LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick Haffner.
Gradient-based learning applied to document recognition.
Proceedings of the IEEE (1998)
"""
import mxnet as mx
def get_symbol(num_classes=10, add_stn=False, **kwargs):
data = mx.symbol.Variable('data')
if(add_stn):
data = mx.sym.SpatialTransformer(data=data, loc=get_loc(data), target_shape = (28,28),
transform_type="affine", sampler_type="bilinear")
# first conv
conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=20)
tanh1 = mx.symbol.Activation(data=conv1, act_type="tanh")
pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max",
kernel=(2,2), stride=(2,2))
# second conv
conv2 = mx.symbol.Convolution(data=pool1, kernel=(5,5), num_filter=50)
tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh")
pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max",
kernel=(2,2), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=num_classes)
# loss
lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
return lenet
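# Example usage (a minimal sketch; the data iterators and device context are
# illustrative assumptions):
#
#   sym = get_symbol(num_classes=10)
#   mod = mx.mod.Module(symbol=sym, context=mx.cpu())
#   mod.fit(train_iter, eval_data=val_iter, num_epoch=10)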
|
mit
|
fernandog/osmc
|
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0d6.py
|
253
|
4765
|
data = (
'hyeo', # 0x00
'hyeog', # 0x01
'hyeogg', # 0x02
'hyeogs', # 0x03
'hyeon', # 0x04
'hyeonj', # 0x05
'hyeonh', # 0x06
'hyeod', # 0x07
'hyeol', # 0x08
'hyeolg', # 0x09
'hyeolm', # 0x0a
'hyeolb', # 0x0b
'hyeols', # 0x0c
'hyeolt', # 0x0d
'hyeolp', # 0x0e
'hyeolh', # 0x0f
'hyeom', # 0x10
'hyeob', # 0x11
'hyeobs', # 0x12
'hyeos', # 0x13
'hyeoss', # 0x14
'hyeong', # 0x15
'hyeoj', # 0x16
'hyeoc', # 0x17
'hyeok', # 0x18
'hyeot', # 0x19
'hyeop', # 0x1a
'hyeoh', # 0x1b
'hye', # 0x1c
'hyeg', # 0x1d
'hyegg', # 0x1e
'hyegs', # 0x1f
'hyen', # 0x20
'hyenj', # 0x21
'hyenh', # 0x22
'hyed', # 0x23
'hyel', # 0x24
'hyelg', # 0x25
'hyelm', # 0x26
'hyelb', # 0x27
'hyels', # 0x28
'hyelt', # 0x29
'hyelp', # 0x2a
'hyelh', # 0x2b
'hyem', # 0x2c
'hyeb', # 0x2d
'hyebs', # 0x2e
'hyes', # 0x2f
'hyess', # 0x30
'hyeng', # 0x31
'hyej', # 0x32
'hyec', # 0x33
'hyek', # 0x34
'hyet', # 0x35
'hyep', # 0x36
'hyeh', # 0x37
'ho', # 0x38
'hog', # 0x39
'hogg', # 0x3a
'hogs', # 0x3b
'hon', # 0x3c
'honj', # 0x3d
'honh', # 0x3e
'hod', # 0x3f
'hol', # 0x40
'holg', # 0x41
'holm', # 0x42
'holb', # 0x43
'hols', # 0x44
'holt', # 0x45
'holp', # 0x46
'holh', # 0x47
'hom', # 0x48
'hob', # 0x49
'hobs', # 0x4a
'hos', # 0x4b
'hoss', # 0x4c
'hong', # 0x4d
'hoj', # 0x4e
'hoc', # 0x4f
'hok', # 0x50
'hot', # 0x51
'hop', # 0x52
'hoh', # 0x53
'hwa', # 0x54
'hwag', # 0x55
'hwagg', # 0x56
'hwags', # 0x57
'hwan', # 0x58
'hwanj', # 0x59
'hwanh', # 0x5a
'hwad', # 0x5b
'hwal', # 0x5c
'hwalg', # 0x5d
'hwalm', # 0x5e
'hwalb', # 0x5f
'hwals', # 0x60
'hwalt', # 0x61
'hwalp', # 0x62
'hwalh', # 0x63
'hwam', # 0x64
'hwab', # 0x65
'hwabs', # 0x66
'hwas', # 0x67
'hwass', # 0x68
'hwang', # 0x69
'hwaj', # 0x6a
'hwac', # 0x6b
'hwak', # 0x6c
'hwat', # 0x6d
'hwap', # 0x6e
'hwah', # 0x6f
'hwae', # 0x70
'hwaeg', # 0x71
'hwaegg', # 0x72
'hwaegs', # 0x73
'hwaen', # 0x74
'hwaenj', # 0x75
'hwaenh', # 0x76
'hwaed', # 0x77
'hwael', # 0x78
'hwaelg', # 0x79
'hwaelm', # 0x7a
'hwaelb', # 0x7b
'hwaels', # 0x7c
'hwaelt', # 0x7d
'hwaelp', # 0x7e
'hwaelh', # 0x7f
'hwaem', # 0x80
'hwaeb', # 0x81
'hwaebs', # 0x82
'hwaes', # 0x83
'hwaess', # 0x84
'hwaeng', # 0x85
'hwaej', # 0x86
'hwaec', # 0x87
'hwaek', # 0x88
'hwaet', # 0x89
'hwaep', # 0x8a
'hwaeh', # 0x8b
'hoe', # 0x8c
'hoeg', # 0x8d
'hoegg', # 0x8e
'hoegs', # 0x8f
'hoen', # 0x90
'hoenj', # 0x91
'hoenh', # 0x92
'hoed', # 0x93
'hoel', # 0x94
'hoelg', # 0x95
'hoelm', # 0x96
'hoelb', # 0x97
'hoels', # 0x98
'hoelt', # 0x99
'hoelp', # 0x9a
'hoelh', # 0x9b
'hoem', # 0x9c
'hoeb', # 0x9d
'hoebs', # 0x9e
'hoes', # 0x9f
'hoess', # 0xa0
'hoeng', # 0xa1
'hoej', # 0xa2
'hoec', # 0xa3
'hoek', # 0xa4
'hoet', # 0xa5
'hoep', # 0xa6
'hoeh', # 0xa7
'hyo', # 0xa8
'hyog', # 0xa9
'hyogg', # 0xaa
'hyogs', # 0xab
'hyon', # 0xac
'hyonj', # 0xad
'hyonh', # 0xae
'hyod', # 0xaf
'hyol', # 0xb0
'hyolg', # 0xb1
'hyolm', # 0xb2
'hyolb', # 0xb3
'hyols', # 0xb4
'hyolt', # 0xb5
'hyolp', # 0xb6
'hyolh', # 0xb7
'hyom', # 0xb8
'hyob', # 0xb9
'hyobs', # 0xba
'hyos', # 0xbb
'hyoss', # 0xbc
'hyong', # 0xbd
'hyoj', # 0xbe
'hyoc', # 0xbf
'hyok', # 0xc0
'hyot', # 0xc1
'hyop', # 0xc2
'hyoh', # 0xc3
'hu', # 0xc4
'hug', # 0xc5
'hugg', # 0xc6
'hugs', # 0xc7
'hun', # 0xc8
'hunj', # 0xc9
'hunh', # 0xca
'hud', # 0xcb
'hul', # 0xcc
'hulg', # 0xcd
'hulm', # 0xce
'hulb', # 0xcf
'huls', # 0xd0
'hult', # 0xd1
'hulp', # 0xd2
'hulh', # 0xd3
'hum', # 0xd4
'hub', # 0xd5
'hubs', # 0xd6
'hus', # 0xd7
'huss', # 0xd8
'hung', # 0xd9
'huj', # 0xda
'huc', # 0xdb
'huk', # 0xdc
'hut', # 0xdd
'hup', # 0xde
'huh', # 0xdf
'hweo', # 0xe0
'hweog', # 0xe1
'hweogg', # 0xe2
'hweogs', # 0xe3
'hweon', # 0xe4
'hweonj', # 0xe5
'hweonh', # 0xe6
'hweod', # 0xe7
'hweol', # 0xe8
'hweolg', # 0xe9
'hweolm', # 0xea
'hweolb', # 0xeb
'hweols', # 0xec
'hweolt', # 0xed
'hweolp', # 0xee
'hweolh', # 0xef
'hweom', # 0xf0
'hweob', # 0xf1
'hweobs', # 0xf2
'hweos', # 0xf3
'hweoss', # 0xf4
'hweong', # 0xf5
'hweoj', # 0xf6
'hweoc', # 0xf7
'hweok', # 0xf8
'hweot', # 0xf9
'hweop', # 0xfa
'hweoh', # 0xfb
'hwe', # 0xfc
'hweg', # 0xfd
'hwegg', # 0xfe
'hwegs', # 0xff
)
|
gpl-2.0
|
yuewko/neutron
|
neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py
|
15
|
1460
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""floatingip_status
Revision ID: 2eeaf963a447
Revises: e766b19a3bb
Create Date: 2014-01-14 11:58:13.754747
"""
# revision identifiers, used by Alembic.
revision = '2eeaf963a447'
down_revision = 'e766b19a3bb'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade():
if not migration.schema_has_table('floatingips'):
# In the database we are migrating from, the configured plugin
# did not create the floatingips table.
return
op.add_column('floatingips',
sa.Column('last_known_router_id',
sa.String(length=36),
nullable=True))
op.add_column('floatingips',
sa.Column('status',
sa.String(length=16),
nullable=True))
|
apache-2.0
|
nkgilley/home-assistant
|
tests/components/august/test_binary_sensor.py
|
3
|
4944
|
"""The binary_sensor tests for the august platform."""
from homeassistant.components.lock import DOMAIN as LOCK_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_LOCK,
SERVICE_UNLOCK,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from tests.components.august.mocks import (
_create_august_with_devices,
_mock_activities_from_fixture,
_mock_doorbell_from_fixture,
_mock_lock_from_fixture,
)
async def test_doorsense(hass):
"""Test creation of a lock with doorsense and bridge."""
lock_one = await _mock_lock_from_fixture(
hass, "get_lock.online_with_doorsense.json"
)
await _create_august_with_devices(hass, [lock_one])
binary_sensor_online_with_doorsense_name = hass.states.get(
"binary_sensor.online_with_doorsense_name_open"
)
assert binary_sensor_online_with_doorsense_name.state == STATE_ON
data = {ATTR_ENTITY_ID: "lock.online_with_doorsense_name"}
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, data, blocking=True
)
await hass.async_block_till_done()
binary_sensor_online_with_doorsense_name = hass.states.get(
"binary_sensor.online_with_doorsense_name_open"
)
assert binary_sensor_online_with_doorsense_name.state == STATE_ON
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, data, blocking=True
)
await hass.async_block_till_done()
binary_sensor_online_with_doorsense_name = hass.states.get(
"binary_sensor.online_with_doorsense_name_open"
)
assert binary_sensor_online_with_doorsense_name.state == STATE_OFF
async def test_create_doorbell(hass):
"""Test creation of a doorbell."""
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
await _create_august_with_devices(hass, [doorbell_one])
binary_sensor_k98gidt45gul_name_motion = hass.states.get(
"binary_sensor.k98gidt45gul_name_motion"
)
assert binary_sensor_k98gidt45gul_name_motion.state == STATE_OFF
binary_sensor_k98gidt45gul_name_online = hass.states.get(
"binary_sensor.k98gidt45gul_name_online"
)
assert binary_sensor_k98gidt45gul_name_online.state == STATE_ON
binary_sensor_k98gidt45gul_name_ding = hass.states.get(
"binary_sensor.k98gidt45gul_name_ding"
)
assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF
binary_sensor_k98gidt45gul_name_motion = hass.states.get(
"binary_sensor.k98gidt45gul_name_motion"
)
assert binary_sensor_k98gidt45gul_name_motion.state == STATE_OFF
async def test_create_doorbell_offline(hass):
"""Test creation of a doorbell that is offline."""
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.offline.json")
await _create_august_with_devices(hass, [doorbell_one])
binary_sensor_tmt100_name_motion = hass.states.get(
"binary_sensor.tmt100_name_motion"
)
assert binary_sensor_tmt100_name_motion.state == STATE_UNAVAILABLE
binary_sensor_tmt100_name_online = hass.states.get(
"binary_sensor.tmt100_name_online"
)
assert binary_sensor_tmt100_name_online.state == STATE_OFF
binary_sensor_tmt100_name_ding = hass.states.get("binary_sensor.tmt100_name_ding")
assert binary_sensor_tmt100_name_ding.state == STATE_UNAVAILABLE
async def test_create_doorbell_with_motion(hass):
"""Test creation of a doorbell."""
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
activities = await _mock_activities_from_fixture(
hass, "get_activity.doorbell_motion.json"
)
await _create_august_with_devices(hass, [doorbell_one], activities=activities)
binary_sensor_k98gidt45gul_name_motion = hass.states.get(
"binary_sensor.k98gidt45gul_name_motion"
)
assert binary_sensor_k98gidt45gul_name_motion.state == STATE_ON
binary_sensor_k98gidt45gul_name_online = hass.states.get(
"binary_sensor.k98gidt45gul_name_online"
)
assert binary_sensor_k98gidt45gul_name_online.state == STATE_ON
binary_sensor_k98gidt45gul_name_ding = hass.states.get(
"binary_sensor.k98gidt45gul_name_ding"
)
assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF
async def test_doorbell_device_registry(hass):
"""Test creation of a lock with doorsense and bridge ands up in the registry."""
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.offline.json")
await _create_august_with_devices(hass, [doorbell_one])
device_registry = await hass.helpers.device_registry.async_get_registry()
reg_device = device_registry.async_get_device(
identifiers={("august", "tmt100")}, connections=set()
)
assert reg_device.model == "hydra1"
assert reg_device.name == "tmt100 Name"
assert reg_device.manufacturer == "August"
assert reg_device.sw_version == "3.1.0-HYDRC75+201909251139"
|
apache-2.0
|
kitallis/rvhouse
|
site_scons/site_tools/qt4/test/qrc/manual/sconstest-manualwflags.py
|
6
|
1599
|
#!/usr/bin/env python
#
# Copyright (c) 2001-2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Basic test for the Qrc() builder, called explicitly with '-name' flag set.
"""
import TestSCons
test = TestSCons.TestSCons()
test.dir_fixture("image")
test.file_fixture('SConscript-wflags','SConscript')
test.file_fixture('../../qtenv.py')
test.file_fixture('../../../__init__.py','site_scons/site_tools/qt4/__init__.py')
test.run()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-2.0
|
nguyentran/openviber
|
tools/scons-local/scons-local-2.0.1/SCons/Tool/pdf.py
|
61
|
3033
|
"""SCons.Tool.pdf
Common PDF Builder definition for various other Tool modules that use it.
Add an explicit action to run epstopdf to convert .eps files to .pdf
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdf.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Action
import SCons.Builder
import SCons.Tool
import SCons.Util
PDFBuilder = None
EpsPdfAction = SCons.Action.Action('$EPSTOPDFCOM', '$EPSTOPDFCOMSTR')
def generate(env):
try:
env['BUILDERS']['PDF']
except KeyError:
global PDFBuilder
if PDFBuilder is None:
PDFBuilder = SCons.Builder.Builder(action = {},
source_scanner = SCons.Tool.PDFLaTeXScanner,
prefix = '$PDFPREFIX',
suffix = '$PDFSUFFIX',
emitter = {},
source_ext_match = None,
single_source=True)
env['BUILDERS']['PDF'] = PDFBuilder
env['PDFPREFIX'] = ''
env['PDFSUFFIX'] = '.pdf'
# put the epstopdf builder in this routine so we can add it after
# the pdftex builder so that one is the default for no source suffix
def generate2(env):
bld = env['BUILDERS']['PDF']
    #bld.add_action('.ps', EpsPdfAction) # this is covered by direct Ghostscript action in gs.py
bld.add_action('.eps', EpsPdfAction)
env['EPSTOPDF'] = 'epstopdf'
env['EPSTOPDFFLAGS'] = SCons.Util.CLVar('')
env['EPSTOPDFCOM'] = '$EPSTOPDF $EPSTOPDFFLAGS ${SOURCE} --outfile=${TARGET}'
def exists(env):
# This only puts a skeleton Builder in place, so if someone
# references this Tool directly, it's always "available."
return 1
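# Hedged usage sketch (not part of the original module; the file names are
# illustrative): once a Tool such as pdftex has called generate() and
# generate2() on a construction environment, an SConscript can request the
# .eps -> .pdf conversion registered above, which runs $EPSTOPDFCOM:
#
#   env = Environment(tools=['default', 'pdftex'])
#   env.PDF(target='figure.pdf', source='figure.eps')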
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
carolFrohlich/nipype
|
nipype/interfaces/afni/tests/test_auto_Detrend.py
|
12
|
1115
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..preprocess import Detrend
def test_Detrend_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
copyfile=False,
mandatory=True,
position=-1,
),
out_file=dict(argstr='-prefix %s',
name_source='in_file',
name_template='%s_detrend',
),
outputtype=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = Detrend.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Detrend_outputs():
output_map = dict(out_file=dict(),
)
outputs = Detrend.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
bsd-3-clause
|
ubuntustudio-kernel/ubuntu-raring-lowlatency
|
scripts/rt-tester/rt-tester.py
|
11005
|
5307
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
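# Hedged format sketch (not part of the original script; the values are
# illustrative): each non-comment line of a test file read below has the form
# "cmd: opcode: thread-id: data", where cmd is C (command), T (test) or
# W (wait), e.g.:
#
#   C: schedfifo:    0: 80    # make thread 0 SCHED_FIFO, priority 80
#   C: locknowait:   0: 0     # thread 0 acquires lock 0 without waiting
#   W: locked:       0: 0     # wait until lock 0 of thread 0 reports "locked"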
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
gpl-2.0
|
mikemccann/stoqs
|
stoqs/static/OpenLayers-2.11/tools/mergejs.py
|
42
|
7820
|
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2011 OpenLayers contributors / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires?:? (.*)\n" # TODO: Ensure in comment?
class MissingImport(Exception):
"""Exception raised when a listed import is not found in the lib."""
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
def usage(filename):
"""
Displays a usage message.
"""
print "%s [-c <config file>] <output.js> <directory> [...]" % filename
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
    [include]
    # Optional whitelist of files to import; may be left empty
    [exclude]
    3rd/logger.js
    exclude/this/dir
    All four headings are required ([first], [last], [include], [exclude]);
    the [include] section may be empty.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
    The files listed in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
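# Hedged usage sketch (not part of the original script; the file name and
# entries are illustrative): given a configuration file laid out as in the
# docstring above, the parsed sections are exposed as plain lists.
#
#   cfg = Config("openlayers.cfg")
#   cfg.forceFirst   # e.g. ["3rd/prototype.js", "core/application.js"]
#   cfg.exclude      # e.g. ["3rd/logger.js", "exclude/this/dir"]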
def undesired(filepath, excludes):
# exclude file if listed
exclude = filepath in excludes
if not exclude:
# check if directory is listed
for excludepath in excludes:
if not excludepath.endswith("/"):
excludepath += "/"
if filepath.startswith(excludepath):
exclude = True
break
return exclude
def run (sourceDirectory, outputFilename = None, configFile = None):
cfg = None
if configFile:
cfg = Config(configFile)
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
if filepath in cfg.include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (not undesired(filepath, cfg.exclude)):
allFiles.append(filepath)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
print
from toposort import toposort
complete = False
resolution_pass = 1
while not complete:
complete = True
## Resolve the dependencies
print "Resolution pass %s... " % resolution_pass
resolution_pass += 1
for filepath, info in files.items():
for path in info.requires:
if not files.has_key(path):
complete = False
fullpath = os.path.join(sourceDirectory, path).strip()
if os.path.exists(fullpath):
print "Importing: %s" % path
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[path] = SourceFile(path, content) # TODO: Chop path?
else:
raise MissingImport("File '%s' not found (required by '%s')." % (path, filepath))
# create dictionary of dependencies
dependencies = {}
for filepath, info in files.items():
dependencies[filepath] = info.requires
print "Sorting..."
order = toposort(dependencies) #[x for x in toposort(dependencies)]
## Move forced first and last files to the required position
if cfg:
print "Re-ordering files..."
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
print
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
print "Exporting: ", f.filepath
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
print "\nTotal files merged: %d " % len(files)
if outputFilename:
print "\nGenerating: %s" % (outputFilename)
open(outputFilename, "w").write("".join(result))
return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
try:
outputFilename = args[0]
except IndexError:
usage(sys.argv[0])
raise SystemExit
else:
sourceDirectory = args[1]
if not sourceDirectory:
usage(sys.argv[0])
raise SystemExit
configFile = None
if options and options[0][0] == "-c":
configFile = options[0][1]
print "Parsing configuration file: %s" % filename
run( sourceDirectory, outputFilename, configFile )
|
gpl-3.0
|
ryansnowboarder/zulip
|
zerver/filters.py
|
124
|
1058
|
from __future__ import absolute_import
from django.views.debug import SafeExceptionReporterFilter
from django.http import build_request_repr
class ZulipExceptionReporterFilter(SafeExceptionReporterFilter):
def get_post_parameters(self, request):
filtered_post = SafeExceptionReporterFilter.get_post_parameters(self, request).copy()
filtered_vars = ['content', 'secret', 'password', 'key', 'api-key', 'subject', 'stream',
'subscriptions', 'to', 'csrfmiddlewaretoken', 'api_key']
for var in filtered_vars:
if var in filtered_post:
filtered_post[var] = '**********'
return filtered_post
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request,
POST_override=self.get_post_parameters(request),
COOKIES_override="**********",
META_override="**********")
|
apache-2.0
|
cyberplant/scrapy
|
scrapy/responsetypes.py
|
6
|
4781
|
"""
This module implements a class which returns the appropriate Response class
based on different criteria.
"""
from __future__ import absolute_import
from mimetypes import MimeTypes
from pkgutil import get_data
from io import StringIO
import six
from scrapy.http import Response
from scrapy.utils.misc import load_object
from scrapy.utils.python import binary_is_text, to_bytes, to_native_str
class ResponseTypes(object):
CLASSES = {
'text/html': 'scrapy.http.HtmlResponse',
'application/atom+xml': 'scrapy.http.XmlResponse',
'application/rdf+xml': 'scrapy.http.XmlResponse',
'application/rss+xml': 'scrapy.http.XmlResponse',
'application/xhtml+xml': 'scrapy.http.HtmlResponse',
'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',
'application/xml': 'scrapy.http.XmlResponse',
'application/json': 'scrapy.http.TextResponse',
'application/x-json': 'scrapy.http.TextResponse',
'application/javascript': 'scrapy.http.TextResponse',
'application/x-javascript': 'scrapy.http.TextResponse',
'text/xml': 'scrapy.http.XmlResponse',
'text/*': 'scrapy.http.TextResponse',
}
def __init__(self):
self.classes = {}
self.mimetypes = MimeTypes()
mimedata = get_data('scrapy', 'mime.types').decode('utf8')
self.mimetypes.readfp(StringIO(mimedata))
for mimetype, cls in six.iteritems(self.CLASSES):
self.classes[mimetype] = load_object(cls)
def from_mimetype(self, mimetype):
"""Return the most appropriate Response class for the given mimetype"""
if mimetype is None:
return Response
elif mimetype in self.classes:
return self.classes[mimetype]
else:
basetype = "%s/*" % mimetype.split('/')[0]
return self.classes.get(basetype, Response)
def from_content_type(self, content_type, content_encoding=None):
"""Return the most appropriate Response class from an HTTP Content-Type
header """
if content_encoding:
return Response
mimetype = to_native_str(content_type).split(';')[0].strip().lower()
return self.from_mimetype(mimetype)
def from_content_disposition(self, content_disposition):
try:
filename = to_native_str(content_disposition,
encoding='latin-1', errors='replace').split(';')[1].split('=')[1]
filename = filename.strip('"\'')
return self.from_filename(filename)
except IndexError:
return Response
def from_headers(self, headers):
"""Return the most appropriate Response class by looking at the HTTP
headers"""
cls = Response
if b'Content-Type' in headers:
cls = self.from_content_type(
content_type=headers[b'Content-type'],
content_encoding=headers.get(b'Content-Encoding')
)
if cls is Response and b'Content-Disposition' in headers:
cls = self.from_content_disposition(headers[b'Content-Disposition'])
return cls
def from_filename(self, filename):
"""Return the most appropriate Response class from a file name"""
mimetype, encoding = self.mimetypes.guess_type(filename)
if mimetype and not encoding:
return self.from_mimetype(mimetype)
else:
return Response
def from_body(self, body):
"""Try to guess the appropriate response based on the body content.
This method is a bit magic and could be improved in the future, but
it's not meant to be used except for special cases where response types
        cannot be guessed using more straightforward methods."""
chunk = body[:5000]
chunk = to_bytes(chunk)
if not binary_is_text(chunk):
return self.from_mimetype('application/octet-stream')
elif b"<html>" in chunk.lower():
return self.from_mimetype('text/html')
elif b"<?xml" in chunk.lower():
return self.from_mimetype('text/xml')
else:
return self.from_mimetype('text')
def from_args(self, headers=None, url=None, filename=None, body=None):
"""Guess the most appropriate Response class based on
the given arguments."""
cls = Response
if headers is not None:
cls = self.from_headers(headers)
if cls is Response and url is not None:
cls = self.from_filename(url)
if cls is Response and filename is not None:
cls = self.from_filename(filename)
if cls is Response and body is not None:
cls = self.from_body(body)
return cls
responsetypes = ResponseTypes()
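# Hedged usage sketch (not part of the original module; the values are
# illustrative and the reprs are abbreviated):
#
#   >>> responsetypes.from_content_type('text/html; charset=utf-8')
#   <class 'scrapy.http.response.html.HtmlResponse'>
#   >>> responsetypes.from_filename('feed.xml')
#   <class 'scrapy.http.response.xml.XmlResponse'>
#   >>> responsetypes.from_body(b'\x89PNG\r\n\x1a\n')   # binary body falls back to Response
#   <class 'scrapy.http.response.Response'>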
|
bsd-3-clause
|
AnhellO/DAS_Sistemas
|
Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/pip/commands/freeze.py
|
342
|
2835
|
from __future__ import absolute_import
import sys
import pip
from pip.compat import stdlib_pkgs
from pip.basecommand import Command
from pip.operations.freeze import freeze
from pip.wheel import WheelCache
DEV_PKGS = ('pip', 'setuptools', 'distribute', 'wheel')
class FreezeCommand(Command):
"""
Output installed packages in requirements format.
    Packages are listed in a case-insensitive sorted order.
"""
name = 'freeze'
usage = """
%prog [options]"""
summary = 'Output installed packages in requirements format.'
log_streams = ("ext://sys.stderr", "ext://sys.stderr")
def __init__(self, *args, **kw):
super(FreezeCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help="Use the order in the given requirements file and its "
"comments when generating output. This option can be "
"used multiple times.")
self.cmd_opts.add_option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='URL',
help='URL for finding packages, which will be added to the '
'output.')
self.cmd_opts.add_option(
'-l', '--local',
dest='local',
action='store_true',
default=False,
help='If in a virtualenv that has global access, do not output '
'globally-installed packages.')
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
self.cmd_opts.add_option(
'--all',
dest='freeze_all',
action='store_true',
help='Do not skip these packages in the output:'
' %s' % ', '.join(DEV_PKGS))
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
format_control = pip.index.FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
skip = set(stdlib_pkgs)
if not options.freeze_all:
skip.update(DEV_PKGS)
freeze_kwargs = dict(
requirement=options.requirements,
find_links=options.find_links,
local_only=options.local,
user_only=options.user,
skip_regex=options.skip_requirements_regex,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
skip=skip)
for line in freeze(**freeze_kwargs):
sys.stdout.write(line + '\n')
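# Hedged output sketch (not part of the original module; package names and
# versions are illustrative): each line written above is a single requirement
# specifier, so `pip freeze` output looks like
#
#   requests==2.9.1
#   six==1.10.0
#
# and can be fed back to `pip install -r requirements.txt`.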
|
mit
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/desktop/core/ext-py/Django-1.6.10/tests/comment_tests/tests/test_comment_form.py
|
53
|
3001
|
from __future__ import absolute_import
import time
from django.conf import settings
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.models import Comment
from . import CommentTestCase
from ..models import Article
class CommentFormTests(CommentTestCase):
def testInit(self):
f = CommentForm(Article.objects.get(pk=1))
self.assertEqual(f.initial['content_type'], str(Article._meta))
self.assertEqual(f.initial['object_pk'], "1")
self.assertNotEqual(f.initial['security_hash'], None)
self.assertNotEqual(f.initial['timestamp'], None)
def testValidPost(self):
a = Article.objects.get(pk=1)
f = CommentForm(a, data=self.getValidData(a))
self.assertTrue(f.is_valid(), f.errors)
return f
def tamperWithForm(self, **kwargs):
a = Article.objects.get(pk=1)
d = self.getValidData(a)
d.update(kwargs)
f = CommentForm(Article.objects.get(pk=1), data=d)
self.assertFalse(f.is_valid())
return f
def testHoneypotTampering(self):
self.tamperWithForm(honeypot="I am a robot")
def testTimestampTampering(self):
self.tamperWithForm(timestamp=str(time.time() - 28800))
def testSecurityHashTampering(self):
self.tamperWithForm(security_hash="Nobody expects the Spanish Inquisition!")
def testContentTypeTampering(self):
self.tamperWithForm(content_type="auth.user")
def testObjectPKTampering(self):
self.tamperWithForm(object_pk="3")
def testSecurityErrors(self):
f = self.tamperWithForm(honeypot="I am a robot")
self.assertTrue("honeypot" in f.security_errors())
def testGetCommentObject(self):
f = self.testValidPost()
c = f.get_comment_object()
self.assertIsInstance(c, Comment)
self.assertEqual(c.content_object, Article.objects.get(pk=1))
self.assertEqual(c.comment, "This is my comment")
c.save()
self.assertEqual(Comment.objects.count(), 1)
def testProfanities(self):
"""Test COMMENTS_ALLOW_PROFANITIES and PROFANITIES_LIST settings"""
a = Article.objects.get(pk=1)
d = self.getValidData(a)
# Save settings in case other tests need 'em
saved = settings.PROFANITIES_LIST, settings.COMMENTS_ALLOW_PROFANITIES
# Don't wanna swear in the unit tests if we don't have to...
settings.PROFANITIES_LIST = ["rooster"]
# Try with COMMENTS_ALLOW_PROFANITIES off
settings.COMMENTS_ALLOW_PROFANITIES = False
f = CommentForm(a, data=dict(d, comment="What a rooster!"))
self.assertFalse(f.is_valid())
# Now with COMMENTS_ALLOW_PROFANITIES on
settings.COMMENTS_ALLOW_PROFANITIES = True
f = CommentForm(a, data=dict(d, comment="What a rooster!"))
self.assertTrue(f.is_valid())
# Restore settings
settings.PROFANITIES_LIST, settings.COMMENTS_ALLOW_PROFANITIES = saved
|
gpl-2.0
|
wndias/bc.repository
|
script.module.youtube.dl/lib/youtube_dl/extractor/newgrounds.py
|
150
|
1261
|
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
class NewgroundsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?newgrounds\.com/audio/listen/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.newgrounds.com/audio/listen/549479',
'md5': 'fe6033d297591288fa1c1f780386f07a',
'info_dict': {
'id': '549479',
'ext': 'mp3',
'title': 'B7 - BusMode',
'uploader': 'Burn7',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
music_id = mobj.group('id')
webpage = self._download_webpage(url, music_id)
title = self._html_search_regex(
r',"name":"([^"]+)",', webpage, 'music title')
uploader = self._html_search_regex(
r',"artist":"([^"]+)",', webpage, 'music uploader')
music_url_json_string = self._html_search_regex(
r'({"url":"[^"]+"),', webpage, 'music url') + '}'
music_url_json = json.loads(music_url_json_string)
music_url = music_url_json['url']
return {
'id': music_id,
'title': title,
'url': music_url,
'uploader': uploader,
}
|
gpl-2.0
|
40223240/2015cdb_g3
|
static/Brython3.1.0-20150301-090019/Lib/xml/dom/minidom.py
|
727
|
66854
|
"""Simple implementation of the Level 1 DOM.
Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import io
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
namespaceURI = None # this is non-null only for elements and attributes
parentNode = None
ownerDocument = None
nextSibling = None
previousSibling = None
prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
def __bool__(self):
return True
def toxml(self, encoding=None):
return self.toprettyxml("", "", encoding)
def toprettyxml(self, indent="\t", newl="\n", encoding=None):
if encoding is None:
writer = io.StringIO()
else:
writer = io.TextIOWrapper(io.BytesIO(),
encoding=encoding,
errors="xmlcharrefreplace",
newline='\n')
if self.nodeType == Node.DOCUMENT_NODE:
# Can pass encoding only to document, to put it into XML header
self.writexml(writer, "", indent, newl, encoding)
else:
self.writexml(writer, "", indent, newl)
if encoding is None:
return writer.getvalue()
else:
return writer.detach().getvalue()
def hasChildNodes(self):
return bool(self.childNodes)
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.insertBefore(c, refChild)
### The DOM does not clearly specify what to return in this case
return newChild
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
if newChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index-1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, node):
if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(node.childNodes):
self.appendChild(c)
### The DOM does not clearly specify what to return in this case
return node
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
elif node.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
if node.parentNode is not None:
node.parentNode.removeChild(node)
_append_child(self, node)
node.nextSibling = None
return node
def replaceChild(self, newChild, oldChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
refChild = oldChild.nextSibling
self.removeChild(oldChild)
return self.insertBefore(newChild, refChild)
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild is oldChild:
return
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
try:
index = self.childNodes.index(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes[index] = newChild
newChild.parentNode = self
oldChild.parentNode = None
if (newChild.nodeType in _nodeTypes_with_children
or oldChild.nodeType in _nodeTypes_with_children):
_clear_id_cache(self)
newChild.nextSibling = oldChild.nextSibling
newChild.previousSibling = oldChild.previousSibling
oldChild.nextSibling = None
oldChild.previousSibling = None
if newChild.previousSibling:
newChild.previousSibling.nextSibling = newChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
return oldChild
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
oldChild.parentNode = None
return oldChild
def normalize(self):
L = []
for child in self.childNodes:
if child.nodeType == Node.TEXT_NODE:
if not child.data:
# empty text node; discard
if L:
L[-1].nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = child.previousSibling
child.unlink()
elif L and L[-1].nodeType == child.nodeType:
# collapse text node
node = L[-1]
node.data = node.data + child.data
node.nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = node
child.unlink()
else:
L.append(child)
else:
L.append(child)
if child.nodeType == Node.ELEMENT_NODE:
child.normalize()
self.childNodes[:] = L
def cloneNode(self, deep):
return _clone_node(self, deep, self.ownerDocument or self)
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def _get_localName(self):
# Overridden in Element and Attr where localName can be Non-Null
return None
# Node interfaces from Level 3 (WD 9 April 2002)
def isSameNode(self, other):
return self is other
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return None
# The "user data" functions use a dictionary that is only present
# if some user data has been set, so be careful not to assume it
# exists.
def getUserData(self, key):
try:
return self._user_data[key][0]
except (AttributeError, KeyError):
return None
def setUserData(self, key, data, handler):
old = None
try:
d = self._user_data
except AttributeError:
d = {}
self._user_data = d
if key in d:
old = d[key][0]
if data is None:
# ignore handlers passed for None
handler = None
if old is not None:
del d[key]
else:
d[key] = (data, handler)
return old
def _call_user_data_handler(self, operation, src, dst):
if hasattr(self, "_user_data"):
for key, (data, handler) in list(self._user_data.items()):
if handler is not None:
handler.handle(operation, key, data, src, dst)
# minidom-specific API:
def unlink(self):
self.parentNode = self.ownerDocument = None
if self.childNodes:
for child in self.childNodes:
child.unlink()
self.childNodes = NodeList()
self.previousSibling = None
self.nextSibling = None
# A Node is its own context manager, to ensure that an unlink() call occurs.
# This is similar to how a file object works.
def __enter__(self):
return self
def __exit__(self, et, ev, tb):
self.unlink()
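# Hedged usage sketch (not part of the original module; the XML snippet is
# illustrative): the context-manager protocol above guarantees unlink() is
# called, breaking parent/child reference cycles, even if an error occurs.
#
#   with xml.dom.minidom.parseString('<root><a/></root>') as doc:
#       name = doc.documentElement.tagName   # "root"
#   # doc.unlink() has run here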
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.previousSibling = last
last.nextSibling = node
childNodes.append(node)
node.parentNode = self
def _in_document(node):
# return True iff node is part of a document tree
while node is not None:
if node.nodeType == Node.DOCUMENT_NODE:
return True
node = node.parentNode
return False
def _write_data(writer, data):
"Writes datachars to writer."
if data:
data = data.replace("&", "&").replace("<", "<"). \
replace("\"", """).replace(">", ">")
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE and \
(name == "*" or node.tagName == name):
rc.append(node)
_get_elements_by_tagName_helper(node, name, rc)
return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if ((localName == "*" or node.localName == localName) and
(nsURI == "*" or node.namespaceURI == nsURI)):
rc.append(node)
_get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
return rc
class DocumentFragment(Node):
nodeType = Node.DOCUMENT_FRAGMENT_NODE
nodeName = "#document-fragment"
nodeValue = None
attributes = None
parentNode = None
_child_node_types = (Node.ELEMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.NOTATION_NODE)
def __init__(self):
self.childNodes = NodeList()
class Attr(Node):
__slots__=('_name', '_value', 'namespaceURI',
'_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement')
nodeType = Node.ATTRIBUTE_NODE
attributes = None
specified = False
_is_id = False
_child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
prefix=None):
self.ownerElement = None
self._name = qName
self.namespaceURI = namespaceURI
self._prefix = prefix
self.childNodes = NodeList()
# Add the single child node that represents the value of the attr
self.childNodes.append(Text())
# nodeValue and value are set elsewhere
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.nodeName.split(":", 1)[-1]
def _get_specified(self):
return self.specified
def _get_name(self):
return self._name
def _set_name(self, value):
self._name = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
nodeName = name = property(_get_name, _set_name)
def _get_value(self):
return self._value
def _set_value(self, value):
self._value = value
self.childNodes[0].data = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
nodeValue = value = property(_get_value, _set_value)
def _get_prefix(self):
return self._prefix
def _set_prefix(self, prefix):
nsuri = self.namespaceURI
if prefix == "xmlns":
if nsuri and nsuri != XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix for the wrong namespace")
self._prefix = prefix
if prefix is None:
newName = self.localName
else:
newName = "%s:%s" % (prefix, self.localName)
if self.ownerElement:
_clear_id_cache(self.ownerElement)
self.name = newName
prefix = property(_get_prefix, _set_prefix)
def unlink(self):
# This implementation does not call the base implementation
# since most of that is not needed, and the expense of the
# method call is not warranted. We duplicate the removal of
# children, but that's all we needed from the base class.
elem = self.ownerElement
if elem is not None:
del elem._attrs[self.nodeName]
del elem._attrsNS[(self.namespaceURI, self.localName)]
if self._is_id:
self._is_id = False
elem._magic_id_nodes -= 1
self.ownerDocument._magic_id_count -= 1
for child in self.childNodes:
child.unlink()
del self.childNodes[:]
def _get_isId(self):
if self._is_id:
return True
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return False
info = doc._get_elem_info(elem)
if info is None:
return False
if self.namespaceURI:
return info.isIdNS(self.namespaceURI, self.localName)
else:
return info.isId(self.nodeName)
def _get_schemaType(self):
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return _no_type
info = doc._get_elem_info(elem)
if info is None:
return _no_type
if self.namespaceURI:
return info.getAttributeTypeNS(self.namespaceURI, self.localName)
else:
return info.getAttributeType(self.nodeName)
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
Ordering is imposed artificially and does not reflect the order of
attributes as found in an input document.
"""
__slots__ = ('_attrs', '_attrsNS', '_ownerElement')
def __init__(self, attrs, attrsNS, ownerElement):
self._attrs = attrs
self._attrsNS = attrsNS
self._ownerElement = ownerElement
def _get_length(self):
return len(self._attrs)
def item(self, index):
try:
return self[list(self._attrs.keys())[index]]
except IndexError:
return None
def items(self):
L = []
for node in self._attrs.values():
L.append((node.nodeName, node.value))
return L
def itemsNS(self):
L = []
for node in self._attrs.values():
L.append(((node.namespaceURI, node.localName), node.value))
return L
def __contains__(self, key):
if isinstance(key, str):
return key in self._attrs
else:
return key in self._attrsNS
def keys(self):
return self._attrs.keys()
def keysNS(self):
return self._attrsNS.keys()
def values(self):
return self._attrs.values()
def get(self, name, value=None):
return self._attrs.get(name, value)
__len__ = _get_length
def _cmp(self, other):
if self._attrs is getattr(other, "_attrs", None):
return 0
else:
return (id(self) > id(other)) - (id(self) < id(other))
def __eq__(self, other):
return self._cmp(other) == 0
def __ge__(self, other):
return self._cmp(other) >= 0
def __gt__(self, other):
return self._cmp(other) > 0
def __le__(self, other):
return self._cmp(other) <= 0
def __lt__(self, other):
return self._cmp(other) < 0
def __ne__(self, other):
return self._cmp(other) != 0
def __getitem__(self, attname_or_tuple):
if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
# same as set
def __setitem__(self, attname, value):
if isinstance(value, str):
try:
node = self._attrs[attname]
except KeyError:
node = Attr(attname)
node.ownerDocument = self._ownerElement.ownerDocument
self.setNamedItem(node)
node.value = value
else:
if not isinstance(value, Attr):
raise TypeError("value must be a string or Attr object")
node = value
self.setNamedItem(node)
def getNamedItem(self, name):
try:
return self._attrs[name]
except KeyError:
return None
def getNamedItemNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)]
except KeyError:
return None
def removeNamedItem(self, name):
n = self.getNamedItem(name)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrs[n.nodeName]
del self._attrsNS[(n.namespaceURI, n.localName)]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def removeNamedItemNS(self, namespaceURI, localName):
n = self.getNamedItemNS(namespaceURI, localName)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrsNS[(n.namespaceURI, n.localName)]
del self._attrs[n.nodeName]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def setNamedItem(self, node):
if not isinstance(node, Attr):
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
old = self._attrs.get(node.name)
if old:
old.unlink()
self._attrs[node.name] = node
self._attrsNS[(node.namespaceURI, node.localName)] = node
node.ownerElement = self._ownerElement
_clear_id_cache(node.ownerElement)
return old
def setNamedItemNS(self, node):
return self.setNamedItem(node)
def __delitem__(self, attname_or_tuple):
node = self[attname_or_tuple]
_clear_id_cache(node.ownerElement)
node.unlink()
def __getstate__(self):
return self._attrs, self._attrsNS, self._ownerElement
def __setstate__(self, state):
self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
doc="Number of nodes in the NamedNodeMap.")
AttributeList = NamedNodeMap
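# Hedged usage sketch (not part of the original module; the XML snippet is
# illustrative, and parseString is defined further down in this module):
# mutations on the map write through to the owning element's dictionaries.
#
#   >>> elem = parseString('<item id="7"/>').documentElement
#   >>> elem.attributes['id'].value
#   '7'
#   >>> elem.attributes['id'] = '8'
#   >>> elem.getAttribute('id')
#   '8'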
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
__slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix',
'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS',
'nextSibling', 'previousSibling')
nodeType = Node.ELEMENT_NODE
nodeValue = None
schemaType = _no_type
_magic_id_nodes = 0
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
localName=None):
self.parentNode = None
self.tagName = self.nodeName = tagName
self.prefix = prefix
self.namespaceURI = namespaceURI
self.childNodes = NodeList()
self.nextSibling = self.previousSibling = None
# Attribute dictionaries are lazily created
# attributes are double-indexed:
# tagName -> Attribute
# URI,localName -> Attribute
        # in the future: consider lazy generation of attribute objects;
        # this is too tricky for now because of headaches with namespaces.
self._attrs = None
self._attrsNS = None
def _ensure_attributes(self):
if self._attrs is None:
self._attrs = {}
self._attrsNS = {}
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.tagName.split(":", 1)[-1]
def _get_tagName(self):
return self.tagName
def unlink(self):
if self._attrs is not None:
for attr in list(self._attrs.values()):
attr.unlink()
self._attrs = None
self._attrsNS = None
Node.unlink(self)
def getAttribute(self, attname):
if self._attrs is None:
return ""
try:
return self._attrs[attname].value
except KeyError:
return ""
def getAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return ""
try:
return self._attrsNS[(namespaceURI, localName)].value
except KeyError:
return ""
def setAttribute(self, attname, value):
attr = self.getAttributeNode(attname)
if attr is None:
attr = Attr(attname)
attr.value = value # also sets nodeValue
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
elif value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
def setAttributeNS(self, namespaceURI, qualifiedName, value):
prefix, localname = _nssplit(qualifiedName)
attr = self.getAttributeNodeNS(namespaceURI, localname)
if attr is None:
attr = Attr(qualifiedName, namespaceURI, localname, prefix)
attr.value = value
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
else:
if value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
if attr.prefix != prefix:
attr.prefix = prefix
attr.nodeName = qualifiedName
def getAttributeNode(self, attrname):
if self._attrs is None:
return None
return self._attrs.get(attrname)
def getAttributeNodeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return None
return self._attrsNS.get((namespaceURI, localName))
def setAttributeNode(self, attr):
if attr.ownerElement not in (None, self):
raise xml.dom.InuseAttributeErr("attribute node already owned")
self._ensure_attributes()
old1 = self._attrs.get(attr.name, None)
if old1 is not None:
self.removeAttributeNode(old1)
old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
if old2 is not None and old2 is not old1:
self.removeAttributeNode(old2)
_set_attribute_node(self, attr)
if old1 is not attr:
# It might have already been part of this node, in which case
# it doesn't represent a change, and should not be returned.
return old1
if old2 is not attr:
return old2
setAttributeNodeNS = setAttributeNode
def removeAttribute(self, name):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrs[name]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrsNS[(namespaceURI, localName)]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNode(self, node):
if node is None:
raise xml.dom.NotFoundErr()
try:
self._attrs[node.name]
except KeyError:
raise xml.dom.NotFoundErr()
_clear_id_cache(self)
node.unlink()
# Restore this since the node is still useful and otherwise
# unlinked
node.ownerDocument = self.ownerDocument
removeAttributeNodeNS = removeAttributeNode
def hasAttribute(self, name):
if self._attrs is None:
return False
return name in self._attrs
def hasAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return False
return (namespaceURI, localName) in self._attrsNS
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def __repr__(self):
return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent+addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s"%(newl))
def _get_attributes(self):
self._ensure_attributes()
return NamedNodeMap(self._attrs, self._attrsNS, self)
def hasAttributes(self):
if self._attrs:
return True
else:
return False
# DOM Level 3 attributes, based on the 22 Oct 2002 draft
def setIdAttribute(self, name):
idAttr = self.getAttributeNode(name)
self.setIdAttributeNode(idAttr)
def setIdAttributeNS(self, namespaceURI, localName):
idAttr = self.getAttributeNodeNS(namespaceURI, localName)
self.setIdAttributeNode(idAttr)
def setIdAttributeNode(self, idAttr):
if idAttr is None or not self.isSameNode(idAttr.ownerElement):
raise xml.dom.NotFoundErr()
if _get_containing_entref(self) is not None:
raise xml.dom.NoModificationAllowedErr()
if not idAttr._is_id:
idAttr._is_id = True
self._magic_id_nodes += 1
self.ownerDocument._magic_id_count += 1
_clear_id_cache(self)
defproperty(Element, "attributes",
doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
_clear_id_cache(element)
element._ensure_attributes()
element._attrs[attr.name] = attr
element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
# This creates a circular reference, but Element.unlink()
# breaks the cycle since the references to the attribute
# dictionaries are tossed.
attr.ownerElement = element
class Childless:
"""Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
__slots__ = ()
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes cannot have children")
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
def removeChild(self, oldChild):
raise xml.dom.NotFoundErr(
self.nodeName + " nodes do not have children")
def normalize(self):
# For childless nodes, normalize() has nothing to do.
pass
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
__slots__ = ('target', 'data')
def __init__(self, target, data):
self.target = target
self.data = data
# nodeValue is an alias for data
def _get_nodeValue(self):
return self.data
def _set_nodeValue(self, value):
        self.data = value
nodeValue = property(_get_nodeValue, _set_nodeValue)
# nodeName is an alias for target
def _get_nodeName(self):
return self.target
def _set_nodeName(self, value):
self.target = value
nodeName = property(_get_nodeName, _set_nodeName)
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
__slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling')
def __init__(self):
self.ownerDocument = self.parentNode = None
self.previousSibling = self.nextSibling = None
self._data = ''
Node.__init__(self)
def _get_length(self):
return len(self.data)
__len__ = _get_length
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
data = nodeValue = property(_get_data, _set_data)
def __repr__(self):
data = self.data
if len(data) > 10:
dotdotdot = "..."
else:
dotdotdot = ""
return '<DOM %s node "%r%s">' % (
self.__class__.__name__, data[0:10], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
return self.data[offset:offset+count]
def appendData(self, arg):
self.data = self.data + arg
def insertData(self, offset, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if arg:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = self.data[:offset] + self.data[offset+count:]
def replaceData(self, offset, count, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
__slots__ = ()
nodeType = Node.TEXT_NODE
nodeName = "#text"
attributes = None
def splitText(self, offset):
if offset < 0 or offset > len(self.data):
raise xml.dom.IndexSizeErr("illegal offset value")
newText = self.__class__()
newText.data = self.data[offset:]
newText.ownerDocument = self.ownerDocument
next = self.nextSibling
if self.parentNode and self in self.parentNode.childNodes:
if next is None:
self.parentNode.appendChild(newText)
else:
self.parentNode.insertBefore(newText, next)
self.data = self.data[:offset]
return newText
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s" % (indent, self.data, newl))
# DOM Level 3 (WD 9 April 2002)
def _get_wholeText(self):
L = [self.data]
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.insert(0, n.data)
n = n.previousSibling
else:
break
n = self.nextSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.append(n.data)
n = n.nextSibling
else:
break
return ''.join(L)
def replaceWholeText(self, content):
# XXX This needs to be seriously changed if minidom ever
# supports EntityReference nodes.
parent = self.parentNode
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.previousSibling
parent.removeChild(n)
n = next
else:
break
n = self.nextSibling
if not content:
parent.removeChild(self)
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.nextSibling
parent.removeChild(n)
n = next
else:
break
if content:
self.data = content
return self
else:
return None
def _get_isWhitespaceInElementContent(self):
if self.data.strip():
return False
elem = _get_containing_element(self)
if elem is None:
return False
info = self.ownerDocument._get_elem_info(elem)
if info is None:
return False
else:
return info.isElementContent()
defproperty(Text, "isWhitespaceInElementContent",
doc="True iff this text node contains only whitespace"
" and is in element content.")
defproperty(Text, "wholeText",
doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ELEMENT_NODE:
return c
c = c.parentNode
return None
def _get_containing_entref(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ENTITY_REFERENCE_NODE:
return c
c = c.parentNode
return None
class Comment(CharacterData):
nodeType = Node.COMMENT_NODE
nodeName = "#comment"
def __init__(self, data):
CharacterData.__init__(self)
self._data = data
def writexml(self, writer, indent="", addindent="", newl=""):
if "--" in self.data:
raise ValueError("'--' is not allowed in a comment node")
writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
__slots__ = ()
nodeType = Node.CDATA_SECTION_NODE
nodeName = "#cdata-section"
def writexml(self, writer, indent="", addindent="", newl=""):
if self.data.find("]]>") >= 0:
raise ValueError("']]>' not allowed in a CDATA section")
writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
__slots__ = '_seq',
def __init__(self, seq=()):
# seq should be a list or tuple
self._seq = seq
def __len__(self):
return len(self._seq)
def _get_length(self):
return len(self._seq)
def getNamedItem(self, name):
for n in self._seq:
if n.nodeName == name:
return n
def getNamedItemNS(self, namespaceURI, localName):
for n in self._seq:
if n.namespaceURI == namespaceURI and n.localName == localName:
return n
def __getitem__(self, name_or_tuple):
if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
if node is None:
raise KeyError(name_or_tuple)
return node
def item(self, index):
if index < 0:
return None
try:
return self._seq[index]
except IndexError:
return None
def removeNamedItem(self, name):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def removeNamedItemNS(self, namespaceURI, localName):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItem(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItemNS(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def __getstate__(self):
return [self._seq]
def __setstate__(self, state):
self._seq = state[0]
defproperty(ReadOnlySequentialNamedNodeMap, "length",
doc="Number of entries in the NamedNodeMap.")
class Identified:
"""Mix-in class that supports the publicId and systemId attributes."""
__slots__ = 'publicId', 'systemId'
def _identified_mixin_init(self, publicId, systemId):
self.publicId = publicId
self.systemId = systemId
def _get_publicId(self):
return self.publicId
def _get_systemId(self):
return self.systemId
class DocumentType(Identified, Childless, Node):
nodeType = Node.DOCUMENT_TYPE_NODE
nodeValue = None
name = None
publicId = None
systemId = None
internalSubset = None
def __init__(self, qualifiedName):
self.entities = ReadOnlySequentialNamedNodeMap()
self.notations = ReadOnlySequentialNamedNodeMap()
if qualifiedName:
prefix, localname = _nssplit(qualifiedName)
self.name = localname
self.nodeName = self.name
def _get_internalSubset(self):
return self.internalSubset
def cloneNode(self, deep):
if self.ownerDocument is None:
            # not owned by a document, so it is OK to clone
clone = DocumentType(None)
clone.name = self.name
clone.nodeName = self.name
operation = xml.dom.UserDataHandler.NODE_CLONED
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in self.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
clone.notations._seq.append(notation)
n._call_user_data_handler(operation, n, notation)
for e in self.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
                    e._call_user_data_handler(operation, e, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
return None
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("<!DOCTYPE ")
writer.write(self.name)
if self.publicId:
writer.write("%s PUBLIC '%s'%s '%s'"
% (newl, self.publicId, newl, self.systemId))
elif self.systemId:
writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
if self.internalSubset is not None:
writer.write(" [")
writer.write(self.internalSubset)
writer.write("]")
writer.write(">"+newl)
class Entity(Identified, Node):
attributes = None
nodeType = Node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
def __init__(self, name, publicId, systemId, notation):
self.nodeName = name
self.notationName = notation
self.childNodes = NodeList()
self._identified_mixin_init(publicId, systemId)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_encoding(self):
return self.encoding
def _get_version(self):
return self.version
def appendChild(self, newChild):
raise xml.dom.HierarchyRequestErr(
"cannot append children to an entity node")
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
"cannot insert children below an entity node")
def removeChild(self, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot remove children from an entity node")
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
nodeType = Node.NOTATION_NODE
nodeValue = None
def __init__(self, name, publicId, systemId):
self.nodeName = name
self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
_features = [("core", "1.0"),
("core", "2.0"),
("core", None),
("xml", "1.0"),
("xml", "2.0"),
("xml", None),
("ls-load", "3.0"),
("ls-load", None),
]
def hasFeature(self, feature, version):
if version == "":
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.parentNode is not None:
raise xml.dom.WrongDocumentErr(
"doctype object owned by another DOM tree")
doc = self._create_document()
add_root_element = not (namespaceURI is None
and qualifiedName is None
and doctype is None)
if not qualifiedName and add_root_element:
# The spec is unclear what to raise here; SyntaxErr
# would be the other obvious candidate. Since Xerces raises
# InvalidCharacterErr, and since SyntaxErr is not listed
# for createDocument, that seems to be the better choice.
# XXX: need to check for illegal characters here and in
# createElement.
            # DOM Level III clears this up when talking about the return value
            # of this function.  If namespaceURI, qName and DocType are
            # Null, the document is returned without a document element.
            # Otherwise, if doctype or namespaceURI are not None,
            # we go back to the above problem.
raise xml.dom.InvalidCharacterErr("Element with no name")
if add_root_element:
prefix, localname = _nssplit(qualifiedName)
if prefix == "xml" \
and namespaceURI != "http://www.w3.org/XML/1998/namespace":
raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise xml.dom.NamespaceErr(
"illegal use of prefix without namespaces")
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
if doctype:
doctype.parentNode = doctype.ownerDocument = doc
doc.doctype = doctype
doc.implementation = self
return doc
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
# DOM Level 3 (WD 9 April 2002)
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return None
# internal
def _create_document(self):
return Document()
class ElementInfo(object):
"""Object that represents content-model information for an element.
    This implementation is not expected to be used in practice; DOM
    builders should provide implementations which do the right thing
    using the information available to them.
"""
__slots__ = 'tagName',
def __init__(self, name):
self.tagName = name
def getAttributeType(self, aname):
return _no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return _no_type
def isElementContent(self):
return False
def isEmpty(self):
"""Returns true iff this element is declared to have an EMPTY
content model."""
return False
def isId(self, aname):
"""Returns true iff the named attribute is a DTD-style ID."""
return False
def isIdNS(self, namespaceURI, localName):
"""Returns true iff the identified attribute is a DTD-style ID."""
return False
def __getstate__(self):
return self.tagName
def __setstate__(self, state):
self.tagName = state
def _clear_id_cache(node):
if node.nodeType == Node.DOCUMENT_NODE:
node._id_cache.clear()
node._id_search_stack = None
elif _in_document(node):
node.ownerDocument._id_cache.clear()
node.ownerDocument._id_search_stack= None
class Document(Node, DocumentLS):
__slots__ = ('_elem_info', 'doctype',
'_id_search_stack', 'childNodes', '_id_cache')
_child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
implementation = DOMImplementation()
nodeType = Node.DOCUMENT_NODE
nodeName = "#document"
nodeValue = None
attributes = None
parentNode = None
previousSibling = nextSibling = None
# Document attributes from Level 3 (WD 9 April 2002)
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
_magic_id_count = 0
def __init__(self):
self.doctype = None
self.childNodes = NodeList()
# mapping of (namespaceURI, localName) -> ElementInfo
# and tagName -> ElementInfo
self._elem_info = {}
self._id_cache = {}
self._id_search_stack = None
def _get_elem_info(self, element):
if element.namespaceURI:
key = element.namespaceURI, element.localName
else:
key = element.tagName
return self._elem_info.get(key)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_doctype(self):
return self.doctype
def _get_documentURI(self):
return self.documentURI
def _get_encoding(self):
return self.encoding
def _get_errorHandler(self):
return self.errorHandler
def _get_standalone(self):
return self.standalone
def _get_strictErrorChecking(self):
return self.strictErrorChecking
def _get_version(self):
return self.version
def appendChild(self, node):
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
if node.parentNode is not None:
# This needs to be done before the next test since this
# may *be* the document element, in which case it should
# end up re-ordered to the end.
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE \
and self._get_documentElement():
raise xml.dom.HierarchyRequestErr(
"two document elements disallowed")
return Node.appendChild(self, node)
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
oldChild.nextSibling = oldChild.previousSibling = None
oldChild.parentNode = None
if self.documentElement is oldChild:
self.documentElement = None
return oldChild
def _get_documentElement(self):
for node in self.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
return node
def unlink(self):
if self.doctype is not None:
self.doctype.unlink()
self.doctype = None
Node.unlink(self)
def cloneNode(self, deep):
if not deep:
return None
clone = self.implementation.createDocument(None, None, None)
clone.encoding = self.encoding
clone.standalone = self.standalone
clone.version = self.version
for n in self.childNodes:
childclone = _clone_node(n, deep, clone)
assert childclone.ownerDocument.isSameNode(clone)
clone.childNodes.append(childclone)
if childclone.nodeType == Node.DOCUMENT_NODE:
assert clone.documentElement is None
elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
assert clone.doctype is None
clone.doctype = childclone
childclone.parentNode = clone
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
self, clone)
return clone
def createDocumentFragment(self):
d = DocumentFragment()
d.ownerDocument = self
return d
def createElement(self, tagName):
e = Element(tagName)
e.ownerDocument = self
return e
def createTextNode(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
t = Text()
t.data = data
t.ownerDocument = self
return t
def createCDATASection(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
c = CDATASection()
c.data = data
c.ownerDocument = self
return c
def createComment(self, data):
c = Comment(data)
c.ownerDocument = self
return c
def createProcessingInstruction(self, target, data):
p = ProcessingInstruction(target, data)
p.ownerDocument = self
return p
def createAttribute(self, qName):
a = Attr(qName)
a.ownerDocument = self
a.value = ""
return a
def createElementNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
e = Element(qualifiedName, namespaceURI, prefix)
e.ownerDocument = self
return e
def createAttributeNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
a = Attr(qualifiedName, namespaceURI, localName, prefix)
a.ownerDocument = self
a.value = ""
return a
# A couple of implementation-specific helpers to create node types
# not supported by the W3C DOM specs:
def _create_entity(self, name, publicId, systemId, notationName):
e = Entity(name, publicId, systemId, notationName)
e.ownerDocument = self
return e
def _create_notation(self, name, publicId, systemId):
n = Notation(name, publicId, systemId)
n.ownerDocument = self
return n
def getElementById(self, id):
if id in self._id_cache:
return self._id_cache[id]
if not (self._elem_info or self._magic_id_count):
return None
stack = self._id_search_stack
if stack is None:
# we never searched before, or the cache has been cleared
stack = [self.documentElement]
self._id_search_stack = stack
elif not stack:
# Previous search was completed and cache is still valid;
# no matching node.
return None
result = None
while stack:
node = stack.pop()
# add child elements to stack for continued searching
stack.extend([child for child in node.childNodes
if child.nodeType in _nodeTypes_with_children])
# check this node
info = self._get_elem_info(node)
if info:
# We have to process all ID attributes before
# returning in order to get all the attributes set to
# be IDs using Element.setIdAttribute*().
for attr in node.attributes.values():
if attr.namespaceURI:
if info.isIdNS(attr.namespaceURI, attr.localName):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif info.isId(attr.name):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif node._magic_id_nodes == 1:
break
elif node._magic_id_nodes:
for attr in node.attributes.values():
if attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
if result is not None:
break
return result
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def isSupported(self, feature, version):
return self.implementation.hasFeature(feature, version)
def importNode(self, node, deep):
if node.nodeType == Node.DOCUMENT_NODE:
raise xml.dom.NotSupportedErr("cannot import document nodes")
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise xml.dom.NotSupportedErr("cannot import document type nodes")
return _clone_node(node, deep, self)
def writexml(self, writer, indent="", addindent="", newl="", encoding=None):
if encoding is None:
writer.write('<?xml version="1.0" ?>'+newl)
else:
writer.write('<?xml version="1.0" encoding="%s"?>%s' % (
encoding, newl))
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
# DOM Level 3 (WD 9 April 2002)
def renameNode(self, n, namespaceURI, name):
if n.ownerDocument is not self:
raise xml.dom.WrongDocumentErr(
"cannot rename nodes from other documents;\n"
"expected %s,\nfound %s" % (self, n.ownerDocument))
if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
raise xml.dom.NotSupportedErr(
"renameNode() only applies to element and attribute nodes")
if namespaceURI != EMPTY_NAMESPACE:
if ':' in name:
prefix, localName = name.split(':', 1)
if ( prefix == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE):
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix")
else:
if ( name == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE
and n.nodeType == Node.ATTRIBUTE_NODE):
raise xml.dom.NamespaceErr(
"illegal use of the 'xmlns' attribute")
prefix = None
localName = name
else:
prefix = None
localName = None
if n.nodeType == Node.ATTRIBUTE_NODE:
element = n.ownerElement
if element is not None:
is_id = n._is_id
element.removeAttributeNode(n)
else:
element = None
n.prefix = prefix
n._localName = localName
n.namespaceURI = namespaceURI
n.nodeName = name
if n.nodeType == Node.ELEMENT_NODE:
n.tagName = name
else:
# attribute node
n.name = name
if element is not None:
element.setAttributeNode(n)
if is_id:
element.setIdAttributeNode(n)
# It's not clear from a semantic perspective whether we should
# call the user data handlers for the NODE_RENAMED event since
# we're re-using the existing node. The draft spec has been
# interpreted as meaning "no, don't call the handler unless a
# new node is created."
return n
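# Editorial note: an illustrative sketch of Document.renameNode() above (assumed
# usage, not part of the original source):
#
#   doc = parseString('<old attr="1"/>')
#   doc.renameNode(doc.documentElement, EMPTY_NAMESPACE, "new")
#   assert doc.documentElement.tagName == "new"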
defproperty(Document, "documentElement",
doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
                    e._call_user_data_handler(operation, e, entity)
else:
# Note the cloning of Document and DocumentType nodes is
# implementation specific. minidom handles those cases
# directly in the cloneNode() methods.
raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
    # Check for _call_user_data_handler() since this could conceivably
    # be used with other DOM implementations (one of the FourThought
    # DOMs, perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
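# Editorial note: an illustrative sketch of how _clone_node() is reached through
# the public API (assumed usage, not part of the original source):
#
#   src = parseString("<a><b>text</b></a>")
#   dst = parseString("<root/>")
#   imported = dst.importNode(src.documentElement, deep=True)   # deep clone owned by dst
#   dst.documentElement.appendChild(imported)
#   assert imported.ownerDocument is dst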
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize})
def parseString(string, parser=None):
"""Parse a file into a DOM from a string."""
if parser is None:
from xml.dom import expatbuilder
return expatbuilder.parseString(string)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parseString, (string,),
{'parser': parser})
def getDOMImplementation(features=None):
if features:
if isinstance(features, str):
features = domreg._parse_feature_string(features)
for f, v in features:
if not Document.implementation.hasFeature(f, v):
return None
return Document.implementation
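# Editorial note: a short, illustrative usage sketch of the public entry points
# above (assumed usage, not part of the original source):
#
#   impl = getDOMImplementation()
#   doc = impl.createDocument(None, "catalog", None)   # document with a root element
#   item = doc.createElement("item")
#   item.setAttribute("id", "i1")
#   item.setIdAttribute("id")                          # mark the attribute as an ID
#   doc.documentElement.appendChild(item)
#   assert doc.getElementById("i1") is item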
|
gpl-3.0
|
JavML/django
|
tests/flatpages_tests/test_csrf.py
|
290
|
4819
|
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import Client, TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
CSRF_FAILURE_VIEW='django.views.csrf.csrf_failure',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageCSRFTests(TestCase):
@classmethod
def setUpTestData(cls):
        # don't use the manager because we want to ensure the site with pk=1
        # exists, whether or not one was already created
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
def setUp(self):
self.client = Client(enforce_csrf_checks=True)
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_post_view_flatpage(self):
"POSTing to a flatpage served through a view will raise a CSRF error if no token is provided (Refs #14156)"
response = self.client.post('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 403)
def test_post_fallback_flatpage(self):
"POSTing to a flatpage served by the middleware will raise a CSRF error if no token is provided (Refs #14156)"
response = self.client.post('/flatpage/')
self.assertEqual(response.status_code, 403)
def test_post_unknown_page(self):
"POSTing to an unknown page isn't caught as a 403 CSRF error"
response = self.client.post('/no_such_page/')
self.assertEqual(response.status_code, 404)
|
bsd-3-clause
|
rmetzger/flink
|
flink-python/pyflink/table/table_result.py
|
5
|
11562
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Optional
from py4j.java_gateway import get_method
from pyflink.common.types import RowKind
from pyflink.common import Row
from pyflink.common.job_client import JobClient
from pyflink.java_gateway import get_gateway
from pyflink.table.result_kind import ResultKind
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import _from_java_type
from pyflink.table.utils import pickled_bytes_to_python_converter
__all__ = ['TableResult']
class TableResult(object):
"""
A :class:`~pyflink.table.TableResult` is the representation of the statement execution result.
.. versionadded:: 1.11.0
"""
def __init__(self, j_table_result):
self._j_table_result = j_table_result
def get_job_client(self) -> Optional[JobClient]:
"""
        For DML and DQL statements, return the JobClient associated with the submitted Flink job.
        For other statements (e.g. DDL, DCL) return None.
:return: The job client, optional.
:rtype: pyflink.common.JobClient
.. versionadded:: 1.11.0
"""
job_client = self._j_table_result.getJobClient()
if job_client.isPresent():
return JobClient(job_client.get())
else:
return None
def wait(self, timeout_ms: int = None):
"""
Wait if necessary for at most the given time (milliseconds) for the data to be ready.
For a select operation, this method will wait until the first row can be accessed locally.
For an insert operation, this method will wait for the job to finish,
because the result contains only one row.
For other operations, this method will return immediately,
because the result is already available locally.
.. versionadded:: 1.12.0
"""
if timeout_ms:
TimeUnit = get_gateway().jvm.java.util.concurrent.TimeUnit
get_method(self._j_table_result, "await")(timeout_ms, TimeUnit.MILLISECONDS)
else:
get_method(self._j_table_result, "await")()
def get_table_schema(self) -> TableSchema:
"""
Get the schema of result.
The schema of DDL, USE, EXPLAIN:
::
+-------------+-------------+----------+
| column name | column type | comments |
+-------------+-------------+----------+
| result | STRING | |
+-------------+-------------+----------+
The schema of SHOW:
::
+---------------+-------------+----------+
| column name | column type | comments |
+---------------+-------------+----------+
| <object name> | STRING | |
+---------------+-------------+----------+
The column name of `SHOW CATALOGS` is "catalog name",
the column name of `SHOW DATABASES` is "database name",
the column name of `SHOW TABLES` is "table name",
the column name of `SHOW VIEWS` is "view name",
the column name of `SHOW FUNCTIONS` is "function name".
The schema of DESCRIBE:
::
+------------------+-------------+-------------------------------------------------+
| column name | column type | comments |
+------------------+-------------+-------------------------------------------------+
| name | STRING | field name |
+------------------+-------------+-------------------------------------------------+
| type | STRING | field type expressed as a String |
+------------------+-------------+-------------------------------------------------+
| null | BOOLEAN | field nullability: true if a field is nullable, |
| | | else false |
+------------------+-------------+-------------------------------------------------+
| key | BOOLEAN | key constraint: 'PRI' for primary keys, |
| | | 'UNQ' for unique keys, else null |
+------------------+-------------+-------------------------------------------------+
| computed column | STRING | computed column: string expression |
| | | if a field is computed column, else null |
+------------------+-------------+-------------------------------------------------+
| watermark | STRING | watermark: string expression if a field is |
| | | watermark, else null |
+------------------+-------------+-------------------------------------------------+
The schema of INSERT: (one column per one sink)
::
+----------------------------+-------------+-----------------------+
| column name | column type | comments |
+----------------------------+-------------+-----------------------+
| (name of the insert table) | BIGINT | the insert table name |
+----------------------------+-------------+-----------------------+
The schema of SELECT is the selected field names and types.
:return: The schema of result.
:rtype: pyflink.table.TableSchema
.. versionadded:: 1.11.0
"""
return TableSchema(j_table_schema=self._j_table_result.getTableSchema())
def get_result_kind(self) -> ResultKind:
"""
Return the ResultKind which represents the result type.
For DDL operation and USE operation, the result kind is always SUCCESS.
For other operations, the result kind is always SUCCESS_WITH_CONTENT.
:return: The result kind.
.. versionadded:: 1.11.0
"""
return ResultKind._from_j_result_kind(self._j_table_result.getResultKind())
def collect(self) -> 'CloseableIterator':
"""
Get the result contents as a closeable row iterator.
Note:
        For a SELECT operation, the job will not finish until all result data has been
        collected, so the job should be closed actively via the CloseableIterator#close
        method to avoid a resource leak. Calling CloseableIterator#close will cancel the
        job and release the related resources.
        For a DML operation, Flink does not yet support getting the real affected row count,
        so the affected row count is always -1 (unknown) for every sink, and it will be
        returned only after the job is finished.
        Calling CloseableIterator#close will cancel the job.
        For other operations, no Flink job is submitted (get_job_client() is always empty) and
        the result is bounded; calling CloseableIterator#close does nothing.
Recommended code to call CloseableIterator#close method looks like:
>>> table_result = t_env.execute("select ...")
>>> with table_result.collect() as results:
>>> for result in results:
>>> ...
        In order to fetch the result locally, you can call either collect() or print(),
        but they cannot both be called on the same TableResult instance.
:return: A CloseableIterator.
.. versionadded:: 1.12.0
"""
field_data_types = self._j_table_result.getTableSchema().getFieldDataTypes()
j_iter = self._j_table_result.collect()
return CloseableIterator(j_iter, field_data_types)
def print(self):
"""
        Print the result contents in tableau form to the client console.
This method has slightly different behaviors under different checkpointing settings.
- For batch jobs or streaming jobs without checkpointing,
this method has neither exactly-once nor at-least-once guarantee.
Query results are immediately accessible by the clients once they're produced,
but exceptions will be thrown when the job fails and restarts.
- For streaming jobs with exactly-once checkpointing,
this method guarantees an end-to-end exactly-once record delivery.
A result will be accessible by clients only after its corresponding checkpoint
completes.
- For streaming jobs with at-least-once checkpointing,
this method guarantees an end-to-end at-least-once record delivery.
Query results are immediately accessible by the clients once they're produced,
but it is possible for the same result to be delivered multiple times.
.. versionadded:: 1.11.0
"""
self._j_table_result.print()
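# Editorial note: an illustrative usage sketch of TableResult (assumes a
# hypothetical TableEnvironment `t_env`; not part of the original module):
#
#   table_result = t_env.execute_sql("SELECT ...")
#   table_result.wait()                       # for SELECT, returns once the first row is ready
#   with table_result.collect() as results:   # CloseableIterator, defined below
#       for row in results:
#           print(row)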
class CloseableIterator(object):
"""
Representing an Iterator that is also auto closeable.
"""
def __init__(self, j_closeable_iterator, field_data_types):
self._j_closeable_iterator = j_closeable_iterator
self._j_field_data_types = field_data_types
self._data_types = [_from_java_type(j_field_data_type)
for j_field_data_type in self._j_field_data_types]
def __iter__(self):
return self
def __next__(self):
if not self._j_closeable_iterator.hasNext():
raise StopIteration("No more data.")
gateway = get_gateway()
pickle_bytes = gateway.jvm.PythonBridgeUtils. \
getPickledBytesFromRow(self._j_closeable_iterator.next(),
self._j_field_data_types)
row_kind = RowKind(int.from_bytes(pickle_bytes[0], byteorder='big', signed=False))
pickle_bytes = list(pickle_bytes[1:])
field_data = zip(pickle_bytes, self._data_types)
fields = []
for data, field_type in field_data:
if len(data) == 0:
fields.append(None)
else:
fields.append(pickled_bytes_to_python_converter(data, field_type))
result_row = Row(*fields)
result_row.set_row_kind(row_kind)
return result_row
def next(self):
return self.__next__()
def close(self):
self._j_closeable_iterator.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
|
apache-2.0
|
qitta/libhugin
|
hugin/harvest/provider/omdb/omdbmovie.py
|
1
|
5772
|
#!/usr/bin/env python
# encoding: utf-8
# stdlib
from parse import parse
from urllib.parse import urlencode, quote_plus
import json
#hugin
from hugin.utils.stringcompare import string_similarity_ratio
from hugin.harvest.provider.genrenorm import GenreNormalize
import hugin.harvest.provider as provider
class OMDBMovie(provider.IMovieProvider):
""" OMDB Person text metadata provider.
Interfaces implemented according to hugin.provider.interfaces.
"""
def __init__(self):
self._base_url = 'http://www.omdbapi.com/?{query}&plot=full'
self._genrenorm = GenreNormalize('omdb.genre')
self._priority = 80
self._attrs = {
'title', 'plot', 'runtime', 'imdbid', 'vote_count', 'rating',
'directors', 'writers', 'year', 'poster', 'genre', 'genre_norm',
'actors', 'original_title'
}
def build_url(self, search_params):
if search_params.imdbid:
params = {
'i': search_params.imdbid
}
return [self._base_url.format(query=urlencode(params))]
if search_params.title:
params = {
's': quote_plus(search_params.title) or '',
'y': search_params.year or ''
}
return [self._base_url.format(query=urlencode(params))]
def parse_response(self, url_response, search_params):
fail_states = ['Incorrect IMDb ID', 'Movie not found!']
try:
url, response = url_response.pop()
if response is None:
return None, False
if response:
# some json docs from this provider have mysterious newlines.
response = response.replace('\n', '')
response = json.loads(response)
except (TypeError, ValueError) as e:
print('Exception in parse_response omdbmovie:', e)
return None, True
if 'Error' in response and response['Error'] in fail_states:
return [], True
if 'Search' in response:
return self._parse_search_module(response, search_params), False
if 'Title' in response:
return self._parse_movie_module(response, search_params), True
return None, True
def _parse_search_module(self, result, search_params):
similarity_map = []
for result in result['Search']:
if result['Type'] == 'movie' or result['Type'] == 'N/A':
ratio = string_similarity_ratio(
result['Title'],
search_params.title
)
similarity_map.append(
{'imdbid': result['imdbID'], 'ratio': ratio}
)
similarity_map.sort(key=lambda value: value['ratio'], reverse=True)
item_count = min(len(similarity_map), search_params.amount)
movieids = [item['imdbid'] for item in similarity_map[:item_count]]
return self._movieids_to_urllist(movieids)
def _movieids_to_urllist(self, movieids):
url_list = []
for movieid in movieids:
query = 'i={imdb_id}'.format(imdb_id=movieid)
url_list.append([self._base_url.format(query=query)])
return url_list
def _parse_movie_module(self, result, search_params):
result_dict = {k: None for k in self._attrs}
#str attrs
result_dict['title'] = ''.join(result['Title'].split(','))
result_dict['original_title'] = result_dict.get('title')
result_dict['plot'] = ''.join(result['Plot'].split(','))
result_dict['imdbid'] = result.get('imdbID')
result_dict['rating'] = result.get('imdbRating')
#list attrs
result_dict['poster'] = self._parse_poster(result)
result_dict['actors'] = self._parse_list_attr(result, 'Actors')
result_dict['directors'] = self._parse_list_attr(result, 'Director')
result_dict['writers'] = self._parse_list_attr(result, 'Writer')
result_dict['genre'] = self._parse_list_attr(result, 'Genre')
result_dict['genre_norm'] = self._genrenorm.normalize_genre_list(
result_dict['genre']
)
#numeric attrs
result_dict['runtime'] = int(self._format_runtime(result['Runtime']))
vote_count = result['imdbVotes'].replace(',', '')
if vote_count.isnumeric():
result_dict['vote_count'] = int(vote_count)
if result['Year'].isdecimal():
result_dict['year'] = int(result['Year'])
return {key: self._filter_na(val) for key, val in result_dict.items()}
def _parse_poster(self, response):
poster = response.get('Poster')
if self._filter_na(poster):
return [(None, poster)]
def _parse_list_attr(self, response, person_type):
persons = response.get(person_type)
if self._filter_na(persons):
persons = persons.split(',')
if person_type == 'Actors':
return [(None, person.strip()) for person in persons]
return [person.strip() for person in persons]
def _filter_na(self, value):
if value == 'N/A' or value == ['N/A']:
return None
return value
def _format_runtime(self, runtime):
result = 0
time_fmt = {'HM': '{:d} h {:d} min', 'H': '{:d} h', 'M': '{:d} min'}
if runtime and 'h' in runtime and 'min' in runtime:
h, m = parse(time_fmt['HM'], runtime)
result = (h * 60) + m
elif 'min' in runtime:
result, = parse(time_fmt['M'], runtime)
elif 'h' in runtime:
result, = parse(time_fmt['H'], runtime)
return result
@property
def supported_attrs(self):
return self._attrs
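# Editorial note: small, illustrative examples of the helpers above (made-up
# values, not part of the original module):
#
#   provider = OMDBMovie()
#   provider._format_runtime('2 h 15 min')   # -> 135 (minutes)
#   provider._format_runtime('95 min')       # -> 95
#   provider._filter_na('N/A')               # -> None ("not available" marker)
#   provider._filter_na('Drama, Comedy')     # -> 'Drama, Comedy'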
|
gpl-3.0
|
poljeff/odoo
|
openerp/addons/base/ir/ir_exports.py
|
338
|
1672
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class ir_exports(osv.osv):
_name = "ir.exports"
_order = 'name'
_columns = {
'name': fields.char('Export Name'),
'resource': fields.char('Resource', select=True),
'export_fields': fields.one2many('ir.exports.line', 'export_id',
'Export ID', copy=True),
}
class ir_exports_line(osv.osv):
_name = 'ir.exports.line'
_order = 'id'
_columns = {
'name': fields.char('Field Name'),
'export_id': fields.many2one('ir.exports', 'Export', select=True, ondelete='cascade'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
dylanlesko/youtube-dl
|
youtube_dl/extractor/escapist.py
|
98
|
3384
|
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
determine_ext,
clean_html,
int_or_none,
float_or_none,
)
def _decrypt_config(key, string):
a = ''
i = ''
r = ''
while len(a) < (len(string) / 2):
a += key
a = a[0:int(len(string) / 2)]
t = 0
while t < len(string):
i += chr(int(string[t] + string[t + 1], 16))
t += 2
icko = [s for s in i]
for t, c in enumerate(a):
r += chr(ord(c) ^ ord(icko[t]))
return r
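# Editorial note: _decrypt_config() above is a plain XOR cipher over a
# hex-encoded payload, so the inverse operation is just hex-encoding the XOR of
# the plaintext with the repeated key. A minimal illustrative counterpart (an
# editorial sketch, not part of the original extractor):
def _encrypt_config_example(key, plaintext):
    keystream = (key * len(plaintext))[:len(plaintext)]
    return ''.join('%02x' % (ord(p) ^ ord(k)) for p, k in zip(plaintext, keystream))
# e.g. _decrypt_config('key', _encrypt_config_example('key', 'abc')) == 'abc'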
class EscapistIE(InfoExtractor):
_VALID_URL = r'https?://?(?:www\.)?escapistmagazine\.com/videos/view/[^/?#]+/(?P<id>[0-9]+)-[^/?#]*(?:$|[?#])'
_TESTS = [{
'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
'md5': 'ab3a706c681efca53f0a35f1415cf0d1',
'info_dict': {
'id': '6618',
'ext': 'mp4',
'description': "Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
'title': "Breaking Down Baldur's Gate",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 264,
'uploader': 'The Escapist',
}
}, {
'url': 'http://www.escapistmagazine.com/videos/view/zero-punctuation/10044-Evolve-One-vs-Multiplayer',
'md5': '9e8c437b0dbb0387d3bd3255ca77f6bf',
'info_dict': {
'id': '10044',
'ext': 'mp4',
'description': 'This week, Zero Punctuation reviews Evolve.',
'title': 'Evolve - One vs Multiplayer',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 304,
'uploader': 'The Escapist',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
ims_video = self._parse_json(
self._search_regex(
r'imsVideo\.play\(({.+?})\);', webpage, 'imsVideo'),
video_id)
video_id = ims_video['videoID']
key = ims_video['hash']
config_req = compat_urllib_request.Request(
'http://www.escapistmagazine.com/videos/'
'vidconfig.php?videoID=%s&hash=%s' % (video_id, key))
config_req.add_header('Referer', url)
config = self._download_webpage(config_req, video_id, 'Downloading video config')
data = json.loads(_decrypt_config(key, config))
video_data = data['videoData']
title = clean_html(video_data['title'])
duration = float_or_none(video_data.get('duration'), 1000)
uploader = video_data.get('publisher')
formats = [{
'url': video['src'],
'format_id': '%s-%sp' % (determine_ext(video['src']), video['res']),
'height': int_or_none(video.get('res')),
} for video in data['files']['videos']]
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
'duration': duration,
'uploader': uploader,
}
|
unlicense
|
berrange/nova
|
nova/scheduler/filters/aggregate_multitenancy_isolation.py
|
20
|
1972
|
# Copyright (c) 2011-2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class AggregateMultiTenancyIsolation(filters.BaseHostFilter):
"""Isolate tenants in specific aggregates."""
# Aggregate data and tenant do not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties):
"""If a host is in an aggregate that has the metadata key
"filter_tenant_id" it can only create instances from that tenant(s).
A host can be in different aggregates.
If a host doesn't belong to an aggregate with the metadata key
"filter_tenant_id" it can create instances from all tenants.
"""
spec = filter_properties.get('request_spec', {})
props = spec.get('instance_properties', {})
tenant_id = props.get('project_id')
context = filter_properties['context']
metadata = db.aggregate_metadata_get_by_host(context, host_state.host,
key="filter_tenant_id")
if metadata != {}:
if tenant_id not in metadata["filter_tenant_id"]:
LOG.debug("%s fails tenant id on aggregate", host_state)
return False
return True
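# Editorial note: an illustrative walk-through of host_passes() above (made-up
# values, not part of the original module):
#
#   metadata == {}                                 -> the host accepts any tenant
#   metadata == {"filter_tenant_id": set(["t1"])}  -> only requests whose
#       project_id is "t1" pass; all other tenants are filtered out for this host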
|
apache-2.0
|
cashtag/cashtag
|
contrib/spendfrom/spendfrom.py
|
792
|
10053
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
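# Editorial note: an illustrative sketch of read_bitcoin_config() (hypothetical
# file contents, not part of the original script). Given a bitcoin.conf such as:
#
#   rpcuser=alice
#   rpcpassword=secret # trailing comments are stripped by FakeSecHead
#   testnet=1
#
# read_bitcoin_config(determine_db_dir()) returns
# {'rpcuser': 'alice', 'rpcpassword': 'secret', 'testnet': '1'}.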
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
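# Editorial note: a small worked example of the greedy selection above (made-up
# inputs, not part of the original script):
#
#   inputs = [{"txid": "aa", "vout": 0, "amount": Decimal("0.5")},
#             {"txid": "bb", "vout": 1, "amount": Decimal("0.5")}]
#   select_coins(Decimal("0.7"), inputs)
#   # -> ([{"txid": "aa", "vout": 0}, {"txid": "bb", "vout": 1}], Decimal("0.3"))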
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        fee = total_in - total_out  # the fee this transaction actually pays
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
mit
|
ArchiFleKs/magnum
|
api-ref/source/conf.py
|
2
|
6595
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Magnum documentation build configuration file
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
extensions = [
'os_api_ref',
'openstackdocstheme',
]
html_theme = 'openstackdocs'
html_theme_options = {
"sidebar_mode": "toc",
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2010-present, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/magnum'
openstackdocs_use_storyboard = True
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'magnumdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Magnum.tex',
u'OpenStack Container Infrastructure Management API Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
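# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original conf.py):
# with this file in api-ref/source/, the API reference is typically rendered
# with sphinx-build; the output directory below is an illustrative assumption.
#
#     sphinx-build -W -b html api-ref/source api-ref/build/html
# ---------------------------------------------------------------------------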
|
apache-2.0
|
danche354/Sequence-Labeling
|
ner_BIOES/senna-hash-2-pos-chunk-gazetteer-BIOES-128-64.py
|
1
|
7837
|
from keras.models import Model
from keras.layers import Input, Masking, Dense, LSTM
from keras.layers import Dropout, TimeDistributed, Bidirectional, merge
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
import numpy as np
import pandas as pd
import sys
import math
import os
from datetime import datetime
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
from tools import plot
np.random.seed(0)
# train hyperparameters
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
gazetteer_length = conf.BIOES_gazetteer_length
emb_vocab = conf.senna_vocab
emb_length = conf.senna_length
hash_vocab = conf.ner_hash_vocab
hash_length = conf.ner_hash_length
output_length = conf.ner_BIOES_length
batch_size = conf.batch_size
nb_epoch = conf.nb_epoch
model_name = os.path.basename(__file__)[:-3]
folder_path = 'model/%s'%model_name
if not os.path.isdir(folder_path):
os.makedirs(folder_path)
# the data, shuffled and split between train and test sets
train_data = load_data.load_ner(dataset='eng.train', form='BIOES')
dev_data = load_data.load_ner(dataset='eng.testa', form='BIOES')
train_samples = len(train_data)
dev_samples = len(dev_data)
print('train shape:', train_samples)
print('dev shape:', dev_samples)
print()
word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None)
word_embedding = word_embedding.values
word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))])
hash_embedding = pd.read_csv('../preprocessing/ner-auto-encoder-2/auto-encoder-embeddings.txt', delimiter=' ', header=None)
hash_embedding = hash_embedding.values
hash_embedding = np.concatenate([np.zeros((1,hash_length)),hash_embedding, np.random.rand(1,hash_length)])
embed_index_input = Input(shape=(step_length,))
embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding], mask_zero=True, input_length=step_length)(embed_index_input)
hash_index_input = Input(shape=(step_length,))
encoder_embedding = Embedding(hash_vocab+2, hash_length, weights=[hash_embedding], mask_zero=True, input_length=step_length)(hash_index_input)
pos_input = Input(shape=(step_length, pos_length))
chunk_input = Input(shape=(step_length, chunk_length))
gazetteer_input = Input(shape=(step_length, gazetteer_length))
senna_hash_pos_chunk_gazetteer_merge = merge([embedding, encoder_embedding, pos_input, chunk_input, gazetteer_input], mode='concat')
input_mask = Masking(mask_value=0)(senna_hash_pos_chunk_gazetteer_merge)
dp_1 = Dropout(0.5)(input_mask)
hidden_1 = Bidirectional(LSTM(128, return_sequences=True))(dp_1)
hidden_2 = Bidirectional(LSTM(64, return_sequences=True))(hidden_1)
dp_2 = Dropout(0.5)(hidden_2)
output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2)
model = Model(input=[embed_index_input,hash_index_input,pos_input,chunk_input, gazetteer_input], output=output)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
print(model.summary())
number_of_train_batches = int(math.ceil(float(train_samples)/batch_size))
number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size))
print('start train %s ...\n'%model_name)
best_accuracy = 0
best_epoch = 0
all_train_loss = []
all_dev_loss = []
all_dev_accuracy = []
log = open('%s/model_log.txt'%folder_path, 'w')
start_time = datetime.now()
print('train start at %s\n'%str(start_time))
log.write('train start at %s\n\n'%str(start_time))
for epoch in range(nb_epoch):
start = datetime.now()
print('-'*60)
print('epoch %d start at %s'%(epoch, str(start)))
log.write('-'*60+'\n')
log.write('epoch %d start at %s\n'%(epoch, str(start)))
train_loss = 0
dev_loss = 0
np.random.shuffle(train_data)
for i in range(number_of_train_batches):
train_batch = train_data[i*batch_size: (i+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=train_batch, form='BIOES', gram='bi')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
gazetteer, length_2 = prepare.prepare_gazetteer_BIOES(batch=train_batch)
gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
train_metrics = model.train_on_batch([embed_index, hash_index, pos, chunk, gazetteer], y)
train_loss += train_metrics[0]
all_train_loss.append(train_loss)
correct_predict = 0
all_predict = 0
for j in range(number_of_dev_batches):
dev_batch = dev_data[j*batch_size: (j+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=dev_batch, form='BIOES', gram='bi')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
gazetteer, length_2 = prepare.prepare_gazetteer_BIOES(batch=dev_batch)
gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
# for loss
dev_metrics = model.test_on_batch([embed_index, hash_index, pos, chunk, gazetteer], y)
dev_loss += dev_metrics[0]
# for accuracy
prob = model.predict_on_batch([embed_index, hash_index, pos, chunk, gazetteer])
for i, l in enumerate(length):
predict_label = np_utils.categorical_probas_to_classes(prob[i])
correct_predict += np.sum(predict_label[:l]==label[i][:l])
all_predict += np.sum(length)
epcoh_accuracy = float(correct_predict)/all_predict
all_dev_accuracy.append(epcoh_accuracy)
all_dev_loss.append(dev_loss)
if epcoh_accuracy>=best_accuracy:
best_accuracy = epcoh_accuracy
best_epoch = epoch
end = datetime.now()
model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True)
print('epoch %d end at %s'%(epoch, str(end)))
print('epoch %d train loss: %f'%(epoch, train_loss))
print('epoch %d dev loss: %f'%(epoch, dev_loss))
print('epoch %d dev accuracy: %f'%(epoch, epcoh_accuracy))
print('best epoch now: %d\n'%best_epoch)
log.write('epoch %d end at %s\n'%(epoch, str(end)))
log.write('epoch %d train loss: %f\n'%(epoch, train_loss))
log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss))
log.write('epoch %d dev accuracy: %f\n'%(epoch, epcoh_accuracy))
log.write('best epoch now: %d\n\n'%best_epoch)
end_time = datetime.now()
print('train end at %s\n'%str(end_time))
log.write('train end at %s\n\n'%str(end_time))
timedelta = end_time - start_time
print('train cost time: %s\n'%str(timedelta))
print('best epoch last: %d\n'%best_epoch)
log.write('train cost time: %s\n\n'%str(timedelta))
log.write('best epoch last: %d\n\n'%best_epoch)
plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name)
plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
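# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not executed as part of training):
# reloading the best epoch's saved weights for later evaluation. Assumes the
# installed Keras release pairs model.save() (used above) with load_model().
#
#     from keras.models import load_model
#     best_model = load_model('%s/model_epoch_%d.h5' % (folder_path, best_epoch))
#     best_model.summary()
# ---------------------------------------------------------------------------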
|
mit
|
oasiswork/odoo
|
addons/mail/tests/test_message_read.py
|
199
|
14584
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .common import TestMail
class test_mail_access_rights(TestMail):
def test_00_message_read(self):
""" Tests for message_read and expandables. """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
self.mail_group.message_subscribe_users(cr, uid, [group_pigs.id], [user_raoul.id])
pigs_domain = [('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)]
# Data: create a discussion in Pigs (3 threads, with respectively 0, 4 and 4 answers)
msg_id0 = self.group_pigs.message_post(body='0', subtype='mt_comment')
msg_id1 = self.group_pigs.message_post(body='1', subtype='mt_comment')
msg_id2 = self.group_pigs.message_post(body='2', subtype='mt_comment')
msg_id3 = self.group_pigs.message_post(body='1-1', subtype='mt_comment', parent_id=msg_id1)
msg_id4 = self.group_pigs.message_post(body='2-1', subtype='mt_comment', parent_id=msg_id2)
msg_id5 = self.group_pigs.message_post(body='1-2', subtype='mt_comment', parent_id=msg_id1)
msg_id6 = self.group_pigs.message_post(body='2-2', subtype='mt_comment', parent_id=msg_id2)
msg_id7 = self.group_pigs.message_post(body='1-1-1', subtype='mt_comment', parent_id=msg_id3)
msg_id8 = self.group_pigs.message_post(body='2-1-1', subtype='mt_comment', parent_id=msg_id4)
msg_id9 = self.group_pigs.message_post(body='1-1-1', subtype='mt_comment', parent_id=msg_id3)
msg_id10 = self.group_pigs.message_post(body='2-1-1', subtype='mt_comment', parent_id=msg_id4)
msg_ids = [msg_id10, msg_id9, msg_id8, msg_id7, msg_id6, msg_id5, msg_id4, msg_id3, msg_id2, msg_id1, msg_id0]
ordered_msg_ids = [msg_id2, msg_id4, msg_id6, msg_id8, msg_id10, msg_id1, msg_id3, msg_id5, msg_id7, msg_id9, msg_id0]
# Test: raoul received notifications
raoul_notification_ids = self.mail_notification.search(cr, user_raoul.id, [('is_read', '=', False), ('message_id', 'in', msg_ids), ('partner_id', '=', user_raoul.partner_id.id)])
self.assertEqual(len(raoul_notification_ids), 11, 'message_post: wrong number of produced notifications')
# Test: read some specific ids
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, ids=msg_ids[2:4], domain=[('body', 'like', 'dummy')], context={'mail_read_set_read': True})
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(msg_ids[2:4], read_msg_ids, 'message_read with direct ids should read only the requested ids')
# Test: read messages of Pigs through a domain, threaded or not
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, domain=pigs_domain, limit=200)
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(msg_ids, read_msg_ids, 'message_read flat with domain on Pigs should equal all messages of Pigs')
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, domain=pigs_domain, limit=200, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(ordered_msg_ids, read_msg_ids,
'message_read threaded with domain on Pigs should equal all messages of Pigs, and sort them with newer thread first, last message last in thread')
# ----------------------------------------
# CASE1: message_read with domain, threaded
# We simulate an entire flow, using the expandables to test them
# ----------------------------------------
# Do: read last message, threaded
read_msg_list = self.mail_message.message_read(cr, uid, domain=pigs_domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# TDE TODO: test expandables order
type_list = map(lambda item: item.get('type'), read_msg_list)
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 4, 'message_read on last Pigs message should return 2 messages and 2 expandables')
self.assertEqual(set([msg_id2, msg_id10]), set(read_msg_ids), 'message_read on the last Pigs message should also get its parent')
self.assertEqual(read_msg_list[1].get('parent_id'), read_msg_list[0].get('id'), 'message_read should set the ancestor to the thread header')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
elif msg.get('type') == 'expandable':
new_msg_exp = msg
# Do: fetch new messages in first thread, domain from expandable
self.assertIsNotNone(new_msg_exp, 'message_read on last Pigs message should have returned a new messages expandable')
domain = new_msg_exp.get('domain', [])
# Test: expandable, conditions in domain
self.assertIn(('id', 'child_of', msg_id2), domain, 'new messages expandable domain should contain a child_of condition')
self.assertIn(('id', '>=', msg_id4), domain, 'new messages expandable domain should contain an id greater than condition')
self.assertIn(('id', '<=', msg_id8), domain, 'new messages expandable domain should contain an id less than condition')
self.assertEqual(new_msg_exp.get('parent_id'), msg_id2, 'new messages expandable should have parent_id set to the thread header')
# Do: message_read with domain, thread_level=0, parent_id=msg_id2 (should be imposed by JS), 2 messages
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=2, thread_level=0, parent_id=msg_id2)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
new_msg_exp = [msg for msg in read_msg_list if msg.get('type') == 'expandable'][0]
# Test: structure content, 2 messages and 1 thread expandable
self.assertEqual(len(read_msg_list), 3, 'message_read in Pigs thread should return 2 messages and 1 expandable')
self.assertEqual(set([msg_id6, msg_id8]), set(read_msg_ids), 'message_read in Pigs thread should return 2 more previous messages in thread')
# Do: read the last message
read_msg_list = self.mail_message.message_read(cr, uid, domain=new_msg_exp.get('domain'), limit=2, thread_level=0, parent_id=msg_id2)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, 1 message
self.assertEqual(len(read_msg_list), 1, 'message_read in Pigs thread should return 1 message')
self.assertEqual(set([msg_id4]), set(read_msg_ids), 'message_read in Pigs thread should return the last message in thread')
# Do: fetch a new thread, domain from expandable
self.assertIsNotNone(new_threads_exp, 'message_read on last Pigs message should have returned a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'new threads expandable domain should contain the message_read domain parameter')
self.assertFalse(new_threads_exp.get('parent_id'), 'new threads expandable should not have a parent_id')
# Do: message_read with domain, thread_level=1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 4, 'message_read on Pigs should return 2 messages and 2 expandables')
self.assertEqual(set([msg_id1, msg_id9]), set(read_msg_ids), 'message_read on a Pigs message should also get its parent')
self.assertEqual(read_msg_list[1].get('parent_id'), read_msg_list[0].get('id'), 'message_read should set the ancestor to the thread header')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
elif msg.get('type') == 'expandable':
new_msg_exp = msg
# Do: fetch new messages in second thread, domain from expandable
self.assertIsNotNone(new_msg_exp, 'message_read on Pigs message should have returned a new messages expandable')
domain = new_msg_exp.get('domain', [])
# Test: expandable, conditions in domain
self.assertIn(('id', 'child_of', msg_id1), domain, 'new messages expandable domain should contain a child_of condition')
self.assertIn(('id', '>=', msg_id3), domain, 'new messages expandable domain should contain an id greater than condition')
self.assertIn(('id', '<=', msg_id7), domain, 'new messages expandable domain should contain an id less than condition')
self.assertEqual(new_msg_exp.get('parent_id'), msg_id1, 'new messages expandable should have parent_id set to the thread header')
# Do: message_read with domain, thread_level=0, parent_id=msg_id1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=200, thread_level=0, parent_id=msg_id1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: other messages in thread have been fetched
self.assertEqual(set([msg_id3, msg_id5, msg_id7]), set(read_msg_ids), 'message_read on the last Pigs message should also get its parent')
# Test: fetch a new thread, domain from expandable
self.assertIsNotNone(new_threads_exp, 'message_read should have returned a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'general expandable domain should contain the message_read domain parameter')
# Do: message_read with domain, thread_level=1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 1, 'message_read on Pigs should return 1 message because everything else has been fetched')
self.assertEqual([msg_id0], read_msg_ids, 'message_read after 2 More should return only 1 last message')
# ----------------------------------------
# CASE2: message_read with domain, flat
# ----------------------------------------
# Do: read the last 2 messages, flat
read_msg_list = self.mail_message.message_read(cr, uid, domain=pigs_domain, limit=2, thread_level=0)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is not set, 1 expandable
self.assertEqual(len(read_msg_list), 3, 'message_read on last Pigs message should return 2 messages and 1 expandable')
self.assertEqual(set([msg_id9, msg_id10]), set(read_msg_ids), 'message_read flat on Pigs last messages should only return those messages')
self.assertFalse(read_msg_list[0].get('parent_id'), 'message_read flat should set the ancestor as False')
self.assertFalse(read_msg_list[1].get('parent_id'), 'message_read flat should set the ancestor as False')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
# Do: fetch new messages, domain from expandable
self.assertIsNotNone(new_threads_exp, 'message_read flat on the last 2 Pigs messages should have returned a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'new threads expandable domain should contain the message_read domain parameter')
# Do: message_read with domain, thread_level=0 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=20, thread_level=0)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 9, 'message_read on Pigs should return 9 messages and 0 expandable')
self.assertEqual([msg_id8, msg_id7, msg_id6, msg_id5, msg_id4, msg_id3, msg_id2, msg_id1, msg_id0], read_msg_ids,
'message_read, More on flat, should return all remaining messages')
|
agpl-3.0
|
yinghuocho/firefly-proxy
|
DEPRECATED_PYTHON_SRC/gsocks/socks_relay.py
|
6
|
4538
|
# a relay that forwards local SOCKS connections to a remote SOCKS server
import logging
from gevent import socket
from relay import RelayFactory, RelaySession, RelaySessionError
from utils import pipe_tcp, bind_local_udp, request_fail, send_request, \
sock_addr_info, read_reply, request_success, pipe_udp, read_init_request, \
read_init_reply, read_request
from msg import GENERAL_SOCKS_SERVER_FAILURE, UDP_ASSOCIATE, SUCCEEDED, \
CONNECT, BIND
log = logging.getLogger(__name__)
class SocksForwardSession(RelaySession):
def __init__(self, socksconn, remoteconn):
super(SocksForwardSession, self).__init__(socksconn)
self.remoteconn = remoteconn
self.track_sock(self.remoteconn)
self.remotetimeout = self.remoteconn.gettimeout()
self.client_associate = None
self.last_clientaddr = None
self.client2local_udpsock = None
self.local2remote_udpsock = None
def proc_tcp_request(self, req):
self.remoteconn.sendall(req.pack())
def relay_tcp(self):
pipe_tcp(self.socksconn, self.remoteconn, self.timeout, self.remotetimeout)
def proc_udp_request(self, req):
self.client_associate = (req.dstaddr, req.dstport)
self.last_clientaddr = self.client_associate
self.client2local_udpsock = bind_local_udp(self.socksconn)
self.local2remote_udpsock = bind_local_udp(self.remoteconn)
if not self.client2local_udpsock or not self.local2remote_udpsock:
request_fail(self.socksconn, req, GENERAL_SOCKS_SERVER_FAILURE)
return False
self.track_sock(self.client2local_udpsock)
self.track_sock(self.local2remote_udpsock)
send_request(self.remoteconn, UDP_ASSOCIATE, *sock_addr_info(self.local2remote_udpsock))
reply = read_reply(self.remoteconn)
if reply.rep != SUCCEEDED:
return False
self.remote_associate = (reply.bndaddr, reply.bndport)
request_success(self.socksconn, *sock_addr_info(self.client2local_udpsock))
return True
def relay_udp(self):
def addrchecker():
def _(ip, port):
if self.client_associate[0] == "0.0.0.0" or \
self.client_associate[0] == "::":
return True
if self.client_associate == (ip, port):
return True
return False
return _
def c2r():
def _(data, addr):
self.last_clientaddr = addr
return data, self.remote_associate
return _
def r2c():
def _(data, addr):
return data, self.last_clientaddr
return _
pipe_udp([self.socksconn, self.remoteconn],
self.client2local_udpsock, self.local2remote_udpsock,
self.timeout, self.remotetimeout,
addrchecker(), c2r(), r2c())
def cmd_udp_associate(self, req):
if self.proc_udp_request(req):
self.relay_udp()
def process(self):
try:
initreq = read_init_request(self.socksconn)
self.remoteconn.sendall(initreq.pack())
initreply = read_init_reply(self.remoteconn)
self.socksconn.sendall(initreply.pack())
req = read_request(self.socksconn)
{
CONNECT: self.cmd_connect,
BIND: self.cmd_bind,
UDP_ASSOCIATE : self.cmd_udp_associate
}[req.cmd](req)
self.clean()
except Exception, e:
log.error("[Exception][SocksForwardSession]: %s" % str(e))
self.clean()
class SocksForwardFactory(RelayFactory):
""" forward to another socks.
"""
def __init__(self, remoteip, remoteport, timeout=30):
self.remoteip = remoteip
self.remoteport = remoteport
self.timeout = timeout
def create_relay_session(self, socksconn, clientaddr):
try:
log.info("New socks connection from %s" % str(clientaddr))
remoteconn = socket.create_connection((self.remoteip, self.remoteport), self.timeout)
remoteconn.settimeout(self.timeout)
return SocksForwardSession(socksconn, remoteconn)
except socket.timeout, e: # @UndefinedVariable
log.error("[Exception][create_relay_session]: %s" % str(e))
raise RelaySessionError("Remote Timeout.")
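# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): constructing the factory that turns
# accepted client sockets into SocksForwardSession objects. The upstream SOCKS
# address is an illustrative assumption; the accept loop that feeds
# create_relay_session() lives in the package's relay server code, not here.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    factory = SocksForwardFactory("127.0.0.1", 1080, timeout=30)
    log.info("forwarding local SOCKS clients to %s:%d",
             factory.remoteip, factory.remoteport)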
|
bsd-2-clause
|
odicraig/kodi2odi
|
addons/plugin.video.salts/scrapers/solar_scraper.py
|
1
|
4876
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import kodi
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
from salts_lib.constants import VIDEO_TYPES
import scraper
QUALITY_MAP = {'HD': QUALITIES.HIGH, 'DVD': QUALITIES.HIGH, 'TV': QUALITIES.HIGH, 'LQ DVD': QUALITIES.MEDIUM, 'CAM': QUALITIES.LOW}
BASE_URL = 'http://www.tvsolarmovie.com'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'SolarMovie'
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
for tr in dom_parser.parse_dom(html, 'tr', {'id': 'link_\d+'}):
link_pattern = 'href="[^"]+go.php\?url=([^"]+).*?class="qualityCell[^>]*>\s*([^<]+)'
link_match = re.search(link_pattern, tr, re.DOTALL)
if link_match:
stream_url, quality = link_match.groups()
host = urlparse.urlparse(stream_url).hostname
if host:
quality = QUALITY_MAP.get(quality.strip().upper(), QUALITIES.MEDIUM)
hoster = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': scraper_utils.get_quality(video, host, quality), 'views': None, 'rating': None, 'direct': False}
hosters.append(hoster)
return hosters
def search(self, video_type, title, year, season=''): # @UnusedVariable
if video_type == VIDEO_TYPES.TVSHOW:
return self.__tv_search(title, year)
else:
results = []
html = self._http_get(self.base_url, params={'s': title}, cache_limit=8)
titles = dom_parser.parse_dom(html, 'a', {'class': 'coverImage'}, ret='title')
links = dom_parser.parse_dom(html, 'a', {'class': 'coverImage'}, ret='href')
norm_title = scraper_utils.normalize_title(title)
for match_title_year, match_url in zip(titles, links):
if 'Season' in match_title_year and 'Episode' in match_title_year: continue
match_title, match_year = scraper_utils.extra_year(match_title_year)
match_norm_title = scraper_utils.normalize_title(match_title)
if (norm_title not in match_norm_title) and (match_norm_title not in norm_title): continue
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
def __tv_search(self, title, year): # @UnusedVariable
results = []
url = urlparse.urljoin(self.base_url, '/watch-series')
html = self._http_get(url, cache_limit=48)
norm_title = scraper_utils.normalize_title(title)
for fragment in dom_parser.parse_dom(html, 'ul', {'class': 'letter-box-container'}):
for match in re.finditer('href="([^"]+)[^>]*>(.*?)</a>', fragment):
match_url, match_title = match.groups()
if norm_title in scraper_utils.normalize_title(match_title):
result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': ''}
results.append(result)
return results
def _get_episode_url(self, show_url, video):
episode_pattern = 'href="([^"]+season-%s-episode-%s(?!\d)[^"]*)' % (video.season, video.episode)
return self._default_get_episode_url(show_url, video, episode_pattern)
|
gpl-3.0
|
mabseher/htd
|
test/googletest/googletest-release-1.10.0/googlemock/scripts/generator/cpp/gmock_class_test.py
|
69
|
11712
|
#!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = '[email protected] (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
"""Helper class that adds assert methods."""
def StripLeadingWhitespace(self, lines):
"""Strip leading whitespace in each line in 'lines'."""
return '\n'.join([s.lstrip() for s in lines.split('\n')])
def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
"""Specialized assert that ignores the indent level."""
self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
def GenerateMethodSource(self, cpp_source):
"""Convert C++ source to Google Mock output source lines."""
method_source_lines = []
# <test> is a pseudo-filename, it is not read or written.
builder = ast.BuilderFromSource(cpp_source, '<test>')
ast_list = list(builder.Generate())
gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
return '\n'.join(method_source_lines)
def testSimpleMethod(self):
source = """
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo();
Foo(int x);
Foo(const Foo& f);
Foo(Foo&& f);
~Foo();
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testVirtualDestructor(self):
source = """
class Foo {
public:
virtual ~Foo();
virtual int Bar() = 0;
};
"""
# The destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testExplicitlyDefaultedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = default;
Foo(const Foo& f) = default;
Foo(Foo&& f) = default;
~Foo() = default;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testExplicitlyDeletedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = delete;
Foo(const Foo& f) = delete;
Foo(Foo&& f) = delete;
~Foo() = delete;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleOverrideMethod(self):
source = """
class Foo {
public:
int Bar() override;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstMethod(self):
source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
self.GenerateMethodSource(source))
def testExplicitVoid(self):
source = """
class Foo {
public:
virtual int Bar(void);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint(void));',
self.GenerateMethodSource(source))
def testStrangeNewlineInParameter(self):
source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvoid(int a));',
self.GenerateMethodSource(source))
def testDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testMultipleDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testRemovesCommentsWhenDefaultsArePresent(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testDoubleSlashCommentsInParameterListAreRemoved(self):
source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
self.GenerateMethodSource(source))
def testCStyleCommentsInParameterListAreNotRemoved(self):
# NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
# comments. Also note that C style comments after the last parameter
# are still elided.
source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
self.GenerateMethodSource(source))
def testArgsOfTemplateTypes(self):
source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\n'
'int(const vector<int>& v, map<int, string>* output));',
self.GenerateMethodSource(source))
def testReturnTypeWithOneTemplateArg(self):
source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
self.GenerateMethodSource(source))
def testReturnTypeWithManyTemplateArgs(self):
source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
# Comparing the comment text is brittle - we'll think of something
# better in case this gets annoying, but for now let's keep it simple.
self.assertEqualIgnoreLeadingWhitespace(
'// The following line won\'t really compile, as the return\n'
'// type has multiple template arguments. To fix it, use a\n'
'// typedef for the return type.\n'
'MOCK_METHOD0(Bar,\nmap<int, string>());',
self.GenerateMethodSource(source))
def testSimpleMethodInTemplatedClass(self):
source = """
template<class T>
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0_T(Bar,\nint());',
self.GenerateMethodSource(source))
def testPointerArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C*);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C*));',
self.GenerateMethodSource(source))
def testReferenceArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C&);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C&));',
self.GenerateMethodSource(source))
def testArrayArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C[]);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C[]));',
self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
def GenerateMocks(self, cpp_source):
"""Convert C++ source to complete Google Mock output source."""
# <test> is a pseudo-filename, it is not read or written.
filename = '<test>'
builder = ast.BuilderFromSource(cpp_source, filename)
ast_list = list(builder.Generate())
lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
return '\n'.join(lines)
def testNamespaces(self):
source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {
class Test {
public:
virtual void Foo();
};
} // namespace Baz
} // namespace Foo
"""
expected = """\
namespace Foo {
namespace Baz {
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
} // namespace Baz
} // namespace Foo
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testClassWithStorageSpecifierMacro(self):
source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplatedForwardDeclaration(self):
source = """
template <class T> class Forward; // Forward declaration should be ignored.
class Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplatedClass(self):
source = """
template <typename S, typename T>
class Test {
public:
virtual void Foo();
};
"""
expected = """\
template <typename T0, typename T1>
class MockTest : public Test<T0, T1> {
public:
MOCK_METHOD0_T(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplateInATemplateTypedef(self):
source = """
class Test {
public:
typedef std::vector<std::list<int>> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplateInATemplateTypedefWithComma(self):
source = """
class Test {
public:
typedef std::function<void(
const vector<std::list<int>>&, int> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testEnumClass(self):
source = """
class Test {
public:
enum class Baz { BAZINGA };
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
goldsborough/.emacs
|
.emacs.d/.python-environments/default/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/utf8prober.py
|
2919
|
2652
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
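# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): a caller with the chardet package
# on its path can probe a byte string directly. The import path depends on how
# the package is vendored; the sample bytes ("caf\xc3\xa9", i.e. "café" in
# UTF-8) are illustrative only.
#
#     from chardet.utf8prober import UTF8Prober
#     prober = UTF8Prober()
#     prober.feed(b"caf\xc3\xa9")
#     print(prober.get_charset_name(), prober.get_confidence())
# ---------------------------------------------------------------------------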
|
mit
|
axm108/CPWResonator
|
cpwprop/cpw_resonator.py
|
1
|
8789
|
from scipy.special import ellipk
from scipy.constants import epsilon_0, mu_0, pi
import numpy as np
import pandas as pd
from .materials import *
class CPWResonator:
"""
Coplanar wave-guide resonator class
"""
def __init__(self, length, conductorWidth, gapWidth, conductorThickness, resonatorType,
conductorMaterial, substrateMaterial,
temperature=4, couplingCapacitance=0, loadImpedance=50, loadBoundaryCondition='Short', mode=1):
# Supplied parameters
self.length = np.array(length)
self.conductorWidth = np.array(conductorWidth)
self.gapWidth = np.array(gapWidth)
self.conductorThickness = np.array(conductorThickness)
self.resonatorType = np.array(resonatorType)
self.conductor = Conductor(conductorMaterial)
self.substrate = Substrate(substrateMaterial)
self.temperature = np.array(temperature)
self.couplingCapacitance = np.array(couplingCapacitance)
self.loadImpedance = np.array(loadImpedance)
self.loadBoundaryCondition = np.array(loadBoundaryCondition)
self.mode = np.array(mode)
def effectivePermittivity(self):
return (1 + self.substrate.relativePermittivity)/2
def capacitancePerUnitLength(self):
# Complete elliptic integral of the first kind
k = self.conductorWidth / (self.conductorWidth + 2 * self.gapWidth)
k2 = np.sqrt(1 - k**2)
# Total CPW capacitance p.u.l.
return 4 * epsilon_0 * (self.effectivePermittivity() + 0) * (ellipk(k) / ellipk(k2))
def totalInductancePerUnitLength(self):
if self.conductor.superconductor:
return self.geometricInductancePerUnitLength() + self.kineticInductancePerUnitLength()
else:
return self.geometricInductancePerUnitLength()
def geometricInductancePerUnitLength(self):
# Complete elliptic integral of the first kind
k = self.conductorWidth / (self.conductorWidth + 2 * self.gapWidth)
k2 = np.sqrt(1 - k**2)
# Total conductor geometric inductance p.u.l.
return (mu_0 / 4) * (ellipk(k2) / ellipk(k))
def kineticInductancePerUnitLength(self):
# Complete elliptic integral of the first kind
k = self.conductorWidth / (self.conductorWidth + 2 * self.gapWidth)
K = ellipk(k)
# Geometrical factor
s, W, T = self.gapWidth, self.conductorWidth, self.conductorThickness
geometricFactor = (1 / (2 * k**2 * K**2)) * (- np.log(T / (4 * W)) + ((2 * (W + s))
/ (W + 2 * s)) * np.log(s / (W + s)) - (W / (W + 2 * s)) * np.log(T / (4 * (W + 2 * s))))
# Kinetic Inductance p.u.l.
return mu_0 * (self.londonPenetrationDepthT()**2 / (W * T)) * geometricFactor
def londonPenetrationDepthT(self):
return self.conductor.londonPenetrationDepthZero / np.sqrt(1 - (self.temperature / self.conductor.criticalTemperature)**4)
def characteristicImpedance(self, resistance=0, conductance=0, frequency=1):
return np.sqrt(
(resistance + 1j*2 * pi * frequency * self.totalInductancePerUnitLength() ) /
(conductance + 1j*2 * pi * frequency * self.capacitancePerUnitLength()))
def inputImpedance(self):
gamma = np.sqrt(
(self.conductor.resistancePerUnitLength +
1j*2*pi*self.coupledResonantFrequency()*self.totalInductancePerUnitLength() ) *
(self.conductor.conductancePerUnitLength +
1j*2*pi*self.coupledResonantFrequency()*self.capacitancePerUnitLength()))
if self.loadBoundaryCondition == 'Short':
return self.characteristicImpedance() * np.tanh(gamma * self.length)
elif self.loadBoundaryCondition == 'Open':
return self.characteristicImpedance() / np.tanh(gamma * self.length)
else:
print('Error: Load boundary condition not valid!')
return -1
def uncoupledResonantFrequency(self):
m = self.getModeFactor()
return 1 / (np.sqrt(self.totalInductancePerUnitLength()*self.capacitancePerUnitLength()) * m * self.length)
def coupledResonantFrequency(self):
m = self.getModeFactor()
return 1 / (np.sqrt((self.totalInductancePerUnitLength() * self.length) * (
(self.capacitancePerUnitLength() * self.length) + self.effectiveCouplingCapacitance())) * m)
def effectiveCouplingCapacitance(self):
return self.couplingCapacitance / (1 +
(self.uncoupledResonantFrequency() * self.couplingCapacitance * self.loadImpedance * pi)**2)
def internalQualityFactor(self):
m = self.getModeFactor()
return (1/m) * (pi/(self.conductor.amplitudeAttenuationPerUnitLength * self.length))
def externalQualityFactor(self, method=0):
if method == 0:
return self.externalQualityFactorMain()
elif method == 1:
return self.externalQualityFactorApprox()
elif method == 2:
return self.externalQualityFactorQWref()
def externalQualityFactorMain(self, loadResistance=50):
omega_n = 2 * pi * self.uncoupledResonantFrequency()
r_star = (1+(omega_n*self.couplingCapacitance*loadResistance)**2) / ((omega_n*self.couplingCapacitance)**2 * loadResistance)
C = (self.capacitancePerUnitLength() * self.length)/2
return omega_n * r_star * C
def externalQualityFactorApprox(self):
m = self.getModeFactor()
q_in = 2 * pi * self.uncoupledResonantFrequency() * self.couplingCapacitance * self.characteristicImpedance()
return (1/m) * (pi/(q_in**2))
def externalQualityFactorQWref(self, inputPortImpedance = 50):
m = self.getModeFactor()
omega_0 = 2 * pi * self.uncoupledResonantFrequency()
mBody = 1/(omega_0**2 * self.couplingCapacitance**2 * self.characteristicImpedance() * inputPortImpedance)
return (pi/m) * mBody
def loadedQualityFactor(self):
return 1 / ( (1/self.internalQualityFactor()) + (1/self.externalQualityFactor()) )
def getModeFactor(self):
if self.resonatorType == 'half':
m = 4.0 / (2.0 * self.mode)
elif self.resonatorType == 'quarter':
m = 4.0 / ((2.0 * self.mode) - 1)
else:
print('Error: Incorrect resonator type provided!')
return -1
return m
def insertionLoss(self):
g = self.internalQualityFactor()/self.externalQualityFactor()
return -20 * np.log10(g/(g+1))
def beta(self):
omega_n = 2 * pi * self.uncoupledResonantFrequency()
return omega_n * np.sqrt(self.totalInductancePerUnitLength()*self.capacitancePerUnitLength())
def info(self):
supplied_parameters = {
'length': self.length,
'conductorWidth': self.conductorWidth,
'gapWidth': self.gapWidth,
'conductorThickness': self.conductorThickness,
'resonatorType': [self.resonatorType],
'conductor': [self.conductor.material],
'substrate': [self.substrate.material],
'temperature': self.temperature,
'couplingCapacitance': self.couplingCapacitance,
'loadImpedance': self.loadImpedance,
'loadBoundaryCondition': [self.loadBoundaryCondition],
'mode': self.mode
}
calculated_parameters = {
'effectivePermittivity': self.effectivePermittivity(),
'capacitancePerUnitLength': self.capacitancePerUnitLength(),
'totalInductancePerUnitLength': self.totalInductancePerUnitLength(),
'geometricInductancePerUnitLength': self.geometricInductancePerUnitLength(),
'kineticInductancePerUnitLength': self.kineticInductancePerUnitLength(),
'londonPenetrationDepthT': self.londonPenetrationDepthT(),
'characteristicImpedance': self.characteristicImpedance(),
'inputImpedance': self.inputImpedance(),
'uncoupledResonantFrequency': self.uncoupledResonantFrequency(),
'coupledResonantFrequency': self.coupledResonantFrequency(),
'effectiveCouplingCapacitance': self.effectiveCouplingCapacitance(),
'internalQualityFactor': self.internalQualityFactor(),
'externalQualityFactor': self.externalQualityFactor(),
'loadedQualityFactor': self.loadedQualityFactor(),
'insertionLoss': self.insertionLoss(),
'beta': self.beta()
}
return [pd.DataFrame.transpose(pd.DataFrame(supplied_parameters, index=['Supplied parameters'])),
pd.DataFrame.transpose(pd.DataFrame(calculated_parameters, index=['Calculated parameters']))]
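# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): a typical quarter-wave resonator.
# The material names and numeric values are illustrative assumptions; valid
# names come from the package's materials module, which is not shown here.
#
#     from cpwprop.cpw_resonator import CPWResonator
#     res = CPWResonator(length=[5e-3], conductorWidth=[10e-6], gapWidth=[6e-6],
#                        conductorThickness=[100e-9], resonatorType='quarter',
#                        conductorMaterial='Niobium', substrateMaterial='Silicon',
#                        temperature=4, couplingCapacitance=5e-15)
#     supplied, calculated = res.info()
# ---------------------------------------------------------------------------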
|
mit
|
vFense/vFenseAgent-nix
|
agent/deps/rpm-32/Python-2.7.5/lib/python2.7/test/sortperf.py
|
232
|
4746
|
"""Sort performance test.
See main() for command line syntax.
See tabulate() for output format.
"""
import sys
import time
import random
import marshal
import tempfile
import os
td = tempfile.gettempdir()
def randfloats(n):
"""Return a list of n random floats in [0, 1)."""
# Generating floats is expensive, so this writes them out to a file in
# a temp directory. If the file already exists, it just reads them
# back in and shuffles them a bit.
fn = os.path.join(td, "rr%06d" % n)
try:
fp = open(fn, "rb")
except IOError:
r = random.random
result = [r() for i in xrange(n)]
try:
try:
fp = open(fn, "wb")
marshal.dump(result, fp)
fp.close()
fp = None
finally:
if fp:
try:
os.unlink(fn)
except os.error:
pass
except IOError, msg:
print "can't write", fn, ":", msg
else:
result = marshal.load(fp)
fp.close()
# Shuffle it a bit...
for i in range(10):
i = random.randrange(n)
temp = result[:i]
del result[:i]
temp.reverse()
result.extend(temp)
del temp
assert len(result) == n
return result
def flush():
sys.stdout.flush()
def doit(L):
t0 = time.clock()
L.sort()
t1 = time.clock()
print "%6.2f" % (t1-t0),
flush()
def tabulate(r):
"""Tabulate sort speed for lists of various sizes.
The sizes are 2**i for i in r (the argument, a list).
The output displays i, 2**i, and the time to sort arrays of 2**i
floating point numbers with the following properties:
*sort: random data
\sort: descending data
/sort: ascending data
3sort: ascending, then 3 random exchanges
+sort: ascending, then 10 random at the end
%sort: ascending, then randomly replace 1% of the elements w/ random values
~sort: many duplicates
=sort: all equal
!sort: worst case scenario
"""
cases = tuple([ch + "sort" for ch in r"*\/3+%~=!"])
fmt = ("%2s %7s" + " %6s"*len(cases))
print fmt % (("i", "2**i") + cases)
for i in r:
n = 1 << i
L = randfloats(n)
print "%2d %7d" % (i, n),
flush()
doit(L) # *sort
L.reverse()
doit(L) # \sort
doit(L) # /sort
# Do 3 random exchanges.
for dummy in range(3):
i1 = random.randrange(n)
i2 = random.randrange(n)
L[i1], L[i2] = L[i2], L[i1]
doit(L) # 3sort
# Replace the last 10 with random floats.
if n >= 10:
L[-10:] = [random.random() for dummy in range(10)]
doit(L) # +sort
# Replace 1% of the elements at random.
for dummy in xrange(n // 100):
L[random.randrange(n)] = random.random()
doit(L) # %sort
# Arrange for lots of duplicates.
if n > 4:
del L[4:]
L = L * (n // 4)
# Force the elements to be distinct objects, else timings can be
# artificially low.
L = map(lambda x: --x, L)
doit(L) # ~sort
del L
# All equal. Again, force the elements to be distinct objects.
L = map(abs, [-0.5] * n)
doit(L) # =sort
del L
# This one looks like [3, 2, 1, 0, 0, 1, 2, 3]. It was a bad case
# for an older implementation of quicksort, which used the median
# of the first, last and middle elements as the pivot.
half = n // 2
L = range(half - 1, -1, -1)
L.extend(range(half))
# Force to float, so that the timings are comparable. The sort is
# significantly faster if the elements are left as ints.
L = map(float, L)
doit(L) # !sort
print
def main():
"""Main program when invoked as a script.
One argument: tabulate a single row.
Two arguments: tabulate a range (inclusive).
Extra arguments are used to seed the random generator.
"""
# default range (inclusive)
k1 = 15
k2 = 20
if sys.argv[1:]:
# one argument: single point
k1 = k2 = int(sys.argv[1])
if sys.argv[2:]:
# two arguments: specify range
k2 = int(sys.argv[2])
if sys.argv[3:]:
# derive random seed from remaining arguments
x = 1
for a in sys.argv[3:]:
x = 69069 * x + hash(a)
random.seed(x)
r = range(k1, k2+1) # include the end point
tabulate(r)
if __name__ == '__main__':
main()
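# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): typical invocations, matching the
# argument handling documented in main() above.
#
#     python sortperf.py 15          # tabulate a single row, n = 2**15
#     python sortperf.py 15 20       # tabulate rows for i = 15..20 inclusive
#     python sortperf.py 15 20 seed  # extra arguments seed the random module
# ---------------------------------------------------------------------------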
|
lgpl-3.0
|
tpatil2/ConnectX
|
googletest/test/gtest_list_tests_unittest.py
|
1898
|
6515
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = '[email protected] (Patrick Hanna)'
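# Illustrative manual invocation (the binary path is an assumption; the script
# below drives the same flags through gtest_test_utils):
#   ./gtest_list_tests_unittest_ --gtest_list_tests --gtest_filter=Foo*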
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running the command;
other_flag: a different flag to be passed to the command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
unlicense
|
iabdalkader/openmv
|
scripts/examples/12-Thermopile-Shield/MLX90640_overlay.py
|
1
|
1712
|
# MLX90640 Overlay Demo
#
# This example shows off how to overlay a heatmap onto your OpenMV Cam's
# live video output from the main camera.
import sensor, image, time, fir
ALT_OVERLAY = False # Set to True to allocate a second ir image.
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
# Initialize the thermal sensor
fir.init(type=fir.FIR_MLX90640, refresh=16) # Hz (higher end OpenMV Cams may be able to run faster)
# Allocate another frame buffer for smoother video.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture an image
img = sensor.snapshot()
# Capture FIR data
# ta: Ambient temperature
# ir: Object temperatures (IR array)
# to_min: Minimum object temperature
# to_max: Maximum object temperature
ta, ir, to_min, to_max = fir.read_ir()
if not ALT_OVERLAY:
# Scale the image and blend it with the framebuffer
fir.draw_ir(img, ir)
else:
# Create a secondary image and then blend into the frame buffer.
extra_fb.clear()
fir.draw_ir(extra_fb, ir, alpha=256)
img.blend(extra_fb, alpha=128)
# Draw ambient, min and max temperatures.
img.draw_string(8, 0, "Ta: %0.2f C" % ta, color = (255, 0, 0), mono_space = False)
img.draw_string(8, 8, "To min: %0.2f C" % to_min, color = (255, 0, 0), mono_space = False)
img.draw_string(8, 16, "To max: %0.2f C"% to_max, color = (255, 0, 0), mono_space = False)
# Force high quality streaming...
img.compress(quality=90)
# Print FPS.
print(clock.fps())
|
mit
|
reddymeghraj/showroom
|
erpnext/controllers/selling_controller.py
|
10
|
9656
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, flt, cstr, comma_or
from erpnext.setup.utils import get_company_currency
from frappe import _, throw
from erpnext.stock.get_item_details import get_available_qty
from erpnext.controllers.stock_controller import StockController
class SellingController(StockController):
def __setup__(self):
if hasattr(self, "taxes"):
self.print_templates = {
"taxes": "templates/print_formats/includes/taxes.html"
}
def get_feed(self):
return _("To {0} | {1} {2}").format(self.customer_name, self.currency,
self.grand_total)
def onload(self):
if self.doctype in ("Sales Order", "Delivery Note", "Sales Invoice"):
for item in self.get("items"):
item.update(get_available_qty(item.item_code,
item.warehouse))
def validate(self):
super(SellingController, self).validate()
self.validate_max_discount()
check_active_sales_items(self)
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
check_credit_limit(self.customer, self.company)
def set_missing_values(self, for_validate=False):
super(SellingController, self).set_missing_values(for_validate)
# set contact and address details for customer, if they are not mentioned
self.set_missing_lead_customer_details()
self.set_price_list_and_item_details()
def set_missing_lead_customer_details(self):
if getattr(self, "customer", None):
from erpnext.accounts.party import _get_party_details
party_details = _get_party_details(self.customer,
ignore_permissions=self.flags.ignore_permissions)
if not self.meta.get_field("sales_team"):
party_details.pop("sales_team")
self.update_if_missing(party_details)
elif getattr(self, "lead", None):
from erpnext.crm.doctype.lead.lead import get_lead_details
self.update_if_missing(get_lead_details(self.lead))
def set_price_list_and_item_details(self):
self.set_price_list_currency("Selling")
self.set_missing_item_details()
def apply_shipping_rule(self):
if self.shipping_rule:
shipping_rule = frappe.get_doc("Shipping Rule", self.shipping_rule)
value = self.base_net_total
# TODO
# shipping rule calculation based on item's net weight
shipping_amount = 0.0
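# Use the first condition whose [from_value, to_value] range contains the
# base net total; an empty to_value is treated as "no upper bound".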
for condition in shipping_rule.get("conditions"):
if not condition.to_value or (flt(condition.from_value) <= value <= flt(condition.to_value)):
shipping_amount = condition.shipping_amount
break
shipping_charge = {
"doctype": "Sales Taxes and Charges",
"charge_type": "Actual",
"account_head": shipping_rule.account,
"cost_center": shipping_rule.cost_center
}
existing_shipping_charge = self.get("taxes", filters=shipping_charge)
if existing_shipping_charge:
# take the last record found
existing_shipping_charge[-1].tax_amount = shipping_amount
else:
shipping_charge["tax_amount"] = shipping_amount
shipping_charge["description"] = shipping_rule.label
self.append("taxes", shipping_charge)
self.calculate_taxes_and_totals()
def remove_shipping_charge(self):
if self.shipping_rule:
shipping_rule = frappe.get_doc("Shipping Rule", self.shipping_rule)
existing_shipping_charge = self.get("taxes", {
"doctype": "Sales Taxes and Charges",
"charge_type": "Actual",
"account_head": shipping_rule.account,
"cost_center": shipping_rule.cost_center
})
if existing_shipping_charge:
self.get("taxes").remove(existing_shipping_charge[-1])
self.calculate_taxes_and_totals()
def set_total_in_words(self):
from frappe.utils import money_in_words
company_currency = get_company_currency(self.company)
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None,
"disable_rounded_total"))
if self.meta.get_field("base_in_words"):
self.base_in_words = money_in_words(disable_rounded_total and
self.base_grand_total or self.base_rounded_total, company_currency)
if self.meta.get_field("in_words"):
self.in_words = money_in_words(disable_rounded_total and
self.grand_total or self.rounded_total, self.currency)
def calculate_commission(self):
if self.meta.get_field("commission_rate"):
self.round_floats_in(self, ["base_net_total", "commission_rate"])
if self.commission_rate > 100.0:
throw(_("Commission rate cannot be greater than 100"))
self.total_commission = flt(self.base_net_total * self.commission_rate / 100.0,
self.precision("total_commission"))
def calculate_contribution(self):
if not self.meta.get_field("sales_team"):
return
total = 0.0
sales_team = self.get("sales_team")
for sales_person in sales_team:
self.round_floats_in(sales_person)
sales_person.allocated_amount = flt(
self.base_net_total * sales_person.allocated_percentage / 100.0,
self.precision("allocated_amount", sales_person))
total += sales_person.allocated_percentage
if sales_team and total != 100.0:
throw(_("Total allocated percentage for sales team should be 100"))
def validate_order_type(self):
valid_types = ["Sales", "Maintenance", "Shopping Cart"]
if not self.order_type:
self.order_type = "Sales"
elif self.order_type not in valid_types:
throw(_("Order Type must be one of {0}").format(comma_or(valid_types)))
def validate_max_discount(self):
for d in self.get("items"):
discount = flt(frappe.db.get_value("Item", d.item_code, "max_discount"))
if discount and flt(d.discount_percentage) > discount:
frappe.throw(_("Maxiumm discount for Item {0} is {1}%").format(d.item_code, discount))
def get_item_list(self):
il = []
for d in self.get("items"):
reserved_warehouse = ""
reserved_qty_for_main_item = 0
if d.qty is None:
frappe.throw(_("Row {0}: Qty is mandatory").format(d.idx))
if self.doctype == "Sales Order":
if (frappe.db.get_value("Item", d.item_code, "is_stock_item") == 'Yes' or
self.has_sales_bom(d.item_code)) and not d.warehouse:
frappe.throw(_("Reserved Warehouse required for stock Item {0} in row {1}").format(d.item_code, d.idx))
reserved_warehouse = d.warehouse
if flt(d.qty) > flt(d.delivered_qty):
reserved_qty_for_main_item = flt(d.qty) - flt(d.delivered_qty)
elif self.doctype == "Delivery Note" and d.against_sales_order:
# if SO qty is 10 and there is tolerance of 20%, then it will allow DN of 12.
# But in this case reserved qty should only be reduced by 10 and not 12
already_delivered_qty = self.get_already_delivered_qty(self.name,
d.against_sales_order, d.so_detail)
so_qty, reserved_warehouse = self.get_so_qty_and_warehouse(d.so_detail)
if already_delivered_qty + d.qty > so_qty:
reserved_qty_for_main_item = -(so_qty - already_delivered_qty)
else:
reserved_qty_for_main_item = -flt(d.qty)
if self.has_sales_bom(d.item_code):
for p in self.get("packed_items"):
if p.parent_detail_docname == d.name and p.parent_item == d.item_code:
# the packing details table's qty is already multiplied with parent's qty
il.append(frappe._dict({
'warehouse': p.warehouse,
'reserved_warehouse': reserved_warehouse,
'item_code': p.item_code,
'qty': flt(p.qty),
'reserved_qty': (flt(p.qty)/flt(d.qty)) * reserved_qty_for_main_item,
'uom': p.uom,
'batch_no': cstr(p.batch_no).strip(),
'serial_no': cstr(p.serial_no).strip(),
'name': d.name
}))
else:
il.append(frappe._dict({
'warehouse': d.warehouse,
'reserved_warehouse': reserved_warehouse,
'item_code': d.item_code,
'qty': d.qty,
'reserved_qty': reserved_qty_for_main_item,
'uom': d.stock_uom,
'stock_uom': d.stock_uom,
'batch_no': cstr(d.get("batch_no")).strip(),
'serial_no': cstr(d.get("serial_no")).strip(),
'name': d.name
}))
return il
def has_sales_bom(self, item_code):
return frappe.db.sql("""select name from `tabSales BOM`
where new_item_code=%s and docstatus != 2""", item_code)
def get_already_delivered_qty(self, dn, so, so_detail):
qty = frappe.db.sql("""select sum(qty) from `tabDelivery Note Item`
where so_detail = %s and docstatus = 1
and against_sales_order = %s
and parent != %s""", (so_detail, so, dn))
return qty and flt(qty[0][0]) or 0.0
def get_so_qty_and_warehouse(self, so_detail):
so_item = frappe.db.sql("""select qty, warehouse from `tabSales Order Item`
where name = %s and docstatus = 1""", so_detail, as_dict=1)
so_qty = so_item and flt(so_item[0]["qty"]) or 0.0
so_warehouse = so_item and so_item[0]["warehouse"] or ""
return so_qty, so_warehouse
def check_stop_sales_order(self, ref_fieldname):
for d in self.get("items"):
if d.get(ref_fieldname):
status = frappe.db.get_value("Sales Order", d.get(ref_fieldname), "status")
if status == "Stopped":
frappe.throw(_("Sales Order {0} is stopped").format(d.get(ref_fieldname)))
def check_active_sales_items(obj):
for d in obj.get("items"):
if d.item_code:
item = frappe.db.sql("""select docstatus, is_sales_item,
is_service_item, income_account from tabItem where name = %s""",
d.item_code, as_dict=True)[0]
if item.is_sales_item == 'No' and item.is_service_item == 'No':
frappe.throw(_("Item {0} must be Sales or Service Item in {1}").format(d.item_code, d.idx))
if getattr(d, "income_account", None) and not item.income_account:
frappe.db.set_value("Item", d.item_code, "income_account",
d.income_account)
|
agpl-3.0
|
AlanZatarain/cortex-vfx
|
test/IECore/CamelCaseTest.py
|
12
|
5185
|
##########################################################################
#
# Copyright (c) 2010, John Haddon. All rights reserved.
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class CamelCaseTest( unittest.TestCase ) :
def testSplit( self ) :
self.assertEqual( IECore.CamelCase.split( "A" ), [ "A" ] )
self.assertEqual( IECore.CamelCase.split( "a" ), [ "a" ] )
self.assertEqual( IECore.CamelCase.split( "AB" ), [ "AB" ] )
self.assertEqual( IECore.CamelCase.split( "ab" ), [ "ab" ] )
self.assertEqual( IECore.CamelCase.split( "aB" ), [ "a", "B" ] )
self.assertEqual( IECore.CamelCase.split( "Ab" ), [ "Ab" ] )
self.assertEqual( IECore.CamelCase.split( "TIFFImageReader" ), [ "TIFF", "Image", "Reader" ] )
self.assertEqual( IECore.CamelCase.split( "camelCase" ), [ "camel", "Case" ] )
self.assertEqual( IECore.CamelCase.split( "hsvToRGB" ), [ "hsv", "To", "RGB" ] )
def testJoin( self ) :
self.assertEqual( IECore.CamelCase.join( [ "camel", "case" ], IECore.CamelCase.Caps.Unchanged ), "camelcase" )
self.assertEqual( IECore.CamelCase.join( [ "camel", "case" ], IECore.CamelCase.Caps.First ), "Camelcase" )
self.assertEqual( IECore.CamelCase.join( [ "camel", "case" ], IECore.CamelCase.Caps.All ), "CamelCase" )
self.assertEqual( IECore.CamelCase.join( [ "camel", "case" ], IECore.CamelCase.Caps.AllExceptFirst ), "camelCase" )
self.assertEqual( IECore.CamelCase.join( [ "TIFF", "image", "reader" ], IECore.CamelCase.Caps.Unchanged ), "TIFFimagereader" )
self.assertEqual( IECore.CamelCase.join( [ "TIFF", "image", "reader" ], IECore.CamelCase.Caps.First ), "TIFFimagereader" )
self.assertEqual( IECore.CamelCase.join( [ "TIFF", "image", "reader" ], IECore.CamelCase.Caps.All ), "TIFFImageReader" )
self.assertEqual( IECore.CamelCase.join( [ "TIFF", "image", "reader" ], IECore.CamelCase.Caps.AllExceptFirst ), "tiffImageReader" )
def testToSpaced( self ) :
self.assertEqual( IECore.CamelCase.toSpaced( "camelCase" ), "Camel Case" )
self.assertEqual( IECore.CamelCase.toSpaced( "camelCase", IECore.CamelCase.Caps.All ), "Camel Case" )
self.assertEqual( IECore.CamelCase.toSpaced( "camelCase", IECore.CamelCase.Caps.First ), "Camel case" )
self.assertEqual( IECore.CamelCase.toSpaced( "camelCase", IECore.CamelCase.Caps.AllExceptFirst ), "camel Case" )
self.assertEqual( IECore.CamelCase.toSpaced( "TIFFImageReader" ), "TIFF Image Reader" )
self.assertEqual( IECore.CamelCase.toSpaced( "TIFFImageReader", IECore.CamelCase.Caps.All ), "TIFF Image Reader" )
self.assertEqual( IECore.CamelCase.toSpaced( "TIFFImageReader", IECore.CamelCase.Caps.First ), "TIFF image reader" )
self.assertEqual( IECore.CamelCase.toSpaced( "TIFFImageReader", IECore.CamelCase.Caps.AllExceptFirst ), "tiff Image Reader" )
def testFromSpaced( self ) :
self.assertEqual( IECore.CamelCase.fromSpaced( "camel case" ), "CamelCase" )
self.assertEqual( IECore.CamelCase.fromSpaced( "camel case", IECore.CamelCase.Caps.All ), "CamelCase" )
self.assertEqual( IECore.CamelCase.fromSpaced( "camel case", IECore.CamelCase.Caps.First ), "Camelcase" )
self.assertEqual( IECore.CamelCase.fromSpaced( "camel case", IECore.CamelCase.Caps.AllExceptFirst ), "camelCase" )
def testNumericCharacters( self ) :
self.assertEqual( IECore.CamelCase.split( "linearToRec709" ), [ "linear", "To", "Rec709" ] )
self.assertEqual( IECore.CamelCase.split( "Rec709ToLinear" ), [ "Rec709", "To", "Linear" ] )
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
codeforeurope/Change-By-Us
|
controllers/project.py
|
3
|
20727
|
"""
:copyright: (c) 2011 Local Projects, all rights reserved
:license: Affero GNU GPL v3, see LICENSE for more details.
"""
from framework.controller import *
import framework.util as util
import giveaminute.project as mProject
import giveaminute.idea as mIdea
import giveaminute.projectResource as mProjectResource
import giveaminute.messaging as mMessaging
import giveaminute.models as models
import helpers.censor
import json
import re
import datetime
class Project(Controller):
def GET(self, action=None, param0=None, param1=None):
if (action == 'resource'):
if (param0 == 'info'):
return self.getResourceInfo()
else:
return self.not_found()
elif (action == 'resources'):
if (param0 == 'related'):
return self.getRelatedResources()
else:
return self.getResourcesAndLinks()
elif (action == 'messages'):
return self.getMessages()
elif (action == 'featured'):
return self.getFeaturedProjects()
elif (action == 'small'):
return self.getProjectData()
elif (action == 'rss'):
return self.showConversationRSS(param0)
else:
return self.showProject(action)
def POST(self, action=None, param0=None, param1=None):
if (action == 'join'):
return self.join()
elif (action == 'endorse'):
if (param0 == 'remove'):
return self.removeEndorsement()
else:
return self.endorse()
elif (action == 'link'):
if (param0 == 'add'):
return self.addLink()
elif (param0 == 'remove'):
return self.removeLink()
else:
return self.not_found()
elif (action == 'resource'):
if (param0 == 'add'):
return self.addResource()
elif (param0 == 'remove'):
return self.removeResource()
else:
return self.not_found()
elif (action == 'message'):
if (param0 == 'add'):
return self.addMessage()
elif (param0 == 'remove'):
return self.removeMessage()
else:
return self.not_found()
elif (action == 'tag'):
if (param0 == 'add'):
return self.addKeywords()
elif (param0 == 'remove'):
return self.removeKeyword()
else:
return self.not_found()
elif (action == 'invite'):
return self.invite()
elif (action == 'leave'):
return self.leaveProject()
elif (action == 'user'):
if (param0 == 'remove'):
return self.removeUser()
elif (param0 == 'admin'):
if (param1 == 'add'):
return self.setAdmin(True)
elif (param1 == 'remove'):
return self.setAdmin(False)
else:
return self.not_found()
else:
return self.not_found()
elif (action == 'photo'):
return self.updateImage()
elif (action == 'description'):
return self.updateDescription()
elif (action == 'title'):
return self.updateTitle()
else:
return self.not_found()
def getProject(self, project_id):
"""Get the SQL Alchemy project object"""
project = self.orm.query(models.Project).get(project_id)
return project
def showProject(self, projectId):
"""The main project detail view controller."""
if (projectId):
project = mProject.Project(self.db, projectId)
if (project.data):
projDictionary = project.getFullDictionary()
project_user = self.getProjectUser(projectId)
self.template_data['project_user'] = dict(data = project_user, json = json.dumps(project_user))
project_proxy = self.getProject(projectId)
project_proxy.json = json.dumps(projDictionary)
project_proxy.data = projDictionary
self.template_data['project'] = project_proxy
import giveaminute.filters as gam_filters
gam_filters.register_filters()
return self.render('project')
else:
return self.not_found()
else:
return self.not_found()
def showConversationRSS(self, projectId):
if (projectId):
project = mProject.Project(self.db, projectId)
projDictionary = project.getFullDictionary()
self.template_data['project'] = dict(json = json.dumps(projDictionary), data = projDictionary)
msgs = self.template_data['project']['data']['info']['messages']['items']
for item in msgs:
item['created'] = datetime.datetime.strptime(item['created'], '%Y-%m-%d %H:%M:%S').strftime('%a, %d %b %Y %H:%M:%S EST')
return self.render('project/conversation_rss', suffix='xml.rss', content_type = 'application/rss+xml')
else:
return self.not_found()
def getProjectUser(self, projectId):
projectUser = dict(is_project_admin = False, is_member = False, is_invited_by_idea = False, can_endorse = False)
if (self.user):
sqlInvited = """select pi.project_id from project_invite pi
inner join idea i on i.idea_id = pi.invitee_idea_id
where pi.project_id = $projectId and i.user_id = $userId
limit 1"""
dataInvited = list(self.db.query(sqlInvited, {'userId':self.user.id, 'email':self.user.email, 'projectId':projectId}))
projectUser['is_invited_by_idea'] = (len(dataInvited) == 1)
sqlMember = "select is_project_admin from project__user where user_id = $userId and project_id = $projectId limit 1"
dataMember = list(self.db.query(sqlMember, {'userId':self.user.id, 'projectId':projectId}))
if (len(dataMember)== 1):
projectUser['is_member'] = True
if (dataMember[0].is_project_admin == 1):
projectUser['is_project_admin'] = True
# # #
if (self.user.isLeader):
sqlEndorse = "select user_id from project_endorsement where project_id = $projectId and user_id = $userId limit 1"
dataEndorse = list(self.db.query(sqlEndorse, {'userId':self.user.id, 'projectId':projectId}))
projectUser['can_endorse'] = (len(dataEndorse) == 0)
else:
projectUser['can_endorse'] = False
return projectUser
def join(self):
projectId = self.request('project_id')
if (not self.user):
log.error("*** join submitted w/o logged in user")
return False
elif (not projectId):
log.error("*** join submitted w/o logged project id")
return False
else:
isJoined = mProject.join(self.db, projectId, self.user.id)
if (isJoined):
project = mProject.Project(self.db, projectId)
# add a message to the queue about the join
message = 'New Member! Your project now has %s total!' % project.data.num_members
# email admin
if (not mMessaging.emailProjectJoin(project.data.owner_email,
projectId,
project.data.title,
self.user.id,
mProject.userNameDisplay(self.user.firstName,
self.user.lastName,
self.user.affiliation,
mProject.isFullLastName(self.user.groupMembershipBitmask)))):
log.error("*** couldn't email admin on user_id = %s joining project %s" % (self.user.id, projectId))
if (not mProject.addMessage(self.db,
projectId,
message,
'join',
self.user.id)):
log.error("*** new message not created for user %s on joining project %s" % (self.user.id, projectId))
return isJoined
def invite(self):
projectId = self.request('project_id')
ideaId = self.request('idea_id')
emails = self.request('email_list')
message = self.request('message')
if (not self.user):
log.error("*** invite w/o logged in user")
return False
elif (not projectId):
log.error("***invite w/o project id")
return False
else:
if (ideaId):
return mProject.inviteByIdea(self.db, projectId, ideaId, message, self.user)
elif (emails):
return mProject.inviteByEmail(self.db, projectId, emails.split(','), message, self.user)
else:
log.error("*** invite w/o idea or email")
return False
def endorse(self):
projectId = self.request('project_id')
if (not self.user or not self.user.isLeader):
log.error("*** endorsement submitted w/o logged in user or with non-project leader user account")
return False
else:
isEndorsed = mProject.endorse(self.db, projectId, self.user.id)
if (isEndorsed):
# TODO do we need to get the whole project here?
project = mProject.Project(self.db, projectId)
# email admin
if (not mMessaging.emailProjectEndorsement(project.data.owner_email,
project.data.title,
"%s %s" % (self.user.firstName, self.user.lastName))):
log.error("*** couldn't email admin on user_id = %s endorsing project %s" % (self.user.id, projectId))
# add a message to the queue about the join
message = 'Congratulations! Your group has now been endorsed by %s %s.' % (self.user.firstName, self.user.lastName)
if (not mProject.addMessage(self.db,
projectId,
message,
'endorsement',
self.user.id)):
log.error("*** new message not created for user %s on endorsing project %s" % (self.user.id, projectId))
return isEndorsed
def removeEndorsement(self):
projectId = self.request('project_id')
userId = util.try_f(int, self.request('user_id'))
if (self.user and
((self.user.isLeader and self.user.id == userId) or
self.user.isAdmin)):
isRemoved = mProject.removeEndorsement(self.db, projectId, userId)
# if successfully removed, remove messages as well
if (isRemoved):
mProject.removeEndorsementMessage(self.db, projectId, userId)
return isRemoved
else:
log.error("*** attempt to remove endorsement w/o proper credentials")
return False
def addLink(self):
if (self.request('main_text')): return False
projectId = self.request('project_id')
title = self.request('title')
url = util.makeUrlAbsolute(self.request('url')) if self.request('url') else None
if (not projectId or util.strNullOrEmpty(title) or util.strNullOrEmpty(url)):
log.error("*** link submitted w/o id, title, or url")
return False
else:
return mProject.addLinkToProject(self.db, projectId, title, url)
def removeLink(self):
projectId = self.request('project_id')
linkId = self.request('link_id')
if (not linkId):
log.error("*** link removal submitted missing an id")
return False
else:
if (not self.user.isAdmin and
not self.user.isModerator and
not self.user.isProjectAdmin(projectId)):
log.warning("*** unauthorized link removal attempt by user_id = %s" % self.user.id)
return False
else:
return mProject.setLinkIsActive(self.db, linkId, 0)
def addResource(self):
projectId = self.request('project_id')
projectResourceId = self.request('project_resource_id')
if (not projectId or not projectResourceId):
log.error("*** resource submitted missing an id")
return False
else:
if (mProject.addResourceToProject(self.db, projectId, projectResourceId)):
# TODO do we need to get the whole project here?
project = mProject.Project(self.db, projectId)
res = mProjectResource.ProjectResource(self.db, projectResourceId)
if (not mMessaging.emailResourceNotification(res.data.contact_email, projectId, project.data.title, project.data.description, res.data.title)):
log.error("*** couldn't email resource id %s" % projectResourceId)
else:
log.error("*** couldn't add resource %s to project %s" % (projectResourceId, projectId))
return False
def removeResource(self):
projectId = self.request('project_id')
projectResourceId = self.request('project_resource_id')
if (not projectId or not projectResourceId):
log.error("*** resource removal submitted missing an id")
return False
else:
if (not self.user.isAdmin and
not self.user.isModerator and
not self.user.isProjectAdmin(projectId)):
log.warning("*** unauthorized resource removal attempt by user_id = %s" % self.user.id)
return False
else:
return mProject.removeResourceFromProject(self.db, projectId, projectResourceId)
def getResourceInfo(self):
projectResourceId = self.request('project_resource_id')
info = None
resource = mProjectResource.ProjectResource(self.db, projectResourceId)
if (resource.data):
info = self.json(resource.getFullDictionary())
return info
def getResourcesAndLinks(self):
projectId = self.request('project_id')
data = dict(links = mProject.getLinks(self.db, projectId),
resources = mProject.getResources(self.db, projectId))
return self.json(data)
def getRelatedResources(self):
projectId = self.request('project_id')
resources = []
project = mProject.Project(self.db, projectId)
keywords = project.data.keywords.split()
locationId = project.data.location_id
resources = mProjectResource.searchProjectResources(self.db, keywords, locationId)
obj = dict(resources = resources)
return self.json(obj)
def addMessage(self):
"""
Add a message to the project discussion stream.
POST Parameters:
---------------
project_id -- The id of the project
main_text -- The message contents
attachment_id -- (optional) The file attachment on the message. If no
file attachment is available, it should be an empty string or left
off of the request entirely.
"""
if (self.request('main_text')): return False
projectId = self.request('project_id')
message = self.request('message')
# If the file_id is None or empty string, record it as None.
attachmentId = self.request('attachment_id') or None
if (not projectId):
log.error("*** message add attempted w/o project id")
return False
elif (util.strNullOrEmpty(message)):
log.error("*** message add attempted w/ no message")
return False
else:
return mProject.addMessage(self.db, projectId, message,
'member_comment', self.user.id,
attachmentId=attachmentId)
def removeMessage(self):
messageId = self.request('message_id')
if (not messageId):
log.error("*** message remove attempted w/o ids")
return False
else:
return mProject.removeMessage(self.db, messageId)
def getMessages(self):
projectId = self.request('project_id')
limit = util.try_f(int, self.request('n_messages'), 10)
offset = util.try_f(int, self.request('offset'), 0)
filterBy = self.request('filter')
return self.json(mProject.getMessages(self.db, projectId, limit, offset, filterBy))
def getFeaturedProjects(self):
# overkill to get the full dictionary, but it's a small admin-only call
projects = mProject.getFeaturedProjectsDictionary(self.db)
return self.json(projects)
def getProjectData(self):
projectId = self.request('project_id')
project = mProject.Project(self.db, projectId)
return self.json(mProject.smallProject(project.id,
project.data.title,
project.data.description,
project.data.image_id,
project.data.num_members,
project.data.owner_user_id,
project.data.owner_first_name,
project.data.owner_last_name,
project.data.owner_image_id))
def addKeywords(self):
projectId = self.request('project_id')
keywords = self.request('text')
if (projectId and keywords):
return mProject.addKeywords(self.db, projectId, keywords.split(','))
else:
log.error("*** add keyword attempted w/o project id or keywords")
return False
def removeKeyword(self):
projectId = self.request('project_id')
keyword = self.request('text')
return mProject.removeKeyword(self.db, projectId, keyword)
def leaveProject(self):
userId = self.session.user_id
projectId = self.request('project_id')
return mProject.removeUserFromProject(self.db, projectId, userId)
def removeUser(self):
projectId = self.request('project_id')
userId = self.request('user_id')
return mProject.removeUserFromProject(self.db, projectId, userId)
def updateImage(self):
projectId = self.request('project_id')
imageId = self.request('image_id')
return mProject.updateProjectImage(self.db, projectId, imageId)
def updateDescription(self):
projectId = self.request('project_id')
description = self.request('text')
return mProject.updateProjectDescription(self.db, projectId, description)
def updateTitle(self):
project_id = self.request('project_id')
title = self.request('title')
num_flags = helpers.censor.badwords(self.db, title)
if num_flags == 2:
return False
project = self.orm.query(models.Project).get(project_id)
if project is None:
return False
project.title = title
self.orm.commit()
return True
def setAdmin(self, b):
projectId = self.request('project_id')
userId = self.request('user_id')
projectUser = self.orm.query(models.ProjectMember).get((userId, projectId))
# TODO prevent last admin from being deleted
# TODO on delete of creator, make oldest admin creator
if projectUser:
projectUser.is_project_admin = b
self.orm.commit()
return True
else:
return False
|
agpl-3.0
|
grupoprog3/proyecto_final
|
proyecto/flask/Lib/site-packages/wtforms/ext/appengine/fields.py
|
177
|
7574
|
from __future__ import unicode_literals
import decimal
import operator
from wtforms import fields, widgets
from wtforms.compat import text_type, string_types
class ReferencePropertyField(fields.SelectFieldBase):
"""
A field for ``db.ReferenceProperty``. The list items are rendered in a
select.
:param reference_class:
A db.Model class which will be used to generate the default query
to make the list of items. If this is not specified, the `query`
property must be overridden before validation.
:param get_label:
If a string, use this attribute on the model class as the label
associated with each option. If a one-argument callable, this callable
will be passed the model instance and is expected to return the label text.
Otherwise, the model object's `__str__` or `__unicode__` will be used.
:param allow_blank:
If set to true, a blank choice will be added to the top of the list
to allow `None` to be chosen.
:param blank_text:
Use this to override the default blank option's label.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, reference_class=None,
get_label=None, allow_blank=False,
blank_text='', **kwargs):
super(ReferencePropertyField, self).__init__(label, validators,
**kwargs)
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is not None:
self.query = reference_class.all()
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
if str(obj.key()) == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield ('__None', self.blank_text, self.data is None)
for obj in self.query:
key = str(obj.key())
label = self.get_label(obj)
yield (key, label, (self.data.key() == obj.key()) if self.data else False)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.query:
if str(self.data.key()) == str(obj.key()):
break
else:
raise ValueError(self.gettext('Not a valid choice'))
class KeyPropertyField(fields.SelectFieldBase):
"""
A field for ``ndb.KeyProperty``. The list items are rendered in a select.
:param reference_class:
An ndb.Model class which will be used to generate the default query
to make the list of items. If this is not specified, the `query`
property must be overridden before validation.
:param get_label:
If a string, use this attribute on the model class as the label
associated with each option. If a one-argument callable, this callable
will be passed the model instance and is expected to return the label text.
Otherwise, the model object's `__str__` or `__unicode__` will be used.
:param allow_blank:
If set to true, a blank choice will be added to the top of the list
to allow `None` to be chosen.
:param blank_text:
Use this to override the default blank option's label.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, reference_class=None,
get_label=None, allow_blank=False, blank_text='', **kwargs):
super(KeyPropertyField, self).__init__(label, validators, **kwargs)
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is not None:
self.query = reference_class.query()
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
if str(obj.key.id()) == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield ('__None', self.blank_text, self.data is None)
for obj in self.query:
key = str(obj.key.id())
label = self.get_label(obj)
yield (key, label, (self.data.key == obj.key) if self.data else False)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if self.data is not None:
for obj in self.query:
if self.data.key == obj.key:
break
else:
raise ValueError(self.gettext('Not a valid choice'))
elif not self.allow_blank:
raise ValueError(self.gettext('Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
"""
A field for ``db.StringListProperty``. The list items are rendered in a
textarea.
"""
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return self.data and text_type("\n".join(self.data)) or ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = valuelist[0].splitlines()
except ValueError:
raise ValueError(self.gettext('Not a valid list'))
class IntegerListPropertyField(fields.TextAreaField):
"""
A field for ``db.ListProperty(int)``. The list items are rendered in a
textarea.
"""
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return '\n'.join(text_type(value) for value in self.data) if self.data else ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = [int(value) for value in valuelist[0].splitlines()]
except ValueError:
raise ValueError(self.gettext('Not a valid integer list'))
class GeoPtPropertyField(fields.TextField):
def process_formdata(self, valuelist):
if valuelist:
try:
lat, lon = valuelist[0].split(',')
self.data = '%s,%s' % (decimal.Decimal(lat.strip()), decimal.Decimal(lon.strip()),)
except (decimal.InvalidOperation, ValueError):
raise ValueError('Not a valid coordinate location')
|
apache-2.0
|
sharkykh/SickRage
|
sickbeard/providers/t411.py
|
1
|
7414
|
# coding=utf-8
# Author: djoole <[email protected]>
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import time
import traceback
import six
from requests.auth import AuthBase
from sickbeard import logger, tvcache
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class T411Provider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "T411")
self.username = None
self.password = None
self.token = None
self.tokenLastUpdate = None
self.cache = tvcache.TVCache(self, min_time=10) # Only poll T411 every 10 minutes max
self.urls = {'base_url': 'https://www.t411.al/',
'search': 'https://api.t411.al/torrents/search/%s*?cid=%s&limit=100',
'rss': 'https://api.t411.al/torrents/top/today',
'login_page': 'https://api.t411.al/auth',
'download': 'https://api.t411.al/torrents/download/%s'}
self.url = self.urls['base_url']
self.subcategories = [433, 637, 455, 639]
self.minseed = 0
self.minleech = 0
self.confirmed = False
def login(self):
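# Reuse a cached API token for up to 30 minutes before re-authenticating.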
if self.token and self.tokenLastUpdate and time.time() < (self.tokenLastUpdate + 30 * 60):
return True
login_params = {'username': self.username,
'password': self.password}
response = self.get_url(self.urls['login_page'], post_data=login_params, returns='json', verify=False)
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if response and 'token' in response:
self.token = response['token']
self.tokenLastUpdate = time.time()
self.session.auth = T411Auth(self.token)
return True
else:
logger.log("Token not found in authentication response", logger.WARNING)
return False
def search(self, search_params, age=0, ep_obj=None): # pylint: disable=too-many-branches, too-many-locals, too-many-statements
results = []
if not self.login():
return results
for mode in search_params:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_urlS = ([self.urls['search'] % (search_string, u) for u in self.subcategories], [self.urls['rss']])[mode == 'RSS']
for search_url in search_urlS:
data = self.get_url(search_url, returns='json', verify=False)
if not data:
continue
try:
if 'torrents' not in data and mode != 'RSS':
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
torrents = data['torrents'] if mode != 'RSS' else data
if not torrents:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for torrent in torrents:
if mode == 'RSS' and 'category' in torrent and try_int(torrent['category'], 0) not in self.subcategories:
continue
try:
title = torrent['name']
torrent_id = torrent['id']
download_url = (self.urls['download'] % torrent_id).encode('utf8')
if not all([title, download_url]):
continue
seeders = try_int(torrent['seeders'])
leechers = try_int(torrent['leechers'])
verified = bool(torrent['isVerified'])
torrent_size = torrent['size']
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
if self.confirmed and not verified and mode != 'RSS':
logger.log("Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
continue
size = convert_size(torrent_size) or -1
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
except Exception:
logger.log("Invalid torrent data, skipping result: {0}".format(torrent), logger.DEBUG)
logger.log("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.DEBUG)
continue
except Exception:
logger.log("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.ERROR)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
class T411Auth(AuthBase): # pylint: disable=too-few-public-methods
"""Attaches HTTP Authentication to the given Request object."""
def __init__(self, token):
if isinstance(token, six.text_type):
self.token = token.encode('utf-8')
else:
self.token = token
def __call__(self, r):
r.headers[b'Authorization'] = self.token
return r
provider = T411Provider()
|
gpl-3.0
|
tengteng/learning-spark
|
src/python/MLlib.py
|
42
|
2787
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import LogisticRegressionWithSGD
from pyspark.mllib.feature import HashingTF
if __name__ == "__main__":
sc = SparkContext(appName="PythonBookExample")
# Load 2 types of emails from text files: spam and ham (non-spam).
# Each line has text from one email.
spam = sc.textFile("files/spam.txt")
ham = sc.textFile("files/ham.txt")
# Create a HashingTF instance to map email text to vectors of 100 features.
tf = HashingTF(numFeatures = 100)
# Each email is split into words, and each word is mapped to one feature.
spamFeatures = spam.map(lambda email: tf.transform(email.split(" ")))
hamFeatures = ham.map(lambda email: tf.transform(email.split(" ")))
# Create LabeledPoint datasets for positive (spam) and negative (ham) examples.
positiveExamples = spamFeatures.map(lambda features: LabeledPoint(1, features))
negativeExamples = hamFeatures.map(lambda features: LabeledPoint(0, features))
training_data = positiveExamples.union(negativeExamples)
training_data.cache() # Cache data since Logistic Regression is an iterative algorithm.
# Run Logistic Regression using the SGD optimizer.
# regParam is model regularization, which can make models more robust.
model = LogisticRegressionWithSGD.train(training_data)
# Test on a positive example (spam) and a negative one (ham).
# First apply the same HashingTF feature transformation used on the training data.
posTestExample = tf.transform("O M G GET cheap stuff by sending money to ...".split(" "))
negTestExample = tf.transform("Hi Dad, I started studying Spark the other ...".split(" "))
# Now use the learned model to predict spam/ham for new emails.
print "Prediction for positive test example: %g" % model.predict(posTestExample)
print "Prediction for negative test example: %g" % model.predict(negTestExample)
sc.stop()
|
mit
|
wdzhou/mantid
|
scripts/reduction/instruments/reflectometer/wks_utility.py
|
2
|
62637
|
#pylint: disable=too-many-lines,invalid-name,too-many-arguments, too-many-locals, unused-argument
from __future__ import (absolute_import, division, print_function)
from numpy import zeros, arctan2, arange, shape, sqrt, fliplr, asfarray, mean, sum, NAN
from mantid.simpleapi import *
import math
import os.path
from six.moves import range
h = 6.626e-34 #m^2 kg s^-1
m = 1.675e-27 #kg
ref_date = '2014-10-01' #when the detector has been rotated
def getSequenceRuns(run_numbers):
"""
This will return the sequence of runs
ex:
input: 10,11,12
output: 10,11,12
input: 10,13-15
output: 10,13,14,15
"""
final_list = []
for _run in run_numbers:
_run = str(_run)
_result = _run.find('-')
if _result == -1:
final_list.append(_run)
else:
_split = _run.split('-')
start = int(_split[0])
end = int(_split[1])
_range = arange(end-start+1)+start
for _r in _range:
final_list.append(_r)
return final_list
def getProtonCharge(st=None):
"""
Returns the proton charge of the given workspace in picoCoulomb
"""
if st is not None:
mt_run = st.getRun()
proton_charge_mtd_unit = mt_run.getProperty('gd_prtn_chrg').value
# proton_charge = proton_charge_mtd_unit / 2.77777778e-10
return proton_charge_mtd_unit
return None
def getIndex(value, array):
"""
returns the index where the value has been found
"""
# sz = len(array)
# for i in range(sz):
# if value == array[i]:
# return i
# return -1
return array.searchsorted(value)
def getSh(mt, top_tag, bottom_tag):
"""
returns the height and units of the given slit#
"""
mt_run = mt.getRun()
st = mt_run.getProperty(top_tag).value
sb = mt_run.getProperty(bottom_tag).value
sh = math.fabs(float(sb[0]) - float(st[0]))
units = mt_run.getProperty(top_tag).units
return sh, units
def getSheight(mt, index):
"""
return the DAS hardware slits height of slits # index
"""
mt_run = mt.getRun()
if index == 2:
isSi = False
try:
tag = 'SiVHeight'
value = mt_run.getProperty(tag).value
isSi = True
except:
tag = 'S2VHeight'
value = mt_run.getProperty(tag).value
return [isSi, value[0]]
else:
tag = 'S1VHeight'
value = mt_run.getProperty(tag).value
return value[0]
def getS1h(mt=None):
"""
returns the height and units of the slit #1
"""
if mt is not None:
# _h, units = getSh(mt, 's1t', 's1b')
_h = getSheight(mt, 1)
return _h
return None
def getS2h(mt=None):
"""
returns the height and units of the slit #2
"""
if mt is not None:
[isSi, _h] = getSheight(mt, 2)
return [isSi,_h]
return [False, None]
def getSwidth(mt, index):
"""
returns the width and units of the given index slits
defined by the DAS hardware
"""
mt_run = mt.getRun()
if index==2:
isSi = False
try:
tag = 'SiHWidth'
value = mt_run.getProperty(tag).value
isSi = True
except:
tag = 'S2HWidth'
value = mt_run.getProperty(tag).value
return [isSi, value[0]]
else:
tag = 'S1HWidth'
value = mt_run.getProperty(tag).value
return value[0]
def getSw(mt, left_tag, right_tag):
"""
returns the width and units of the given slits
"""
mt_run = mt.getRun()
sl = mt_run.getProperty(left_tag).value
sr = mt_run.getProperty(right_tag).value
sw = math.fabs(float(sl[0]) - float(sr[0]))
units = mt_run.getProperty(left_tag).units
return sw, units
def getS1w(mt=None):
"""
returns the width and units of the slit #1
"""
if mt is not None:
# _w, units = getSw(mt, 's1l', 's1r')
_w = getSwidth(mt, 1)
return _w
return None
def getS2w(mt=None):
"""
returns the width and units of the slit #2
"""
if mt is not None:
[isSi, _w] = getSwidth(mt, 2)
return [isSi,_w]
return [False,None]
def getLambdaValue(mt_name):
"""
return the lambdaRequest value
"""
mt_run = mtd[mt_name].getRun()
_lambda = mt_run.getProperty('LambdaRequest').value
return _lambda
def getPixelXPixelY(mt1, maxX=304, maxY=256):
"""
returns the PixelX_vs_PixelY array of the workspace data specified
"""
pixelX_vs_pixelY = zeros((maxY, maxX))
for x in range(maxX):
for y in range(maxY):
_index = maxY * x + y
_sum = sum(mt1.readY(_index)[:])
pixelX_vs_pixelY[y, x] = _sum
return pixelX_vs_pixelY
def getPixelXPixelYError(mt1):
"""
returns the PixelX_vs_PixelY_error array of the workspace data specified
"""
pixel_error = zeros((256, 304))
for x in range(304):
for y in range(256):
_index = 256 * x + y
_sum = sum(mt1.readE(_index)[:])
pixel_error[y, x] = _sum
return pixel_error
def getPixelXTOF(mt1, maxX=304, maxY=256):
"""
returns the PixelX_vs_TOF array of the workspace data specified
"""
_init = mt1.readY(0)[:]
pixelX_vs_tof = zeros((maxY, len(_init)))
for x in range(maxX):
for y in range(maxY):
_index = maxY * x + y
_array = mt1.readY(_index)[:]
pixelX_vs_tof[y, :] += _array
return pixelX_vs_tof
def findQaxisMinMax(q_axis):
"""
Find the position of the common Qmin and Qmax in
each q array
"""
nbr_row = shape(q_axis)[0]
nbr_col = shape(q_axis)[1]
q_min = min(q_axis[0])
q_max = max(q_axis[0])
for i in arange(nbr_row - 1) + 1:
_q_min = q_axis[i][-1]
_q_max = q_axis[i][0]
if _q_min > q_min:
q_min = _q_min
if _q_max < q_max:
q_max = _q_max
#find now the index of those min and max in each row
_q_axis_min_max_index = zeros((nbr_row, 2))
for i in arange(nbr_row):
for j in arange(nbr_col - 1):
_q = q_axis[i, j]
_q_next = q_axis[i, j + 1]
if (_q >= q_max) and (_q_next <= q_max):
_q_axis_min_max_index[i, 0] = j
if (_q >= q_min) and (_q_next <= q_min):
_q_axis_min_max_index[i, 1] = j
return _q_axis_min_max_index
def cleanup_data(InputWorkspace=None,
OutputWorkspace=None,
maxY=256):
mti = mtd[InputWorkspace]
_tof_axis = mti.readX(0)[:]
nbr_tof = shape(_tof_axis)[0]-1
_new_y = zeros((maxY, nbr_tof))
_new_e = zeros((maxY, nbr_tof))
for px in range(maxY):
for tof in range(nbr_tof-1):
_y = mti.readY(px)[tof]
if _y != 0:
_e = mti.readE(px)[tof]
# if _y < _e:
if _y < 0 or _y < _e:
_y = 0.
_e = 0.
_new_y[px,tof] = float(_y)
_new_e[px,tof] = float(_e)
_y_error_axis = _new_e.flatten()
_y_axis = _new_y.flatten()
CreateWorkspace(OutputWorkspace=OutputWorkspace,
DataX=_tof_axis,
DataY=_y_axis,
DataE=_y_error_axis,
Nspec=maxY,
UnitX="TOF",
ParentWorkspace=mti)
def createIntegratedWorkspace(mt1,
fromXpixel, toXpixel,
fromYpixel, toYpixel,
maxX=304, maxY=256,
bCleaning=False):
"""
This creates the integrated workspace over the second pixel range (304 here) and
returns the new workspace handle
"""
_tof_axis = mt1.readX(0)[:]
_fromXpixel = min([fromXpixel, toXpixel])
_toXpixel = max([fromXpixel, toXpixel])
fromXpixel = _fromXpixel
toXpixel = _toXpixel
_fromYpixel = min([fromYpixel, toYpixel])
_toYpixel = max([fromYpixel, toYpixel])
fromYpixel = _fromYpixel
toYpixel = _toYpixel
_y_axis = zeros((maxY, len(_tof_axis) - 1))
_y_error_axis = zeros((maxY, len(_tof_axis) - 1))
x_size = toXpixel - fromXpixel + 1
x_range = arange(x_size) + fromXpixel
y_size = toYpixel - fromYpixel + 1
y_range = arange(y_size) + fromYpixel
for x in x_range:
for y in y_range:
_index = int((maxY) * x + y)
_y_axis[y, :] += mt1.readY(_index)[:]
_y_error_axis[y, :] += ((mt1.readE(_index)[:]) * (mt1.readE(_index)[:]))
_y_axis = _y_axis.flatten()
_y_error_axis = sqrt(_y_error_axis)
_y_error_axis = _y_error_axis.flatten()
outputWorkspace = CreateWorkspace(DataX=_tof_axis,
DataY=_y_axis,
DataE=_y_error_axis,
Nspec=maxY,
UnitX="TOF",
ParentWorkspace=mt1.name())
return outputWorkspace
def convertWorkspaceToQ(ws_data,
fromYpixel, toYpixel,
maxX=304, maxY=256,
cpix=None,
source_to_detector=None,
sample_to_detector=None,
theta=None,
geo_correction=False,
q_binning=None):
"""
This converts the workspace data to Q (momentum transfer), with or without
geometry correction, and returns the new workspace handle
"""
mt1 = ws_data
_tof_axis = mt1.readX(0)[:]
_fromYpixel = min([fromYpixel, toYpixel])
_toYpixel = max([fromYpixel, toYpixel])
fromYpixel = _fromYpixel
toYpixel = _toYpixel
if geo_correction:
yrange = arange(toYpixel - fromYpixel + 1) + fromYpixel
_q_axis = convertToRvsQWithCorrection(mt1,
dMD=source_to_detector,
theta=theta,
tof=_tof_axis,
yrange=yrange,
cpix=cpix)
#find the common Qmin and Qmax values and their index (position)
#in each _q_axis row
_q_axis_min_max_index = findQaxisMinMax(_q_axis)
#replace the _q_axis of the yrange of interest by the new
#individual _q_axis
y_size = toYpixel - fromYpixel + 1
y_range = arange(y_size) + fromYpixel
_y_axis = zeros((y_size, len(_tof_axis) - 1))
_y_error_axis = zeros((y_size, len(_tof_axis) - 1))
#now determine the y_axis
for _q_index in range(y_size):
_tmp_q_axis = _q_axis[_q_index]
q_axis = _tmp_q_axis[::-1] #reverse the axis (now increasing order)
_a = yrange[_q_index]
_y_axis_tmp = list(mt1.readY(int(_a))[:])
_y_error_axis_tmp = list(mt1.readE(int(_a))[:])
#keep only the overlap region of Qs
_q_min = _q_axis_min_max_index[_q_index, 0]
if _q_min != 0:
_y_axis_tmp[0:_q_min] = 0
_y_error_axis_tmp[0:_q_min] = 0
_q_max = int(_q_axis_min_max_index[_q_index, 1])
sz = shape(_y_axis_tmp)[0]
if _q_max != sz:
_index_q_max_range = arange(sz - _q_max) + _q_max
for i in _index_q_max_range:
_y_axis_tmp[i] = 0
_y_error_axis_tmp[i] = 0
_y_axis[_q_index, :] = _y_axis_tmp[::-1]
_y_error_axis[_q_index, :] = _y_error_axis_tmp[::-1]
x_axis = q_axis.flatten()
y_axis = _y_axis.flatten()
y_error_axis = _y_error_axis.flatten()
outputWorkspace = CreateWorkspace(DataX=x_axis,
DataY=y_axis,
DataE=y_error_axis,
Nspec=int(y_size),
UnitX="MomentumTransfer",
ParentWorkspace=mt1.name())
outputWorkspace.setDistribution(True)
outputWorkspace = Rebin(InputWorkspace=outputWorkspace,
Params=q_binning)
else:
if source_to_detector is not None and theta is not None:
_const = float(4) * math.pi * m * source_to_detector / h
_q_axis = 1e-10 * _const * math.sin(theta) / (_tof_axis * 1e-6)
else:
_q_axis = _tof_axis
print('should not reach this condition !')
y_size = toYpixel - fromYpixel + 1
y_range = arange(y_size) + fromYpixel
_y_axis = zeros((y_size, len(_q_axis) -1 ))
_y_error_axis = zeros((y_size, len(_q_axis) - 1))
for y in range(y_size):
a = y_range[y]
_tmp_y_axis = mt1.readY(int(a))[:]
_y_axis[int(y), :] = _tmp_y_axis
_tmp_y_error_axis = mt1.readE(int(a))[:]
_y_error_axis[int(y),:] = _tmp_y_error_axis
_x_axis = _q_axis.flatten()
_y_axis = _y_axis.flatten()
_y_error_axis = _y_error_axis.flatten()
# reverse order
_x_axis = _x_axis[::-1]
_y_axis = _y_axis[::-1]
_y_error_axis = _y_error_axis[::-1]
outputWorkspace = CreateWorkspace(DataX=_x_axis,
DataY=_y_axis,
DataE=_y_error_axis,
Nspec=int(y_size),
UnitX="MomentumTransfer",
ParentWorkspace=mt1.name())
outputWorkspace.setDistribution(True)
outputWorkspace = Rebin(InputWorkspace=outputWorkspace,
Params=q_binning)
return outputWorkspace
def create_grouping(workspace=None, xmin=0, xmax=None, filename=".refl_grouping.xml"):
# Default pixel counts; these should be read from the instrument parameters (done below when a workspace is given)
npix_x = 304
npix_y = 256
if workspace is not None:
if mtd[workspace].getInstrument().hasParameter("number-of-x-pixels"):
npix_x = int(mtd[workspace].getInstrument().getNumberParameter("number-of-x-pixels")[0])
if mtd[workspace].getInstrument().hasParameter("number-of-y-pixels"):
npix_y = int(mtd[workspace].getInstrument().getNumberParameter("number-of-y-pixels")[0])
f = open(filename, 'w')
f.write("<detector-grouping description=\"Integrated over X\">\n")
if xmax is None:
xmax = npix_x
for y in range(npix_y):
# index = max_y * x + y
indices = []
for x in range(xmin, xmax + 1):
indices.append(str(npix_y * x + y))
# Detector IDs start at zero, but spectrum numbers start at 1
# Grouping works on spectrum numbers
indices_str = ','.join(indices)
f.write(" <group name='%d'>\n" % y)
f.write(" <ids val='%s'/>\n" % indices_str)
f.write(" </group>\n")
f.write("</detector-grouping>\n")
f.close()
def angleUnitConversion(value, from_units='degree', to_units='rad'):
"""
This function converts the angle units
"""
if from_units == to_units:
return value
from_factor = 1.0
#convert everything into rad
if from_units == 'degree':
from_factor = 1.745329252e-2
value_rad = from_factor * value
if to_units == 'rad':
return value_rad
else:
to_factor = 57.2957795
return to_factor * value_rad
def convertToThetaVsLambda(_tof_axis,
_pixel_axis,
central_pixel,
pixel_size=0.0007,
theta= -1,
dSD= -1,
dMD= -1):
"""
This function converts the pixel/tof array
to theta/lambda
"""
# h = 6.626e-34 #m^2 kg s^-1
# m = 1.675e-27 #kg
#convert tof_axis into seconds
_tof_axis = _tof_axis * 1e-6
vel_array = dMD / _tof_axis #mm/ms = m/s
_lambda = h / (m * vel_array) #m
_lambda = _lambda * 1e10 #angstroms
d_vec = (_pixel_axis - central_pixel) * pixel_size
theta_vec = arctan2(d_vec, dSD) + theta
dico = {'lambda_vec': _lambda, 'theta_vec': theta_vec}
return dico
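# Worked example with illustrative values (not taken from the source): for a
# moderator-to-detector distance dMD = 15.0 m and a TOF of 10000 microseconds,
# the velocity is v = 15.0 / 0.01 = 1500 m/s and the wavelength is
# lambda = h / (m * v) = 6.626e-34 / (1.675e-27 * 1500) ~= 2.64e-10 m ~= 2.64 Angstroms.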
def convertToRvsQWithCorrection(mt, dMD= -1, theta= -1.0, tof=None, yrange=None, cpix=None):
"""
This function converts the pixel/TOF array to the R(Q) array
using Q = (4.Pi.Mn)/h * L.sin(theta/2)/TOF
with L: distance central_pixel->source
TOF: TOF of pixel
theta: angle of detector
"""
# h = 6.626e-34 #m^2 kg s^-1
# m = 1.675e-27 #kg
sample = mt.getInstrument().getSample()
maxY = 256
dPS_array = zeros(maxY)
for y in range(maxY):
detector = mt.getDetector(y)
dPS_array[y] = sample.getDistance(detector)
#distance sample->center of detector
dSD = dPS_array[maxY / 2]
_const = float(4) * math.pi * m * dMD / h
sz_tof = len(tof)
q_array = zeros((len(yrange), sz_tof - 1))
for _px in range(len(yrange)):
dangle = ref_beamdiv_correct(cpix, mt, dSD, _px)
if dangle is not None:
_theta = theta + dangle
else:
_theta = theta
for t in range(sz_tof - 1):
tof1 = tof[t]
tof2 = tof[t+1]
tofm = (tof1+tof2)/2.
_Q = _const * math.sin(_theta) / (tofm*1e-6)
q_array[_px, t] = _Q * 1e-10
return q_array
def getQHisto(source_to_detector, theta, tof_array):
_const = float(4) * math.pi * m * source_to_detector / h
sz_tof = len(tof_array)
q_array = zeros(sz_tof)
for t in range(sz_tof):
_Q = _const * math.sin(theta) / (tof_array[t] * 1e-6)
q_array[t] = _Q * 1e-10
return q_array
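# Worked example with illustrative numbers (not from the source): with
# source_to_detector = 15.0 m, _const = 4*pi*1.675e-27*15.0/6.626e-34 ~= 4.8e8.
# For theta = 0.01 rad and TOF = 10000 microseconds,
# Q = 4.8e8 * sin(0.01) / 0.01 ~= 4.8e8 m^-1, i.e. ~0.048 Angstrom^-1 after the
# 1e-10 conversion, consistent with Q = 4*pi*sin(theta)/lambda for lambda ~= 2.64 Angstroms.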
def ref_beamdiv_correct(cpix, det_secondary,
pixel_index,
pixel_width = 0.0007,
first_slit_size = None,
last_slit_size = None):
"""
This function calculates the acceptance diagram, determines pixel overlap
and computes the offset to the scattering angle.
"""
# This is currently set to the same number for both REF_L and REF_M
epsilon = 0.5 * 1.3 * 1.0e-3
# Set the center pixel
if cpix is None:
cpix = 133.5
# first_slit_size = getSheight(mt, '1')
# last_slit_size = getSheight(mt,'2')
last_slit_dist = 0.654 #m
slit_dist = 0.885000050068 #m
first_slit_size = float(first_slit_size) * 0.001
last_slit_size = float(last_slit_size) * 0.001
_y = 0.5 * (first_slit_size + last_slit_size)
_x = slit_dist
gamma_plus = math.atan2(_y, _x)
_y = 0.5 * (first_slit_size - last_slit_size)
_x = slit_dist
gamma_minus = math.atan2(_y, _x)
half_last_aperture = 0.5 * last_slit_size
neg_half_last_aperture = -1.0 * half_last_aperture
last_slit_to_det = last_slit_dist + det_secondary
dist_last_aper_det_sin_gamma_plus = last_slit_to_det * math.sin(gamma_plus)
dist_last_aper_det_sin_gamma_minus = last_slit_to_det * math.sin(gamma_minus)
#set the delta theta coordinates of the acceptance polygon
accept_poly_x = []
accept_poly_x.append(-1.0 * gamma_minus)
accept_poly_x.append(gamma_plus)
accept_poly_x.append(gamma_plus)
accept_poly_x.append(gamma_minus)
accept_poly_x.append(-1.0 * gamma_plus)
accept_poly_x.append(-1.0 * gamma_plus)
accept_poly_x.append(accept_poly_x[0])
#set the z coordinates of the acceptance polygon
accept_poly_y = []
accept_poly_y.append(half_last_aperture - dist_last_aper_det_sin_gamma_minus + epsilon)
accept_poly_y.append(half_last_aperture + dist_last_aper_det_sin_gamma_plus + epsilon)
accept_poly_y.append(half_last_aperture + dist_last_aper_det_sin_gamma_plus - epsilon)
accept_poly_y.append(neg_half_last_aperture + dist_last_aper_det_sin_gamma_minus - epsilon)
accept_poly_y.append(neg_half_last_aperture - dist_last_aper_det_sin_gamma_plus - epsilon)
accept_poly_y.append(neg_half_last_aperture - dist_last_aper_det_sin_gamma_plus + epsilon)
accept_poly_y.append(accept_poly_y[0])
cur_index = pixel_index
#set the z band for the pixel
xMinus = (cur_index - cpix - 0.5) * pixel_width
xPlus = (cur_index - cpix + 0.5) * pixel_width
#calculate the intersection
yLeftCross = -1
yRightCross = -1
xI = accept_poly_x[0]
yI = accept_poly_y[0]
int_poly_x = []
int_poly_y = []
for i in range(len(accept_poly_x)):
xF = accept_poly_y[i]
yF = accept_poly_x[i]
if xI < xF:
if xI < xMinus and xF > xMinus:
yLeftCross = yI + (yF - yI) * (xMinus - xI) / (xF - xI)
int_poly_x.append(yLeftCross)
int_poly_y.append(xMinus)
if xI < xPlus and xF >= xPlus:
yRightCross = yI + (yF - yI) * (xPlus - xI) / (xF - xI)
int_poly_x.append(yRightCross)
int_poly_y.append(xPlus)
else:
if xF < xPlus and xI >= xPlus:
yRightCross = yI + (yF - yI) * (xPlus - xI) / (xF - xI)
int_poly_x.append(yRightCross)
int_poly_y.append(xPlus)
if xF < xMinus and xI >= xMinus:
yLeftCross = yI + (yF - yI) * (xMinus - xI) / (xF - xI)
int_poly_x.append(yLeftCross)
int_poly_y.append(xMinus)
#This catches points on the polygon inside the range of interest
if xF >= xMinus and xF < xPlus:
int_poly_x.append(yF)
int_poly_y.append(xF)
xI = xF
yI = yF
if len(int_poly_x) > 2:
int_poly_x.append(int_poly_x[0])
int_poly_y.append(int_poly_y[0])
int_poly_x.append(int_poly_x[1])
int_poly_y.append(int_poly_y[1])
else:
#Intersection polygon is null, point or line, so has no area
#therefore there is no angle correction
return None
#Calculate intersection polygon area
area = calc_area_2D_polygon(int_poly_x,
int_poly_y,
len(int_poly_x) - 2)
center_of_mass = calc_center_of_mass(int_poly_x,
int_poly_y,
area)
return center_of_mass
def calc_area_2D_polygon(x_coord, y_coord, size_poly):
"""
Calculation of the area defined by the 2D polygon
"""
_range = arange(size_poly) + 1
area = 0
for i in _range:
area += (x_coord[i] * (y_coord[i + 1] - y_coord[i - 1]))
return area / 2.
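# Sanity check with an illustrative polygon (not from the source): for a unit
# square fed the way ref_beamdiv_correct does (first two vertices repeated at
# the end), x_coord = [0, 1, 1, 0, 0, 1], y_coord = [0, 0, 1, 1, 0, 0] and
# size_poly = 4 give 0.5 * (1*(1-0) + 1*(1-0) + 0*(0-1) + 0*(0-1)) = 1.0,
# the expected signed area.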
def calc_center_of_mass(arr_x, arr_y, A):
"""
Function that calculates the center-of-mass for the given polygon
@param arr_x: The array of polygon x coordinates
@param arr_y: The array of polygon y coordinates
@param A: The signed area of the polygon
@return: The polygon center-of-mass
"""
center_of_mass = 0.0
SIXTH = 1. / 6.
for j in arange(len(arr_x) - 2):
center_of_mass += (arr_x[j] + arr_x[j + 1]) \
* ((arr_x[j] * arr_y[j + 1]) -
(arr_x[j + 1] * arr_y[j]))
if A != 0.0:
return (SIXTH * center_of_mass) / A
else:
return 0.0
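# Continuing the illustrative unit-square example above: with the same wrapped
# coordinate lists and A = 1.0, the sum is (0+1)*(0*0-1*0) + (1+1)*(1*1-1*0)
# + (1+0)*(1*1-0*1) + (0+0)*(0*0-0*1) = 3, so the function returns 3/6 = 0.5,
# the x component of the centroid (the quantity used as the angular offset by
# ref_beamdiv_correct).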
def getFieldValue(table, row, column):
_tag_value = table[row][column]
_tag_value_split = _tag_value.split('=')
return _tag_value_split[1]
def isWithinPrecisionRange(value_file, value_run, precision):
diff = abs(float(value_file)) - abs(float(value_run))
if abs(diff) <= precision:
return True
else:
return False
def _applySFtoArray(workspace, a, b, a_error, b_error):
"""
This function will create for each x-axis value the corresponding
scaling factor using the formula y = a + b*x and divides the workspace by it
"""
mt = mtd[workspace]
x_axis = mt.readX(0)[:]
sz = len(x_axis)
x_axis_factors = zeros(sz)
x_axis_factors_error = zeros(sz)
for i in range(sz):
_x_value = float(x_axis[i])
_factor = _x_value * b + a
x_axis_factors[i] = _factor
_factor_error = _x_value * b_error + a_error
x_axis_factors_error[i] = _factor_error
#create workspace
CreateWorkspace(OutputWorkspace='sfWorkspace',
DataX=x_axis,
DataY=x_axis_factors,
DataE=x_axis_factors_error,
Nspec=1,
UnitX="TOF")
Divide(workspace, 'sfWorkspace', workspace)
return workspace
def loadNeXus(runNumbers, type):
"""
retrieves the data for the specified run numbers, adds them together
when several data runs are given, and returns the resulting workspace
"""
wks_name = ''
if type == 'data':
wks_name = 'ws_event_data'
else:
wks_name = 'ws_event_norm'
print('-> loading ', type)
if (type == 'data') and len(runNumbers) > 1:
_list = []
for _run in runNumbers:
_list.append(str(_run))
list_run = ','.join(_list)
print('--> working with runs:', str(list_run))
_index = 0
for _run in runNumbers:
# Find full path to event NeXus data file
try:
data_file = FileFinder.findRuns("REF_L%d" %_run)[0]
except RuntimeError:
msg = "RefLReduction: could not find run %d\n" % _run
msg += "Add your data folder to your User Data Directories in the File menu"
raise RuntimeError(msg)
if _index == 0:
ws_event_data = LoadEventNexus(Filename=data_file, OutputWorkspace=wks_name)
_index += 1
else:
tmp = LoadEventNexus(Filename=data_file)
Plus(LHSWorkspace=ws_event_data,
RHSWorkspace=tmp,
OutputWorkspace=wks_name)
DeleteWorkspace(tmp)
else:
print('--> Working with run: ' + str(runNumbers))
try:
data_file = FileFinder.findRuns("REF_L%d" %runNumbers)[0]
except RuntimeError:
msg = "RefLReduction: could not find run %d\n" %runNumbers[0]
msg += "Add your data folder to your User Data Directories in the File menu"
raise RuntimeError(msg)
ws_event_data = LoadEventNexus(Filename=data_file, OutputWorkspace=wks_name)
return ws_event_data
def rebinNeXus(inputWorkspace, params, type):
"""
will rebin the event workspace according to the params
params[0]: min value
params[1]: bin size
params[2]: max value
"""
print('--> rebin ', type)
ws_histo_data = Rebin(InputWorkspace=inputWorkspace,
Params=params,
PreserveEvents=True)
return ws_histo_data
def cropTOF(inputWorkspace, min, max, type):
"""
will crop the nexus (workspace) using min and max value
used here to crop the TOF range
"""
print('--> crop ' , type , ' workspace in TOF')
ws_histo_data = CropWorkspace(InputWorkspace = inputWorkspace,
XMin = min,
XMax = max)
return ws_histo_data
def normalizeNeXus(inputWorkspace, type):
"""
normalize nexus by proton charge
"""
print('--> normalize ', type)
ws_histo_data = NormaliseByCurrent(InputWorkspace=inputWorkspace)
return ws_histo_data
def integrateOverLowResRange(mt1,
dataLowResRange,
type,
is_nexus_detector_rotated_flag):
"""
This creates the integrated workspace over the low resolution range leaving
us with a [256,nbr TOF] workspace
returns the new workspace handle
BUT this algorithm also makes sure that the error value is 1 when counts
is 0 !
"""
print('--> integrated over low res range of ', type)
_tof_axis = mt1.readX(0)[:].copy()
# t_range = arange(nbr_tof-1)
# -1 to work with index directly
fromXpixel = min(dataLowResRange) - 1
toXpixel = max(dataLowResRange) - 1
if is_nexus_detector_rotated_flag:
sz_y_axis = 304
else:
sz_y_axis = 256
_y_axis = zeros((sz_y_axis, len(_tof_axis) - 1))
_y_error_axis = zeros((sz_y_axis, len(_tof_axis) - 1))
x_size = toXpixel - fromXpixel + 1
x_range = arange(x_size) + fromXpixel
y_range = arange(sz_y_axis)
for x in x_range:
for y in y_range:
_index = int((sz_y_axis) * x + y)
_y_axis[y, :] += mt1.readY(_index)[:].copy()
_tmp_error_axis = mt1.readE(_index)[:].copy()
# 0 -> 1
# index_where_0 = where(_tmp_error_axis == 0)
# _tmp_error_axis[index_where_0] = 1
_y_error_axis[y, :] += _tmp_error_axis * _tmp_error_axis
# _y_error_axis[y, :] += ((mt1.readE(_index)[:]) * (mt1.readE(_index)[:]))
_y_error_axis = sqrt(_y_error_axis)
return [_tof_axis, _y_axis, _y_error_axis]
def substractBackground(tof_axis, y_axis, y_error_axis,
peakRange, backFlag, backRange,
error_0, type):
"""
shape of y_axis : [sz_y_axis, nbr_tof]
This routine will calculate the background, remove it from the peak
and will return only the range of peak -> [peak_size, nbr_tof]
"""
# give a friendly name to peak and back ranges
# -1 because we are working with 0 index arrays
peakMin = peakRange[0]-1
peakMax = peakRange[1]-1
backMin = backRange[0]-1
backMax = backRange[1]-1
if not backFlag:
print('---> no ', type, ' background requested!')
return [y_axis[peakMin:peakMax+1,:], y_error_axis[peakMin:peakMax+1,:]]
print('--> background subtraction of ', type)
# retrieve data
_tofAxis = tof_axis
nbrTof = len(_tofAxis)
# size peak
szPeak = peakMax - peakMin + 1
# init arrays
final_y_axis = zeros((szPeak, nbrTof))
final_y_error_axis = zeros((szPeak, nbrTof))
# final_y_axis = empty((szPeak, nbrTof))
# final_y_error_axis = empty((szPeak, nbrTof))
# final_y_axis[:] = NAN
# final_y_error_axis[:] = NAN
for t in range(nbrTof):
# by default, no space for background subtraction below and above peak
bMinBack = False
bMaxBack = False
if backMin < (peakMin):
bMinBack = True
_backMinArray = y_axis[backMin:peakMin, t]
_backMinErrorArray = y_error_axis[backMin:peakMin, t]
[_backMin, _backMinError] = weightedMean(_backMinArray,
_backMinErrorArray, error_0)
if (peakMax) < backMax:
bMaxBack = True
_backMaxArray = y_axis[peakMax+1:backMax+1, t]
_backMaxErrorArray = y_error_axis[peakMax+1:backMax+1, t]
[_backMax, _backMaxError] = weightedMean(_backMaxArray, _backMaxErrorArray, error_0)
# if no max background use min background
if not bMaxBack:
background = _backMin
background_error = _backMinError
# if no min background use max background
if not bMinBack:
background = _backMax
background_error = _backMaxError
if bMinBack and bMaxBack:
[background, background_error] = weightedMean([_backMin, _backMax], [_backMinError, _backMaxError], error_0)
# remove background for each pixel of the peak
for x in range(szPeak):
final_y_axis[x,t] = float(y_axis[peakMin + x,t]) - float(background)
final_y_error_axis[x,t] = float(math.sqrt(pow(y_error_axis[peakMin+x,t],2) + pow(background_error,2)))
# if t == nbrTof-2:
# print(float(y_axis[peakMin + x,t]) - float(background))
return [final_y_axis, final_y_error_axis]
def weightedMean(data_array, error_array, error_0):
sz = len(data_array)
# calculate the numerator of mean
dataNum = 0
for i in range(sz):
if error_array[i] == 0:
error_array[i] = error_0
tmpFactor = float(data_array[i]) / float((pow(error_array[i],2)))
dataNum += tmpFactor
# calculate denominator
dataDen = 0
for i in range(sz):
if error_array[i] == 0:
error_array[i] = error_0
tmpFactor = 1./float((pow(error_array[i],2)))
dataDen += tmpFactor
if dataDen == 0:
data_mean = NAN
mean_error = NAN
else:
data_mean = float(dataNum) / float(dataDen)
mean_error = math.sqrt(1/dataDen)
return [data_mean, mean_error]
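# Worked example with illustrative numbers (not from the source):
# weightedMean([10., 20.], [1., 2.], 1.) gives
# numerator = 10/1 + 20/4 = 15, denominator = 1/1 + 1/4 = 1.25,
# so data_mean = 12.0 and mean_error = sqrt(1/1.25) ~= 0.894.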
def weightedMeanOfRange(norm_y_axis, norm_y_error_axis):
"""
will calculate the weighted Mean of the region given
"""
# get nbr tof
dim = norm_y_axis.shape
nbr_tof = dim[1]
final_array = zeros(nbr_tof)
final_array_error = zeros(nbr_tof)
for t in range(nbr_tof):
_tmp_range = norm_y_axis[:, t]
_tmp_range_error = norm_y_error_axis[:,t]
[_mean,_mean_error] = weightedMean(_tmp_range, _tmp_range_error)
final_array[t] = _mean
final_array_error[t] = _mean_error
return [final_array, final_array_error]
def meanOfRange(norm_y_axis, norm_y_error_axis):
"""
will calculate the mean of range
"""
# get nbr tof
dim = norm_y_axis.shape
nbr_tof = dim[1]
final_array = zeros(nbr_tof)
final_array_error = zeros(nbr_tof)
for t in range(nbr_tof):
_tmp_range = norm_y_axis[:,t]
_tmp_range_error = norm_y_error_axis[:,t]
[_mean,_mean_error] = myMean(_tmp_range, _tmp_range_error)
final_array[t] = _mean
final_array_error[t] = _mean_error
return [final_array, final_array_error]
def myMean(data_array, error_array):
sz=size(data_array)
_mean = mean(data_array)
_mean_error = sqrt(sum(_mean*_mean))/float(sz)
return [_mean, _mean_error]
def divideDataByNormalization(data_y_axis,
data_y_error_axis,
av_norm,
av_norm_error):
print('-> divide data by normalization')
data_size = data_y_axis.shape
nbr_pixel = data_size[0]
nbr_tof = data_size[1]
new_data_y_axis = zeros((nbr_pixel, nbr_tof))
new_data_y_error_axis = zeros((nbr_pixel, nbr_tof))
for t in range(nbr_tof):
for x in range(nbr_pixel):
if (av_norm[t] != 0) and (data_y_axis[x, t] != 0):
tmp_value = float(data_y_axis[x,t]) / float(av_norm[t])
tmp_error_1 = pow(float(data_y_error_axis[x,t]) / float(data_y_axis[x,t]),2)
tmp_error_2 = pow(float(av_norm_error[t]) / float(av_norm[t]),2)
tmp_error = sqrt(tmp_error_1 + tmp_error_2) * abs(float(data_y_axis[x,t]) / float(av_norm[t]))
new_data_y_axis[x,t] = tmp_value
new_data_y_error_axis[x,t] = tmp_error
return [new_data_y_axis, new_data_y_error_axis]
def sumWithError(value, error):
""" will sume the array of values and will return the sum and the
error that goes with it
"""
sum_value = sum(value)
tmp_sum_error = 0
for i in range(len(value)):
tmp_value = pow(error[i],2)
tmp_sum_error += tmp_value
sum_error = math.sqrt(tmp_sum_error)
return [sum_value, sum_error]
def integratedOverPixelDim(data_y_axis, data_y_error_axis):
size = data_y_axis.shape
nbr_tof = size[1]
final_data = zeros(nbr_tof)
final_data_error = zeros(nbr_tof)
for t in range(nbr_tof):
[data, error] = sumWithError(data_y_axis[:,t], data_y_error_axis[:,t])
final_data[t] = data
final_data_error[t] = error
return [final_data, final_data_error]
def fullSumWithError(data_y_axis, data_y_error_axis):
size = data_y_axis.shape
nbr_tof = size[1]
final_data = zeros(nbr_tof)
final_data_error = zeros(nbr_tof)
# final_data = empty(nbr_tof)
# final_data_error = empty(nbr_tof)
# final_data[:] = NAN
# final_data_error[:] = NAN
for t in range(nbr_tof):
[data, error] = sumWithError(data_y_axis[:,t], data_y_error_axis[:,t])
final_data[t] = data
final_data_error[t] = error
return [final_data, final_data_error]
def ouput_ascii_file(file_name,
x_axis,
y_axis,
y_error_axis):
f=open(file_name,'w')
sz_x_axis = len(x_axis)
for i in range(sz_x_axis-1):
f.write(str(x_axis[i]) + "," + str(y_axis[i]) + "," + str(y_error_axis[i]) + "\n")
f.close()
def ouput_big_ascii_file(file_name,
x_axis,
y_axis,
y_error_axis):
f=open(file_name,'w')
sz = y_axis.shape # (nbr_pixel, nbr_tof)
nbr_tof = sz[1]
nbr_pixel = sz[0]
for t in range(nbr_tof):
_tmp_str = str(x_axis[t])
for x in range(nbr_pixel):
_tmp_str += ' ,' + str(y_axis[x,t]) + " ," + str(y_error_axis[x,t])
_tmp_str += '\n'
f.write(_tmp_str)
f.close()
def ouput_big_Q_ascii_file(file_name,
x_axis,
y_axis,
y_error_axis):
f=open(file_name,'w')
sz = y_axis.shape # (nbr_pixel, nbr_tof)
nbr_tof = sz[1]
nbr_pixel = sz[0]
for t in range(nbr_tof):
_tmp_str = ''
for x in range(nbr_pixel):
_tmp_str += str(x_axis[x,t]) + ',' + str(y_axis[x,t]) + " ," + str(y_error_axis[x,t]) + ',,'
_tmp_str += '\n'
f.write(_tmp_str)
f.close()
def divideData1DbyNormalization(inte_data_y_axis,
inte_data_y_error_axis,
av_norm,
av_norm_error):
print('-> divide data by normalization')
nbrPixel = inte_data_y_axis.shape
final_data = zeros(nbrPixel)
final_data_error = zeros(nbrPixel)
for x in range(nbrPixel[0]):
if av_norm[x] != 0:
final_data[x] = inte_data_y_axis[x] / av_norm[x]
tmp1 = pow(float(inte_data_y_error_axis[x]) / float(inte_data_y_axis[x]),2)
tmp2 = pow(float(av_norm_error[x]) / float(av_norm[x]),2)
tmp_error = sqrt(tmp1 + tmp2) * (float(inte_data_y_axis[x] / av_norm[x]))
final_data_error[x] = tmp_error
return [final_data, final_data_error]
def applyScalingFactor(tof_axis,
y_data,
y_data_error,
incident_medium,
sf_file,
valuePrecision,
slitsWidthFlag):
""""
function that apply scaling factor to data using sfCalculator.txt
file created by the sfCalculator procedure
"""
isSFfound = False
#sf_file = 'NaN'
if os.path.isfile(sf_file):
print('-> scaling factor file FOUND! (', sf_file, ')')
#parse file and put info into array
f = open(sf_file, 'r')
sfFactorTable = []
for line in f.read().split('\n'):
if len(line) > 0 and line[0] != '#':
sfFactorTable.append(line.split(' '))
f.close()
sz_table = shape(sfFactorTable)
nbr_row = sz_table[0]
_incidentMedium = incident_medium.strip()
_lr = getLambdaValue('ws_event_data')
_lr_value = _lr[0]
_lr_value = float("{0:.2f}".format(_lr_value))
#retrieve s1h and s2h or sih values
s1h = getS1h(mtd['ws_event_data'])
[isSih, s2h] = getS2h(mtd['ws_event_data'])
s1h_value = abs(s1h)
s2h_value = abs(s2h)
#retrieve s1w and s2w values
s1w = getS1w(mtd['ws_event_data'])
[isSiw, s2w] = getS2w(mtd['ws_event_data'])
s1w_value = abs(s1w)
s2w_value = abs(s2w)
print('--> Data Lambda Requested: {0:.2f}'.format(_lr_value))
print('--> Data S1H: {0:.2f}'.format(s1h_value))
if isSih:
print('--> Data SiH: {0:.2f}'.format(s2h_value))
else:
print('--> Data S2H: {0:.2f}'.format(s2h_value))
print('--> Data S1W: {0:.2f}'.format(s1w_value))
if isSiw:
print('--> Data SiW: {0:.2f}'.format(s2w_value))
else:
print('--> Data S2W: {0:.2f}'.format(s2w_value))
for i in range(nbr_row):
_file_incidentMedium = getFieldValue(sfFactorTable,i,0)
if _file_incidentMedium.strip() == _incidentMedium.strip():
print('*** incident medium match ***')
_file_lambdaRequested = getFieldValue(sfFactorTable,i,1)
if (isWithinPrecisionRange(_file_lambdaRequested,
_lr_value,
valuePrecision)):
print('*** lambda requested match ***')
_file_s1h = getFieldValue(sfFactorTable,i,2)
if(isWithinPrecisionRange(_file_s1h,
s1h_value,
valuePrecision)):
print('*** s1h match ***')
_file_s2h = getFieldValue(sfFactorTable,i,3)
if(isWithinPrecisionRange(_file_s2h,
s2h_value,
valuePrecision)):
print('*** s2h match ***')
if slitsWidthFlag:
print('*** (with slits width flag) ***')
_file_s1w = getFieldValue(sfFactorTable,i,4)
if(isWithinPrecisionRange(_file_s1w,
s1w_value,
valuePrecision)):
print('*** s1w match ***')
_file_s2w = getFieldValue(sfFactorTable,i,5)
if(isWithinPrecisionRange(_file_s2w,
s2w_value,
valuePrecision)):
print('*** s2w match ***')
print('--> Found a perfect match')
a = float(getFieldValue(sfFactorTable,i,6))
b = float(getFieldValue(sfFactorTable,i,7))
a_error = float(getFieldValue(sfFactorTable,i,8))
b_error = float(getFieldValue(sfFactorTable,i,9))
[y_data, y_data_error] = applyScalingFactorToArray(tof_axis,
y_data,
y_data_error,
a, b,
a_error, b_error)
return [tof_axis, y_data, y_data_error, True]
else:
print('--> Found a perfect match')
a = float(getFieldValue(sfFactorTable,i,6))
b = float(getFieldValue(sfFactorTable,i,7))
a_error = float(getFieldValue(sfFactorTable,i,8))
b_error = float(getFieldValue(sfFactorTable,i,9))
[y_data, y_data_error] = applyScalingFactorToArray(tof_axis,
y_data,
y_data_error,
a, b,
a_error, b_error)
isSFfound = True
return [tof_axis, y_data, y_data_error, isSFfound]
else:
print('-> scaling factor file for requested lambda NOT FOUND!')
return [tof_axis, y_data, y_data_error, isSFfound]
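# Note on the expected sfCalculator.txt layout (inferred from getFieldValue and
# the column indices used above; not documented in the source): each non-comment
# line is assumed to hold space-separated name=value pairs in the order incident
# medium, lambda requested, S1H, S2H (or SiH), S1W, S2W (or SiW), a, b, error_a,
# error_b, for example something like:
# IncidentMedium=air LambdaRequested=7.00 S1H=0.40 S2H=0.40 S1W=10.00 S2W=10.00 a=1.20 b=1.5e-05 error_a=0.01 error_b=1e-06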
def applyScalingFactorToArray(tof_axis, y_data, y_data_error, a, b, a_error, b_error):
"""
This function will create for each x-axis value the corresponding
scaling factor using the formula y=a+bx and
"""
x_axis = tof_axis
nbr_tof = len(x_axis)-1
x_axis_factors = zeros(nbr_tof)
x_axis_factors_error = zeros(nbr_tof)
# x_axis_factors = empty(nbr_tof)
# x_axis_factors_error = empty(nbr_tof)
# x_axis_factors[:] = NAN
# x_axis_factors_error[:] = NAN
for i in range(nbr_tof):
_x_value = float(x_axis[i])
_factor = _x_value * b + a
x_axis_factors[i] = _factor
_factor_error = _x_value * b_error + a_error
x_axis_factors_error[i] = _factor_error
sz = y_data.shape
nbr_pixel = sz[0]
final_y_data = zeros((nbr_pixel, nbr_tof))
final_y_data_error = zeros((nbr_pixel, nbr_tof))
# final_y_data = empty((nbr_pixel, nbr_tof))
# final_y_data_error = empty((nbr_pixel, nbr_tof))
# final_y_data[:] = NAN
# final_y_data_error[:] = NAN
for x in range(nbr_pixel):
[ratio_array, ratio_array_error] = divideArrays(y_data[x,:],
y_data_error[x,:],
x_axis_factors,
x_axis_factors_error)
final_y_data[x,:] = ratio_array[:]
final_y_data_error[x,:] = ratio_array_error
return [final_y_data, final_y_data_error]
def divideArrays(num_array, num_error_array, den_array, den_error_array):
"""
This function calculates the ratio of two arrays and calculate the
respective error values
"""
sz = num_array.shape
nbr_elements = sz[0]
# calculate the ratio array
ratio_array = zeros(nbr_elements)
for i in range(nbr_elements):
if den_array[i] == 0:
_tmp_ratio = 0
else:
_tmp_ratio = num_array[i] / den_array[i]
ratio_array[i] = _tmp_ratio
# calculate the error of the ratio array
ratio_error_array = zeros(nbr_elements)
for i in range(nbr_elements):
if (num_array[i] == 0) or (den_array[i] == 0):
ratio_error_array[i] = 0
else:
tmp1 = pow(num_error_array[i] / num_array[i],2)
tmp2 = pow(den_error_array[i] / den_array[i],2)
ratio_error_array[i] = sqrt(tmp1+tmp2)*(num_array[i]/den_array[i])
return [ratio_array, ratio_error_array]
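# Worked example with illustrative numbers (not from the source): dividing
# num = 4.0 +/- 0.4 by den = 2.0 +/- 0.2 gives a ratio of 2.0 with error
# sqrt((0.4/4)**2 + (0.2/2)**2) * 2.0 = sqrt(0.02) * 2.0 ~= 0.28.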
def getCentralPixel(ws_event_data, dataPeakRange, is_new_geometry):
"""
This function will calculate the central pixel position
"""
if is_new_geometry:
_maxX = 256
_maxY = 304
else:
_maxX = 304
_maxY = 256
pixelXtof_data = getPixelXTOF(ws_event_data, maxX=_maxX, maxY=_maxY)
pixelXtof_1d = pixelXtof_data.sum(axis=1)
# Keep only range of pixels
pixelXtof_roi = pixelXtof_1d[dataPeakRange[0]:dataPeakRange[1]]
sz = pixelXtof_roi.size
_num = 0
_den = 0
start_pixel = dataPeakRange[0]
for i in range(sz):
_num += (start_pixel * pixelXtof_roi[i])
start_pixel = start_pixel + 1
_den += pixelXtof_roi[i]
data_cpix = _num / _den
print('--> central pixel is {0:.1f}'.format(data_cpix))
return data_cpix
def getDistances(ws_event_data):
"""
calculates the distance between the moderator and the detector (dMD)
and the distance between the sample and the detector
"""
print('--> calculating dMD (moderator-detector) and dSD (sample-detector)')
sample = ws_event_data.getInstrument().getSample()
source = ws_event_data.getInstrument().getSource()
dSM = sample.getDistance(source)
# Create array of distances pixel->sample
dPS_array = zeros((256, 304))
for x in range(304):
for y in range(256):
_index = 256 * x + y
detector = ws_event_data.getDetector(_index)
dPS_array[y, x] = sample.getDistance(detector)
# Distance sample->center of detector
dSD = dPS_array[256//2,304//2]
# Distance source->center of detector
dMD = dSD + dSM
return [dMD, dSD]
def getTheta(ws_event_data, angleOffsetDeg):
"""
will calculate the theta angle offset
"""
print('--> retrieving thi and tthd')
mt_run = ws_event_data.getRun()
thi_value = mt_run.getProperty('thi').value[0]
thi_units = mt_run.getProperty('thi').units
tthd_value = mt_run.getProperty('tthd').value[0]
tthd_units = mt_run.getProperty('tthd').units
thi_rad = angleUnitConversion(value=thi_value,
from_units=thi_units,
to_units='rad')
print('---> thi (rad): ', thi_rad)
tthd_rad = angleUnitConversion(value=tthd_value,
from_units=tthd_units,
to_units='rad')
print('---> tthd (rad): ', tthd_rad)
theta = math.fabs(tthd_rad - thi_rad)/2.
angleOffsetRad = (angleOffsetDeg * math.pi) / 180.
theta += angleOffsetRad
print('---> theta (rad): ', theta)
return theta
def getSlitsSize(mt):
print('---> retrieving slits size')
first_slit_size = getSheight(mt, '1')
last_slit_size = getSheight(mt,'2')
print('----> first_slit_size: ' , first_slit_size)
print('----> last_slit_size: ' , last_slit_size)
return [first_slit_size, last_slit_size]
def getQrange(ws_histo_data, theta, dMD, q_min, q_step):
"""
will determine the true q axis according to the qMin and qStep specified
and the geometry of the instrument
"""
print('---> calculating Qrange')
_tof_axis = ws_histo_data.readX(0)
_const = float(4) * math.pi * m * dMD / h
sz_tof = shape(_tof_axis)[0]
_q_axis = zeros(sz_tof-1)
for t in range(sz_tof-1):
tof1 = _tof_axis[t]
tof2 = _tof_axis[t+1]
tofm = (tof1+tof2)/2.
_Q = _const * math.sin(theta) / (tofm*1e-6)
_q_axis[t] = _Q*1e-10
q_max = max(_q_axis)
if q_min >= q_max:
q_min = min(_q_axis)
print('----> q_min: ', q_min)
print('----> q_step: ', q_step)
print('----> q_max: ', q_max)
return [q_min, q_step, q_max]
def convertToQ(tof_axis,
y_axis,
y_error_axis,
peak_range = None,
central_pixel = None,
source_to_detector_distance = None,
sample_to_detector_distance = None,
theta = None,
first_slit_size = None,
last_slit_size = None):
"""
will convert the tof_axis into q_axis according to q range specified
"""
y_size = (peak_range[1] - peak_range[0] + 1)
y_range = arange(y_size) + peak_range[0]
_q_axis = getQaxis(source_to_detector_distance,
sample_to_detector_distance,
theta,
tof_axis,
y_range,
central_pixel,
first_slit_size,
last_slit_size)
_q_axis_min_max_index = findQaxisMinMax(_q_axis)
# now we need to put the various counts from y_axis into the right
# boxes
_y_axis = zeros((y_size, len(tof_axis)-1))
_y_error_axis = zeros((y_size, len(tof_axis)-1))
# _y_axis = empty((y_size, len(tof_axis)-1))
# _y_error_axis = empty((y_size, len(tof_axis)-1))
# _y_axis[:] = NAN
# _y_error_axis[:] = NAN
# now determine the _y_axis and _y_error_axis
for _y_index in range(y_size):
# get the q_axis of the given peak pixel
_tmp_q_axis = _q_axis[_y_index]
_q_axis = _tmp_q_axis[::-1] #reverse the axis (now in increasing order)
_y_axis_tmp = y_axis[_y_index,:]
_y_error_axis_tmp = y_error_axis[_y_index,:]
# keep only the overlap region of Qs
_q_min = _q_axis_min_max_index[_y_index, 0]
if _q_min != 0:
_y_axis_tmp[0:_q_min] = 0
_y_error_axis_tmp[0:_q_min] = 0
_q_max = int(_q_axis_min_max_index[_y_index, 1])
sz = shape(_y_axis_tmp)[0]
if _q_max != sz:
_index_q_max_range = arange(sz - _q_max) + _q_max
for i in _index_q_max_range:
_y_axis_tmp[i] = 0
_y_error_axis_tmp[i] = 0
_y_axis[_y_index, :] = _y_axis_tmp[::-1]
_y_error_axis[_y_index, :] = _y_error_axis_tmp[::-1]
# reverse the _q_axis here as well
q_axis_reverse = reverseQAxis(_q_axis)
return [q_axis_reverse, _y_axis, _y_error_axis]
def convertToQWithoutCorrection(tof_axis,
y_axis,
y_error_axis,
peak_range = None,
source_to_detector_distance = None,
sample_to_detector_distance = None,
theta = None,
first_slit_size = None,
last_slit_size = None):
"""
will convert the tof_axis into q_axis according to q range specified
but without using any geometry correction
"""
_const = float(4) * math.pi * m * source_to_detector_distance / h
_q_axis = 1e-10 * _const * math.sin(theta) / (tof_axis[0:-1] * 1e-6)
sz = y_axis.shape
nbr_pixel = sz[0]
sz_q_axis = _q_axis.shape
nbr_q = sz_q_axis[0]
q_axis_2d = zeros((nbr_pixel, nbr_q))
for p in range(nbr_pixel):
q_axis_2d[p,:] = _q_axis
q_axis_reverse = reverseQAxis(q_axis_2d)
y_axis_reverse = fliplr(y_axis)
y_error_axis_reverse = fliplr(y_error_axis)
return [q_axis_reverse, y_axis_reverse, y_error_axis_reverse]
def reverseQAxis(q_axis):
"""
will reverse each q_axis for the respective pixels
"""
new_q_axis = fliplr(q_axis)
return new_q_axis
def getQaxis(dMD, dSD, theta,
tof_axis, y_range, central_pixel,
first_slit_size,
last_slit_size):
"""
This function converts the pixel/TOF array to the R(Q) array
using Q = (4.Pi.Mn)/h * L.sin(theta/2)/TOF
with L: distance central_pixel->source
TOF: TOF of pixel
theta: angle of detector
"""
_const = float(4) * math.pi * m * dMD / h
sz_tof = len(tof_axis)
q_array = zeros((len(y_range), sz_tof))
for y in range(len(y_range)):
_px = y_range[y]
dangle = ref_beamdiv_correct(central_pixel,
dSD,
_px,
0.0007,
first_slit_size,
last_slit_size)
if dangle is not None:
_theta = theta + dangle
else:
_theta = theta
for t in range(sz_tof):
# tof1 = tof_axis[t]
# tof2 = tof_axis[t+1]
# tofm = (tof1+tof2)/2.
tof = tof_axis[t]
# _Q = _const * math.sin(_theta) / (tofm*1e-6)
_Q = _const * math.sin(_theta) / (tof*1e-6)
q_array[y, t] = _Q * 1e-10
return q_array
def integrateOverPeakRange(wks, dataPeakRange):
"""
getting just the mean of the peak
"""
final_x_axis = wks.readX(0)[:]
sz = final_x_axis.shape
nbr_q = sz[0]
# make temp big array
nbrPixel = dataPeakRange[1] - dataPeakRange[0] + 1
bigY = zeros((nbrPixel, nbr_q))
bigE = zeros((nbrPixel, nbr_q))
# bigY = empty((nbrPixel, nbr_q))
# bigE = empty((nbrPixel, nbr_q))
# bigY[:]= NAN
# bigE[:]= NAN
for x in range(nbrPixel):
_tmp_y = wks.readY(x)[:]
bigY[x,:] = _tmp_y
_tmp_e = wks.readE(x)[:]
bigE[x,:] = _tmp_e
final_y_axis = zeros(nbr_q)
final_y_error_axis = zeros(nbr_q)
#
# final_y_axis = empty(nbr_q)
# final_y_error_axis = empty(nbr_q)
# final_y_axis[:] = NAN
# final_y_error_axis[:] = NAN
# range(nbr_q -2) + 1 to get rid of first and last q values (edge effect)
rangeOfQ = arange(nbr_q-1)
# for q in rangeOfQ[1:-1]:
for q in rangeOfQ:
_tmp_y = bigY[:,q]
_tmp_y_error = bigE[:,q]
# [_y, _y_error] = myMean(_tmp_y, _tmp_y_error)
[_y, _y_error] = sumWithError(_tmp_y, _tmp_y_error)
final_y_axis[q] = _y
final_y_error_axis[q] = _y_error
return [final_x_axis, final_y_axis, final_y_error_axis]
def createQworkspace(q_axis, y_axis, y_error_axis):
sz = q_axis.shape
nbr_pixel = sz[0]
q_axis_1d = q_axis.flatten()
y_axis_1d = y_axis.flatten()
y_error_axis_1d = y_error_axis.flatten()
q_workspace = CreateWorkspace(DataX=q_axis_1d,
DataY=y_axis_1d,
DataE=y_error_axis_1d,
Nspec=nbr_pixel,
UnitX="Wavelength")
q_workspace.setDistribution(True)
return q_workspace
def createFinalWorkspace(q_axis, final_y_axis, final_error_axis, name_output_ws, parent_workspace):
final_workspace = CreateWorkspace(OutputWorkspace=name_output_ws,
DataX=q_axis,
DataY=final_y_axis,
DataE=final_error_axis,
Nspec=1,
UnitX="Wavelength",
ParentWorkspace=parent_workspace)
final_workspace.setDistribution(True)
return final_workspace
def cropAxisToOnlyNonzeroElements(q_rebin, dataPeakRange):
"""
This function will only keep the range of Q that has nonzero counts
"""
nbrPixel = dataPeakRange[1] - dataPeakRange[0] + 1
x_axis = q_rebin.readX(0)[:]
sz = x_axis.shape[0]-1
index_first_non_zero_value = sz
index_last_non_zero_value = 0
for x in range(nbrPixel):
_pixel_axis = q_rebin.readY(x)[:]
for t in range(sz):
_value = _pixel_axis[t]
if _value != float(0):
if index_first_non_zero_value > t:
index_first_non_zero_value = t
break
for t in range(sz-1,-1,-1):
_value = _pixel_axis[t]
if _value != float(0):
if index_last_non_zero_value < t:
index_last_non_zero_value = t
break
# crop data
new_x_axis = x_axis[index_first_non_zero_value:index_last_non_zero_value+1]
new_xrange = index_last_non_zero_value - index_first_non_zero_value + 1
new_y_axis = zeros((nbrPixel, new_xrange))
new_y_error_axis = zeros((nbrPixel, new_xrange))
# new_y_axis = empty((nbrPixel, new_xrange))
# new_y_error_axis = empty((nbrPixel, new_xrange))
# new_y_axis[:] = NAN
# new_y_error_axis[:] = NAN
for x in range(nbrPixel):
_tmp = q_rebin.readY(x)[:]
_tmp_E = q_rebin.readE(x)[:]
new_y_axis[x,:] = _tmp[index_first_non_zero_value:index_last_non_zero_value+1]
new_y_error_axis[x,:] = _tmp_E[index_first_non_zero_value:index_last_non_zero_value+1]
new_y_axis = new_y_axis.flatten()
new_y_error_axis = new_y_error_axis.flatten()
new_x_axis = asfarray(new_x_axis)
new_y_axis = asfarray(new_y_axis)
new_y_error_axis = asfarray(new_y_error_axis)
nonzero_q_rebin_wks = CreateWorkspace(DataX=new_x_axis,
DataY=new_y_axis,
DataE=new_y_error_axis,
Nspec=int(nbrPixel),
UnitX="Wavelength")
return nonzero_q_rebin_wks
def cleanupData(final_data_y_axis, final_data_y_error_axis):
sz = final_data_y_axis.shape
nbrPixel = sz[0]
nbrQ = sz[1]
for x in range(nbrPixel):
for q in range(nbrQ):
_data = final_data_y_axis[x,q]
_error = final_data_y_error_axis[x,q]
# if error is > value, remove point
if _error >= _data:
_data = 0
_error = 1
# if value is below 10^-12
if _data < 1e-12:
_data = 0
_error = 1
final_data_y_axis[x,q] = _data
final_data_y_error_axis[x,q] = _error
return [final_data_y_axis, final_data_y_error_axis]
def cleanupData1D(final_data_y_axis, final_data_y_error_axis):
sz = final_data_y_axis.shape
nbrTof = sz[0]
notYetRemoved = True
for t in range(nbrTof):
_data = final_data_y_axis[t]
_error = final_data_y_error_axis[t]
if _data > 0 and notYetRemoved:
notYetRemoved = False
final_data_y_axis[t] = 0
final_data_y_error_axis[t] = 1
continue
# if error is > value, remove point
if abs(_error) >= abs(_data):
_data_tmp = 0
_error_tmp = 1
elif _data< 1e-12:
# if value is below 10^-12
_data_tmp = 0
_error_tmp = 1
else:
_data_tmp = _data
_error_tmp = _error
final_data_y_axis[t] = _data_tmp
final_data_y_error_axis[t] = _error_tmp
# print('final_data_y_axis[t]: ' , _data_tmp , ' final_data_y_error_axis[t]: ' , _error_tmp)
return [final_data_y_axis, final_data_y_error_axis]
def isNexusTakeAfterRefDate(nexus_date):
'''
This function parses the output.date and returns true if this date is after the ref date
'''
nexus_date_acquisition = nexus_date.split('T')[0]
if nexus_date_acquisition > ref_date:
return True
else:
return False
|
gpl-3.0
|
davelab6/nototools
|
nototools/extract_ohchr_attributions.py
|
4
|
8886
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract attribution data from the ohchr UDHR site."""
# This tool generates a .tsv file of attribution data based on information at the ohchr
# site, but first you have to manually extract that data from the html on the site, as
# there's no convenient way to get it. This block of comments describes the process.
#
# The idea is to find out which data on the ohchr site is 'official United Nations' data
# and which is not. The data itself doesn't say, so we need to look at the attributions
# listed on the ohchr.org site.
#
# Note that the data we actually use is not directly from ohchr.org, but from
# www.unicode.org/udhr. That site has cleaned up the data a little and converted it to
# xml format. We are assuming that any data with a matching language code shares the
# original attribution, but we could be wrong. The unicode.org site does not have the
# attribution data in any kind of organized form. Instead, they put a comment at the top
# of each document giving copyright to "The Office of the High Commisioner for Human
# Rights."
#
# Unfortunately, the data at www.ohchr.org is not readily available. At
# http://www.ohchr.org/EN/UDHR/Pages/SearchByLang.aspx you can page through the data using
# the dropdown under 'Search by Translation', but there's no visible url for a single page
# or for the data as a whole.
#
# If you try to view each page and then 'save as...', chrome fetches the url for the page
# it is showing, which returns the first (default) page no matter what data you are
# actually viewing. 'View as source' works, but it provides a formatted page, and if you
# choose 'save as...' from there, you get the source for that formatted page, not the raw
# source. The only way to get the source is to select and copy it from the source view
# into another document.
#
# At this point it makes more sense to just grab the portion of the data we can use
# instead of the whole file. So the process is to use the dropdown to show one of the
# pages of translations and then choose view source for it. Copy the contents of the
# <table> tag that lists the languages and sources into a stub html file. Repeat this for
# each of the six dropdown pages. The stub contains a single table element with the id
# 'ohchr_alldata', after this the table contains the data from all six ohchr pages.
#
# This data is still odd, in particular it nests <tr> and <td> tags. Fortunately
# HTMLParser doesn't care, and we don't need to care. The three pieces of data are the
# 'ohchr code', the 'language name', and the 'source'. The ohchr code is how they link to
# the page for the translation, mostly it is a three-letter language code but sometimes it
# is just whatever their server uses. The 'language name' is more or less an English
# translation of the language, sometimes with notes on script or region or the native name
# of the language, and the attribution is a string. The data is structured so that the
# ohchr code is part of an anchor tag that wraps the language string, and the source is
# part of a span in the following td. There are no other anchor tags or spans in the
# data, so we can just look for these. Separating each set is a close tr tag, so we can
# emit the data then.
#
# The output is a list of records with tab-separated fields: ohchr_code, lang_name, and
# source_name. The udhr index at unicode.org references the 'ohchr' code, so this is how
# we tie the attributions to the data from unicode.org.
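# For reference, a minimal sketch of the markup this parser expects (illustrative
# only, reconstructed from the handler logic below rather than copied from ohchr.org):
#
#   <table id="ohchr_alldata">
#     <tr>
#       <td><a id="ctl00_hpLangTitleID" href="UDHRIndex.aspx?LangID=abk">Abkhaz</a></td>
#       <td><span id="ctl00_lblSourceID">United Nations Department of Public Information</span></td>
#     </tr>
#   </table>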
import argparse
import codecs
import HTMLParser as html
import re
from nototools import tool_utils
class ParseOhchr(html.HTMLParser):
def __init__(self, trace=False):
html.HTMLParser.__init__(self)
self.trace = trace
self.result_list = []
self.restart()
def restart(self):
self.margin = ''
self.state = 'before_table'
self.tag_stack = []
self.collect_lang = False
self.collect_source = False
self.ohchr_code = ''
self.lang_name = ''
self.source_name = ''
def results(self):
return self.result_list
def indent(self):
self.margin += ' '
def outdent(self):
if not self.margin:
print '*** cannot outdent ***'
else:
self.margin = self.margin[:-2]
def get_attr(self, attr_list, attr_id):
for t in attr_list:
if t[0] == attr_id:
return t[1]
return None
def handle_starttag(self, tag, attrs):
if tag not in ['link', 'meta', 'area', 'img', 'br']:
if self.trace:
print self.margin + tag + '>'
self.tag_stack.append((tag, self.getpos()))
self.indent()
elif self.trace:
print self.margin + tag
if self.state == 'before_table' and tag == 'table':
table_id = self.get_attr(attrs, 'id')
if table_id == 'ohchr_alldata':
self.state = 'in_table'
elif self.state == 'in_table':
if tag == 'tr':
self.ohchr_code = ''
self.lang_name = ''
self.source_name = ''
elif tag == 'a':
a_id = self.get_attr(attrs, 'id')
if a_id and a_id.endswith('_hpLangTitleID'):
ohchr_code = self.get_attr(attrs, 'href')
ix = ohchr_code.rfind('=')
self.ohchr_code = ohchr_code[ix+1:]
self.collect_lang = True
elif tag == 'span':
span_id = self.get_attr(attrs, 'id')
if span_id and span_id.endswith('_lblSourceID'):
self.collect_source = True
elif tag == 'td':
self.collect_lang = False
self.collect_source = False
def handle_endtag(self, tag):
while self.tag_stack:
prev_tag, prev_pos = self.tag_stack.pop()
self.outdent()
if tag != prev_tag:
if self.trace:
print 'no close tag for %s at %s' % (prev_tag, prev_pos)
else:
break
if self.trace:
print self.margin + '<'
if self.state == 'in_table':
if tag == 'table':
self.state = 'after_table'
elif tag == 'tr':
if self.ohchr_code:
self.lang_name = re.sub(r'\s+', ' ', self.lang_name).strip()
self.source_name = re.sub(r'\s+', ' ', self.source_name).strip()
if not self.source_name:
self.source_name = '(no attribution)'
self.result_list.append((self.ohchr_code, self.lang_name, self.source_name))
self.ohchr_code = ''
self.lang_name = ''
self.source_name = ''
def handle_data(self, data):
if self.collect_lang:
self.lang_name += data
elif self.collect_source:
self.source_name += data
pass
def get_ohchr_status(ohchr_code, lang, attrib):
"""Decide the status based on the attribution text.
'original' are in the public domain and need no attribution.
'UN' are official UN translations and should be attributed as such.
'other' are not official UN translations and should be attributed as such."""
if ohchr_code in ['eng', 'frn', 'spn', 'rus', 'chn', 'arz']:
return 'original'
if (attrib.find('United Nations') != -1 or
attrib.find('High Commissioner for Human Rights') != -1):
return 'UN'
return 'other'
def parse_ohchr_html_file(htmlfile, outfile):
parser = ParseOhchr(False)
with open(htmlfile) as f:
parser.feed(f.read())
lines = []
for ohchr_code, lang, attrib in parser.results():
s = get_ohchr_status(ohchr_code, lang, attrib)
lines.append('\t'.join([ohchr_code, s, lang, attrib]))
data = '\n'.join(lines) + '\n'
print 'outfile: "%s"' % outfile
if not outfile or outfile == '-':
print data
else:
with open(outfile, 'w') as f:
f.write(data)
def main():
default_input = '[tools]/third_party/ohchr/ohchr_all.html'
default_output = '[tools]/third_party/ohchr/attributions.tsv'
parser = argparse.ArgumentParser()
parser.add_argument('--src', help='input ohchr html file (default %s)' % default_input,
default=default_input, metavar='file', dest='htmlfile')
parser.add_argument('--dst', help='output tsv file (default %s)' % default_output,
default=default_output, metavar='file', dest='outfile')
args = parser.parse_args()
htmlfile = tool_utils.resolve_path(args.htmlfile)
outfile = tool_utils.resolve_path(args.outfile)
parse_ohchr_html_file(htmlfile, outfile)
if __name__ == '__main__':
main()
|
apache-2.0
|
Celedhrim/persomov
|
libs/html5lib/treewalkers/dom.py
|
1229
|
1457
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
import gettext
_ = gettext.gettext
from . import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
def getNodeDetails(self, node):
if node.nodeType == Node.DOCUMENT_TYPE_NODE:
return _base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
return _base.TEXT, node.nodeValue
elif node.nodeType == Node.ELEMENT_NODE:
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
if attr.namespaceURI:
attrs[(attr.namespaceURI, attr.localName)] = attr.value
else:
attrs[(None, attr.name)] = attr.value
return (_base.ELEMENT, node.namespaceURI, node.nodeName,
attrs, node.hasChildNodes())
elif node.nodeType == Node.COMMENT_NODE:
return _base.COMMENT, node.nodeValue
elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
return (_base.DOCUMENT,)
else:
return _base.UNKNOWN, node.nodeType
def getFirstChild(self, node):
return node.firstChild
def getNextSibling(self, node):
return node.nextSibling
def getParentNode(self, node):
return node.parentNode
|
gpl-3.0
|
patelb10/RackHD
|
test/tests/api/v2_0/os_install_tests.py
|
19
|
21614
|
from config.api2_0_config import *
from config.settings import *
from modules.logger import Log
from workflows_tests import WorkflowsTests as workflows
from proboscis.asserts import assert_equal
from proboscis import test
from proboscis import after_class
from proboscis import before_class
from proboscis.asserts import fail
from json import dumps, loads, load
from collections import Mapping
from on_http_api2_0 import ApiApi as Api
import os
import datetime as date
import time
from on_http_api2_0.rest import ApiException
LOG = Log(__name__)
DEFAULT_TIMEOUT_SEC = 2700
ENABLE_FORMAT_DRIVE=False
if os.getenv('RACKHD_ENABLE_FORMAT_DRIVE', 'false') == 'true':
ENABLE_FORMAT_DRIVE=True
IS_EMC = defaults.get('RACKHD_REDFISH_EMC_OEM', False)
# Select one node to run OS install
NODE_INDEX = defaults.get('NODE_INDEX', None)
@test(groups=['os-install.v2.0.tests'], depends_on_groups=['set-ipmi-obm_api2'])
class OSInstallTests(object):
def __init__(self):
self.__client = config.api_client
self.__base = defaults.get('RACKHD_BASE_REPO_URL', \
'http://{0}:{1}'.format(HOST_IP, HOST_PORT))
self.__obm_options = {
'obmServiceName': defaults.get('RACKHD_GLOBAL_OBM_SERVICE_NAME', \
'ipmi-obm-service')
}
if self.__obm_options['obmServiceName'] == 'redfish-obm-service':
self.__obm_options['force'] = 'true'
self.__sampleDir = defaults.get('RACKHD_SAMPLE_PAYLOADS_PATH', '../example/samples/')
@before_class()
def setup(self):
pass
@after_class(always_run=True)
def teardown(self):
self.__format_drives()
def __get_data(self):
return loads(self.__client.last_response.data)
def __get_compute_nodes(self):
Api().nodes_get_all()
nodes = self.__get_data()
compute_nodes = []
for n in nodes:
type = n.get('type')
if type == 'compute':
compute_nodes.append(n)
LOG.info('compute nodes count {0}'.format(len(compute_nodes)))
return sorted(compute_nodes, key=lambda k: k['id'])
def __wait_for_completion(self, node, graph_name, graph_instance):
id = node.get('id')
start_time = date.datetime.now()
current_time = date.datetime.now()
# Collect BMC info
Api().nodes_get_catalog_source_by_id(identifier=id, source='bmc')
bmc_data = self.__get_data().get('data')
bmc_ip = bmc_data.get('IP Address')
LOG.info('running test on node : {0} with BMC IP: {1}'.format(id, bmc_ip))
while True:
if (current_time - start_time).total_seconds() > DEFAULT_TIMEOUT_SEC:
raise Exception('Timed out after {0} seconds'.format(DEFAULT_TIMEOUT_SEC))
break
Api().workflows_get()
workflows = self.__get_data()
for w in workflows:
# LOG.info('print w : {0}'.format(w))
if (w.get('node') == id and w.get('injectableName') == graph_name and
w.get('instanceId') == graph_instance):
status = w.get('status')
# LOG.info('{0} - target: {1}, status: {2}'.format(w.get('injectableName'), id, status))
if status == 'succeeded' or status == 'failed' or status == 'canceled':
msg = {
'graph_name': w.get('injectableName'),
'target': id,
'status': status,
'graph_instance': graph_instance
}
if status == 'failed' or status == 'canceled':
msg['active_task'] = w['tasks']
LOG.error(msg, json=True)
else:
LOG.info(msg, json=True)
assert_equal(status, 'succeeded', message='test failed')
return
time.sleep(10)
current_time = date.datetime.now()
# LOG.info('current time {0} vs start time {1}'.format(current_time, start_time))
def __post_workflow(self, graph_name, nodes, body):
# check if NODE_INDEX is set
index = None
try:
index = int(NODE_INDEX)
except:
LOG.info('NODE_INDEX env is not set')
workflows().post_workflows(graph_name, timeout_sec=DEFAULT_TIMEOUT_SEC, nodes=nodes, data=body)
return
# check if index is in the array range
nodes = self.__get_compute_nodes()
if index >= len(nodes):
raise Exception('index is outside the array range index: {0} vs nodes len {1}'.format(index, len(nodes)))
return
LOG.info('node index is set to {0}'.format(index))
node = nodes[index]
id = node.get('id')
# delete active workflow on the selected node
try:
Api().nodes_workflow_action_by_id(id, {'command': 'cancel'})
except ApiException as e:
assert_equal(404, e.status, message='status should be 404')
Api().nodes_post_workflow_by_id(id, name=graph_name, body=body)
log_context = self.__get_data().get('logContext')
if log_context is None:
raise Exception('Could not find logContext in {0}'.format(self.__get_data()))
return
# load graph instance id
graph_instance = log_context.get('graphInstance')
return self.__wait_for_completion(node, graph_name, graph_instance)
def __format_drives(self):
# Clear disk MBR and partitions
command = 'for disk in `lsblk | grep disk | awk \'{print $1}\'`; do '
command = command + 'sudo dd if=/dev/zero of=/dev/$disk bs=512 count=1 ; done'
body = {
'options': {
'shell-commands': {
'commands': [
{ 'command': command }
]
},
'set-boot-pxe': self.__obm_options,
'reboot-start': self.__obm_options,
'reboot-end': self.__obm_options
}
}
self.__post_workflow('Graph.ShellCommands', [], body)
def __get_os_install_payload(self, payload_file_name):
payload = open(self.__sampleDir + payload_file_name, 'r')
body = load(payload)
payload.close()
return body
def __update_body(self, body, updates):
#check each key, value pair in the updates
for key, value in updates.iteritems():
#if value is a dict, recursively call __update_body
if isinstance(value, Mapping):
r = self.__update_body(body.get(key, {}), value)
body[key] = r
elif isinstance(value, list) and key in body.keys():
body[key] = body[key] + updates[key]
else:
body[key] = updates[key]
return body
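# Illustrative sketch of the merge behaviour (hypothetical values, not from the source):
# body = {'options': {'defaults': {'repo': 'http://repo'}}}
# updates = {'options': {'defaults': {'version': '7.0'}, 'install-os': {'_taskTimeout': 3600000}}}
# self.__update_body(body, updates) leaves body as
# {'options': {'defaults': {'repo': 'http://repo', 'version': '7.0'},
#              'install-os': {'_taskTimeout': 3600000}}}
# nested dicts are merged key by key, lists already present are concatenated,
# other values are overwritten.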
def __test_link_up(self, network_devices):
for entry in network_devices:
if entry['ipv4'] != None:
hostname = entry['ipv4']['ipAddr']
response = os.system('ping -c 1 -w 20 ' + hostname)
assert_equal(response, 0, message='link {0} device {1} is down'.format(entry['device'], hostname))
@test(enabled=ENABLE_FORMAT_DRIVE, groups=['format-drives.v2.0.test'])
def test_format_drives(self):
""" Drive Format Test """
self.__format_drives()
def install_centos(self, version, nodes=[], options=None, payloadFile=None):
graph_name = 'Graph.InstallCentOS'
os_repo = defaults.get('RACKHD_CENTOS_REPO_PATH', \
self.__base + '/repo/centos/{0}'.format(version))
# load the payload from the specified file
if payloadFile != None:
body = self.__get_os_install_payload(payloadFile)
else:
body = {}
# if no options are specified, fill in the minimum required options
if options == None:
options = {
'options': {
'defaults': {
'repo': os_repo
}
}
}
# add additional options to the body
self.__update_body(body, options)
# run the workflow
self.__post_workflow(graph_name, nodes, body)
#test network devices
if 'networkDevices' in body['options']['defaults']:
self.__test_link_up(body['options']['defaults']['networkDevices'])
def install_esxi(self, version, nodes=[], options=None, payloadFile=None):
graph_name = 'Graph.InstallESXi'
os_repo = defaults.get('RACKHD_ESXI_REPO_PATH', \
self.__base + '/repo/esxi/{0}'.format(version))
# load the payload from the specified file
if payloadFile != None:
body = self.__get_os_install_payload(payloadFile)
else:
body = {}
# if no options are specified, fill in the minimum required options
if options == None:
options = {
'options':{
'defaults':{
'installDisk': 'firstdisk',
'version': version,
'repo': os_repo
},
'set-boot-pxe': self.__obm_options,
'reboot': self.__obm_options,
'install-os': {
'_taskTimeout': 3600000
}
}
}
# add additional options to the body
self.__update_body(body, options)
if self.__obm_options['obmServiceName'] == 'redfish-obm-service' and IS_EMC:
body['options']['install-os']['kargs'] = {'acpi':'off'}
self.__post_workflow(graph_name, nodes, body)
if 'networkDevices' in body['options']['defaults']:
self.__test_link_up(body['options']['defaults']['networkDevices'])
def install_suse(self, version, nodes=[], options=None, payloadFile=None):
graph_name = 'Graph.InstallSUSE'
os_repo = defaults.get('RACKHD_SUSE_REPO_PATH', \
self.__base + '/repo/suse/{0}/'.format(version))
# load the payload from the specified file
if payloadFile != None:
body = self.__get_os_install_payload(payloadFile)
else:
body = {}
if options == None:
options = {
'options': {
'defaults': {
'version': version,
'repo': os_repo,
'kargs' : {'NetWait': '10'}
}
}
}
# add additional options to the body
self.__update_body(body, options)
# run the workflow
self.__post_workflow(graph_name, nodes, body)
def install_ubuntu(self, version, payloadFile, nodes=[]):
graph_name = 'Graph.InstallUbuntu'
os_repo = defaults.get('RACKHD_UBUNTU_REPO_PATH', \
self.__base + '/repo/ubuntu')
# load the payload from the specified file
body = {}
body = self.__get_os_install_payload(payloadFile)
kargs = {}
# The Ubuntu installer requires both of these options from DHCP:
# - routers
# - domain-name-servers
# so we use a static IP instead when possible
try:
# if the node index is set then we can use a hard-coded static IP;
# this is temporary for now.
# We need to implement a CMDB to manage static IPs, especially for
# the full payload implementation
int(NODE_INDEX)
kargs = {
'live-installer/net-image': os_repo + '/install/filesystem.squashfs',
'netcfg/get_netmask': '255.255.255.0',
'netcfg/get_gateway': '172.31.128.1',
'netcfg/get_ipaddress': '172.31.128.240',
'netcfg/get_domain': 'my-domain',
'netcfg/get_nameservers': '172.31.128.1',
'netcfg/disable_dhcp': 'true',
'netcfg/confirm_static': 'true'
}
except:
LOG.info('NODE_INDEX env is not set, use DHCP')
kargs = {
'live-installer/net-image': os_repo + '/install/filesystem.squashfs'
}
extra_options = {
'options':{
'defaults':{
'repo': os_repo ,
'kargs': kargs
},
'set-boot-pxe': self.__obm_options,
'reboot': self.__obm_options,
'install-ubuntu': {
'_taskTimeout': 3600000
}
}
}
self.__update_body(body, extra_options)
self.__post_workflow(graph_name, nodes, body)
#test network devices
if 'networkDevices' in body['options']['defaults']:
self.__test_link_up(body['options']['defaults']['networkDevices'])
def install_windowsServer2012(self, version, payloadFile, nodes=[]):
graph_name = 'Graph.InstallWindowsServer'
os_repo = defaults.get('RACKHD_SMB_WINDOWS_REPO_PATH', None)
if os_repo is None:
fail('user must set RACKHD_SMB_WINDOWS_REPO_PATH')
# load the payload from the specified file
body = {}
body = self.__get_os_install_payload(payloadFile)
# The value of the productkey below is not a valid product key. It is a KMS client
# key that was generated to run the workflows without requiring a real product key.
# This key is available to public on the Microsoft site.
extra_options = {
'options': {
'defaults': {
'productkey': 'D2N9P-3P6X9-2R39C-7RTCD-MDVJX',
'smbUser': defaults.get('RACKHD_SMB_USER' , 'onrack'),
'smbPassword': defaults.get('RACKHD_SMB_PASSWORD' , 'onrack'),
'smbRepo': os_repo,
'repo' : defaults.get('RACKHD_WINPE_REPO_PATH', \
self.__base + '/repo/winpe')
}
}
}
self.__update_body(body, extra_options)
new_body = dumps(body)
self.__post_workflow(graph_name, nodes, body)
if 'networkDevices' in body['options']['defaults']:
self.__test_link_up(body['options']['defaults']['networkDevices'])
def install_coreos(self, payloadFile, nodes=[], options=None):
graph_name = 'Graph.InstallCoreOS'
os_repo = defaults.get('RACKHD_COREOS_REPO_PATH', \
self.__base + '/repo/coreos')
if options == None:
options = {
'options': {
'defaults': {
'repo': os_repo
}
}
}
if(payloadFile):
body = self.__get_os_install_payload(payloadFile)
else:
body = self.__get_os_install_payload('install_coreos_payload_minimum.json')
self.__update_body(body, options)
self.__post_workflow(graph_name, nodes, body)
@test(enabled=True, groups=['centos-6-5-install.v2.0.test'])
def test_install_centos_6(self, nodes=[], options=None):
""" Testing CentOS 6.5 Installer Workflow """
options = {
'options': {
'defaults': {
'installDisk': '/dev/sda',
'version': '6.5',
'repo': defaults.get('RACKHD_CENTOS_REPO_PATH', \
self.__base + '/repo/centos/6.5'),
'users': [{'name': 'onrack', 'password': 'Onr@ck1!', 'uid': 1010}]
},
'set-boot-pxe': self.__obm_options,
'reboot': self.__obm_options,
'install-os': {
'schedulerOverrides': {
'timeout': 3600000
}
}
}
}
self.install_centos('6.5', options=options)
@test(enabled=True, groups=['centos-7-install.v2.0.test'])
def test_install_centos_7(self, nodes=[], options=None):
""" Testing CentOS 7 Installer Workflow """
options = {
'options': {
'defaults': {
'installDisk': '/dev/sda',
'version': '7.0',
'repo': defaults.get('RACKHD_CENTOS_REPO_PATH', \
self.__base + '/repo/centos/7.0'),
'users': [{'name': 'onrack', 'password': 'Onr@ck1!', 'uid': 1010}]
},
'set-boot-pxe': self.__obm_options,
'reboot': self.__obm_options,
'install-os': {
'schedulerOverrides': {
'timeout': 3600000
}
}
}
}
self.install_centos('7.0', options=options)
@test(enabled=True, groups=['ubuntu-minimal-install.v2.0.test'])
def test_install_min_ubuntu(self, nodes=[], options=None):
""" Testing Ubuntu 14.04 Installer Workflow With Minimal Payload """
self.install_ubuntu('trusty', 'install_ubuntu_payload_iso_minimal.json')
@test(enabled=True, groups=['ubuntu-maximal-install.v2.0.test'])
def test_install_max_ubuntu(self, nodes=[], options=None):
""" Testing Ubuntu 14.04 Installer Workflow With Maximal Payload """
self.install_ubuntu('trusty', 'install_ubuntu_payload_iso_full.json')
@test(enabled=True, groups=['suse-minimal-install.v2.0.test'])
def test_install_suse_min(self, nodes=[], options=None):
""" Testing OpenSuse Leap 42.1 Installer Workflow With Min Payload"""
self.install_suse('42.1', payloadFile='install_suse_payload_minimal.json')
@test(enabled=True, groups=['suse-full-install.v2.0.test'])
def test_install_suse_max(self, nodes=[], options=None):
""" Testing OpenSuse Leap 42.1 Installer Workflow With Max Payload"""
self.install_suse('42.1', payloadFile='install_suse_payload_full.json')
@test(enabled=True, groups=['esxi-5-5-min-install.v2.0.test'])
def test_install_min_esxi_5_5(self, nodes=[], options=None):
""" Testing ESXi 5.5 Installer Workflow With Minimal Payload """
self.install_esxi('5.5', payloadFile='install_esx_payload_minimal.json')
@test(enabled=True, groups=['esxi-5-5-max-install.v2.0.test'])
def test_install_max_esxi_5_5(self, nodes=[], options=None):
""" Testing ESXi 5.5 Installer Workflow With Maximum Payload """
self.install_esxi('5.5', payloadFile='install_esx_payload_full.json')
@test(enabled=True, groups=['esxi-6-min-install.v2.0.test'])
def test_install_min_esxi_6(self, nodes=[], options=None):
""" Testing ESXi 6 Installer Workflow With Minimal Payload """
self.install_esxi('6.0', payloadFile='install_esx_payload_minimal.json')
@test(enabled=True, groups=['esxi-6-max-install.v2.0.test'])
def test_install_max_esxi_6(self, nodes=[], options=None):
""" Testing ESXi 6 Installer Workflow With Maximum Payload """
self.install_esxi('6.0', payloadFile='install_esx_payload_full.json')
@test(enabled=True, groups=['windowsServer2012-maximum-install.v2.0.test'])
def test_install_max_windowsServer2012(self, nodes=[], options=None):
""" Testing Windows Server 2012 Installer Workflow with Max payload"""
self.install_windowsServer2012('10.40','install_windows_payload_full.json')
@test(enabled=True, groups=['windowsServer2012-minimum-install.v2.0.test'])
def test_install_min_windowsServer2012(self, nodes=[], options=None):
""" Testing Windows Server 2012 Installer Workflow with Min payload"""
self.install_windowsServer2012('10.40','install_windows_payload_minimal.json')
@test(enabled=True, groups=['coreos-minimum-install.v2.0.test'])
def test_install_coreos_min(self, nodes=[]):
""" Testing CoreOS Installer Workflow with Minimum Payload"""
self.install_coreos(payloadFile='install_coreos_payload_minimum.json')
@test(enabled=True, groups=['coreos-full-install.v2.0.test'])
def test_install_coreos_full(self, nodes=[] ):
""" Testing CoreOS Installer Workflow with Full Payload"""
self.install_coreos(payloadFile='install_coreos_payload_full.json')
@test(enabled=True, groups=['centos-6-5-minimal-install.v2.0.test'])
def test_install_centos_6_minimal(self):
""" Testing CentOS 6.5 Installer Workflow """
self.install_centos('6.5', payloadFile='install_centos_6_payload_minimal.json')
@test(enabled=True, groups=['centos-6-5-full-install.v2.0.test'])
def test_install_centos_6_full(self, nodes=[], options=None):
""" Testing CentOS 6.5 Installer Workflow """
self.install_centos('6.5', payloadFile='install_centos_6_payload_full.json')
@test(enabled=True, groups=['centos-7-minimal-install.v2.0.test'])
def test_install_centos_7_minimal(self, nodes=[], options=None):
""" Testing CentOS 7 Installer Workflow """
self.install_centos('7.0', payloadFile='install_centos_7_payload_minimal.json')
@test(enabled=True, groups=['centos-7-full-install.v2.0.test'])
def test_install_centos_7_full(self, nodes=[], options=None):
""" Testing CentOS 7 Installer Workflow """
self.install_centos('7.0', payloadFile='install_centos_7_payload_full.json')
|
apache-2.0
|
wbinventor/openmc
|
examples/python/reflective/build-xml.py
|
4
|
2845
|
import numpy as np
import openmc
###############################################################################
# Simulation Input File Parameters
###############################################################################
# OpenMC simulation parameters
batches = 500
inactive = 10
particles = 10000
###############################################################################
# Exporting to OpenMC materials.xml file
###############################################################################
# Instantiate a Material and register the Nuclide
fuel = openmc.Material(material_id=1, name='fuel')
fuel.set_density('g/cc', 4.5)
fuel.add_nuclide('U235', 1.)
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([fuel])
materials_file.export_to_xml()
###############################################################################
# Exporting to OpenMC geometry.xml file
###############################################################################
# Instantiate Surfaces
surf1 = openmc.XPlane(surface_id=1, x0=-1, name='surf 1')
surf2 = openmc.XPlane(surface_id=2, x0=+1, name='surf 2')
surf3 = openmc.YPlane(surface_id=3, y0=-1, name='surf 3')
surf4 = openmc.YPlane(surface_id=4, y0=+1, name='surf 4')
surf5 = openmc.ZPlane(surface_id=5, z0=-1, name='surf 5')
surf6 = openmc.ZPlane(surface_id=6, z0=+1, name='surf 6')
surf1.boundary_type = 'vacuum'
surf2.boundary_type = 'vacuum'
surf3.boundary_type = 'reflective'
surf4.boundary_type = 'reflective'
surf5.boundary_type = 'reflective'
surf6.boundary_type = 'reflective'
# Instantiate Cell
cell = openmc.Cell(cell_id=1, name='cell 1')
# Use surface half-spaces to define region
cell.region = +surf1 & -surf2 & +surf3 & -surf4 & +surf5 & -surf6
# Register Material with Cell
cell.fill = fuel
# Instantiate Universes
root = openmc.Universe(universe_id=0, name='root universe')
# Register Cell with Universe
root.add_cell(cell)
# Instantiate a Geometry, register the root Universe, and export to XML
geometry = openmc.Geometry(root)
geometry.export_to_xml()
###############################################################################
# Exporting to OpenMC settings.xml file
###############################################################################
# Instantiate a Settings object, set all runtime parameters, and export to XML
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
# Create an initial uniform spatial source distribution over fissionable zones
uniform_dist = openmc.stats.Box(*cell.region.bounding_box,
only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
settings_file.export_to_xml()
|
mit
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/network/fortios/fortios_firewall_ippool6.py
|
7
|
8049
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; you can capture its output if the
# following is set in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ippool6
short_description: Configure IPv6 IP pools in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS device by
allowing the user to configure the firewall feature and ippool6 category.
Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_ippool6:
description:
- Configure IPv6 IP pools.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comments:
description:
- Comment.
endip:
description:
- "Final IPv6 address (inclusive) in the range for the address pool (format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx, Default: ::)."
name:
description:
- IPv6 IP pool name.
required: true
startip:
description:
- "First IPv6 address (inclusive) in the range for the address pool (format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx, Default: ::)."
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv6 IP pools.
fortios_firewall_ippool6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_ippool6:
state: "present"
comments: "<your_own_comment>"
endip: "<your_own_value>"
name: "default_name_5"
startip: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_ippool6_data(json):
option_list = ['comments', 'endip', 'name',
'startip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
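# Illustrative note (not part of the original module): filter_firewall_ippool6_data
# keeps only the whitelisted option keys and drops anything that is None, e.g.
# (hypothetical input):
#   filter_firewall_ippool6_data({'name': 'pool1', 'startip': None, 'state': 'present'})
#   -> {'name': 'pool1'}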
def firewall_ippool6(data, fos):
vdom = data['vdom']
firewall_ippool6_data = data['firewall_ippool6']
filtered_data = filter_firewall_ippool6_data(firewall_ippool6_data)
if firewall_ippool6_data['state'] == "present":
return fos.set('firewall',
'ippool6',
data=filtered_data,
vdom=vdom)
elif firewall_ippool6_data['state'] == "absent":
return fos.delete('firewall',
'ippool6',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_ippool6']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"firewall_ippool6": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"comments": {"required": False, "type": "str"},
"endip": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"startip": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
trnewman/VT-USRP-daughterboard-drivers
|
gnuradio-examples/python/digital/pick_bitrate.py
|
12
|
5999
|
#
# Copyright 2005,2006 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import eng_notation
_default_bitrate = 500e3
_valid_samples_per_symbol = (2,3,4,5,6,7)
def _gen_tx_info(converter_rate, xrates):
results = []
for samples_per_symbol in _valid_samples_per_symbol:
for interp in xrates:
bitrate = converter_rate / interp / samples_per_symbol
results.append((bitrate, samples_per_symbol, interp))
results.sort()
return results
def _gen_rx_info(converter_rate, xrates):
results = []
for samples_per_symbol in _valid_samples_per_symbol:
for decim in xrates:
bitrate = converter_rate / decim / samples_per_symbol
results.append((bitrate, samples_per_symbol, decim))
results.sort()
return results
def _filter_info(info, samples_per_symbol, xrate):
if samples_per_symbol is not None:
info = [x for x in info if x[1] == samples_per_symbol]
if xrate is not None:
info = [x for x in info if x[2] == xrate]
return info
def _pick_best(target_bitrate, bits_per_symbol, info):
"""
@returns tuple (bitrate, samples_per_symbol, interp_rate_or_decim_rate)
"""
if len(info) == 0:
raise RuntimeError, "info is zero length!"
if target_bitrate is None: # return the fastest one
return info[-1]
# convert bit rate to symbol rate
target_symbolrate = target_bitrate / bits_per_symbol
# Find the closest matching symbol rate.
# In the event of a tie, the one with the lowest samples_per_symbol wins.
# (We already sorted them, so the first one is the one we take)
best = info[0]
best_delta = abs(target_symbolrate - best[0])
for x in info[1:]:
delta = abs(target_symbolrate - x[0])
if delta < best_delta:
best_delta = delta
best = x
# convert symbol rate back to bit rate
return ((best[0] * bits_per_symbol),) + best[1:]
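# Illustrative note (not part of the original module): with 1 bit/symbol, a
# target bitrate of 400e3 and a (sorted) info list such as
#   [(250e3, 2, 128), (500e3, 2, 64)]
# _pick_best would return (500e3, 2, 64), since 500e3 is the symbol rate
# closest to the 400e3 target (values here are hypothetical).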
def _pick_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
xrate, converter_rate, xrates, gen_info):
"""
@returns tuple (bitrate, samples_per_symbol, interp_rate_or_decim_rate)
"""
if not isinstance(bits_per_symbol, int) or bits_per_symbol < 1:
raise ValueError, "bits_per_symbol must be an int >= 1"
if samples_per_symbol is not None and xrate is not None: # completely determined
return (float(converter_rate) / xrate / samples_per_symbol,
samples_per_symbol, xrate)
if bitrate is None and samples_per_symbol is None and xrate is None:
bitrate = _default_bitrate
# now we have a target bitrate and possibly an xrate or
# samples_per_symbol constraint, but not both of them.
ret = _pick_best(bitrate, bits_per_symbol,
_filter_info(gen_info(converter_rate, xrates), samples_per_symbol, xrate))
print "Actual Bitrate:", eng_notation.num_to_str(ret[0])
return ret
# ---------------------------------------------------------------------------------------
def pick_tx_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
interp_rate, converter_rate, possible_interps):
"""
Given the input parameters, return a configuration that matches
@param bitrate: desired bitrate or None
@type bitrate: number or None
@param bits_per_symbol: E.g., BPSK -> 1, QPSK -> 2, 8-PSK -> 3
@type bits_per_symbol: integer >= 1
@param samples_per_symbol: samples/baud (aka samples/symbol)
@type samples_per_symbol: number or None
@param interp_rate: USRP interpolation factor
@type interp_rate: integer or None
@param converter_rate: converter sample rate in Hz
@type converter_rate: number
@param possible_interps: a list of possible rates
@type possible_interps: a list of integers
@returns tuple (bitrate, samples_per_symbol, interp_rate)
"""
print "Requested TX Bitrate:", bitrate and eng_notation.num_to_str(bitrate) or 'Auto',
return _pick_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
interp_rate, converter_rate, possible_interps, _gen_tx_info)
def pick_rx_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
decim_rate, converter_rate, possible_decims):
"""
Given the input parameters, return a configuration that matches
@param bitrate: desired bitrate or None
@type bitrate: number or None
@param bits_per_symbol: E.g., BPSK -> 1, QPSK -> 2, 8-PSK -> 3
@type bits_per_symbol: integer >= 1
@param samples_per_symbol: samples/baud (aka samples/symbol)
@type samples_per_symbol: number or None
@param decim_rate: USRP decimation factor
@type decim_rate: integer or None
@param converter_rate: converter sample rate in Hz
@type converter_rate: number
@param possible_decims: a list of possible rates
@type possible_decims: a list of integers
@returns tuple (bitrate, samples_per_symbol, decim_rate)
"""
print "Requested RX Bitrate:", bitrate and eng_notation.num_to_str(bitrate) or 'Auto'
return _pick_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
decim_rate, converter_rate, possible_decims, _gen_rx_info)
|
gpl-3.0
|
lotube/lotube
|
lotube/videos/abstract_models.py
|
1
|
3269
|
from django.db import models
from django.db.models import OneToOneField
from config import constants as globalConstants
from core.fields import RestrictedNonAnimatedImageField, RestrictedVideoField
from core.models import LowerCaseCharField
from core.validators import Common
from users.models import User
from .managers import TagManager
from . import constants
class AbstractTimeStamped(models.Model):
"""
Auto-updated created and modified fields
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class AbstractVideo(AbstractTimeStamped):
"""
Representation of Video model.
Video was uploaded on our platform if source and id_source are empty
"""
id_source = models.CharField(max_length=100, blank=True)
source = models.CharField(max_length=30, blank=True)
user = models.ForeignKey(User)
title = models.CharField(max_length=300)
description = models.CharField(max_length=10000, blank=True, default='')
duration = models.PositiveIntegerField(default=0)
filename = RestrictedVideoField(
null=True, # because other sources may not have a filename
upload_to=globalConstants.VIDEO_FILE_PATH,
max_upload_size=globalConstants.VIDEO_FILE_MAX_SIZE)
tags = models.ManyToManyField('videos.Tag', related_name='videos')
thumbnail = RestrictedNonAnimatedImageField(
upload_to=globalConstants.VIDEO_THUMBNAIL_PATH,
blank=True, null=True,
max_upload_size=globalConstants.VIDEO_THUMBNAIL_MAX_SIZE)
def __str__(self):
return self.title
class Meta:
abstract = True
class AbstractAnalytic(models.Model):
video = OneToOneField('videos.Video', primary_key=True,
related_name='analytic')
views = models.PositiveIntegerField(default=0)
unique_views = models.PositiveIntegerField(default=0)
shares = models.PositiveIntegerField(default=0)
def __str__(self):
return str(self.views)
class Meta:
abstract = True
class AbstractRating(models.Model):
video = OneToOneField('videos.Video', primary_key=True,
related_name='rating')
likes = models.PositiveIntegerField(default=0)
likes_register = models.ManyToManyField(User, through='Like',
related_name='video_likes')
def like(self):
self.likes += 1
self.save()
return self.likes
def undo_like(self):
self.likes -= 1
self.save()
return self.likes
def __str__(self):
return u'{0}'.format(self.likes)
class Meta:
abstract = True
class AbstractLike(AbstractTimeStamped):
user = models.ForeignKey(User)
rating = models.ForeignKey('Rating')
class Meta:
abstract = True
unique_together = ('user', 'rating')
class AbstractTag(models.Model):
name = LowerCaseCharField(max_length=30,
unique=True,
validators=[Common.contains(constants.TAGS_ALLOWED_CHARACTERS)])
objects = TagManager()
def __str__(self):
return self.name
class Meta:
abstract = True
|
mit
|
tellapart/Diamond
|
src/diamond/handler/rabbitmq_pubsub.py
|
3
|
6682
|
# coding=utf-8
"""
Output the collected values to a RabbitMQ pub/sub channel
"""
from Handler import Handler
import time
try:
import pika
pika # Pyflakes
except ImportError:
pika = None
class rmqHandler (Handler):
"""
Implements the abstract Handler class
Sending data to a RabbitMQ pub/sub channel
"""
def __init__(self, config=None):
"""
Create a new instance of rmqHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
if pika is None:
self.log.error('pika import failed. Handler disabled')
self.enabled = False
return
# Initialize Data
self.connections = {}
self.channels = {}
self.reconnect_interval = 1
# Initialize Options
tmp_rmq_server = self.config['rmq_server']
if type(tmp_rmq_server) is list:
self.rmq_server = tmp_rmq_server
else:
self.rmq_server = [tmp_rmq_server]
self.rmq_port = 5672
self.rmq_exchange = self.config['rmq_exchange']
self.rmq_user = None
self.rmq_password = None
self.rmq_vhost = '/'
self.rmq_exchange_type = 'fanout'
self.rmq_durable = True
self.rmq_heartbeat_interval = 300
self.get_config()
# Create rabbitMQ pub socket and bind
try:
self._bind_all()
except pika.exceptions.AMQPConnectionError:
self.log.error('Failed to bind to rabbitMQ pub socket')
def get_config(self):
""" Get and set config options from config file """
if 'rmq_port' in self.config:
self.rmq_port = int(self.config['rmq_port'])
if 'rmq_user' in self.config:
self.rmq_user = self.config['rmq_user']
if 'rmq_password' in self.config:
self.rmq_password = self.config['rmq_password']
if 'rmq_vhost' in self.config:
self.rmq_vhost = self.config['rmq_vhost']
if 'rmq_exchange_type' in self.config:
self.rmq_exchange_type = self.config['rmq_exchange_type']
if 'rmq_durable' in self.config:
self.rmq_durable = bool(self.config['rmq_durable'])
if 'rmq_heartbeat_interval' in self.config:
self.rmq_heartbeat_interval = int(
self.config['rmq_heartbeat_interval'])
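# Illustrative note (not part of the original handler): given the options read
# above, a rough (hypothetical) handler section in the Diamond config could be:
#   [[rmqHandler]]
#   rmq_server = rabbit1.example.com, rabbit2.example.com
#   rmq_exchange = diamond
#   rmq_user = guest
#   rmq_password = guest
#   rmq_vhost = /
#   rmq_exchange_type = fanout
#   rmq_durable = True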
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(rmqHandler, self).get_default_config_help()
config.update({
'server': '',
'rmq_exchange': '',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(rmqHandler, self).get_default_config()
config.update({
'server': '127.0.0.1',
'rmq_exchange': 'diamond',
})
return config
def _bind_all(self):
"""
Bind all RMQ servers defined in config
"""
for rmq_server in self.rmq_server:
self._bind(rmq_server)
def _bind(self, rmq_server):
"""
Create PUB socket and bind
"""
if (rmq_server in self.connections.keys()
and self.connections[rmq_server] is not None
and self.connections[rmq_server].is_open):
# It seems we already have this server, so let's try _unbind just
# to be safe.
self._unbind(rmq_server)
credentials = None
if self.rmq_user and self.rmq_password:
credentials = pika.PlainCredentials(
self.rmq_user,
self.rmq_password)
parameters = pika.ConnectionParameters(
host=rmq_server,
port=self.rmq_port,
virtual_host=self.rmq_vhost,
credentials=credentials,
heartbeat_interval=self.rmq_heartbeat_interval,
retry_delay=5,
connection_attempts=3)
self.connections[rmq_server] = None
while (self.connections[rmq_server] is None
or self.connections[rmq_server].is_open is False):
try:
self.connections[rmq_server] = pika.BlockingConnection(
parameters)
self.channels[rmq_server] = self.connections[
rmq_server].channel()
self.channels[rmq_server].exchange_declare(
exchange=self.rmq_exchange,
type=self.rmq_exchange_type,
durable=self.rmq_durable)
# Reset reconnect_interval after a successful connection
self.reconnect_interval = 1
except Exception, exception:
self.log.debug("Caught exception in _bind: %s", exception)
if rmq_server in self.connections.keys():
self._unbind(rmq_server)
if self.reconnect_interval >= 16:
break
if self.reconnect_interval < 16:
self.reconnect_interval = self.reconnect_interval * 2
time.sleep(self.reconnect_interval)
def _unbind(self, rmq_server=None):
""" Close AMQP connection and unset channel """
try:
self.connections[rmq_server].close()
except AttributeError:
pass
self.connections[rmq_server] = None
self.channels[rmq_server] = None
def __del__(self):
"""
Destroy instance of the rmqHandler class
"""
if hasattr(self, 'connections'):
for rmq_server in self.connections.keys():
self._unbind(rmq_server)
def process(self, metric):
"""
Process a metric and send it to RMQ pub socket
"""
for rmq_server in self.connections.keys():
try:
if (self.connections[rmq_server] is None
or self.connections[rmq_server].is_open is False):
self._bind(rmq_server)
channel = self.channels[rmq_server]
channel.basic_publish(exchange=self.rmq_exchange,
routing_key='', body="%s" % metric)
except Exception, exception:
self.log.error(
"Failed publishing to %s, attempting reconnect",
rmq_server)
self.log.debug("Caught exception: %s", exception)
self._unbind(rmq_server)
self._bind(rmq_server)
|
mit
|
Karosuo/Linux_tools
|
xls_handlers/xls_sum_venv/lib/python3.6/site-packages/pip/_internal/vcs/subversion.py
|
8
|
7081
|
from __future__ import absolute_import
import logging
import os
import re
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
display_path, make_vcs_requirement_url, rmtree, split_auth_from_netloc,
)
from pip._internal.vcs import VersionControl, vcs
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
logger = logging.getLogger(__name__)
class Subversion(VersionControl):
name = 'svn'
dirname = '.svn'
repo_name = 'checkout'
schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
def get_base_rev_args(self, rev):
return ['-r', rev]
def export(self, location):
"""Export the svn repository at the url to the destination location"""
url, rev_options = self.get_url_rev_options(self.url)
logger.info('Exporting svn repository %s to %s', url, location)
with indent_log():
if os.path.exists(location):
# Subversion doesn't like to check out over an existing
# directory --force fixes this, but was only added in svn 1.5
rmtree(location)
cmd_args = ['export'] + rev_options.to_args() + [url, location]
self.run_command(cmd_args, show_stdout=False)
def fetch_new(self, dest, url, rev_options):
rev_display = rev_options.to_display()
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
cmd_args = ['checkout', '-q'] + rev_options.to_args() + [url, dest]
self.run_command(cmd_args)
def switch(self, dest, url, rev_options):
cmd_args = ['switch'] + rev_options.to_args() + [url, dest]
self.run_command(cmd_args)
def update(self, dest, url, rev_options):
cmd_args = ['update'] + rev_options.to_args() + [dest]
self.run_command(cmd_args)
@classmethod
def get_revision(cls, location):
"""
Return the maximum revision for all files under a given location
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, files in os.walk(location):
if cls.dirname not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove(cls.dirname)
entries_fn = os.path.join(base, cls.dirname, 'entries')
if not os.path.exists(entries_fn):
# FIXME: should we warn?
continue
dirurl, localrev = cls._get_svn_url_rev(base)
if base == location:
base = dirurl + '/' # save the root url
elif not dirurl or not dirurl.startswith(base):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
def get_netloc_and_auth(self, netloc, scheme):
"""
This override allows the auth information to be passed to svn via the
--username and --password options instead of via the URL.
"""
if scheme == 'ssh':
# The --username and --password options can't be used for
# svn+ssh URLs, so keep the auth information in the URL.
return super(Subversion, self).get_netloc_and_auth(
netloc, scheme)
return split_auth_from_netloc(netloc)
def get_url_rev_and_auth(self, url):
# hotfix the URL scheme: after removing svn+ from svn+ssh://, re-add it
url, rev, user_pass = super(Subversion, self).get_url_rev_and_auth(url)
if url.startswith('ssh://'):
url = 'svn+' + url
return url, rev, user_pass
def make_rev_args(self, username, password):
extra_args = []
if username:
extra_args += ['--username', username]
if password:
extra_args += ['--password', password]
return extra_args
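# Illustrative note (not part of pip's source): make_rev_args just turns
# credentials into extra svn command-line options, e.g. (hypothetical values)
#   make_rev_args('alice', 's3cret') -> ['--username', 'alice', '--password', 's3cret']
#   make_rev_args('alice', None)     -> ['--username', 'alice']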
@classmethod
def get_remote_url(cls, location):
# In cases where the source is in a subdirectory, not alongside
# setup.py we have to look up in the location until we find a real
# setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without
# finding setup.py
logger.warning(
"Could not find setup.py for directory %s (tried all "
"parent directories)",
orig_location,
)
return None
return cls._get_svn_url_rev(location)[0]
@classmethod
def _get_svn_url_rev(cls, location):
from pip._internal.exceptions import InstallationError
entries_path = os.path.join(location, cls.dirname, 'entries')
if os.path.exists(entries_path):
with open(entries_path) as f:
data = f.read()
else: # subversion >= 1.7 does not have the 'entries' file
data = ''
if (data.startswith('8') or
data.startswith('9') or
data.startswith('10')):
data = list(map(str.splitlines, data.split('\n\x0c\n')))
del data[0][0] # get rid of the '8'
url = data[0][3]
revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
elif data.startswith('<?xml'):
match = _svn_xml_url_re.search(data)
if not match:
raise ValueError('Badly formatted data: %r' % data)
url = match.group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
else:
try:
# subversion >= 1.7
xml = cls.run_command(
['info', '--xml', location],
show_stdout=False,
)
url = _svn_info_xml_url_re.search(xml).group(1)
revs = [
int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
]
except InstallationError:
url, revs = None, []
if revs:
rev = max(revs)
else:
rev = 0
return url, rev
@classmethod
def get_src_requirement(cls, location, project_name):
repo = cls.get_remote_url(location)
if repo is None:
return None
repo = 'svn+' + repo
rev = cls.get_revision(location)
return make_vcs_requirement_url(repo, rev, project_name)
def is_commit_id_equal(self, dest, name):
"""Always assume the versions don't match"""
return False
vcs.register(Subversion)
|
gpl-3.0
|
gauravbose/digital-menu
|
django/contrib/gis/gdal/prototypes/generation.py
|
122
|
4122
|
"""
This module contains functions that generate ctypes prototypes for the
GDAL routines.
"""
from ctypes import c_char_p, c_double, c_int, c_void_p
from functools import partial
from django.contrib.gis.gdal.prototypes.errcheck import (
check_arg_errcode, check_const_string, check_errcode, check_geom,
check_geom_offset, check_pointer, check_srs, check_str_arg, check_string,
)
class gdal_char_p(c_char_p):
pass
def double_output(func, argtypes, errcheck=False, strarg=False, cpl=False):
"Generates a ctypes function that returns a double value."
func.argtypes = argtypes
func.restype = c_double
if errcheck:
func.errcheck = partial(check_arg_errcode, cpl=cpl)
if strarg:
func.errcheck = check_str_arg
return func
def geom_output(func, argtypes, offset=None):
"""
Generates a function that returns a Geometry either by reference
(when an offset is given) or directly (when no offset is given).
"""
# Setting the argument types
func.argtypes = argtypes
if not offset:
# When a geometry pointer is directly returned.
func.restype = c_void_p
func.errcheck = check_geom
else:
# Error code returned, geometry is returned by-reference.
func.restype = c_int
def geomerrcheck(result, func, cargs):
return check_geom_offset(result, func, cargs, offset)
func.errcheck = geomerrcheck
return func
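# Illustrative note (not part of Django's source): geom_output is meant to wrap
# a raw ctypes function pointer, e.g. (hypothetical binding)
#   clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
# which gives the wrapped function a c_void_p restype and the check_geom
# errcheck, so callers get back a validated geometry pointer.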
def int_output(func, argtypes):
"Generates a ctypes function that returns an integer value."
func.argtypes = argtypes
func.restype = c_int
return func
def srs_output(func, argtypes):
"""
Generates a ctypes prototype for the given function with
the given C arguments that returns a pointer to an OGR
Spatial Reference System.
"""
func.argtypes = argtypes
func.restype = c_void_p
func.errcheck = check_srs
return func
def const_string_output(func, argtypes, offset=None, decoding=None, cpl=False):
func.argtypes = argtypes
if offset:
func.restype = c_int
else:
func.restype = c_char_p
def _check_const(result, func, cargs):
res = check_const_string(result, func, cargs, offset=offset, cpl=cpl)
if res and decoding:
res = res.decode(decoding)
return res
func.errcheck = _check_const
return func
def string_output(func, argtypes, offset=-1, str_result=False, decoding=None):
"""
Generates a ctypes prototype for the given function with the
given argument types that returns a string from a GDAL pointer.
The `const` flag indicates whether the allocated pointer should
be freed via the GDAL library routine VSIFree -- but this applies
only when `str_result` is True.
"""
func.argtypes = argtypes
if str_result:
# Use subclass of c_char_p so the error checking routine
# can free the memory at the pointer's address.
func.restype = gdal_char_p
else:
# Error code is returned
func.restype = c_int
# Dynamically defining our error-checking function with the
# given offset.
def _check_str(result, func, cargs):
res = check_string(result, func, cargs,
offset=offset, str_result=str_result)
if res and decoding:
res = res.decode(decoding)
return res
func.errcheck = _check_str
return func
def void_output(func, argtypes, errcheck=True, cpl=False):
"""
For functions that don't only return an error code that needs to
be examined.
"""
if argtypes:
func.argtypes = argtypes
if errcheck:
# `errcheck` keyword may be set to False for routines that
# return void, rather than a status code.
func.restype = c_int
func.errcheck = partial(check_errcode, cpl=cpl)
else:
func.restype = None
return func
def voidptr_output(func, argtypes, errcheck=True):
"For functions that return c_void_p."
func.argtypes = argtypes
func.restype = c_void_p
if errcheck:
func.errcheck = check_pointer
return func
|
bsd-3-clause
|
felipsmartins/namebench
|
libnamebench/base_ui.py
|
172
|
10284
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base user-interface workflow, to be inherited by UI modules."""
import tempfile
import addr_util
import benchmark
import better_webbrowser
import config
import data_sources
import geoip
import nameserver
import reporter
import providers
import site_connector
import util
__author__ = '[email protected] (Thomas Stromberg)'
class BaseUI(object):
"""Common methods for all UI implementations."""
def __init__(self):
self.SetupDataStructures()
def SetupDataStructures(self):
"""Instead of requiring users to inherit __init__(), this sets up structures."""
self.reporter = None
self.nameservers = None
self.bmark = None
self.report_path = None
self.csv_path = None
self.geodata = None
self.sources = {}
self.url = None
self.share_state = None
self.test_records = []
def UpdateStatus(self, msg, **kwargs):
"""Update the little status message on the bottom of the window."""
if hasattr(self, 'status_callback') and self.status_callback:
self.status_callback(msg, **kwargs)
else:
print msg
def DebugMsg(self, message):
self.UpdateStatus(message, debug=True)
def LoadDataSources(self):
self.data_src = data_sources.DataSources(status_callback=self.UpdateStatus)
def PrepareTestRecords(self):
"""Figure out what data source a user wants, and create test_records."""
if self.options.input_source:
src_type = self.options.input_source
else:
src_type = self.data_src.GetBestSourceDetails()[0]
self.options.input_source = src_type
self.test_records = self.data_src.GetTestsFromSource(
src_type,
self.options.query_count,
select_mode=self.options.select_mode
)
def GatherNameServerData(self):
"""Build a nameserver data set from config and other sources."""
ns_data = config.GetNameServerData()
for i, ip in enumerate(self.options.servers):
ns = nameserver.NameServer(ip, tags=['specified'], name='USR%s-%s' % (i, ip))
ns_data.append(ns)
return ns_data
def GetExternalNetworkData(self):
"""Return a domain and ASN for myself."""
asn = None
domain = None
client_ip = providers.GetExternalIp()
if client_ip:
# self.UpdateStatus("Detected external IP as %s" % client_ip)
local_ns = providers.SystemResolver()
hostname = local_ns.GetReverseIp(client_ip)
if hostname != client_ip:
domain = addr_util.GetDomainFromHostname(hostname)
else:
domain = None
asn = local_ns.GetAsnForIp(client_ip)
return (domain, asn)
def PrepareNameServers(self):
"""Setup self.nameservers to have a list of healthy fast servers."""
self.nameservers = self.GatherNameServerData()
self.nameservers.max_servers_to_check = self.options.max_servers_to_check
self.nameservers.thread_count = self.options.health_thread_count
require_tags = set()
include_tags = self.options.tags
country_code = None
if self.options.ipv6_only:
require_tags.add('ipv6')
elif self.options.ipv4_only:
require_tags.add('ipv4')
if self.options.tags.intersection(set(['nearby', 'country', 'likely-isp'])):
country_code, country_name, lat, lon = self.ConfiguredLocationData()
if country_code:
self.nameservers.SetClientLocation(lat, lon, country_code)
if self.options.tags.intersection(set(['isp','network'])):
domain, asn = self.GetExternalNetworkData()
if asn:
self.nameservers.SetNetworkLocation(domain, asn)
self.UpdateStatus("Looking for nameservers within %s or AS%s" % (domain, asn))
self.nameservers.AddNetworkTags()
if 'country' in self.options.tags:
include_tags.discard('country')
include_tags.add('country_%s' % country_code.lower())
if 'nearby' in self.options.tags and lat:
distance = self.options.distance
if 'country' in self.options.tags:
if self.nameservers.HasEnoughInCountryServers() and self.options.distance > self.options.overload_distance:
self.UpdateStatus("Looks like we already have >%s in-country servers, shortening nearby distance." % self.options.max_servers_to_check)
distance = self.options.overload_distance
self.UpdateStatus("Adding locality flags for servers within %skm of %s,%s" % (distance, lat, lon))
self.nameservers.AddLocalityTags(max_distance=distance)
self.nameservers.status_callback = self.UpdateStatus
self.UpdateStatus("DNS server filter: %s %s" % (','.join(include_tags),
','.join(require_tags)))
self.nameservers.FilterByTag(include_tags=include_tags,
require_tags=require_tags)
def ConfiguredLocationData(self):
self.DiscoverLocation()
if self.options.country:
country_code, country_name, lat, lon = geoip.GetInfoForCountry(self.options.country)
self.UpdateStatus("Set country to %s - %s (%s,%s)" % (country_code, country_name, lat, lon))
else:
country_code = self.geodata.get('country_code')
if not country_code:
return None, None, None, None
country_code, country_name = geoip.GetInfoForCountry(country_code)[0:2]
region = self.geodata.get('region_name')
lat = self.geodata.get('latitude')
lon = self.geodata.get('longitude')
self.UpdateStatus("Determined location as %s: %s, %s (%s,%s)" % (country_code, region, country_name, lat, lon))
return country_code, country_name, lat, lon
def CheckNameServerHealth(self):
self.nameservers.SetTimeouts(self.options.timeout,
self.options.ping_timeout,
self.options.health_timeout)
self.nameservers.CheckHealth(sanity_checks=config.GetSanityChecks())
def PrepareBenchmark(self):
"""Setup the benchmark object with the appropriate dataset."""
if len(self.nameservers) == 1:
thread_count = 1
else:
thread_count = self.options.benchmark_thread_count
self.bmark = benchmark.Benchmark(self.nameservers,
query_count=self.options.query_count,
run_count=self.options.run_count,
thread_count=thread_count,
status_callback=self.UpdateStatus)
def RunBenchmark(self):
"""Run the benchmark."""
results = self.bmark.Run(self.test_records)
self.UpdateStatus("Benchmark finished.")
index = []
if self.options.upload_results in (1, True):
connector = site_connector.SiteConnector(self.options, status_callback=self.UpdateStatus)
index_hosts = connector.GetIndexHosts()
if index_hosts:
index = self.bmark.RunIndex(index_hosts)
else:
index = []
self.DiscoverLocation()
self.reporter = reporter.ReportGenerator(self.options, self.nameservers,
results, index=index, geodata=self.geodata)
def DiscoverLocation(self):
if not getattr(self, 'geodata', None):
self.UpdateStatus("Determining your location...")
self.geodata = geoip.GetGeoData()
# Try again
if not self.geodata:
self.UpdateStatus("Determining your location (retry)...")
self.geodata = geoip.GetGeoData()
return self.geodata
def RunAndOpenReports(self):
"""Run the benchmark and open up the report on completion."""
self.RunBenchmark()
best = self.reporter.BestOverallNameServer()
self.CreateReports()
if self.options.template == 'html':
self.DisplayHtmlReport()
if self.url:
self.UpdateStatus('Complete! Your results: %s' % self.url)
else:
self.UpdateStatus('Complete! %s [%s] is the best.' % (best.name, best.ip))
def CreateReports(self):
"""Create CSV & HTML reports for the latest run."""
if self.options.output_file:
self.report_path = self.options.output_file
else:
self.report_path = util.GenerateOutputFilename(self.options.template)
if self.options.csv_file:
self.csv_path = self.options.csv_file
else:
self.csv_path = util.GenerateOutputFilename('csv')
if self.options.upload_results in (1, True):
# This is for debugging and transparency only.
self.json_path = util.GenerateOutputFilename('js')
self.UpdateStatus('Saving anonymized JSON to %s' % self.json_path)
json_data = self.reporter.CreateJsonData()
f = open(self.json_path, 'w')
f.write(json_data)
f.close()
self.UpdateStatus('Uploading results to %s' % self.options.site_url)
connector = site_connector.SiteConnector(self.options, status_callback=self.UpdateStatus)
self.url, self.share_state = connector.UploadJsonResults(
json_data,
hide_results=self.options.hide_results
)
if self.url:
self.UpdateStatus('Your sharing URL: %s (%s)' % (self.url, self.share_state))
self.UpdateStatus('Saving report to %s' % self.report_path)
f = open(self.report_path, 'w')
self.reporter.CreateReport(format=self.options.template,
output_fp=f,
csv_path=self.csv_path,
sharing_url=self.url,
sharing_state=self.share_state)
f.close()
self.UpdateStatus('Saving detailed results to %s' % self.csv_path)
self.reporter.SaveResultsToCsv(self.csv_path)
def DisplayHtmlReport(self):
self.UpdateStatus('Opening %s' % self.report_path)
better_webbrowser.output = self.DebugMsg
better_webbrowser.open(self.report_path)
|
apache-2.0
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/PIL/ImageMode.py
|
47
|
1306
|
#
# The Python Imaging Library.
# $Id$
#
# standard mode descriptors
#
# History:
# 2006-03-20 fl Added
#
# Copyright (c) 2006 by Secret Labs AB.
# Copyright (c) 2006 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
# mode descriptor cache
_modes = {}
##
# Wrapper for mode strings.
class ModeDescriptor(object):
def __init__(self, mode, bands, basemode, basetype):
self.mode = mode
self.bands = bands
self.basemode = basemode
self.basetype = basetype
def __str__(self):
return self.mode
##
# Gets a mode descriptor for the given mode.
def getmode(mode):
if not _modes:
# initialize mode cache
from PIL import Image
# core modes
for m, (basemode, basetype, bands) in Image._MODEINFO.items():
_modes[m] = ModeDescriptor(m, bands, basemode, basetype)
# extra experimental modes
_modes["LA"] = ModeDescriptor("LA", ("L", "A"), "L", "L")
_modes["PA"] = ModeDescriptor("PA", ("P", "A"), "RGB", "L")
# mapping modes
_modes["I;16"] = ModeDescriptor("I;16", "I", "L", "L")
_modes["I;16L"] = ModeDescriptor("I;16L", "I", "L", "L")
_modes["I;16B"] = ModeDescriptor("I;16B", "I", "L", "L")
return _modes[mode]
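# Illustrative note (not part of the original module): a sketch of what getmode
# returns, assuming the standard core modes are registered by PIL.Image:
#   m = getmode("RGB")
#   str(m)     -> "RGB"
#   m.bands    -> ("R", "G", "B")
#   m.basemode -> "RGB"
#   m.basetype -> "L"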
|
bsd-3-clause
|
WhireCrow/openwrt-mt7620
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/OutputWindow.py
|
38
|
4392
|
from Tkinter import *
from idlelib.EditorWindow import EditorWindow
import re
import tkMessageBox
from idlelib import IOBinding
class OutputWindow(EditorWindow):
"""An editor window that can serve as an output file.
Also the future base class for the Python shell window.
This class has no input facilities.
"""
def __init__(self, *args):
EditorWindow.__init__(self, *args)
self.text.bind("<<goto-file-line>>", self.goto_file_line)
# Customize EditorWindow
def ispythonsource(self, filename):
# No colorization needed
return 0
def short_title(self):
return "Output"
def maybesave(self):
# Override base class method -- don't ask any questions
if self.get_saved():
return "yes"
else:
return "no"
# Act as output file
def write(self, s, tags=(), mark="insert"):
# Tk assumes that byte strings are Latin-1;
# we assume that they are in the locale's encoding
if isinstance(s, str):
try:
s = unicode(s, IOBinding.encoding)
except UnicodeError:
# some other encoding; let Tcl deal with it
pass
self.text.insert(mark, s, tags)
self.text.see(mark)
self.text.update()
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
pass
# Our own right-button menu
rmenu_specs = [
("Go to file/line", "<<goto-file-line>>"),
]
file_line_pats = [
# order of patterns matters
r'file "([^"]*)", line (\d+)',
r'([^\s]+)\((\d+)\)',
r'^(\s*\S.*?):\s*(\d+):', # Win filename, maybe starting with spaces
r'([^\s]+):\s*(\d+):', # filename or path, ltrim
r'^\s*(\S.*?):\s*(\d+):', # Win abs path with embedded spaces, ltrim
]
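# Illustrative note (not part of the original source): the first pattern above
# matches traceback-style lines (case-insensitively), e.g. the hypothetical line
#   File "spam.py", line 42
# yields filename "spam.py" and line number "42".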
file_line_progs = None
def goto_file_line(self, event=None):
if self.file_line_progs is None:
l = []
for pat in self.file_line_pats:
l.append(re.compile(pat, re.IGNORECASE))
self.file_line_progs = l
# x, y = self.event.x, self.event.y
# self.text.mark_set("insert", "@%d,%d" % (x, y))
line = self.text.get("insert linestart", "insert lineend")
result = self._file_line_helper(line)
if not result:
# Try the previous line. This is handy e.g. in tracebacks,
# where you tend to right-click on the displayed source line
line = self.text.get("insert -1line linestart",
"insert -1line lineend")
result = self._file_line_helper(line)
if not result:
tkMessageBox.showerror(
"No special line",
"The line you point at doesn't look like "
"a valid file name followed by a line number.",
master=self.text)
return
filename, lineno = result
edit = self.flist.open(filename)
edit.gotoline(lineno)
def _file_line_helper(self, line):
for prog in self.file_line_progs:
match = prog.search(line)
if match:
filename, lineno = match.group(1, 2)
try:
f = open(filename, "r")
f.close()
break
except IOError:
continue
else:
return None
try:
return filename, int(lineno)
except TypeError:
return None
# These classes are currently not used but might come in handy
class OnDemandOutputWindow:
tagdefs = {
# XXX Should use IdlePrefs.ColorPrefs
"stdout": {"foreground": "blue"},
"stderr": {"foreground": "#007700"},
}
def __init__(self, flist):
self.flist = flist
self.owin = None
def write(self, s, tags, mark):
if not self.owin:
self.setup()
self.owin.write(s, tags, mark)
def setup(self):
self.owin = owin = OutputWindow(self.flist)
text = owin.text
for tag, cnf in self.tagdefs.items():
if cnf:
text.tag_configure(tag, **cnf)
text.tag_raise('sel')
self.write = self.owin.write
|
gpl-2.0
|
lekston/ardupilot
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/embed.py
|
3
|
1467
|
#!/usr/bin/env python
'''
script to create ap_romfs_embedded.h from a set of static files
Andrew Tridgell
May 2017
'''
import os
def write_encode(out, s):
out.write(s.encode())
def embed_file(out, f, idx):
'''embed one file'''
contents = open(f,'rb').read()
write_encode(out, 'static const uint8_t ap_romfs_%u[] = {' % idx)
for c in bytearray(contents):
write_encode(out, '%u,' % c)
write_encode(out, '};\n\n')
def create_embedded_h(filename, files):
'''create a ap_romfs_embedded.h file'''
this_dir = os.path.realpath(__file__)
rootdir = os.path.relpath(os.path.join(this_dir, "../../../../.."))
out = open(filename, "wb")
write_encode(out, '''// generated embedded files for AP_ROMFS\n\n''')
for i in range(len(files)):
(name, filename) = files[i]
filename = os.path.join(rootdir, filename)
embed_file(out, filename, i)
write_encode(out, '''const AP_ROMFS::embedded_file AP_ROMFS::files[] = {\n''')
for i in range(len(files)):
(name, filename) = files[i]
print(("Embedding file %s:%s" % (name, filename)).encode())
write_encode(out, '{ "%s", sizeof(ap_romfs_%u), ap_romfs_%u },\n' % (name, i, i))
write_encode(out, '};\n')
out.close()
if __name__ == '__main__':
import sys
flist = []
for i in range(1, len(sys.argv)):
f = sys.argv[i]
flist.append((f, f))
create_embedded_h("/tmp/ap_romfs_embedded.h", flist)
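# Illustrative note (not part of the original script): a hypothetical invocation
#   python embed.py defaults.parm bootloader.bin
# writes /tmp/ap_romfs_embedded.h with one ap_romfs_<n> byte array per input
# file plus the AP_ROMFS::files[] index table.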
|
gpl-3.0
|
Pulgama/supriya
|
supriya/examples/grey_wash/synthdefs/allpass.py
|
1
|
2011
|
from supriya import synthdefs, ugens
from .. import project_settings
def signal_block_pre(builder, source, state):
source *= ugens.Line.kr(duration=0.1) # protect against clicks
return source
def signal_block(builder, source, state):
allpasses = []
maximum_delay = 1
iterations = state.get("iterations") or 3
for output in source:
for _ in range(iterations):
output = ugens.AllpassC.ar(
decay_time=ugens.LFDNoise3.kr(
frequency=ugens.ExpRand.ir(0.01, 0.1)
).scale(-1, 1, 0.01, 1),
delay_time=ugens.LFDNoise3.kr(
frequency=ugens.ExpRand.ir(0.01, 0.1)
).scale(-1, 1, 0.01, 1, exponential=True),
maximum_delay_time=maximum_delay,
source=output,
)
allpasses.append(output)
source = synthdefs.UGenArray(allpasses)
return source
def signal_block_post(builder, source, state):
source = ugens.LeakDC.ar(source=source)
source *= builder["gain"].db_to_amplitude()
source = ugens.Limiter.ar(duration=ugens.Rand.ir(0.005, 0.015), source=source)
return source
def feedback_loop(builder, source, state):
source = ugens.HPF.ar(source=source, frequency=1000)
source *= ugens.LFNoise1.kr(frequency=0.05).squared().s_curve()
source *= -0.99
return source
factory = (
synthdefs.SynthDefFactory(
channel_count=project_settings["server_options"]["output_bus_channel_count"],
gain=0,
)
.with_input()
.with_signal_block(signal_block_pre)
.with_signal_block(signal_block)
.with_signal_block(signal_block_post)
.with_feedback_loop(feedback_loop)
.with_rand_id()
)
allpass_synthdef = factory.with_output(crossfaded=True).build(name="allpass")
windowed_allpass_synthdef = factory.with_output(
crossfaded=True, leveled=True, windowed=True
).build(name="windowed_allpass")
__all__ = ["allpass_synthdef", "windowed_allpass_synthdef"]
|
mit
|
vbannai/neutron
|
neutron/tests/unit/nec/test_security_group.py
|
7
|
4054
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.api.v2 import attributes
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.plugins.nec.db import api as ndb # noqa
from neutron.tests.unit.nec import test_nec_plugin
from neutron.tests.unit import test_extension_security_group as test_sg
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
PLUGIN_NAME = test_nec_plugin.PLUGIN_NAME
OFC_MANAGER = 'neutron.plugins.nec.nec_plugin.ofc_manager.OFCManager'
NOTIFIER = 'neutron.plugins.nec.nec_plugin.NECPluginV2AgentNotifierApi'
class NecSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
def setUp(self, plugin=None):
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
mock.patch(NOTIFIER).start()
mock.patch(OFC_MANAGER).start()
self._attribute_map_bk_ = {}
for item in attributes.RESOURCE_ATTRIBUTE_MAP:
self._attribute_map_bk_[item] = (attributes.
RESOURCE_ATTRIBUTE_MAP[item].
copy())
super(NecSecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
plugin = manager.NeutronManager.get_plugin()
self.notifier = plugin.notifier
self.rpc = plugin.callback_sg
def tearDown(self):
super(NecSecurityGroupsTestCase, self).tearDown()
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_
class TestNecSGServerRpcCallBack(
test_sg_rpc.SGServerRpcCallBackMixinTestCase,
NecSecurityGroupsTestCase):
pass
class TestNecSGServerRpcCallBackXML(
test_sg_rpc.SGServerRpcCallBackMixinTestCaseXML,
NecSecurityGroupsTestCase):
pass
class TestNecSecurityGroups(NecSecurityGroupsTestCase,
test_sg.TestSecurityGroups,
test_sg_rpc.SGNotificationTestMixin):
def test_security_group_get_port_from_device(self):
with contextlib.nested(self.network(),
self.security_group()) as (n, sg):
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'])
port = self.deserialize(self.fmt, res)
port_id = port['port']['id']
sg_id = sg['security_group']['id']
fixed_ips = port['port']['fixed_ips']
data = {'port': {'fixed_ips': fixed_ips,
'name': port['port']['name'],
ext_sg.SECURITYGROUPS: [sg_id]}}
req = self.new_update_request('ports', data, port_id)
res = self.deserialize(self.fmt,
req.get_response(self.api))
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin.callback_sg.get_port_from_device(port_id)
self.assertEqual(port_id, port_dict['id'])
self.assertEqual([sg_id],
port_dict[ext_sg.SECURITYGROUPS])
self.assertEqual([], port_dict['security_group_rules'])
self.assertEqual([fixed_ips[0]['ip_address']],
port_dict['fixed_ips'])
self._delete('ports', port_id)
class TestNecSecurityGroupsXML(TestNecSecurityGroups):
fmt = 'xml'
|
apache-2.0
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/python/training/evaluation_test.py
|
59
|
6429
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.training.evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
_USE_GLOBAL_STEP = 0
def logistic_classifier(inputs):
return layers.dense(inputs, 1, activation=math_ops.sigmoid)
def local_variable(init_value, name):
return variable_scope.get_variable(
name,
dtype=dtypes.float32,
initializer=init_value,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
class EvaluateOnceTest(test.TestCase):
def setUp(self):
super(EvaluateOnceTest, self).setUp()
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _train_model(self, checkpoint_dir, num_steps):
"""Trains a simple classification model.
Note that the data has been configured such that after around 300 steps,
the model has memorized the dataset (e.g. we can expect 100% accuracy).
Args:
checkpoint_dir: The directory where the checkpoint is written to.
num_steps: The number of steps to train for.
"""
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss_op = losses.log_loss(labels=tf_labels, predictions=tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = optimizer.minimize(loss_op,
training.get_or_create_global_step())
with monitored_session.MonitoredTrainingSession(
checkpoint_dir=checkpoint_dir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)]) as session:
loss = None
while not session.should_stop():
_, loss = session.run([train_op, loss_op])
if num_steps >= 300:
assert loss < .015
def testEvaluatePerfectModel(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_perfect_model_once')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics.accuracy(
predictions=predictions, labels=labels)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=update_op,
final_ops={'accuracy': accuracy},
hooks=[evaluation._StopAfterNEvalsHook(1),])
self.assertTrue(final_ops_values['accuracy'] > .99)
def testEvalOpAndFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
num_evals = 5
final_increment = 9.0
my_var = local_variable(0.0, name='MyVar')
eval_ops = state_ops.assign_add(my_var, 1.0)
final_ops = array_ops.identity(my_var) + final_increment
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=eval_ops,
final_ops={'value': final_ops},
hooks=[evaluation._StopAfterNEvalsHook(num_evals),])
self.assertEqual(final_ops_values['value'], num_evals + final_increment)
def testOnlyFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
final_increment = 9.0
my_var = local_variable(0.0, name='MyVar')
final_ops = array_ops.identity(my_var) + final_increment
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
self.assertEqual(final_ops_values['value'], final_increment)
if __name__ == '__main__':
test.main()
|
mit
|
buzztroll/cloudinit.d
|
cloudinitd/nosetests/__init__.py
|
2
|
1238
|
import cloudinitd
import os
dir = os.path.dirname(os.path.abspath(cloudinitd.__file__))
dir = os.path.dirname(dir)
g_plans_dir = os.path.join(dir, "tests/plans/")
if 'CLOUDINITD_IAAS_ACCESS_KEY' not in os.environ and 'CLOUDINITD_IAAS_SECRET_KEY' not in os.environ:
os.environ['CLOUDINITD_TESTENV'] = "1"
os.environ['CLOUDINITD_FAB'] = cloudinitd.find_true()
os.environ['CLOUDINITD_SSH'] = cloudinitd.find_true()
os.environ['CLOUDINITD_IAAS_ACCESS_KEY'] = "NOTHING"
os.environ['CLOUDINITD_IAAS_SECRET_KEY'] = "NOTHING"
os.environ['CLOUDINITD_IAAS_ACCESS_KEY'] = "notrealkey"
os.environ['CLOUDINITD_IAAS_SECRET_KEY'] = "notrealkey"
os.environ['CLOUDINITD_IAAS_URL'] = "NOTHING"
os.environ['CLOUDINITD_IAAS_IMAGE'] = "NOTHING"
#os.environ['CLOUDINITD_IAAS_TYPE'] =
os.environ['CLOUDINITD_IAAS_ALLOCATION'] = "NOTHING"
os.environ['CLOUDINITD_IAAS_SSHKEYNAME'] = "NOTHING"
# keep this one if it is set. for localhost tests.
os.environ['CLOUDINITD_IAAS_SSHKEY'] = os.environ.get('CLOUDINITD_IAAS_SSHKEY', "/etc/group")
os.environ['CLOUDINITD_SSH_USERNAME'] = "NOTHING"
def is_a_test():
return 'CLOUDINITD_TESTENV' in os.environ and os.environ['CLOUDINITD_TESTENV'] == "1"
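# Hedged usage note (the nosetests invocation is an illustrative assumption):
# importing this package with no CLOUDINITD_IAAS_* keys exported, e.g. via
#   nosetests cloudinitd/nosetests
# activates the stubbed environment above, and individual tests can guard
# cloud-only code paths with
#   if is_a_test():
#       ...  # skip the real IaaS calls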
|
apache-2.0
|
bhcopeland/ansible-modules-extras
|
system/alternatives.py
|
29
|
5333
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage symbolic link alternatives.
(c) 2014, Gabe Mulley <[email protected]>
(c) 2015, David Wittman <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
version_added: "1.6"
author:
- "David Wittman (@DavidWittman)"
- "Gabe Mulley (@mulby)"
options:
name:
description:
- The generic name of the link.
required: true
path:
description:
- The path to the real executable that the link should point to.
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
- This option is required on RHEL-based distributions
required: false
priority:
description:
- The priority of the alternative
required: false
default: 50
version_added: "2.2"
requirements: [ update-alternatives ]
'''
EXAMPLES = '''
- name: correct java version selected
alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: alternatives link created
alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible
- name: make java 32 bit an alternative with low priority
alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-i386/jre/bin/java priority=-10
'''
import re
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
path = dict(required=True, type='path'),
link = dict(required=False, type='path'),
priority = dict(required=False, type='int',
default=50),
),
supports_check_mode=True,
)
params = module.params
name = params['name']
path = params['path']
link = params['link']
priority = params['priority']
UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True)
current_path = None
all_alternatives = []
# Run `update-alternatives --display <name>` to find existing alternatives
(rc, display_output, _) = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
)
if rc == 0:
# Alternatives already exist for this link group
# Parse the output to determine the current path of the symlink and
# available alternatives
current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
re.MULTILINE)
alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
current_path = current_path_regex.search(display_output).group(1)
all_alternatives = alternative_regex.findall(display_output)
if not link:
# Read the current symlink target from `update-alternatives --query`
# in case we need to install the new alternative before setting it.
#
# This is only compatible on Debian-based systems, as the other
# alternatives don't have --query available
rc, query_output, _ = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
)
if rc == 0:
for line in query_output.splitlines():
if line.startswith('Link:'):
link = line.split()[1]
break
if current_path != path:
if module.check_mode:
module.exit_json(changed=True, current_path=current_path)
try:
# install the requested path if necessary
if path not in all_alternatives:
if not link:
module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
module.run_command(
[UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
check_rc=True
)
# select the requested path
module.run_command(
[UPDATE_ALTERNATIVES, '--set', name, path],
check_rc=True
)
module.exit_json(changed=True)
except subprocess.CalledProcessError:
e = get_exception()
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
main()
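# For reference, a sketch of the update-alternatives invocations this module
# drives (the editor/vim values are illustrative assumptions): when the
# requested path is not yet registered it first runs
#   update-alternatives --install /usr/bin/editor editor /usr/bin/vim 50
# and then selects it with
#   update-alternatives --set editor /usr/bin/vim
# mirroring the two module.run_command() calls above.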
|
gpl-3.0
|
peastman/msmbuilder
|
msmbuilder/preprocessing/__init__.py
|
9
|
3185
|
# Author: Carlos Xavier Hernandez <[email protected]>
# Contributors:
# Copyright (c) 2016, Stanford University and the Authors
# All rights reserved.
from __future__ import print_function, division, absolute_import
from sklearn import preprocessing
from .base import (MultiSequencePreprocessingMixin,
MultiSequenceOnlinePreprocessingMixin)
from .timeseries import Butterworth, EWMA, DoubleEWMA
__all__ = ['Binarizer', 'Butterworth', 'DoubleEWMA', 'EWMA', 'Imputer',
'KernelCenterer', 'LabelBinarizer', 'MultiLabelBinarizer',
'Normalizer', 'PolynomialFeatures']
class Binarizer(MultiSequencePreprocessingMixin, preprocessing.Binarizer):
__doc__ = preprocessing.Binarizer.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing, 'FunctionTransformer'):
__all__.append('FunctionTransformer')
class FunctionTransformer(MultiSequencePreprocessingMixin,
preprocessing.FunctionTransformer):
__doc__ = preprocessing.FunctionTransformer.__doc__
class Imputer(MultiSequencePreprocessingMixin, preprocessing.Imputer):
__doc__ = preprocessing.Imputer.__doc__
class KernelCenterer(MultiSequencePreprocessingMixin,
preprocessing.KernelCenterer):
__doc__ = preprocessing.KernelCenterer.__doc__
class LabelBinarizer(MultiSequencePreprocessingMixin,
preprocessing.LabelBinarizer):
__doc__ = preprocessing.LabelBinarizer.__doc__
class MultiLabelBinarizer(MultiSequencePreprocessingMixin,
preprocessing.MultiLabelBinarizer):
__doc__ = preprocessing.MultiLabelBinarizer.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing.MinMaxScaler, 'partial_fit'):
__all__.append('MinMaxScaler')
class MinMaxScaler(MultiSequenceOnlinePreprocessingMixin,
preprocessing.MinMaxScaler):
__doc__ = preprocessing.MinMaxScaler.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing, 'MaxAbsScaler'):
__all__.append('MaxAbsScaler')
class MaxAbsScaler(MultiSequenceOnlinePreprocessingMixin,
preprocessing.MaxAbsScaler):
__doc__ = preprocessing.MaxAbsScaler.__doc__
class Normalizer(MultiSequencePreprocessingMixin, preprocessing.Normalizer):
__doc__ = preprocessing.Normalizer.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing, 'RobustScaler'):
__all__.append('RobustScaler')
class RobustScaler(MultiSequencePreprocessingMixin,
preprocessing.RobustScaler):
__doc__ = preprocessing.RobustScaler.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing.StandardScaler, 'partial_fit'):
__all__.append('StandardScaler')
class StandardScaler(MultiSequenceOnlinePreprocessingMixin,
preprocessing.StandardScaler):
__doc__ = preprocessing.StandardScaler.__doc__
class PolynomialFeatures(MultiSequencePreprocessingMixin,
preprocessing.PolynomialFeatures):
__doc__ = preprocessing.PolynomialFeatures.__doc__
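# Hedged usage sketch (the trajectory arrays are made-up assumptions): these
# wrappers follow msmbuilder's multi-sequence convention, i.e. they operate on
# a list of 2D arrays rather than a single array, e.g.
#   import numpy as np
#   trajs = [np.random.randn(100, 3), np.random.randn(50, 3)]
#   normed = Normalizer().fit_transform(trajs)  # one transformed array per trajectory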
|
lgpl-2.1
|
vitan/hue
|
desktop/libs/hadoop/gen-py/hadoop/api/hdfs/Namenode.py
|
37
|
155600
|
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import hadoop.api.common.HadoopServiceBase
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface(hadoop.api.common.HadoopServiceBase.Iface):
"""
Provides an interface to a Hadoop Namenode. It is basically a Thrift
translation of org.apache.hadoop.hdfs.protocol.ClientProtocol.
"""
def chmod(self, ctx, path, perms):
"""
Set permissions of an existing file or directory.
Parameters:
- ctx
- path: Path of the file or directory.
- perms: New permissions for the file or directory.
"""
pass
def chown(self, ctx, path, owner, group):
"""
Set owner of a file or directory.
If either parameter 'owner' or 'group' is set to null, that
parameter is left unchanged.
Parameters 'owner' and 'group' cannot be both null.
Parameters:
- ctx
- path: Path to the file or directory
- owner: New owner.
- group: New group.
"""
pass
def df(self, ctx):
"""
Return a list containing:
(index 0) The total storage capacity of the file system (in bytes).
(index 1) The total used space of the file system (in bytes).
(index 2) The available storage of the file system (in bytes).
Parameters:
- ctx
"""
pass
def enterSafeMode(self, ctx):
"""
Enter safe mode.
Parameters:
- ctx
"""
pass
def getBlocks(self, ctx, path, offset, length):
"""
Get a list of all blocks containing a region of a file
Parameters:
- ctx
- path: Path to the file.
- offset: Offset of the region.
- length: Length of the region
"""
pass
def getPreferredBlockSize(self, ctx, path):
"""
Get the preferred block size for the given file.
The path must exist, or common.IOException is thrown.
Parameters:
- ctx
- path: Path to the file.
"""
pass
def isInSafeMode(self, ctx):
"""
Returns whether HDFS is in safe mode or not.
Parameters:
- ctx
"""
pass
def leaveSafeMode(self, ctx):
"""
Leave safe mode.
Parameters:
- ctx
"""
pass
def ls(self, ctx, path):
"""
Get a listing of the indicated directory.
Parameters:
- ctx
- path: Path to the directory.
"""
pass
def mkdirhier(self, ctx, path, perms):
"""
Create a directory (or hierarchy of directories).
Returns false if directory did not exist and could not be created,
true otherwise.
Parameters:
- ctx
- path: Path to the directory.
- perms: Access permissions of the directory.
"""
pass
def refreshNodes(self, ctx):
"""
Tells the name node to reread the hosts and exclude files.
Parameters:
- ctx
"""
pass
def rename(self, ctx, path, newPath):
"""
Rename an item in the file system namespace.
Returns true if successful, or
false if the old name does not exist or if the new name already
belongs to the namespace.
Parameters:
- ctx
- path: Path to existing file or directory.
- newPath: New path.
"""
pass
def reportBadBlocks(self, ctx, blocks):
"""
Report corrupted blocks.
Parameters:
- ctx
- blocks: List of corrupted blocks.
"""
pass
def stat(self, ctx, path):
"""
Get information about a path in HDFS.
Return value will be null if path does not exist.
Parameters:
- ctx
- path: Path of the file or directory.
"""
pass
def getContentSummary(self, ctx, Path):
"""
Get the summary of a directory's contents.
Note that this has runtime linear in the total number of nodes
in the directory tree - this can be expensive for directories
near the top of a big HDFS. Use with care.
Parameters:
- ctx
- Path
"""
pass
def multiGetContentSummary(self, ctx, paths):
"""
Get ContentSummary objects for multiple directories simultaneously. The same warnings
apply as for getContentSummary(...) above.
Parameters:
- ctx
- paths
"""
pass
def setQuota(self, ctx, path, namespaceQuota, diskspaceQuota):
"""
Set the quota for a directory.
Quota parameters may have three types of values:
(1) 0 or more: Quota will be set to that value.
(2) QUOTA_DONT_SET: Quota will not be changed,
(3) QUOTA_RESET: Quota will be reset.
Any other value is a runtime error.
Parameters:
- ctx
- path: Path of the directory.
- namespaceQuota: Limit on the number of names in the directory.
- diskspaceQuota: Limit on disk space occupied by all the files in the
directory.
"""
pass
def setReplication(self, ctx, path, replication):
"""
Set replication factor for an existing file.
This call just updates the value of the replication factor. The actual
block replication is not expected to be performed during this method call.
The blocks will be populated or removed in the background as the result of
the routine block maintenance procedures.
Returns true if successful, false if file does not exist or is a
directory.
Parameters:
- ctx
- path: Path of the file.
- replication: New replication factor.
"""
pass
def unlink(self, ctx, path, recursive):
"""
Delete a file or directory from the file system.
Any blocks belonging to the deleted files will be garbage-collected.
Parameters:
- ctx
- path: Path of the file or directory.
- recursive: Delete a non-empty directory recursively.
"""
pass
def utime(self, ctx, path, atime, mtime):
"""
Sets the modification and access time of a file or directory.
Setting *one single time parameter* to -1 means that time parameter
must not be set by this call.
Setting *both time parameters* to -1 means both of them must be set to
the current time.
Parameters:
- ctx
- path: Path of the file or directory.
- atime: Access time in milliseconds since 1970-01-01 00:00 UTC
- mtime: Modification time in milliseconds since 1970-01-01 00:00 UTC
"""
pass
def datanodeUp(self, name, storage, thriftPort):
"""
Inform the namenode that a datanode process has started.
Parameters:
- name: <host name>:<port number> of the datanode
- storage: the storage id of the datanode
- thriftPort: Thrift port of the datanode
"""
pass
def datanodeDown(self, name, storage, thriftPort):
"""
Inform the namenode that a datanode process has stopped.
Parameters:
- name: <host name>:<port number> of the datanode
- storage: the storage id of the datanode
- thriftPort: Thrift port of the datanode
"""
pass
def getDelegationToken(self, ctx, renewer):
"""
Get an HDFS delegation token.
Parameters:
- ctx
- renewer
"""
pass
class Client(hadoop.api.common.HadoopServiceBase.Client, Iface):
"""
Provides an interface to a Hadoop Namenode. It is basically a Thrift
translation of org.apache.hadoop.hdfs.protocol.ClientProtocol.
"""
def __init__(self, iprot, oprot=None):
hadoop.api.common.HadoopServiceBase.Client.__init__(self, iprot, oprot)
def chmod(self, ctx, path, perms):
"""
Set permissions of an existing file or directory.
Parameters:
- ctx
- path: Path of the file or directory.
- perms: New permissions for the file or directory.
"""
self.send_chmod(ctx, path, perms)
self.recv_chmod()
def send_chmod(self, ctx, path, perms):
self._oprot.writeMessageBegin('chmod', TMessageType.CALL, self._seqid)
args = chmod_args()
args.ctx = ctx
args.path = path
args.perms = perms
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_chmod(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = chmod_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def chown(self, ctx, path, owner, group):
"""
Set owner of a file or directory.
If either parameter 'owner' or 'group' is set to null, that
parameter is left unchanged.
Parameters 'owner' and 'group' cannot be both null.
Parameters:
- ctx
- path: Path to the file or directory
- owner: New owner.
- group: New group.
"""
self.send_chown(ctx, path, owner, group)
self.recv_chown()
def send_chown(self, ctx, path, owner, group):
self._oprot.writeMessageBegin('chown', TMessageType.CALL, self._seqid)
args = chown_args()
args.ctx = ctx
args.path = path
args.owner = owner
args.group = group
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_chown(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = chown_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def df(self, ctx):
"""
Return a list containing:
(index 0) The total storage capacity of the file system (in bytes).
(index 1) The total used space of the file system (in bytes).
(index 2) The available storage of the file system (in bytes).
Parameters:
- ctx
"""
self.send_df(ctx)
return self.recv_df()
def send_df(self, ctx):
self._oprot.writeMessageBegin('df', TMessageType.CALL, self._seqid)
args = df_args()
args.ctx = ctx
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_df(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = df_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "df failed: unknown result");
def enterSafeMode(self, ctx):
"""
Enter safe mode.
Parameters:
- ctx
"""
self.send_enterSafeMode(ctx)
self.recv_enterSafeMode()
def send_enterSafeMode(self, ctx):
self._oprot.writeMessageBegin('enterSafeMode', TMessageType.CALL, self._seqid)
args = enterSafeMode_args()
args.ctx = ctx
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_enterSafeMode(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = enterSafeMode_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def getBlocks(self, ctx, path, offset, length):
"""
Get a list of all blocks containing a region of a file
Parameters:
- ctx
- path: Path to the file.
- offset: Offset of the region.
- length: Length of the region
"""
self.send_getBlocks(ctx, path, offset, length)
return self.recv_getBlocks()
def send_getBlocks(self, ctx, path, offset, length):
self._oprot.writeMessageBegin('getBlocks', TMessageType.CALL, self._seqid)
args = getBlocks_args()
args.ctx = ctx
args.path = path
args.offset = offset
args.length = length
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getBlocks(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getBlocks_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "getBlocks failed: unknown result");
def getPreferredBlockSize(self, ctx, path):
"""
Get the preferred block size for the given file.
The path must exist, or common.IOException is thrown.
Parameters:
- ctx
- path: Path to the file.
"""
self.send_getPreferredBlockSize(ctx, path)
return self.recv_getPreferredBlockSize()
def send_getPreferredBlockSize(self, ctx, path):
self._oprot.writeMessageBegin('getPreferredBlockSize', TMessageType.CALL, self._seqid)
args = getPreferredBlockSize_args()
args.ctx = ctx
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getPreferredBlockSize(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getPreferredBlockSize_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "getPreferredBlockSize failed: unknown result");
def isInSafeMode(self, ctx):
"""
Returns whether HDFS is in safe mode or not.
Parameters:
- ctx
"""
self.send_isInSafeMode(ctx)
return self.recv_isInSafeMode()
def send_isInSafeMode(self, ctx):
self._oprot.writeMessageBegin('isInSafeMode', TMessageType.CALL, self._seqid)
args = isInSafeMode_args()
args.ctx = ctx
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_isInSafeMode(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = isInSafeMode_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "isInSafeMode failed: unknown result");
def leaveSafeMode(self, ctx):
"""
Leave safe mode.
Parameters:
- ctx
"""
self.send_leaveSafeMode(ctx)
self.recv_leaveSafeMode()
def send_leaveSafeMode(self, ctx):
self._oprot.writeMessageBegin('leaveSafeMode', TMessageType.CALL, self._seqid)
args = leaveSafeMode_args()
args.ctx = ctx
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_leaveSafeMode(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = leaveSafeMode_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def ls(self, ctx, path):
"""
Get a listing of the indicated directory.
Parameters:
- ctx
- path: Path to the directory.
"""
self.send_ls(ctx, path)
return self.recv_ls()
def send_ls(self, ctx, path):
self._oprot.writeMessageBegin('ls', TMessageType.CALL, self._seqid)
args = ls_args()
args.ctx = ctx
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ls(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = ls_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "ls failed: unknown result");
def mkdirhier(self, ctx, path, perms):
"""
Create a directory (or hierarchy of directories).
Returns false if directory did not exist and could not be created,
true otherwise.
Parameters:
- ctx
- path: Path to the directory.
- perms: Access permissions of the directory.
"""
self.send_mkdirhier(ctx, path, perms)
return self.recv_mkdirhier()
def send_mkdirhier(self, ctx, path, perms):
self._oprot.writeMessageBegin('mkdirhier', TMessageType.CALL, self._seqid)
args = mkdirhier_args()
args.ctx = ctx
args.path = path
args.perms = perms
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mkdirhier(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mkdirhier_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "mkdirhier failed: unknown result");
def refreshNodes(self, ctx):
"""
Tells the name node to reread the hosts and exclude files.
Parameters:
- ctx
"""
self.send_refreshNodes(ctx)
self.recv_refreshNodes()
def send_refreshNodes(self, ctx):
self._oprot.writeMessageBegin('refreshNodes', TMessageType.CALL, self._seqid)
args = refreshNodes_args()
args.ctx = ctx
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_refreshNodes(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = refreshNodes_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def rename(self, ctx, path, newPath):
"""
Rename an item in the file system namespace.
Returns true if successful, or
false if the old name does not exist or if the new name already
belongs to the namespace.
Parameters:
- ctx
- path: Path to existing file or directory.
- newPath: New path.
"""
self.send_rename(ctx, path, newPath)
return self.recv_rename()
def send_rename(self, ctx, path, newPath):
self._oprot.writeMessageBegin('rename', TMessageType.CALL, self._seqid)
args = rename_args()
args.ctx = ctx
args.path = path
args.newPath = newPath
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_rename(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = rename_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "rename failed: unknown result");
def reportBadBlocks(self, ctx, blocks):
"""
Report corrupted blocks.
Parameters:
- ctx
- blocks: List of corrupted blocks.
"""
self.send_reportBadBlocks(ctx, blocks)
self.recv_reportBadBlocks()
def send_reportBadBlocks(self, ctx, blocks):
self._oprot.writeMessageBegin('reportBadBlocks', TMessageType.CALL, self._seqid)
args = reportBadBlocks_args()
args.ctx = ctx
args.blocks = blocks
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_reportBadBlocks(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = reportBadBlocks_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def stat(self, ctx, path):
"""
Get information about a path in HDFS.
Return value will be null if path does not exist.
Parameters:
- ctx
- path: Path of the file or directory.
"""
self.send_stat(ctx, path)
return self.recv_stat()
def send_stat(self, ctx, path):
self._oprot.writeMessageBegin('stat', TMessageType.CALL, self._seqid)
args = stat_args()
args.ctx = ctx
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_stat(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = stat_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "stat failed: unknown result");
def getContentSummary(self, ctx, Path):
"""
Get the summary of a directory's contents.
Note that this has runtime linear in the total number of nodes
in the directory tree - this can be expensive for directories
near the top of a big HDFS. Use with care.
Parameters:
- ctx
- Path
"""
self.send_getContentSummary(ctx, Path)
return self.recv_getContentSummary()
def send_getContentSummary(self, ctx, Path):
self._oprot.writeMessageBegin('getContentSummary', TMessageType.CALL, self._seqid)
args = getContentSummary_args()
args.ctx = ctx
args.Path = Path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getContentSummary(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getContentSummary_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "getContentSummary failed: unknown result");
def multiGetContentSummary(self, ctx, paths):
"""
Get ContentSummary objects for multiple directories simultaneously. The same warnings
apply as for getContentSummary(...) above.
Parameters:
- ctx
- paths
"""
self.send_multiGetContentSummary(ctx, paths)
return self.recv_multiGetContentSummary()
def send_multiGetContentSummary(self, ctx, paths):
self._oprot.writeMessageBegin('multiGetContentSummary', TMessageType.CALL, self._seqid)
args = multiGetContentSummary_args()
args.ctx = ctx
args.paths = paths
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_multiGetContentSummary(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = multiGetContentSummary_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "multiGetContentSummary failed: unknown result");
def setQuota(self, ctx, path, namespaceQuota, diskspaceQuota):
"""
Set the quota for a directory.
Quota parameters may have three types of values:
(1) 0 or more: Quota will be set to that value.
(2) QUOTA_DONT_SET: Quota will not be changed,
(3) QUOTA_RESET: Quota will be reset.
Any other value is a runtime error.
Parameters:
- ctx
- path: Path of the directory.
- namespaceQuota: Limit on the number of names in the directory.
- diskspaceQuota: Limit on disk space occupied by all the files in the
directory.
"""
self.send_setQuota(ctx, path, namespaceQuota, diskspaceQuota)
self.recv_setQuota()
def send_setQuota(self, ctx, path, namespaceQuota, diskspaceQuota):
self._oprot.writeMessageBegin('setQuota', TMessageType.CALL, self._seqid)
args = setQuota_args()
args.ctx = ctx
args.path = path
args.namespaceQuota = namespaceQuota
args.diskspaceQuota = diskspaceQuota
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setQuota(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setQuota_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def setReplication(self, ctx, path, replication):
"""
Set replication factor for an existing file.
This call just updates the value of the replication factor. The actual
block replication is not expected to be performed during this method call.
The blocks will be populated or removed in the background as the result of
the routine block maintenance procedures.
Returns true if successful, false if file does not exist or is a
directory.
Parameters:
- ctx
- path: Path of the file.
- replication: New replication factor.
"""
self.send_setReplication(ctx, path, replication)
return self.recv_setReplication()
def send_setReplication(self, ctx, path, replication):
self._oprot.writeMessageBegin('setReplication', TMessageType.CALL, self._seqid)
args = setReplication_args()
args.ctx = ctx
args.path = path
args.replication = replication
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setReplication(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setReplication_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "setReplication failed: unknown result");
def unlink(self, ctx, path, recursive):
"""
Delete a file or directory from the file system.
Any blocks belonging to the deleted files will be garbage-collected.
Parameters:
- ctx
- path: Path of the file or directory.
- recursive: Delete a non-empty directory recursively.
"""
self.send_unlink(ctx, path, recursive)
return self.recv_unlink()
def send_unlink(self, ctx, path, recursive):
self._oprot.writeMessageBegin('unlink', TMessageType.CALL, self._seqid)
args = unlink_args()
args.ctx = ctx
args.path = path
args.recursive = recursive
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_unlink(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = unlink_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "unlink failed: unknown result");
def utime(self, ctx, path, atime, mtime):
"""
Sets the modification and access time of a file or directory.
Setting *one single time parameter* to -1 means that time parameter
must not be set by this call.
Setting *both time parameters* to -1 means both of them must be set to
the current time.
Parameters:
- ctx
- path: Path of the file or directory.
- atime: Access time in milliseconds since 1970-01-01 00:00 UTC
- mtime: Modification time in milliseconds since 1970-01-01 00:00 UTC
"""
self.send_utime(ctx, path, atime, mtime)
self.recv_utime()
def send_utime(self, ctx, path, atime, mtime):
self._oprot.writeMessageBegin('utime', TMessageType.CALL, self._seqid)
args = utime_args()
args.ctx = ctx
args.path = path
args.atime = atime
args.mtime = mtime
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_utime(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = utime_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def datanodeUp(self, name, storage, thriftPort):
"""
Inform the namenode that a datanode process has started.
Parameters:
- name: <host name>:<port number> of the datanode
- storage: the storage id of the datanode
- thriftPort: Thrift port of the datanode
"""
self.send_datanodeUp(name, storage, thriftPort)
self.recv_datanodeUp()
def send_datanodeUp(self, name, storage, thriftPort):
self._oprot.writeMessageBegin('datanodeUp', TMessageType.CALL, self._seqid)
args = datanodeUp_args()
args.name = name
args.storage = storage
args.thriftPort = thriftPort
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_datanodeUp(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = datanodeUp_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def datanodeDown(self, name, storage, thriftPort):
"""
Inform the namenode that a datanode process has stopped.
Parameters:
- name: <host name>:<port number> of the datanode
- storage: the storage id of the datanode
- thriftPort: Thrift port of the datanode
"""
self.send_datanodeDown(name, storage, thriftPort)
self.recv_datanodeDown()
def send_datanodeDown(self, name, storage, thriftPort):
self._oprot.writeMessageBegin('datanodeDown', TMessageType.CALL, self._seqid)
args = datanodeDown_args()
args.name = name
args.storage = storage
args.thriftPort = thriftPort
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_datanodeDown(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = datanodeDown_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def getDelegationToken(self, ctx, renewer):
"""
Get an HDFS delegation token.
Parameters:
- ctx
- renewer
"""
self.send_getDelegationToken(ctx, renewer)
return self.recv_getDelegationToken()
def send_getDelegationToken(self, ctx, renewer):
self._oprot.writeMessageBegin('getDelegationToken', TMessageType.CALL, self._seqid)
args = getDelegationToken_args()
args.ctx = ctx
args.renewer = renewer
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getDelegationToken(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getDelegationToken_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "getDelegationToken failed: unknown result");
class Processor(hadoop.api.common.HadoopServiceBase.Processor, Iface, TProcessor):
def __init__(self, handler):
hadoop.api.common.HadoopServiceBase.Processor.__init__(self, handler)
self._processMap["chmod"] = Processor.process_chmod
self._processMap["chown"] = Processor.process_chown
self._processMap["df"] = Processor.process_df
self._processMap["enterSafeMode"] = Processor.process_enterSafeMode
self._processMap["getBlocks"] = Processor.process_getBlocks
self._processMap["getPreferredBlockSize"] = Processor.process_getPreferredBlockSize
self._processMap["isInSafeMode"] = Processor.process_isInSafeMode
self._processMap["leaveSafeMode"] = Processor.process_leaveSafeMode
self._processMap["ls"] = Processor.process_ls
self._processMap["mkdirhier"] = Processor.process_mkdirhier
self._processMap["refreshNodes"] = Processor.process_refreshNodes
self._processMap["rename"] = Processor.process_rename
self._processMap["reportBadBlocks"] = Processor.process_reportBadBlocks
self._processMap["stat"] = Processor.process_stat
self._processMap["getContentSummary"] = Processor.process_getContentSummary
self._processMap["multiGetContentSummary"] = Processor.process_multiGetContentSummary
self._processMap["setQuota"] = Processor.process_setQuota
self._processMap["setReplication"] = Processor.process_setReplication
self._processMap["unlink"] = Processor.process_unlink
self._processMap["utime"] = Processor.process_utime
self._processMap["datanodeUp"] = Processor.process_datanodeUp
self._processMap["datanodeDown"] = Processor.process_datanodeDown
self._processMap["getDelegationToken"] = Processor.process_getDelegationToken
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_chmod(self, seqid, iprot, oprot):
args = chmod_args()
args.read(iprot)
iprot.readMessageEnd()
result = chmod_result()
try:
self._handler.chmod(args.ctx, args.path, args.perms)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("chmod", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_chown(self, seqid, iprot, oprot):
args = chown_args()
args.read(iprot)
iprot.readMessageEnd()
result = chown_result()
try:
self._handler.chown(args.ctx, args.path, args.owner, args.group)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("chown", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_df(self, seqid, iprot, oprot):
args = df_args()
args.read(iprot)
iprot.readMessageEnd()
result = df_result()
result.success = self._handler.df(args.ctx)
oprot.writeMessageBegin("df", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_enterSafeMode(self, seqid, iprot, oprot):
args = enterSafeMode_args()
args.read(iprot)
iprot.readMessageEnd()
result = enterSafeMode_result()
try:
self._handler.enterSafeMode(args.ctx)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("enterSafeMode", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getBlocks(self, seqid, iprot, oprot):
args = getBlocks_args()
args.read(iprot)
iprot.readMessageEnd()
result = getBlocks_result()
try:
result.success = self._handler.getBlocks(args.ctx, args.path, args.offset, args.length)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("getBlocks", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getPreferredBlockSize(self, seqid, iprot, oprot):
args = getPreferredBlockSize_args()
args.read(iprot)
iprot.readMessageEnd()
result = getPreferredBlockSize_result()
try:
result.success = self._handler.getPreferredBlockSize(args.ctx, args.path)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("getPreferredBlockSize", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_isInSafeMode(self, seqid, iprot, oprot):
args = isInSafeMode_args()
args.read(iprot)
iprot.readMessageEnd()
result = isInSafeMode_result()
try:
result.success = self._handler.isInSafeMode(args.ctx)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("isInSafeMode", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_leaveSafeMode(self, seqid, iprot, oprot):
args = leaveSafeMode_args()
args.read(iprot)
iprot.readMessageEnd()
result = leaveSafeMode_result()
try:
self._handler.leaveSafeMode(args.ctx)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("leaveSafeMode", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_ls(self, seqid, iprot, oprot):
args = ls_args()
args.read(iprot)
iprot.readMessageEnd()
result = ls_result()
try:
result.success = self._handler.ls(args.ctx, args.path)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("ls", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mkdirhier(self, seqid, iprot, oprot):
args = mkdirhier_args()
args.read(iprot)
iprot.readMessageEnd()
result = mkdirhier_result()
try:
result.success = self._handler.mkdirhier(args.ctx, args.path, args.perms)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("mkdirhier", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_refreshNodes(self, seqid, iprot, oprot):
args = refreshNodes_args()
args.read(iprot)
iprot.readMessageEnd()
result = refreshNodes_result()
try:
self._handler.refreshNodes(args.ctx)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("refreshNodes", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rename(self, seqid, iprot, oprot):
args = rename_args()
args.read(iprot)
iprot.readMessageEnd()
result = rename_result()
try:
result.success = self._handler.rename(args.ctx, args.path, args.newPath)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("rename", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_reportBadBlocks(self, seqid, iprot, oprot):
args = reportBadBlocks_args()
args.read(iprot)
iprot.readMessageEnd()
result = reportBadBlocks_result()
try:
self._handler.reportBadBlocks(args.ctx, args.blocks)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("reportBadBlocks", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_stat(self, seqid, iprot, oprot):
args = stat_args()
args.read(iprot)
iprot.readMessageEnd()
result = stat_result()
try:
result.success = self._handler.stat(args.ctx, args.path)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("stat", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getContentSummary(self, seqid, iprot, oprot):
args = getContentSummary_args()
args.read(iprot)
iprot.readMessageEnd()
result = getContentSummary_result()
try:
result.success = self._handler.getContentSummary(args.ctx, args.Path)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("getContentSummary", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_multiGetContentSummary(self, seqid, iprot, oprot):
args = multiGetContentSummary_args()
args.read(iprot)
iprot.readMessageEnd()
result = multiGetContentSummary_result()
try:
result.success = self._handler.multiGetContentSummary(args.ctx, args.paths)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("multiGetContentSummary", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setQuota(self, seqid, iprot, oprot):
args = setQuota_args()
args.read(iprot)
iprot.readMessageEnd()
result = setQuota_result()
try:
self._handler.setQuota(args.ctx, args.path, args.namespaceQuota, args.diskspaceQuota)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("setQuota", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setReplication(self, seqid, iprot, oprot):
args = setReplication_args()
args.read(iprot)
iprot.readMessageEnd()
result = setReplication_result()
try:
result.success = self._handler.setReplication(args.ctx, args.path, args.replication)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("setReplication", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_unlink(self, seqid, iprot, oprot):
args = unlink_args()
args.read(iprot)
iprot.readMessageEnd()
result = unlink_result()
try:
result.success = self._handler.unlink(args.ctx, args.path, args.recursive)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("unlink", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_utime(self, seqid, iprot, oprot):
args = utime_args()
args.read(iprot)
iprot.readMessageEnd()
result = utime_result()
try:
self._handler.utime(args.ctx, args.path, args.atime, args.mtime)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("utime", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_datanodeUp(self, seqid, iprot, oprot):
args = datanodeUp_args()
args.read(iprot)
iprot.readMessageEnd()
result = datanodeUp_result()
self._handler.datanodeUp(args.name, args.storage, args.thriftPort)
oprot.writeMessageBegin("datanodeUp", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_datanodeDown(self, seqid, iprot, oprot):
args = datanodeDown_args()
args.read(iprot)
iprot.readMessageEnd()
result = datanodeDown_result()
self._handler.datanodeDown(args.name, args.storage, args.thriftPort)
oprot.writeMessageBegin("datanodeDown", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getDelegationToken(self, seqid, iprot, oprot):
args = getDelegationToken_args()
args.read(iprot)
iprot.readMessageEnd()
result = getDelegationToken_result()
try:
result.success = self._handler.getDelegationToken(args.ctx, args.renewer)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("getDelegationToken", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
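  # Every process_<method> handler above follows the same generated pattern:
  # read the <method>_args struct from the input protocol, invoke the matching
  # method on self._handler, capture a declared
  # hadoop.api.common.ttypes.IOException in the result's err field, then write
  # the <method>_result struct back as a REPLY message and flush the output
  # transport.  Methods whose Thrift definition declares no exception (e.g.
  # datanodeUp, datanodeDown) are generated without the try/except block.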
# HELPER FUNCTIONS AND STRUCTURES
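# Each service method gets a pair of generated helper structs: <method>_args
# carries the request fields (the RequestContext, when present, sits at field
# id 10) and <method>_result carries either a 'success' value at field id 0 or
# the declared IOException at field id 1.  The thrift_spec tuples feed the
# optional fastbinary C extension; the pure-Python read()/write() methods are
# the fallback used when that extension is unavailable.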
class chmod_args(object):
"""
Attributes:
- ctx
- path: Path of the file or directory.
- perms: New permissions for the file or directory.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.I16, 'perms', None, None, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, perms=None,):
self.ctx = ctx
self.path = path
self.perms = perms
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.perms = iprot.readI16();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('chmod_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.perms is not None:
oprot.writeFieldBegin('perms', TType.I16, 2)
oprot.writeI16(self.perms)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class chmod_result(object):
"""
Attributes:
- err
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, err=None,):
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('chmod_result')
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
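# ---------------------------------------------------------------------------
# Illustrative sketch (hand-written, not Thrift-generated): round-tripping one
# of the helper structs through an in-memory transport.  Assumes the standard
# Thrift Python runtime, where TTransport provides TMemoryBuffer; the
# path/perms values below are made up for the example.
def _example_roundtrip_chmod_args():
  # Build the argument struct the way a generated client stub would.
  args = chmod_args(path='/user/example/data', perms=0755)
  # Serialize into an in-memory buffer with the plain binary protocol
  # (this exercises the pure-Python write() path above).
  write_buffer = TTransport.TMemoryBuffer()
  args.write(TBinaryProtocol.TBinaryProtocol(write_buffer))
  payload = write_buffer.getvalue()
  # Parse the bytes back into a fresh struct and confirm the round trip.
  parsed = chmod_args()
  parsed.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(payload)))
  assert parsed == args
  return payload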
class chown_args(object):
"""
Attributes:
- ctx
   - path: Path to the file or directory.
- owner: New owner.
- group: New group.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.STRING, 'owner', None, None, ), # 2
(3, TType.STRING, 'group', None, None, ), # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, owner=None, group=None,):
self.ctx = ctx
self.path = path
self.owner = owner
self.group = group
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.owner = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.group = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('chown_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.owner is not None:
oprot.writeFieldBegin('owner', TType.STRING, 2)
oprot.writeString(self.owner)
oprot.writeFieldEnd()
if self.group is not None:
oprot.writeFieldBegin('group', TType.STRING, 3)
oprot.writeString(self.group)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class chown_result(object):
"""
Attributes:
- err
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, err=None,):
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('chown_result')
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class df_args(object):
"""
Attributes:
- ctx
"""
thrift_spec = (
None, # 0
None, # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None,):
self.ctx = ctx
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('df_args')
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class df_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.I64,None), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in xrange(_size7):
_elem12 = iprot.readI64();
self.success.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('df_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.I64, len(self.success))
for iter13 in self.success:
oprot.writeI64(iter13)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class enterSafeMode_args(object):
"""
Attributes:
- ctx
"""
thrift_spec = (
None, # 0
None, # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None,):
self.ctx = ctx
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('enterSafeMode_args')
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class enterSafeMode_result(object):
"""
Attributes:
- err
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, err=None,):
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('enterSafeMode_result')
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getBlocks_args(object):
"""
Attributes:
- ctx
- path: Path to the file.
- offset: Offset of the region.
   - length: Length of the region.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.I64, 'offset', None, None, ), # 2
(3, TType.I64, 'length', None, None, ), # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, offset=None, length=None,):
self.ctx = ctx
self.path = path
self.offset = offset
self.length = length
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.offset = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.length = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getBlocks_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.offset is not None:
oprot.writeFieldBegin('offset', TType.I64, 2)
oprot.writeI64(self.offset)
oprot.writeFieldEnd()
if self.length is not None:
oprot.writeFieldBegin('length', TType.I64, 3)
oprot.writeI64(self.length)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getBlocks_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(Block, Block.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in xrange(_size14):
_elem19 = Block()
_elem19.read(iprot)
self.success.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getBlocks_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter20 in self.success:
iter20.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getPreferredBlockSize_args(object):
"""
Attributes:
- ctx
- path: Path to the file.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None,):
self.ctx = ctx
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getPreferredBlockSize_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getPreferredBlockSize_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getPreferredBlockSize_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class isInSafeMode_args(object):
"""
Attributes:
- ctx
"""
thrift_spec = (
None, # 0
None, # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None,):
self.ctx = ctx
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('isInSafeMode_args')
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class isInSafeMode_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('isInSafeMode_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class leaveSafeMode_args(object):
"""
Attributes:
- ctx
"""
thrift_spec = (
None, # 0
None, # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None,):
self.ctx = ctx
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('leaveSafeMode_args')
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class leaveSafeMode_result(object):
"""
Attributes:
- err
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, err=None,):
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('leaveSafeMode_result')
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ls_args(object):
"""
Attributes:
- ctx
- path: Path to the directory.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None,):
self.ctx = ctx
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ls_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ls_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(Stat, Stat.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype24, _size21) = iprot.readListBegin()
for _i25 in xrange(_size21):
_elem26 = Stat()
_elem26.read(iprot)
self.success.append(_elem26)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ls_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter27 in self.success:
iter27.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
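# ---------------------------------------------------------------------------
# Minimal hand-written sketch of calling ls() through the generated client.
# Assumes the Client class defined earlier in this module, a Namenode Thrift
# server reachable at the (hypothetical) host/port below, and that an empty
# RequestContext is acceptable; real deployments may need to populate it.
def _example_ls(host='namenode.example.com', port=10090):
  from thrift.transport import TSocket
  # Open a buffered socket transport and speak the binary protocol over it.
  transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
  transport.open()
  try:
    client = Client(TBinaryProtocol.TBinaryProtocol(transport))
    ctx = hadoop.api.common.ttypes.RequestContext()
    # ls() returns a list of Stat structs, one per directory entry.
    for entry in client.ls(ctx, '/user/example'):
      print entry
  finally:
    transport.close()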
class mkdirhier_args(object):
"""
Attributes:
- ctx
- path: Path to the directory.
- perms: Access permissions of the directory.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.I16, 'perms', None, None, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, perms=None,):
self.ctx = ctx
self.path = path
self.perms = perms
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.perms = iprot.readI16();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mkdirhier_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.perms is not None:
oprot.writeFieldBegin('perms', TType.I16, 2)
oprot.writeI16(self.perms)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mkdirhier_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mkdirhier_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class refreshNodes_args(object):
"""
Attributes:
- ctx
"""
thrift_spec = (
None, # 0
None, # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None,):
self.ctx = ctx
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('refreshNodes_args')
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class refreshNodes_result(object):
"""
Attributes:
- err
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, err=None,):
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('refreshNodes_result')
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rename_args(object):
"""
Attributes:
- ctx
- path: Path to existing file or directory.
- newPath: New path.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.STRING, 'newPath', None, None, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, newPath=None,):
self.ctx = ctx
self.path = path
self.newPath = newPath
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.newPath = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rename_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.newPath is not None:
oprot.writeFieldBegin('newPath', TType.STRING, 2)
oprot.writeString(self.newPath)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rename_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rename_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class reportBadBlocks_args(object):
"""
Attributes:
- ctx
- blocks: List of corrupted blocks.
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'blocks', (TType.STRUCT,(Block, Block.thrift_spec)), None, ), # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, blocks=None,):
self.ctx = ctx
self.blocks = blocks
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.LIST:
self.blocks = []
(_etype31, _size28) = iprot.readListBegin()
for _i32 in xrange(_size28):
_elem33 = Block()
_elem33.read(iprot)
self.blocks.append(_elem33)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('reportBadBlocks_args')
if self.blocks is not None:
oprot.writeFieldBegin('blocks', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.blocks))
for iter34 in self.blocks:
iter34.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class reportBadBlocks_result(object):
"""
Attributes:
- err
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, err=None,):
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('reportBadBlocks_result')
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class stat_args(object):
"""
Attributes:
- ctx
- path: Path of the file or directory.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None,):
self.ctx = ctx
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('stat_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class stat_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (Stat, Stat.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Stat()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('stat_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getContentSummary_args(object):
"""
Attributes:
- ctx
- Path
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'Path', None, None, ), # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, Path=None,):
self.ctx = ctx
self.Path = Path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.Path = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getContentSummary_args')
if self.Path is not None:
oprot.writeFieldBegin('Path', TType.STRING, 1)
oprot.writeString(self.Path)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getContentSummary_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ContentSummary, ContentSummary.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ContentSummary()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getContentSummary_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class multiGetContentSummary_args(object):
"""
Attributes:
- ctx
- paths
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'paths', (TType.STRING,None), None, ), # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, paths=None,):
self.ctx = ctx
self.paths = paths
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.LIST:
self.paths = []
(_etype38, _size35) = iprot.readListBegin()
for _i39 in xrange(_size35):
_elem40 = iprot.readString();
self.paths.append(_elem40)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('multiGetContentSummary_args')
if self.paths is not None:
oprot.writeFieldBegin('paths', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.paths))
for iter41 in self.paths:
oprot.writeString(iter41)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class multiGetContentSummary_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(ContentSummary, ContentSummary.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype45, _size42) = iprot.readListBegin()
for _i46 in xrange(_size42):
_elem47 = ContentSummary()
_elem47.read(iprot)
self.success.append(_elem47)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('multiGetContentSummary_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter48 in self.success:
iter48.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setQuota_args(object):
"""
Attributes:
- ctx
- path: Path of the directory.
- namespaceQuota: Limit on the number of names in the directory.
- diskspaceQuota: Limit on disk space occupied by all the files in the
directory.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.I64, 'namespaceQuota', None, None, ), # 2
(3, TType.I64, 'diskspaceQuota', None, None, ), # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, namespaceQuota=None, diskspaceQuota=None,):
self.ctx = ctx
self.path = path
self.namespaceQuota = namespaceQuota
self.diskspaceQuota = diskspaceQuota
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.namespaceQuota = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.diskspaceQuota = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setQuota_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.namespaceQuota is not None:
oprot.writeFieldBegin('namespaceQuota', TType.I64, 2)
oprot.writeI64(self.namespaceQuota)
oprot.writeFieldEnd()
if self.diskspaceQuota is not None:
oprot.writeFieldBegin('diskspaceQuota', TType.I64, 3)
oprot.writeI64(self.diskspaceQuota)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
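# --- Hand-written illustration, not part of the Thrift-generated code ---
# setQuota_args carries two plain 64-bit limits: namespaceQuota caps the number of
# names under the directory and diskspaceQuota caps the bytes occupied by its files.
# The path and limits below (100000 names, 10 GiB) are hypothetical; ctx would
# normally be a RequestContext supplied by the caller and is left as None here.
def _example_build_setQuota_args():
  return setQuota_args(ctx=None,
                       path='/user/alice',
                       namespaceQuota=100000,
                       diskspaceQuota=10 * 1024 ** 3)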
class setQuota_result(object):
"""
Attributes:
- err
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, err=None,):
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setQuota_result')
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setReplication_args(object):
"""
Attributes:
- ctx
- path: Path of the file.
- replication: New replication factor.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.I16, 'replication', None, None, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, replication=None,):
self.ctx = ctx
self.path = path
self.replication = replication
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.replication = iprot.readI16();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setReplication_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.replication is not None:
oprot.writeFieldBegin('replication', TType.I16, 2)
oprot.writeI16(self.replication)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
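# --- Hand-written illustration, not part of the Thrift-generated code ---
# replication is a 16-bit integer giving the new replication factor for an existing
# file. The path and the factor of 3 below are hypothetical placeholders.
def _example_build_setReplication_args():
  return setReplication_args(ctx=None, path='/user/alice/data.txt', replication=3)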
class setReplication_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setReplication_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class unlink_args(object):
"""
Attributes:
- ctx
- path: Path of the file or directory.
- recursive: Delete a non-empty directory recursively.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.BOOL, 'recursive', None, None, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, recursive=None,):
self.ctx = ctx
self.path = path
self.recursive = recursive
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.recursive = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('unlink_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.recursive is not None:
oprot.writeFieldBegin('recursive', TType.BOOL, 2)
oprot.writeBool(self.recursive)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
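# --- Hand-written illustration, not part of the Thrift-generated code ---
# recursive=True asks the namenode to delete a non-empty directory together with
# everything under it; without it, unlinking a non-empty directory fails. The path
# below is a hypothetical placeholder and ctx is left as None.
def _example_build_unlink_args():
  return unlink_args(ctx=None, path='/user/alice/tmp', recursive=True)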
class unlink_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('unlink_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class utime_args(object):
"""
Attributes:
- ctx
- path: Path of the file or directory.
- atime: Access time in milliseconds since 1970-01-01 00:00 UTC
- mtime: Modification time in milliseconds since 1970-01-01 00:00 UTC
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.I64, 'atime', None, None, ), # 2
(3, TType.I64, 'mtime', None, None, ), # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, path=None, atime=None, mtime=None,):
self.ctx = ctx
self.path = path
self.atime = atime
self.mtime = mtime
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.atime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.mtime = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('utime_args')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.atime is not None:
oprot.writeFieldBegin('atime', TType.I64, 2)
oprot.writeI64(self.atime)
oprot.writeFieldEnd()
if self.mtime is not None:
oprot.writeFieldBegin('mtime', TType.I64, 3)
oprot.writeI64(self.mtime)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
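# --- Hand-written illustration, not part of the Thrift-generated code ---
# atime and mtime are expressed in milliseconds since the Unix epoch, so a
# time.time() value (seconds as a float) has to be scaled by 1000 and truncated.
# The path below is a hypothetical placeholder and ctx is left as None.
def _example_build_utime_args():
  import time
  now_ms = int(time.time() * 1000)
  return utime_args(ctx=None, path='/user/alice/data.txt', atime=now_ms, mtime=now_ms)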
class utime_result(object):
"""
Attributes:
- err
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, err=None,):
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('utime_result')
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class datanodeUp_args(object):
"""
Attributes:
- name: <host name>:<port number> of the datanode
- storage: the storage id of the datanode
- thriftPort: Thrift port of the datanode
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'storage', None, None, ), # 2
(3, TType.I32, 'thriftPort', None, None, ), # 3
)
def __init__(self, name=None, storage=None, thriftPort=None,):
self.name = name
self.storage = storage
self.thriftPort = thriftPort
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.storage = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.thriftPort = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('datanodeUp_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.storage is not None:
oprot.writeFieldBegin('storage', TType.STRING, 2)
oprot.writeString(self.storage)
oprot.writeFieldEnd()
if self.thriftPort is not None:
oprot.writeFieldBegin('thriftPort', TType.I32, 3)
oprot.writeI32(self.thriftPort)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
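# --- Hand-written illustration, not part of the Thrift-generated code ---
# 'name' is the datanode's "<host name>:<port number>" identifier, 'storage' its
# storage id, and 'thriftPort' the port of the datanode's own Thrift service.
# All three values below are hypothetical placeholders.
def _example_build_datanodeUp_args():
  return datanodeUp_args(name='dn01.example.com:50010',
                         storage='DS-1234567890-dn01-50010',
                         thriftPort=50025)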
class datanodeUp_result(object):
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('datanodeUp_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class datanodeDown_args(object):
"""
Attributes:
- name: <host name>:<port number> of the datanode
- storage: the storage id of the datanode
- thriftPort: Thrift port of the datanode
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'storage', None, None, ), # 2
(3, TType.I32, 'thriftPort', None, None, ), # 3
)
def __init__(self, name=None, storage=None, thriftPort=None,):
self.name = name
self.storage = storage
self.thriftPort = thriftPort
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.storage = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.thriftPort = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('datanodeDown_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.storage is not None:
oprot.writeFieldBegin('storage', TType.STRING, 2)
oprot.writeString(self.storage)
oprot.writeFieldEnd()
if self.thriftPort is not None:
oprot.writeFieldBegin('thriftPort', TType.I32, 3)
oprot.writeI32(self.thriftPort)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class datanodeDown_result(object):
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('datanodeDown_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getDelegationToken_args(object):
"""
Attributes:
- ctx
- renewer
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'renewer', None, None, ), # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
None, # 9
(10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
)
def __init__(self, ctx=None, renewer=None,):
self.ctx = ctx
self.renewer = renewer
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 10:
if ftype == TType.STRUCT:
self.ctx = hadoop.api.common.ttypes.RequestContext()
self.ctx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRING:
self.renewer = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getDelegationToken_args')
if self.renewer is not None:
oprot.writeFieldBegin('renewer', TType.STRING, 1)
oprot.writeString(self.renewer)
oprot.writeFieldEnd()
if self.ctx is not None:
oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
self.ctx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getDelegationToken_result(object):
"""
Attributes:
- success
- err
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (hadoop.api.common.ttypes.ThriftDelegationToken, hadoop.api.common.ttypes.ThriftDelegationToken.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = hadoop.api.common.ttypes.ThriftDelegationToken()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = hadoop.api.common.ttypes.IOException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getDelegationToken_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
apache-2.0
|
willthames/ansible
|
lib/ansible/utils/color.py
|
32
|
3891
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from ansible import constants as C
ANSIBLE_COLOR = True
if C.ANSIBLE_NOCOLOR:
ANSIBLE_COLOR = False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR = False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR = False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR = False
if C.ANSIBLE_FORCE_COLOR:
ANSIBLE_COLOR = True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': u'0;30', 'bright gray': u'0;37',
'blue': u'0;34', 'white': u'1;37',
'green': u'0;32', 'bright blue': u'1;34',
'cyan': u'0;36', 'bright green': u'1;32',
'red': u'0;31', 'bright cyan': u'1;36',
'purple': u'0;35', 'bright red': u'1;31',
'yellow': u'0;33', 'bright purple': u'1;35',
'dark gray': u'1;30', 'bright yellow': u'1;33',
'magenta': u'0;35', 'bright magenta': u'1;35',
'normal': u'0',
}
def parsecolor(color):
"""SGR parameter string for the specified color name."""
matches = re.match(r"color(?P<color>[0-9]+)"
r"|(?P<rgb>rgb(?P<red>[0-5])(?P<green>[0-5])(?P<blue>[0-5]))"
r"|gray(?P<gray>[0-9]+)", color)
if not matches:
return codeCodes[color]
if matches.group('color'):
return u'38;5;%d' % int(matches.group('color'))
if matches.group('rgb'):
return u'38;5;%d' % (16 + 36 * int(matches.group('red')) +
6 * int(matches.group('green')) +
int(matches.group('blue')))
if matches.group('gray'):
return u'38;5;%d' % (232 + int(matches.group('gray')))
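# Worked examples for parsecolor() (hand-written annotation, not part of the original
# module): 'red' falls through to the codeCodes table and yields '0;31'; 'color200'
# yields '38;5;200'; 'rgb505' maps into the 6x6x6 color cube as 16 + 36*5 + 6*0 + 5 = 201,
# i.e. '38;5;201'; 'gray10' maps onto the grayscale ramp as 232 + 10 = 242, i.e. '38;5;242'.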
def stringc(text, color):
"""String in color."""
if ANSIBLE_COLOR:
color_code = parsecolor(color)
return "\n".join([u"\033[%sm%s\033[0m" % (color_code, t) for t in text.split('\n')])
else:
return text
def colorize(lead, num, color):
""" Print 'lead' = 'num' in 'color' """
s = u"%s=%-4s" % (lead, str(num))
if num != 0 and ANSIBLE_COLOR and color is not None:
s = stringc(s, color)
return s
def hostcolor(host, stats, color=True):
    """Return the host name padded and, when enabled, colorized according to its recap
    stats; the wider padding on the colored branches compensates for the invisible
    ANSI escape characters."""
if ANSIBLE_COLOR and color:
if stats['failures'] != 0 or stats['unreachable'] != 0:
return u"%-37s" % stringc(host, C.COLOR_ERROR)
elif stats['changed'] != 0:
return u"%-37s" % stringc(host, C.COLOR_CHANGED)
else:
return u"%-37s" % stringc(host, C.COLOR_OK)
return u"%-26s" % host
|
gpl-3.0
|
rdio/sentry
|
src/sentry/migrations/0103_ensure_non_empty_slugs.py
|
1
|
26406
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    def forwards(self, orm):
        "Generate a slug for any team or project whose slug is empty or null."
from sentry.constants import RESERVED_TEAM_SLUGS
from sentry.models import slugify_instance
for team in orm['sentry.Team'].objects.filter(models.Q(slug='') | models.Q(slug__isnull=True)):
slugify_instance(team, team.name, reserved=RESERVED_TEAM_SLUGS)
team.save()
for project in orm['sentry.Project'].objects.filter(models.Q(slug='') | models.Q(slug__isnull=True)):
slugify_instance(project, project.name, reserved=RESERVED_TEAM_SLUGS)
project.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'})
},
u'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
u'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"})
},
u'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
},
u'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
},
u'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
},
u'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
|
bsd-3-clause
|
damdam-s/hr
|
__unported__/hr_policy_presence/hr_policy_presence.py
|
27
|
3008
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import fields, orm
class policy_presence(orm.Model):
_name = 'hr.policy.presence'
_columns = {
'name': fields.char('Name', size=128, required=True),
'date': fields.date('Effective Date', required=True),
'work_days_per_month': fields.integer(
'Working Days/Month', required=True),
'line_ids': fields.one2many(
'hr.policy.line.presence', 'policy_id', 'Policy Lines'),
}
_defaults = {
'work_days_per_month': 26,
}
# Return records with latest date first
_order = 'date desc'
def get_codes(self, cr, uid, idx, context=None):
        return [
            (line.code, line.name, line.type, line.rate, line.duration)
            for line in self.browse(cr, uid, idx, context=context).line_ids
        ]
class policy_line_presence(orm.Model):
_name = 'hr.policy.line.presence'
_columns = {
'name': fields.char('Name', size=64, required=True),
'policy_id': fields.many2one('hr.policy.presence', 'Policy'),
'code': fields.char(
'Code', required=True, help="Use this code in the salary rules."),
'rate': fields.float(
'Rate', required=True, help='Multiplier of employee wage.'),
'type': fields.selection([('normal', 'Normal Working Hours'),
('holiday', 'Holidays'),
('restday', 'Rest Days')],
'Type', required=True),
'active_after': fields.integer(
'Active After',
required=True,
help='Minutes after first punch of the day in which policy will '
'take effect.'
),
'duration': fields.integer(
'Duration', required=True, help="In minutes.")
}
_defaults = {
'rate': 1.0,
}
class policy_group(orm.Model):
_name = 'hr.policy.group'
_inherit = 'hr.policy.group'
_columns = {
'presence_policy_ids': fields.many2many(
'hr.policy.presence', 'hr_policy_group_presence_rel',
'group_id', 'presence_id', 'Presence Policy'),
}
|
agpl-3.0
|
kaplun/ops
|
modules/bibfield/lib/functions/get_number_of_reviews.py
|
24
|
1049
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
def get_number_of_reviews(recid):
"""
Returns number of reviews for given record.
    @param recid: the record ID
@return: Number of reviews
"""
from invenio.webcommentadminlib import get_nb_reviews
if recid:
return get_nb_reviews(recid)
|
gpl-2.0
|
allanino/nupic
|
nupic/data/generators/pattern_machine.py
|
13
|
5172
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utilities for generating and manipulating patterns, for use in
experimentation and tests.
"""
import numpy
class PatternMachine(object):
"""
Base pattern machine class.
"""
def __init__(self,
n,
w,
num=100,
seed=42):
"""
@param n (int) Number of available bits in pattern
@param w (int/list) Number of on bits in pattern
If list, each pattern will have a `w` randomly
selected from the list.
@param num (int) Number of available patterns
"""
# Save member variables
self._n = n
self._w = w
self._num = num
# Initialize member variables
self._random = numpy.random.RandomState(seed)
self._patterns = dict()
self._generate()
def get(self, number):
"""
Return a pattern for a number.
@param number (int) Number of pattern
@return (set) Indices of on bits
"""
if not number in self._patterns:
raise IndexError("Invalid number")
return self._patterns[number]
def addNoise(self, bits, amount):
"""
Add noise to pattern.
@param bits (set) Indices of on bits
@param amount (float) Probability of switching an on bit with a random bit
@return (set) Indices of on bits in noisy pattern
"""
newBits = set()
for bit in bits:
if self._random.rand() < amount:
newBits.add(self._random.randint(self._n))
else:
newBits.add(bit)
return newBits
def numbersForBit(self, bit):
"""
Return the set of pattern numbers that match a bit.
@param bit (int) Index of bit
@return (set) Indices of numbers
"""
if bit >= self._n:
raise IndexError("Invalid bit")
numbers = set()
for index, pattern in self._patterns.iteritems():
if bit in pattern:
numbers.add(index)
return numbers
def numberMapForBits(self, bits):
"""
Return a map from number to matching on bits,
for all numbers that match a set of bits.
@param bits (set) Indices of bits
@return (dict) Mapping from number => on bits.
"""
numberMap = dict()
for bit in bits:
numbers = self.numbersForBit(bit)
for number in numbers:
if not number in numberMap:
numberMap[number] = set()
numberMap[number].add(bit)
return numberMap
def prettyPrintPattern(self, bits, verbosity=1):
"""
Pretty print a pattern.
@param bits (set) Indices of on bits
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
numberMap = self.numberMapForBits(bits)
text = ""
numberList = []
numberItems = sorted(numberMap.iteritems(),
key=lambda (number, bits): len(bits),
reverse=True)
for number, bits in numberItems:
if verbosity > 2:
strBits = [str(n) for n in bits]
numberText = "{0} (bits: {1})".format(number, ",".join(strBits))
elif verbosity > 1:
numberText = "{0} ({1} bits)".format(number, len(bits))
else:
numberText = str(number)
numberList.append(numberText)
text += "[{0}]".format(", ".join(numberList))
return text
def _generate(self):
"""
Generates set of random patterns.
"""
candidates = range(self._n)
for i in xrange(self._num):
self._random.shuffle(candidates)
pattern = candidates[0:self._getW()]
self._patterns[i] = set(pattern)
def _getW(self):
"""
Gets a value of `w` for use in generating a pattern.
"""
w = self._w
if type(w) is list:
return w[self._random.randint(len(w))]
else:
return w
class ConsecutivePatternMachine(PatternMachine):
"""
Pattern machine class that generates patterns with non-overlapping,
consecutive on bits.
"""
def _generate(self):
"""
Generates set of consecutive patterns.
"""
n = self._n
w = self._w
assert type(w) is int, "List for w not supported"
for i in xrange(n / w):
pattern = set(xrange(i * w, (i+1) * w))
self._patterns[i] = pattern
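# Hedged usage sketch (illustration only, not part of the upstream module):
# build a machine, fetch a stored pattern, and perturb it with addNoise.
if __name__ == "__main__":
  pm = PatternMachine(1024, 20, num=50)
  pattern = pm.get(0)                  # set of 20 on-bit indices
  noisy = pm.addNoise(pattern, 0.05)   # each on bit moves with probability 0.05
  print pm.prettyPrintPattern(noisy, verbosity=2)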
|
agpl-3.0
|
buguen/pylayers
|
pylayers/location/geometric/constraints/exclude.py
|
1
|
3165
|
# -*- coding:Utf-8 -*-
#####################################################################
#This file is part of RGPA.
#Foobar is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#Foobar is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
#authors :
#Nicolas AMIOT : [email protected]
#Bernard UGUEN : [email protected]
#Mohamed LAARAIEDH : [email protected]
#####################################################################
import numpy as np
import scipy as sp
from pylayers.location.geometric.util.boxn import *
from pylayers.location.geometric.constraints.constraint import *
class Exclude(Constraint):
"""
"""
def __init__(self,nodes):
"""
Id : Cell Id
p : Cell position
v : Cell direction
alpha : sector angle
Rmax : Cell Radius (m)
"""
self.std = 1.0
self.vcw = 3.0
self.obsolete=False
self.usable=True
self.visible=True
box = BoxN(np.vstack((np.min(nodes,axis=0),np.max(nodes,axis=0))),ndim=np.shape(nodes)[1])
self.p = box.ctr
Constraint.__init__(self,'Exclude',self.p)
self.lbox = LBoxN([box],ndim=np.shape(self.p)[0])
def annulus_bound(self):
"""
annulus_bound():
Compute the minimum and maximum distance of the enclosing annulus of the constraint for a given self.vcw
"""
pass
def rescale(self):
"""
"""
pass
def valid(self,b):
"""
valid(b) : check if box b is valid for the given constraint
        A box is valid if it is not invalid
A box is not valid if all distances are greater than rangemax
or all distances are less than rangemin
"""
p0 = self.lbox.box[0]
eps = 0.00000000001
i=b.intersect(p0)
if np.sum(i.bd == b.bd) > 5:
return(True)
elif i.vol < eps :#(bmax<cmin)|(bmin>cmax):
return('out')
else :
return(False)
def valid_v(self,v):
"""
"""
DDbound = []
DDbound.append(np.sum(v>=self.lbox.bd[0,:],axis=1)>2)
DDbound.append(np.sum(v<=self.lbox.bd[1,:],axis=1)>2)
return DDbound
def limit_valid_v(self,v):
"""
"""
return v[np.nonzero( (np.sum(v>=self.lbox.bd[0,:],axis=1)>2) & (np.sum(v<=self.lbox.bd[1,:],axis=1)>2) )]
def rescale(self,vcw):
return True
|
lgpl-3.0
|
toshywoshy/ansible
|
lib/ansible/modules/cloud/vultr/vultr_startup_script.py
|
21
|
7337
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_startup_script
short_description: Manages startup scripts on Vultr.
description:
- Create, update and remove startup scripts.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- The script name.
required: true
type: str
script_type:
description:
- The script type, can not be changed once created.
default: boot
choices: [ boot, pxe ]
aliases: [ type ]
type: str
script:
description:
- The script source code.
- Required if I(state=present).
type: str
state:
description:
- State of the script.
default: present
choices: [ present, absent ]
type: str
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: ensure a pxe script exists, source from a file
vultr_startup_script:
name: my_web_script
script_type: pxe
script: "{{ lookup('file', 'path/to/script') }}"
- name: ensure a boot script exists
vultr_startup_script:
name: vultr_startup_script
script: "#!/bin/bash\necho Hello World > /root/hello"
- name: ensure a script is absent
vultr_startup_script:
name: my_web_script
state: absent
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_startup_script:
description: Response from Vultr API
returned: success
type: complex
contains:
id:
description: ID of the startup script.
returned: success
type: str
sample: 249395
name:
description: Name of the startup script.
returned: success
type: str
sample: my startup script
script:
description: The source code of the startup script.
returned: success
type: str
sample: "#!/bin/bash\necho Hello World > /root/hello"
script_type:
description: The type of the startup script.
returned: success
type: str
sample: pxe
date_created:
description: Date the startup script was created.
returned: success
type: str
sample: "2017-08-26 12:47:48"
date_modified:
description: Date the startup script was modified.
returned: success
type: str
sample: "2017-08-26 12:47:48"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrStartupScript(Vultr):
def __init__(self, module):
super(AnsibleVultrStartupScript, self).__init__(module, "vultr_startup_script")
self.returns = {
'SCRIPTID': dict(key='id'),
'type': dict(key='script_type'),
'name': dict(),
'script': dict(),
'date_created': dict(),
'date_modified': dict(),
}
def get_script(self):
scripts = self.api_query(path="/v1/startupscript/list")
name = self.module.params.get('name')
if scripts:
for script_id, script_data in scripts.items():
if script_data.get('name') == name:
return script_data
return {}
def present_script(self):
script = self.get_script()
if not script:
script = self._create_script(script)
else:
script = self._update_script(script)
return script
def _create_script(self, script):
self.result['changed'] = True
data = {
'name': self.module.params.get('name'),
'script': self.module.params.get('script'),
'type': self.module.params.get('script_type'),
}
self.result['diff']['before'] = {}
self.result['diff']['after'] = data
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/create",
method="POST",
data=data
)
script = self.get_script()
return script
def _update_script(self, script):
if script['script'] != self.module.params.get('script'):
self.result['changed'] = True
data = {
'SCRIPTID': script['SCRIPTID'],
'script': self.module.params.get('script'),
}
self.result['diff']['before'] = script
self.result['diff']['after'] = script.copy()
self.result['diff']['after'].update(data)
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/update",
method="POST",
data=data
)
script = self.get_script()
return script
def absent_script(self):
script = self.get_script()
if script:
self.result['changed'] = True
data = {
'SCRIPTID': script['SCRIPTID'],
}
self.result['diff']['before'] = script
self.result['diff']['after'] = {}
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/destroy",
method="POST",
data=data
)
return script
def main():
argument_spec = vultr_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
script=dict(type='str',),
script_type=dict(type='str', default='boot', choices=['boot', 'pxe'], aliases=['type']),
state=dict(type='str', choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['script']),
],
supports_check_mode=True,
)
vultr_script = AnsibleVultrStartupScript(module)
if module.params.get('state') == "absent":
script = vultr_script.absent_script()
else:
script = vultr_script.present_script()
result = vultr_script.get_result(script)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
askulkarni2/ansible-modules-core
|
windows/win_file.py
|
42
|
2574
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: win_file
version_added: "1.8"
short_description: Creates, touches or removes files or directories.
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
Unlike M(file), does not modify ownership, permissions or manipulate links.
notes:
- See also M(win_copy), M(win_template), M(copy), M(template), M(assemble)
requirements: [ ]
author: "Jon Hawkesworth (@jhawkesworth)"
options:
path:
description:
- 'path to the file being managed. Aliases: I(dest), I(name)'
required: true
default: []
aliases: ['dest', 'name']
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior. If C(absent),
directories will be recursively deleted, and files will be removed.
If C(touch), an empty file will be created if the c(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way `touch` works from the command line).
required: false
default: file
choices: [ file, directory, touch, absent ]
'''
EXAMPLES = '''
# create a file
- win_file: path=C:\\temp\\foo.conf
# touch a file (creates if not present, updates modification time if present)
- win_file: path=C:\\temp\\foo.conf state=touch
# remove a file, if present
- win_file: path=C:\\temp\\foo.conf state=absent
# create directory structure
- win_file: path=C:\\temp\\folder\\subfolder state=directory
# remove directory structure
- win_file: path=C:\\temp state=absent
'''
|
gpl-3.0
|
ckirby/django
|
tests/auth_tests/test_tokens.py
|
297
|
2551
|
import unittest
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.test import TestCase
from django.utils.six import PY3
class TokenGeneratorTest(TestCase):
def test_make_token(self):
"""
Ensure that we can make a token and that it is valid
"""
user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertTrue(p0.check_token(user, tk1))
def test_10265(self):
"""
Ensure that the token generated for a user created in the same request
will work correctly.
"""
# See ticket #10265
user = User.objects.create_user('comebackkid', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
reload = User.objects.get(username='comebackkid')
tk2 = p0.make_token(reload)
self.assertEqual(tk1, tk2)
def test_timeout(self):
"""
Ensure we can use the token after n days, but no greater.
"""
# Uses a mocked version of PasswordResetTokenGenerator so we can change
# the value of 'today'
class Mocked(PasswordResetTokenGenerator):
def __init__(self, today):
self._today_val = today
def _today(self):
return self._today_val
user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
self.assertTrue(p1.check_token(user, tk1))
p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
self.assertFalse(p2.check_token(user, tk1))
@unittest.skipIf(PY3, "Unnecessary test with Python 3")
def test_date_length(self):
"""
Make sure we don't allow overly long dates, causing a potential DoS.
"""
user = User.objects.create_user('ima1337h4x0r', '[email protected]', 'p4ssw0rd')
p0 = PasswordResetTokenGenerator()
# This will put a 14-digit base36 timestamp into the token, which is too large.
self.assertRaises(ValueError,
p0._make_token_with_timestamp,
user, 175455491841851871349)
|
bsd-3-clause
|
evancich/apm_motor
|
modules/waf/waflib/extras/fsc.py
|
4
|
1899
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2011 (ita)
"""
Experimental F# stuff
FSC="mono /path/to/fsc.exe" waf configure build
"""
from waflib import Utils, Task
from waflib.TaskGen import before_method, after_method, feature
from waflib.Tools import ccroot, cs
ccroot.USELIB_VARS['fsc'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES'])
@feature('fs')
@before_method('process_source')
def apply_fsc(self):
cs_nodes = []
no_nodes = []
for x in self.to_nodes(self.source):
if x.name.endswith('.fs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source = no_nodes
bintype = getattr(self, 'type', self.gen.endswith('.dll') and 'library' or 'exe')
self.cs_task = tsk = self.create_task('fsc', cs_nodes, self.path.find_or_declare(self.gen))
tsk.env.CSTYPE = '/target:%s' % bintype
tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath()
inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}')
if inst_to:
# note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically
mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644)
self.install_task = self.bld.install_files(inst_to, self.cs_task.outputs[:], env=self.env, chmod=mod)
feature('fs')(cs.use_cs)
after_method('apply_fsc')(cs.use_cs)
feature('fs')(cs.debug_cs)
after_method('apply_fsc', 'use_cs')(cs.debug_cs)
class fsc(Task.Task):
"""
Compile F# files
"""
color = 'YELLOW'
run_str = '${FSC} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def configure(conf):
"""
Find a F# compiler, set the variable FSC for the compiler and FS_NAME (mono or fsc)
"""
conf.find_program(['fsc.exe', 'fsharpc'], var='FSC')
conf.env.ASS_ST = '/r:%s'
conf.env.RES_ST = '/resource:%s'
conf.env.FS_NAME = 'fsc'
if str(conf.env.FSC).lower().find('fsharpc') > -1:
conf.env.FS_NAME = 'mono'
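# Hedged wscript sketch (illustration only; the target names are assumed): a
# build rule that exercises the 'fs' feature defined above. 'gen' names the
# output assembly, and apply_fsc derives exe/dll handling from it unless a
# 'type' keyword is given.
#
#   def build(bld):
#       bld(features='fs', source='main.fs helpers.fs', gen='app.exe')
#       bld(features='fs', source='core.fs', gen='core.dll', type='library')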
|
gpl-3.0
|
yuchuanzhen/shadowsocks
|
shadowsocks/manager.py
|
925
|
9692
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506
STAT_SEND_LIMIT = 100
class Manager(object):
def __init__(self, config):
self._config = config
self._relays = {} # (tcprelay, udprelay)
self._loop = eventloop.EventLoop()
self._dns_resolver = asyncdns.DNSResolver()
self._dns_resolver.add_to_loop(self._loop)
self._statistics = collections.defaultdict(int)
self._control_client_addr = None
try:
manager_address = config['manager_address']
if ':' in manager_address:
addr = manager_address.rsplit(':', 1)
addr = addr[0], int(addr[1])
addrs = socket.getaddrinfo(addr[0], addr[1])
if addrs:
family = addrs[0][0]
else:
logging.error('invalid address: %s', manager_address)
exit(1)
else:
addr = manager_address
family = socket.AF_UNIX
self._control_socket = socket.socket(family,
socket.SOCK_DGRAM)
self._control_socket.bind(addr)
self._control_socket.setblocking(False)
except (OSError, IOError) as e:
logging.error(e)
logging.error('can not bind to manager address')
exit(1)
self._loop.add(self._control_socket,
eventloop.POLL_IN, self)
self._loop.add_periodic(self.handle_periodic)
port_password = config['port_password']
del config['port_password']
for port, password in port_password.items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
self.add_port(a_config)
def add_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.error("server already exists at %s:%d" % (config['server'],
port))
return
logging.info("adding server at %s:%d" % (config['server'], port))
t = tcprelay.TCPRelay(config, self._dns_resolver, False,
self.stat_callback)
u = udprelay.UDPRelay(config, self._dns_resolver, False,
self.stat_callback)
t.add_to_loop(self._loop)
u.add_to_loop(self._loop)
self._relays[port] = (t, u)
def remove_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.info("removing server at %s:%d" % (config['server'], port))
t, u = servers
t.close(next_tick=False)
u.close(next_tick=False)
del self._relays[port]
else:
logging.error("server not exist at %s:%d" % (config['server'],
port))
def handle_event(self, sock, fd, event):
if sock == self._control_socket and event == eventloop.POLL_IN:
data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
parsed = self._parse_command(data)
if parsed:
command, config = parsed
a_config = self._config.copy()
if config:
# let the command override the configuration file
a_config.update(config)
if 'server_port' not in a_config:
logging.error('can not find server_port in config')
else:
if command == 'add':
self.add_port(a_config)
self._send_control_data(b'ok')
elif command == 'remove':
self.remove_port(a_config)
self._send_control_data(b'ok')
elif command == 'ping':
self._send_control_data(b'pong')
else:
logging.error('unknown command %s', command)
def _parse_command(self, data):
# commands:
# add: {"server_port": 8000, "password": "foobar"}
        # remove: {"server_port": 8000}
data = common.to_str(data)
parts = data.split(':', 1)
if len(parts) < 2:
return data, None
command, config_json = parts
try:
config = shell.parse_json_in_str(config_json)
return command, config
except Exception as e:
logging.error(e)
return None
def stat_callback(self, port, data_len):
self._statistics[port] += data_len
def handle_periodic(self):
r = {}
i = 0
def send_data(data_dict):
if data_dict:
# use compact JSON format (without space)
data = common.to_bytes(json.dumps(data_dict,
separators=(',', ':')))
self._send_control_data(b'stat: ' + data)
for k, v in self._statistics.items():
r[k] = v
i += 1
# split the data into segments that fit in UDP packets
if i >= STAT_SEND_LIMIT:
send_data(r)
r.clear()
send_data(r)
self._statistics.clear()
def _send_control_data(self, data):
if self._control_client_addr:
try:
self._control_socket.sendto(data, self._control_client_addr)
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
def run(self):
self._loop.run()
def run(config):
Manager(config).run()
def test():
import time
import threading
import struct
from shadowsocks import encrypt
logging.basicConfig(level=5,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
enc = []
eventloop.TIMEOUT_PRECISION = 1
def run_server():
config = {
'server': '127.0.0.1',
'local_port': 1081,
'port_password': {
'8381': 'foobar1',
'8382': 'foobar2'
},
'method': 'aes-256-cfb',
'manager_address': '127.0.0.1:6001',
'timeout': 60,
'fast_open': False,
'verbose': 2
}
manager = Manager(config)
enc.append(manager)
manager.run()
t = threading.Thread(target=run_server)
t.start()
time.sleep(1)
manager = enc[0]
cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cli.connect(('127.0.0.1', 6001))
# test add and remove
time.sleep(1)
cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
time.sleep(1)
assert 7001 in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
cli.send(b'remove: {"server_port":8381}')
time.sleep(1)
assert 8381 not in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
logging.info('add and remove test passed')
# test statistics for TCP
header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
header + b'GET /\r\n\r\n')
tcp_cli = socket.socket()
tcp_cli.connect(('127.0.0.1', 7001))
tcp_cli.send(data)
tcp_cli.recv(4096)
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = shell.parse_json_in_str(data)
assert '7001' in stats
logging.info('TCP statistics test passed')
# test statistics for UDP
header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
header + b'test')
udp_cli = socket.socket(type=socket.SOCK_DGRAM)
udp_cli.sendto(data, ('127.0.0.1', 8382))
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = json.loads(data)
assert '8382' in stats
logging.info('UDP statistics test passed')
manager._loop.stop()
t.join()
if __name__ == '__main__':
test()
|
apache-2.0
|
IONISx/edx-platform
|
common/lib/calc/calc/tests/test_preview.py
|
257
|
8723
|
# -*- coding: utf-8 -*-
"""
Unit tests for preview.py
"""
import unittest
from calc import preview
import pyparsing
class LatexRenderedTest(unittest.TestCase):
"""
Test the initializing code for LatexRendered.
Specifically that it stores the correct data and handles parens well.
"""
def test_simple(self):
"""
Test that the data values are stored without changing.
"""
math = 'x^2'
obj = preview.LatexRendered(math, tall=True)
self.assertEquals(obj.latex, math)
self.assertEquals(obj.sans_parens, math)
self.assertEquals(obj.tall, True)
def _each_parens(self, with_parens, math, parens, tall=False):
"""
Helper method to test the way parens are wrapped.
"""
obj = preview.LatexRendered(math, parens=parens, tall=tall)
self.assertEquals(obj.latex, with_parens)
self.assertEquals(obj.sans_parens, math)
self.assertEquals(obj.tall, tall)
def test_parens(self):
""" Test curvy parens. """
self._each_parens('(x+y)', 'x+y', '(')
def test_brackets(self):
""" Test brackets. """
self._each_parens('[x+y]', 'x+y', '[')
def test_squiggles(self):
""" Test curly braces. """
self._each_parens(r'\{x+y\}', 'x+y', '{')
def test_parens_tall(self):
""" Test curvy parens with the tall parameter. """
self._each_parens(r'\left(x^y\right)', 'x^y', '(', tall=True)
def test_brackets_tall(self):
""" Test brackets, also tall. """
self._each_parens(r'\left[x^y\right]', 'x^y', '[', tall=True)
def test_squiggles_tall(self):
""" Test tall curly braces. """
self._each_parens(r'\left\{x^y\right\}', 'x^y', '{', tall=True)
def test_bad_parens(self):
""" Check that we get an error with invalid parens. """
with self.assertRaisesRegexp(Exception, 'Unknown parenthesis'):
preview.LatexRendered('x^2', parens='not parens')
class LatexPreviewTest(unittest.TestCase):
"""
Run integrative tests for `latex_preview`.
    All functionality was tested in `RenderMethodsTest`, but see if it combines
all together correctly.
"""
def test_no_input(self):
"""
With no input (including just whitespace), see that no error is thrown.
"""
self.assertEquals('', preview.latex_preview(''))
self.assertEquals('', preview.latex_preview(' '))
self.assertEquals('', preview.latex_preview(' \t '))
def test_number_simple(self):
""" Simple numbers should pass through. """
self.assertEquals(preview.latex_preview('3.1415'), '3.1415')
def test_number_suffix(self):
""" Suffixes should be escaped. """
self.assertEquals(preview.latex_preview('1.618k'), r'1.618\text{k}')
def test_number_sci_notation(self):
""" Numbers with scientific notation should display nicely """
self.assertEquals(
preview.latex_preview('6.0221413E+23'),
r'6.0221413\!\times\!10^{+23}'
)
self.assertEquals(
preview.latex_preview('-6.0221413E+23'),
r'-6.0221413\!\times\!10^{+23}'
)
def test_number_sci_notation_suffix(self):
""" Test numbers with both of these. """
self.assertEquals(
preview.latex_preview('6.0221413E+23k'),
r'6.0221413\!\times\!10^{+23}\text{k}'
)
self.assertEquals(
preview.latex_preview('-6.0221413E+23k'),
r'-6.0221413\!\times\!10^{+23}\text{k}'
)
def test_variable_simple(self):
""" Simple valid variables should pass through. """
self.assertEquals(preview.latex_preview('x', variables=['x']), 'x')
def test_greek(self):
""" Variable names that are greek should be formatted accordingly. """
self.assertEquals(preview.latex_preview('pi'), r'\pi')
def test_variable_subscript(self):
""" Things like 'epsilon_max' should display nicely """
self.assertEquals(
preview.latex_preview('epsilon_max', variables=['epsilon_max']),
r'\epsilon_{max}'
)
def test_function_simple(self):
""" Valid function names should be escaped. """
self.assertEquals(
preview.latex_preview('f(3)', functions=['f']),
r'\text{f}(3)'
)
def test_function_tall(self):
r""" Functions surrounding a tall element should have \left, \right """
self.assertEquals(
preview.latex_preview('f(3^2)', functions=['f']),
r'\text{f}\left(3^{2}\right)'
)
def test_function_sqrt(self):
""" Sqrt function should be handled specially. """
self.assertEquals(preview.latex_preview('sqrt(3)'), r'\sqrt{3}')
def test_function_log10(self):
""" log10 function should be handled specially. """
self.assertEquals(preview.latex_preview('log10(3)'), r'\log_{10}(3)')
def test_function_log2(self):
""" log2 function should be handled specially. """
self.assertEquals(preview.latex_preview('log2(3)'), r'\log_2(3)')
def test_power_simple(self):
""" Powers should wrap the elements with braces correctly. """
self.assertEquals(preview.latex_preview('2^3^4'), '2^{3^{4}}')
def test_power_parens(self):
""" Powers should ignore the parenthesis of the last math. """
self.assertEquals(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}')
def test_parallel(self):
r""" Parallel items should combine with '\|'. """
self.assertEquals(preview.latex_preview('2||3'), r'2\|3')
def test_product_mult_only(self):
r""" Simple products should combine with a '\cdot'. """
self.assertEquals(preview.latex_preview('2*3'), r'2\cdot 3')
def test_product_big_frac(self):
""" Division should combine with '\frac'. """
self.assertEquals(
preview.latex_preview('2*3/4/5'),
r'\frac{2\cdot 3}{4\cdot 5}'
)
def test_product_single_frac(self):
""" Division should ignore parens if they are extraneous. """
self.assertEquals(
preview.latex_preview('(2+3)/(4+5)'),
r'\frac{2+3}{4+5}'
)
def test_product_keep_going(self):
"""
Complex products/quotients should split into many '\frac's when needed.
"""
self.assertEquals(
preview.latex_preview('2/3*4/5*6'),
r'\frac{2}{3}\cdot \frac{4}{5}\cdot 6'
)
def test_sum(self):
""" Sums should combine its elements. """
# Use 'x' as the first term (instead of, say, '1'), so it can't be
# interpreted as a negative number.
self.assertEquals(
preview.latex_preview('-x+2-3+4', variables=['x']),
'-x+2-3+4'
)
def test_sum_tall(self):
""" A complicated expression should not hide the tallness. """
self.assertEquals(
preview.latex_preview('(2+3^2)'),
r'\left(2+3^{2}\right)'
)
def test_complicated(self):
"""
Given complicated input, ensure that exactly the correct string is made.
"""
self.assertEquals(
preview.latex_preview('11*f(x)+x^2*(3||4)/sqrt(pi)'),
r'11\cdot \text{f}(x)+\frac{x^{2}\cdot (3\|4)}{\sqrt{\pi}}'
)
self.assertEquals(
preview.latex_preview('log10(1+3/4/Cos(x^2)*(x+1))',
case_sensitive=True),
(r'\log_{10}\left(1+\frac{3}{4\cdot \text{Cos}\left(x^{2}\right)}'
r'\cdot (x+1)\right)')
)
def test_syntax_errors(self):
"""
Test a lot of math strings that give syntax errors
Rather than have a lot of self.assertRaises, make a loop and keep track
of those that do not throw a `ParseException`, and assert at the end.
"""
bad_math_list = [
'11+',
'11*',
'f((x)',
'sqrt(x^)',
'3f(x)', # Not 3*f(x)
'3|4',
'3|||4'
]
bad_exceptions = {}
for math in bad_math_list:
try:
preview.latex_preview(math)
except pyparsing.ParseException:
pass # This is what we were expecting. (not excepting :P)
except Exception as error: # pragma: no cover
bad_exceptions[math] = error
else: # pragma: no cover
# If there is no exception thrown, this is a problem
bad_exceptions[math] = None
self.assertEquals({}, bad_exceptions)
|
agpl-3.0
|
yephper/django
|
tests/template_tests/filter_tests/test_iriencode.py
|
1
|
1646
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
"""
Ensure iriencode keeps safe strings.
"""
@setup({'iriencode01': '{{ url|iriencode }}'})
def test_iriencode01(self):
output = self.engine.render_to_string('iriencode01', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode02(self):
output = self.engine.render_to_string('iriencode02', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode03': '{{ url|iriencode }}'})
def test_iriencode03(self):
output = self.engine.render_to_string('iriencode03', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode04(self):
output = self.engine.render_to_string('iriencode04', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
def test_unicode(self):
self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'), 'S%C3%B8r-Tr%C3%B8ndelag')
def test_urlencoded(self):
self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')), 'fran%C3%A7ois%20%26%20jill')
|
bsd-3-clause
|
sanketdjain/box-python-sdk
|
boxsdk/client.py
|
4
|
11505
|
# coding: utf-8
from __future__ import unicode_literals
import json
from .config import API
from .session.box_session import BoxSession
from .network.default_network import DefaultNetwork
from .object.user import User
from .object.folder import Folder
from .object.search import Search
from .object.events import Events
from .object.file import File
from .object.group import Group
from .object.group_membership import GroupMembership
from .util.shared_link import get_shared_link_header
from .util.translator import Translator
class Client(object):
def __init__(self, oauth, network_layer=None, session=None):
"""
:param oauth:
OAuth2 object used by the session to authorize requests.
:type oauth:
:class:`OAuth2`
:param network_layer:
The Network layer to use. If none is provided then an instance of :class:`DefaultNetwork` will be used.
:type network_layer:
:class:`Network`
:param session:
The session object to use. If None is provided then an instance of :class:`BoxSession` will be used.
:type session:
:class:`BoxSession`
"""
network_layer = network_layer or DefaultNetwork()
self._oauth = oauth
self._network = network_layer
self._session = session or BoxSession(oauth=oauth, network_layer=network_layer)
def folder(self, folder_id):
"""
Initialize a :class:`Folder` object, whose box id is folder_id.
:param folder_id:
The box id of the :class:`Folder` object. Can use '0' to get the root folder on Box.
:type folder_id:
`unicode`
:return:
A :class:`Folder` object with the given folder id.
:rtype:
:class:`Folder`
"""
return Folder(session=self._session, object_id=folder_id)
def file(self, file_id):
"""
Initialize a :class:`File` object, whose box id is file_id.
:param file_id:
The box id of the :class:`File` object.
:type file_id:
`unicode`
:return:
A :class:`File` object with the given file id.
:rtype:
:class:`File`
"""
return File(session=self._session, object_id=file_id)
def user(self, user_id='me'):
"""
Initialize a :class:`User` object, whose box id is user_id.
:param user_id:
The user id of the :class:`User` object. Can use 'me' to get the User for the current/authenticated user.
:type user_id:
`unicode`
:return:
A :class:`User` object with the given id.
:rtype:
:class:`User`
"""
return User(session=self._session, object_id=user_id)
def group(self, group_id):
"""
Initialize a :class:`Group` object, whose box id is group_id.
:param group_id:
The box id of the :class:`Group` object.
:type group_id:
`unicode`
:return:
A :class:`Group` object with the given group id.
:rtype:
:class:`Group`
"""
return Group(session=self._session, object_id=group_id)
def users(self):
"""
Get a list of all users for the Enterprise along with their user_id, public_name, and login.
:return:
The list of all users in the enterprise.
:rtype:
`list` of :class:`User`
"""
url = '{0}/users'.format(API.BASE_API_URL)
box_response = self._session.get(url)
response = box_response.json()
return [User(self._session, item['id'], item) for item in response['entries']]
def search(self, query, limit, offset, ancestor_folders=None, file_extensions=None, metadata_filters=None, result_type=None, content_types=None):
"""
Search Box for items matching the given query.
:param query:
The string to search for.
:type query:
`unicode`
:param limit:
The maximum number of items to return.
:type limit:
`int`
:param offset:
The search result at which to start the response.
:type offset:
`int`
:param ancestor_folders:
Folder ids to limit the search to.
:type ancestor_folders:
`iterable` of :class:`Folder`
:param file_extensions:
File extensions to limit the search to.
:type file_extensions:
`iterable` of `unicode`
:param metadata_filters:
Filters used for metadata search
:type metadata_filters:
:class:`MetadataSearchFilters`
:param result_type:
Which type of result you want. Can be file or folder.
:type result_type:
`unicode`
:param content_types:
Which content types to search. Valid types include name, description, file_content, comments, and tags.
:type content_types:
`Iterable` of `unicode`
:return:
A list of items that match the search query.
:rtype:
`list` of :class:`Item`
"""
return Search(self._session).search(query=query,
limit=limit,
offset=offset,
ancestor_folders=ancestor_folders,
file_extensions=file_extensions,
metadata_filters=metadata_filters,
result_type=result_type,
content_types=content_types)
def events(self):
"""
Get an events object that can get the latest events from Box or set up a long polling event subscription.
"""
return Events(self._session)
def group_membership(self, group_membership_id):
"""
Initialize a :class:`GroupMembership` object, whose box id is group_membership_id.
:param group_membership_id:
The box id of the :class:`GroupMembership` object.
:type group_membership_id:
`unicode`
:return:
A :class:`GroupMembership` object with the given membership id.
:rtype:
:class:`GroupMembership`
"""
return GroupMembership(session=self._session, object_id=group_membership_id)
def groups(self):
"""
Get a list of all groups for the current user.
:return:
The list of all groups.
:rtype:
`list` of :class:`Group`
"""
url = '{0}/groups'.format(API.BASE_API_URL)
box_response = self._session.get(url)
response = box_response.json()
return [Group(self._session, item['id'], item) for item in response['entries']]
def create_group(self, name):
"""
Create a group with the given name.
:param name:
The name of the group.
:type name:
`unicode`
:return:
The newly created Group.
:rtype:
:class:`Group`
:raises:
:class:`BoxAPIException` if current user doesn't have permissions to create a group.
"""
url = '{0}/groups'.format(API.BASE_API_URL)
body_attributes = {
'name': name,
}
box_response = self._session.post(url, data=json.dumps(body_attributes))
response = box_response.json()
return Group(self._session, response['id'], response)
def get_shared_item(self, shared_link, password=None):
"""
Get information about a Box shared link. https://developers.box.com/docs/#shared-items
:param shared_link:
The shared link.
:type shared_link:
`unicode`
:param password:
The password for the shared link.
:type password:
`unicode`
:return:
The item referred to by the shared link.
:rtype:
:class:`Item`
:raises:
:class:`BoxAPIException` if current user doesn't have permissions to view the shared link.
"""
response = self.make_request(
'GET',
'{0}/shared_items'.format(API.BASE_API_URL),
headers=get_shared_link_header(shared_link, password),
).json()
return Translator().translate(response['type'])(
self._session.with_shared_link(shared_link, password),
response['id'],
response,
)
def make_request(self, method, url, **kwargs):
"""
Make an authenticated request to the Box API.
:param method:
The HTTP verb to use for the request.
:type method:
`unicode`
:param url:
The URL for the request.
:type url:
`unicode`
:return:
The network response for the given request.
:rtype:
:class:`BoxResponse`
:raises:
:class:`BoxAPIException`
"""
return self._session.request(method, url, **kwargs)
def create_user(self, name, login=None, **user_attributes):
"""
Create a new user. Can only be used if the current user is an enterprise admin, or the current authorization
scope is a Box developer edition instance.
:param name:
The user's display name.
:type name:
`unicode`
:param login:
The user's email address. Required for an enterprise user, but None for an app user.
:type login:
`unicode` or None
:param user_attributes:
Additional attributes for the user. See the documentation at
https://box-content.readme.io/#create-an-enterprise-user for enterprise users
or https://developers.box.com/developer-edition/ for app users.
"""
url = '{0}/users'.format(API.BASE_API_URL)
user_attributes['name'] = name
if login is not None:
user_attributes['login'] = login
else:
user_attributes['is_platform_access_only'] = True
box_response = self._session.post(url, data=json.dumps(user_attributes))
response = box_response.json()
return User(self._session, response['id'], response)
def as_user(self, user):
"""
Returns a new client object with default headers set up to make requests as the specified user.
:param user:
The user to impersonate when making API requests.
:type user:
:class:`User`
"""
return self.__class__(self._oauth, self._network, self._session.as_user(user))
def with_shared_link(self, shared_link, shared_link_password):
"""
Returns a new client object with default headers set up to make requests using the shared link for auth.
:param shared_link:
The shared link.
:type shared_link:
`unicode`
:param shared_link_password:
The password for the shared link.
:type shared_link_password:
`unicode`
"""
return self.__class__(
self._oauth,
self._network,
self._session.with_shared_link(shared_link, shared_link_password),
)
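
# --- Editor's note: illustrative usage sketch, not part of the original client module ---
# A minimal, hedged example of how the methods documented above might be combined.
# The credential strings and the shared-link URL are placeholders (assumptions),
# and the top-level `boxsdk` imports are assumed to be available once the package
# is installed; nothing here runs on import.
if __name__ == '__main__':
    from boxsdk import OAuth2, Client

    oauth = OAuth2(
        client_id='YOUR_CLIENT_ID',            # placeholder
        client_secret='YOUR_CLIENT_SECRET',    # placeholder
        access_token='YOUR_DEVELOPER_TOKEN',   # placeholder
    )
    client = Client(oauth)

    # Create an app user (no login), then act on their behalf.
    app_user = client.create_user(name='Example App User')
    user_client = client.as_user(app_user)

    # Resolve the item behind a (placeholder) shared link; password is optional.
    item = client.get_shared_item('https://app.box.com/s/example-shared-link')
    print(item)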
|
apache-2.0
|
NikolayChesnokov/webgsrp3
|
local/webviewmodelmanager.py
|
2
|
1390
|
# -*- coding: utf-8 -*-
from os.path import join as opj
import os
from lxml import etree
from flask import current_app, jsonify, render_template  # used by load(), loadCSS() and _getFullFileName() below
from local.manager import Manager
class webviewModelManager(Manager):
"""
    Load a view.
"""
_name = 'webviewModelManager'
_alias = 'model'
_allow = {}
_inherit = 'web.view'
_list_methods = ('load', 'save')
def load(self, name):
fn = self._getFullFileName(name)
        if self._isGenerated(fn):
            src = self._loadResourceFromFile(fn + '.xml')
html = self._generateFromXML(src)
return jsonify({'json':render_template(name + '.html')})
def loadCSS(self, name):
return jsonify({'json':render_template(name + '.css')})
def _getFullFileName(self, name):
return opj(current_app.root_path, current_app.template_folder, name)
def _isGenerated(self, name):
if os.path.exists(name+'.html') and os.path.isfile(name+'.html') and os.path.getmtime(name+'.xml') > os.path.getmtime(name+'.html'):
return True
else:
return False
    def _loadResourceFromFile(self, fullname):
f = open(fullname,'r')
d = f.read()
f.close()
return d
def _generateFromXML(self, data):
tree = etree.fromstring(text = data)
nodes = tree.xpath('/gsrp/data/template/form')
for node in nodes:
print(node.tag,node.keys(),node.values())
for child in node.getchildren():
print(child.tag,child.keys(),child.values())
def save(self, name, value):
pass
webviewModelManager()
|
agpl-3.0
|
eduardomartins/ProjectEuler
|
Python/problem14.py
|
1
|
2101
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# problem14.py
#
# Copyright 2017 Eduardo Sant'Anna Martins <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# ==========================================================
# Longest Collatz sequence
#
# The following iterative sequence is defined for the set of positive integers:
# n → n/2 (n is even)
# n → 3n + 1 (n is odd)
# Using the rule above and starting with 13, we generate the following sequence:
# 13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
# It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms.
# Although it has not been proved yet (Collatz Problem), it is thought that all starting
# numbers finish at 1.
# Which starting number, under one million, produces the longest chain?
import sys
def collatz_sequence(n, verbose=False):
count = 1
while n > 1:
if verbose:
sys.stdout.write("%s->" % n)
if n % 2 == 0:
n /= 2
else:
n = 3 * n + 1
count += 1
if verbose:
print 1
return count
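
# --- Editor's note: hedged alternative sketch, not part of the original solution ---
# The brute-force search in main() below recomputes every chain from scratch. A
# common optimisation is to memoise chain lengths that have already been
# computed; this helper is an illustration only and is not called by main().
def collatz_length_memoized(n, _cache={1: 1}):
    # Walk the sequence until a cached value is reached, then unwind the path
    # and record the length of every intermediate value.
    path = []
    while n not in _cache:
        path.append(n)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
    length = _cache[n]
    for value in reversed(path):
        length += 1
        _cache[value] = length
    return length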
def main():
n = 1
count = 0
largest = 0
number = None
while n < 10**6:
count = collatz_sequence(n)
if count > largest:
largest = count
number = n
n += 1
print 'LARGEST', largest, number
if __name__ == '__main__':
main()
|
gpl-2.0
|
etherkit/OpenBeacon2
|
client/linux-x86/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-shapely.py
|
3
|
2470
|
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
import os
from ctypes.util import find_library
from PyInstaller.utils.hooks import get_package_paths
from PyInstaller.utils.hooks import is_module_satisfies
from PyInstaller import compat
# Necessary when using the vectorized subpackage
hiddenimports = ['shapely.prepared']
pkg_base, pkg_dir = get_package_paths('shapely')
binaries = []
if compat.is_win:
    # Search the conda directory if conda is active, then search the standard
    # directory. This is the same order of precedence used in shapely.
standard_path = os.path.join(pkg_dir, 'DLLs')
lib_paths = [standard_path, os.environ['PATH']]
if compat.is_conda:
conda_path = os.path.join(compat.base_prefix, 'Library', 'bin')
lib_paths.insert(0, conda_path)
original_path = os.environ['PATH']
try:
os.environ['PATH'] = os.pathsep.join(lib_paths)
dll_path = find_library('geos_c')
finally:
os.environ['PATH'] = original_path
if dll_path is None:
raise SystemExit(
"Error: geos_c.dll not found, required by hook-shapely.py.\n"
"Please check your installation or provide a pull request to "
"PyInstaller to update hook-shapely.py.")
binaries += [(dll_path, '.')]
elif compat.is_linux:
lib_dir = os.path.join(pkg_dir, '.libs')
dest_dir = os.path.join('shapely', '.libs')
    # This duplicates the libgeos*.so* files in the build. PyInstaller will
    # copy them into the root of the build by default, but shapely cannot load
    # them from there on Linux if shapely was installed via a whl file. The
    # whl bundles its own libgeos with a different name, something like
    # libgeos_c-*.so.*, but shapely tries to load libgeos_c.so if there isn't a
    # ./libs directory under its package. There is a proposed fix for this in
    # shapely, but it has not been accepted yet:
    # https://github.com/Toblerity/Shapely/pull/485
if is_module_satisfies('shapely <= 1.6'):
binaries += [(os.path.join(lib_dir, f), dest_dir) for f in os.listdir(lib_dir)]
|
gpl-3.0
|
NewProggie/Cpp-Project-Template
|
thirdparty/gtest-1.7.0/test/gtest_color_test.py
|
3259
|
4911
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
mit
|
comiconomenclaturist/libretime
|
python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py
|
2
|
3322
|
"""Contains the main application class for airtime_analyzer.
"""
import logging
import logging.handlers
import sys
import signal
import traceback
import config_file
from functools import partial
from metadata_analyzer import MetadataAnalyzer
from replaygain_analyzer import ReplayGainAnalyzer
from status_reporter import StatusReporter
from message_listener import MessageListener
class AirtimeAnalyzerServer:
"""A server for importing uploads to Airtime as background jobs.
"""
# Constants
_LOG_PATH = "/var/log/airtime/airtime_analyzer.log"
# Variables
_log_level = logging.INFO
def __init__(self, rmq_config_path, cloud_storage_config_path, http_retry_queue_path, debug=False):
# Dump a stacktrace with 'kill -SIGUSR2 <PID>'
signal.signal(signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace())
# Configure logging
self.setup_logging(debug)
# Read our rmq config file
rmq_config = config_file.read_config_file(rmq_config_path)
# Read the cloud storage config file
cloud_storage_config = config_file.read_config_file(cloud_storage_config_path)
# Start up the StatusReporter process
StatusReporter.start_thread(http_retry_queue_path)
# Start listening for RabbitMQ messages telling us about newly
        # uploaded files. This blocks until we receive a shutdown signal.
self._msg_listener = MessageListener(rmq_config, cloud_storage_config)
StatusReporter.stop_thread()
def setup_logging(self, debug):
"""Set up nicely formatted logging and log rotation.
Keyword arguments:
debug -- a boolean indicating whether to enable super verbose logging
to the screen and disk.
"""
if debug:
self._log_level = logging.DEBUG
else:
#Disable most pika/rabbitmq logging:
pika_logger = logging.getLogger('pika')
pika_logger.setLevel(logging.CRITICAL)
boto_logger = logging.getLogger('auth')
boto_logger.setLevel(logging.CRITICAL)
# Set up logging
logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(self._log_level)
#fileHandler = logging.handlers.RotatingFileHandler(filename=self._LOG_PATH, maxBytes=1024*1024*30,
# backupCount=8)
#fileHandler.setFormatter(logFormatter)
#rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
@classmethod
    def dump_stacktrace(cls):
''' Dump a stacktrace for all threads '''
code = []
for threadId, stack in sys._current_frames().items():
code.append("\n# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
logging.info('\n'.join(code))
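
# --- Editor's note: illustrative launcher sketch, not part of the original module ---
# A hedged example of how AirtimeAnalyzerServer might be constructed; the three
# paths below are assumptions, and in the real project they are supplied by the
# airtime_analyzer command-line entry point.
if __name__ == '__main__':
    AirtimeAnalyzerServer(
        rmq_config_path='/etc/airtime/airtime.conf',                       # assumed path
        cloud_storage_config_path='/etc/airtime-saas/production.conf',     # assumed path
        http_retry_queue_path='/tmp/airtime_analyzer_http_retries.queue',  # assumed path
        debug=True,
    )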
|
agpl-3.0
|
Lemma1/MAC-POSTS
|
doc_builder/sphinx-contrib/argdoc/sphinxcontrib/argdoc/test/cases/c5_with_epilog.py
|
1
|
2878
|
#!/usr/bin/env python
"""Subcommands, each having its own epilog
An epilog should appear at the bottom of each command-group section
"""
import argparse
import sys
main_epilog = """This is a multi-line epilog which should appear at the bottom of the module
docstring and also follow all of the options, arguments, et cetera.
"""
foo_help = "Run the ``foo`` subprogram"
foo_desc = """This is a long description of what a ``foo`` program might do.
It spans multiple lines, so that we can test things reasonably.
"""
foo_epilog = """This is the epilog for the ``foo`` subprogram. It should appear
at the end of the ``foo`` subcommand section."""
bar_help = "Take output from ``foo`` subprogram and run it through the ``bar`` subprogram"
bar_desc = """This is the long description for the ``bar`` subcommand."""
bar_epilog = """This the epilog for the ``bar`` subcommand. It is short."""
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=main_epilog)
parser.add_argument("mainarg1")
parser.add_argument("mainarg2",help="main positional argument #2, which has a description (``mainarg1`` did not)")
subparsers = parser.add_subparsers(title="subcommands",
description="choose one of the following:",
dest="program")
fooparser = subparsers.add_parser("foo",
help=foo_help,
description=foo_desc,
epilog=foo_epilog)
barparser = subparsers.add_parser("bar",
help=bar_help,
description=bar_desc,
epilog=bar_epilog)
fooparser.add_argument("fooarg1",help="foo argument 1")
fooparser.add_argument("fooarg2",help="foo argument 2")
fooparser.add_argument("-f",help="short foo argument",type=str)
fooparser.add_argument("--fookwarg",help="foo keyword argument",type=str)
fooparser.add_argument("-v","--verbose",help="foo verbosely")
barparser.add_argument("bararg",help="bar argument")
barparser.add_argument("--choice",choices=("option1","option2","option3"),
help="A keyword that requries a choice")
bgroup = barparser.add_argument_group(title="An argument group",
description="A special goup of arguments in the ``bar`` subparser")
bgroup.add_argument("--b1")
bgroup.add_argument("--b2",help="Argument 2 has help")
bgroup.add_argument("-k",nargs=2,metavar="N",help="Some other argument")
args = parser.parse_args(argv)
if __name__ == "__main__":
main()
|
mit
|
nzavagli/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/boto-2.38.0/boto/services/result.py
|
153
|
5596
|
#!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from datetime import datetime, timedelta
from boto.utils import parse_ts
import boto
class ResultProcessor(object):
LogFileName = 'log.csv'
def __init__(self, batch_name, sd, mimetype_files=None):
self.sd = sd
self.batch = batch_name
self.log_fp = None
self.num_files = 0
self.total_time = 0
self.min_time = timedelta.max
self.max_time = timedelta.min
self.earliest_time = datetime.max
self.latest_time = datetime.min
self.queue = self.sd.get_obj('output_queue')
self.domain = self.sd.get_obj('output_domain')
def calculate_stats(self, msg):
start_time = parse_ts(msg['Service-Read'])
end_time = parse_ts(msg['Service-Write'])
elapsed_time = end_time - start_time
if elapsed_time > self.max_time:
self.max_time = elapsed_time
if elapsed_time < self.min_time:
self.min_time = elapsed_time
self.total_time += elapsed_time.seconds
if start_time < self.earliest_time:
self.earliest_time = start_time
if end_time > self.latest_time:
self.latest_time = end_time
def log_message(self, msg, path):
keys = sorted(msg.keys())
if not self.log_fp:
self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
line = ','.join(keys)
self.log_fp.write(line+'\n')
values = []
for key in keys:
value = msg[key]
if value.find(',') > 0:
value = '"%s"' % value
values.append(value)
line = ','.join(values)
self.log_fp.write(line+'\n')
def process_record(self, record, path, get_file=True):
self.log_message(record, path)
self.calculate_stats(record)
outputs = record['OutputKey'].split(',')
if 'OutputBucket' in record:
bucket = boto.lookup('s3', record['OutputBucket'])
else:
bucket = boto.lookup('s3', record['Bucket'])
for output in outputs:
if get_file:
key_name = output.split(';')[0]
key = bucket.lookup(key_name)
file_name = os.path.join(path, key_name)
print('retrieving file: %s to %s' % (key_name, file_name))
key.get_contents_to_filename(file_name)
self.num_files += 1
def get_results_from_queue(self, path, get_file=True, delete_msg=True):
m = self.queue.read()
while m:
if 'Batch' in m and m['Batch'] == self.batch:
self.process_record(m, path, get_file)
if delete_msg:
self.queue.delete_message(m)
m = self.queue.read()
def get_results_from_domain(self, path, get_file=True):
rs = self.domain.query("['Batch'='%s']" % self.batch)
for item in rs:
self.process_record(item, path, get_file)
def get_results_from_bucket(self, path):
bucket = self.sd.get_obj('output_bucket')
if bucket:
print('No output queue or domain, just retrieving files from output_bucket')
for key in bucket:
file_name = os.path.join(path, key)
print('retrieving file: %s to %s' % (key, file_name))
key.get_contents_to_filename(file_name)
                self.num_files += 1
def get_results(self, path, get_file=True, delete_msg=True):
if not os.path.isdir(path):
os.mkdir(path)
if self.queue:
self.get_results_from_queue(path, get_file)
elif self.domain:
self.get_results_from_domain(path, get_file)
else:
self.get_results_from_bucket(path)
if self.log_fp:
self.log_fp.close()
print('%d results successfully retrieved.' % self.num_files)
if self.num_files > 0:
self.avg_time = float(self.total_time)/self.num_files
print('Minimum Processing Time: %d' % self.min_time.seconds)
print('Maximum Processing Time: %d' % self.max_time.seconds)
print('Average Processing Time: %f' % self.avg_time)
self.elapsed_time = self.latest_time-self.earliest_time
print('Elapsed Time: %d' % self.elapsed_time.seconds)
tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files)
print('Throughput: %f transactions / minute' % tput)
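
# --- Editor's note: illustrative usage sketch, not part of boto ---
# A hedged example of driving ResultProcessor; `sd` is assumed to be an already
# constructed boto service-definition object (the same kind of object the boto
# services scripts pass in), and the batch name and output directory are
# placeholders. The helper is defined but never called here.
def _example_collect_results(sd, batch_name='batch-2015-08-07', out_dir='results_output'):
    processor = ResultProcessor(batch_name, sd)
    # Download result files for the batch and print timing statistics.
    processor.get_results(out_dir)
    return processor.num_files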
|
mit
|
obnam-mirror/obnam
|
obnamlib/fmt_ga/format.py
|
1
|
4838
|
# Copyright 2015-2016 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =*= License: GPL-3+ =*=
import obnamlib
GREEN_ALBATROSS_VERSION = 'green-albatross-20160813'
class RepositoryFormatGA(obnamlib.RepositoryDelegator):
format = GREEN_ALBATROSS_VERSION
def __init__(self, **kwargs):
obnamlib.RepositoryDelegator.__init__(self, **kwargs)
self.set_client_list_object(obnamlib.GAClientList())
self.set_client_factory(self._client_factory)
self.set_chunk_indexes_object(obnamlib.GAChunkIndexes())
self._checksum_algorithm = kwargs['checksum_algorithm']
self._chunk_indexes.set_default_checksum_algorithm(
kwargs['checksum_algorithm'])
chunk_store = obnamlib.GAChunkStore()
if 'chunk_size' in kwargs: # pragma: no cover
chunk_store.set_max_chunk_size(kwargs['chunk_size'])
if 'chunk_cache_size' in kwargs: # pragma: no cover
chunk_store.set_chunk_cache_size(kwargs['chunk_cache_size'])
self.set_chunk_store_object(chunk_store)
def _client_factory(self, client_name):
client = obnamlib.GAClient(client_name)
client.set_default_checksum_algorithm(self._checksum_algorithm)
return client
def init_repo(self):
pass
def close(self):
pass
def get_fsck_work_items(self):
return []
def get_shared_directories(self):
return ['client-list', 'chunk-store', 'chunk-indexes']
#
# Per-client methods.
#
def get_allowed_client_keys(self):
return []
def get_client_key(self, client_name, key): # pragma: no cover
raise obnamlib.RepositoryClientKeyNotAllowed(
format=self.format,
client_name=client_name,
key_name=obnamlib.repo_key_name(key))
def set_client_key(self, client_name, key, value):
raise obnamlib.RepositoryClientKeyNotAllowed(
format=self.format,
client_name=client_name,
key_name=obnamlib.repo_key_name(key))
def get_client_extra_data_directory(self, client_name): # pragma: no cover
if client_name not in self.get_client_names():
raise obnamlib.RepositoryClientDoesNotExist(
client_name=client_name)
return self._client_list.get_client_dirname(client_name)
def get_allowed_generation_keys(self):
return [
obnamlib.REPO_GENERATION_TEST_KEY,
obnamlib.REPO_GENERATION_STARTED,
obnamlib.REPO_GENERATION_ENDED,
obnamlib.REPO_GENERATION_IS_CHECKPOINT,
obnamlib.REPO_GENERATION_FILE_COUNT,
obnamlib.REPO_GENERATION_TOTAL_DATA,
]
def get_allowed_file_keys(self):
return [
obnamlib.REPO_FILE_TEST_KEY,
obnamlib.REPO_FILE_MODE,
obnamlib.REPO_FILE_MTIME_SEC,
obnamlib.REPO_FILE_MTIME_NSEC,
obnamlib.REPO_FILE_ATIME_SEC,
obnamlib.REPO_FILE_ATIME_NSEC,
obnamlib.REPO_FILE_NLINK,
obnamlib.REPO_FILE_SIZE,
obnamlib.REPO_FILE_UID,
obnamlib.REPO_FILE_USERNAME,
obnamlib.REPO_FILE_GID,
obnamlib.REPO_FILE_GROUPNAME,
obnamlib.REPO_FILE_SYMLINK_TARGET,
obnamlib.REPO_FILE_XATTR_BLOB,
obnamlib.REPO_FILE_BLOCKS,
obnamlib.REPO_FILE_DEV,
obnamlib.REPO_FILE_INO,
obnamlib.REPO_FILE_SHA224,
obnamlib.REPO_FILE_SHA256,
obnamlib.REPO_FILE_SHA384,
obnamlib.REPO_FILE_SHA512,
]
def interpret_generation_spec(self, client_name, genspec):
ids = self.get_client_generation_ids(client_name)
if not ids:
raise obnamlib.RepositoryClientHasNoGenerations(
client_name=client_name)
if genspec == 'latest':
return ids[-1]
for gen_id in ids:
if self.make_generation_spec(gen_id) == genspec:
return gen_id
raise obnamlib.RepositoryGenerationDoesNotExist(
client_name=client_name, gen_id=genspec)
def make_generation_spec(self, generation_id):
return generation_id.gen_number
|
gpl-3.0
|
JioEducation/edx-platform
|
common/djangoapps/edxmako/paths.py
|
23
|
4914
|
"""
Set up lookup paths for mako templates.
"""
import hashlib
import contextlib
import os
import pkg_resources
from django.conf import settings
from mako.lookup import TemplateLookup
from mako.exceptions import TopLevelLookupException
from . import LOOKUP
from openedx.core.djangoapps.theming.helpers import (
get_template as themed_template,
get_template_path_with_theme,
strip_site_theme_templates_path,
)
class DynamicTemplateLookup(TemplateLookup):
"""
A specialization of the standard mako `TemplateLookup` class which allows
for adding directories progressively.
"""
def __init__(self, *args, **kwargs):
super(DynamicTemplateLookup, self).__init__(*args, **kwargs)
self.__original_module_directory = self.template_args['module_directory']
def __repr__(self):
return "<{0.__class__.__name__} {0.directories}>".format(self)
def add_directory(self, directory, prepend=False):
"""
Add a new directory to the template lookup path.
"""
if prepend:
self.directories.insert(0, os.path.normpath(directory))
else:
self.directories.append(os.path.normpath(directory))
# Since the lookup path has changed, the compiled modules might be
# wrong because now "foo.html" might be a completely different template,
# and "foo.html.py" in the module directory has no way to know that.
# Update the module_directory argument to point to a directory
# specifically for this lookup path.
unique = hashlib.md5(":".join(str(d) for d in self.directories)).hexdigest()
self.template_args['module_directory'] = os.path.join(self.__original_module_directory, unique)
# Also clear the internal caches. Ick.
self._collection.clear()
self._uri_cache.clear()
def get_template(self, uri):
"""
        Overridden method for locating a template in either the database or the site theme.
        If not found, the template lookup is done in the comprehensive theme for the current site
        by prefixing the path with the theme,
        e.g. if uri is `main.html` then the new uri would be something like `/red-theme/lms/static/main.html`.
        If still unable to find a template, it falls back to the default template directories after stripping off
        the prefix path to the theme.
"""
# try to get template for the given file from microsite
template = themed_template(uri)
# if microsite template is not present or request is not in microsite then
# let mako find and serve a template
if not template:
try:
# Try to find themed template, i.e. see if current theme overrides the template
template = super(DynamicTemplateLookup, self).get_template(get_template_path_with_theme(uri))
except TopLevelLookupException:
# strip off the prefix path to theme and look in default template dirs
template = super(DynamicTemplateLookup, self).get_template(strip_site_theme_templates_path(uri))
return template
def clear_lookups(namespace):
"""
Remove mako template lookups for the given namespace.
"""
if namespace in LOOKUP:
del LOOKUP[namespace]
def add_lookup(namespace, directory, package=None, prepend=False):
"""
Adds a new mako template lookup directory to the given namespace.
If `package` is specified, `pkg_resources` is used to look up the directory
inside the given package. Otherwise `directory` is assumed to be a path
in the filesystem.
"""
templates = LOOKUP.get(namespace)
if not templates:
LOOKUP[namespace] = templates = DynamicTemplateLookup(
module_directory=settings.MAKO_MODULE_DIR,
output_encoding='utf-8',
input_encoding='utf-8',
default_filters=['decode.utf8'],
encoding_errors='replace',
)
if package:
directory = pkg_resources.resource_filename(package, directory)
templates.add_directory(directory, prepend=prepend)
def lookup_template(namespace, name):
"""
Look up a Mako template by namespace and name.
"""
return LOOKUP[namespace].get_template(name)
@contextlib.contextmanager
def save_lookups():
"""
A context manager to save and restore the Mako template lookup path.
Useful for testing.
"""
# Make a copy of the list of directories for each namespace.
namespace_dirs = {namespace: list(look.directories) for namespace, look in LOOKUP.items()}
try:
yield
finally:
# Get rid of all the lookups.
LOOKUP.clear()
# Re-create the lookups from our saved list.
for namespace, directories in namespace_dirs.items():
for directory in directories:
add_lookup(namespace, directory)
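
# --- Editor's note: illustrative usage sketch, not part of the original module ---
# A hedged example of how the lookup helpers above are typically combined; the
# namespace, directories, template name and render context are assumptions for
# demonstration only. The function is defined but never called, so importing
# this module is unaffected.
def _example_lookup_usage():
    # Register template directories under a namespace (package-relative and absolute).
    add_lookup('main', 'templates', package='openedx.core')
    add_lookup('main', '/edx/app/edxapp/themes/example-theme/templates', prepend=True)

    # Resolve and render a template through the namespace.
    template = lookup_template('main', 'dashboard.html')
    html = template.render(user_name='example')

    # In tests, temporarily extend the lookup path and restore it afterwards.
    with save_lookups():
        add_lookup('main', '/tmp/test-templates')
    return html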
|
agpl-3.0
|
unicri/edx-platform
|
common/test/acceptance/pages/studio/settings_group_configurations.py
|
74
|
10062
|
"""
Course Group Configurations page.
"""
from bok_choy.promise import EmptyPromise
from .course_page import CoursePage
from .utils import confirm_prompt
class GroupConfigurationsPage(CoursePage):
"""
Course Group Configurations page.
"""
url_path = "group_configurations"
experiment_groups_css = ".experiment-groups"
content_groups_css = ".content-groups"
def is_browser_on_page(self):
"""
Verify that the browser is on the page and it is not still loading.
"""
EmptyPromise(
lambda: self.q(css='body.view-group-configurations').present,
'On the group configuration page'
).fulfill()
EmptyPromise(
lambda: not self.q(css='span.spin').visible,
'Group Configurations are finished loading'
).fulfill()
return True
@property
def experiment_group_configurations(self):
"""
Return list of the experiment group configurations for the course.
"""
return self._get_groups(self.experiment_groups_css)
@property
def content_groups(self):
"""
Return list of the content groups for the course.
"""
return self._get_groups(self.content_groups_css)
def _get_groups(self, prefix):
"""
Return list of the group-configurations-list-item's of specified type for the course.
"""
css = prefix + ' .wrapper-collection'
return [GroupConfiguration(self, prefix, index) for index in xrange(len(self.q(css=css)))]
def create_experiment_group_configuration(self):
"""
Creates new group configuration.
"""
self.q(css=self.experiment_groups_css + " .new-button").first.click()
def create_first_content_group(self):
"""
Creates new content group when there are none initially defined.
"""
self.q(css=self.content_groups_css + " .new-button").first.click()
def add_content_group(self):
"""
Creates new content group when at least one already exists
"""
self.q(css=self.content_groups_css + " .action-add").first.click()
@property
def no_experiment_groups_message_is_present(self):
return self._no_content_message(self.experiment_groups_css).present
@property
def no_content_groups_message_is_present(self):
return self._no_content_message(self.content_groups_css).present
@property
def no_experiment_groups_message_text(self):
return self._no_content_message(self.experiment_groups_css).text[0]
@property
def no_content_groups_message_text(self):
return self._no_content_message(self.content_groups_css).text[0]
def _no_content_message(self, prefix):
"""
Returns the message about "no content" for the specified type.
"""
return self.q(css='.wrapper-content ' + prefix + ' .no-content')
@property
def experiment_group_sections_present(self):
"""
Returns whether or not anything related to content experiments is present.
"""
return self.q(css=self.experiment_groups_css).present or self.q(css=".experiment-groups-doc").present
class GroupConfiguration(object):
"""
Group Configuration wrapper.
"""
def __init__(self, page, prefix, index):
self.page = page
self.SELECTOR = prefix + ' .wrapper-collection-{}'.format(index)
self.index = index
def get_selector(self, css=''):
return ' '.join([self.SELECTOR, css])
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.get_selector(css=selector))
def toggle(self):
"""
Expand/collapse group configuration.
"""
self.find_css('a.group-toggle').first.click()
@property
def is_expanded(self):
"""
Group configuration usage information is expanded.
"""
return self.find_css('a.group-toggle.hide-groups').present
def add_group(self):
"""
Add new group.
"""
self.find_css('button.action-add-group').first.click()
def get_text(self, css):
"""
Return text for the defined by css locator.
"""
return self.find_css(css).first.text[0]
def click_outline_anchor(self):
"""
Click on the `Course Outline` link.
"""
self.find_css('p.group-configuration-usage-text a').first.click()
def click_unit_anchor(self, index=0):
"""
Click on the link to the unit.
"""
self.find_css('li.group-configuration-usage-unit a').nth(index).click()
def edit(self):
"""
Open editing view for the group configuration.
"""
self.find_css('.action-edit .edit').first.click()
@property
def delete_button_is_disabled(self):
return self.find_css('.actions .delete.is-disabled').present
@property
def delete_button_is_present(self):
"""
Returns whether or not the delete icon is present.
"""
return self.find_css('.actions .delete').present
def delete(self):
"""
Delete the group configuration.
"""
self.find_css('.actions .delete').first.click()
confirm_prompt(self.page)
def save(self):
"""
Save group configuration.
"""
self.find_css('.action-primary').first.click()
self.page.wait_for_ajax()
def cancel(self):
"""
Cancel group configuration.
"""
self.find_css('.action-secondary').first.click()
@property
def mode(self):
"""
Return group configuration mode.
"""
if self.find_css('.collection-edit').present:
return 'edit'
elif self.find_css('.collection').present:
return 'details'
@property
def id(self):
"""
Return group configuration id.
"""
return self.get_text('.group-configuration-id .group-configuration-value')
@property
def validation_message(self):
"""
Return validation message.
"""
return self.get_text('.message-status.error')
@property
def usages(self):
"""
Return list of usages.
"""
css = '.group-configuration-usage-unit'
return self.find_css(css).text
@property
def name(self):
"""
Return group configuration name.
"""
return self.get_text('.title')
@name.setter
def name(self, value):
"""
Set group configuration name.
"""
self.find_css('.collection-name-input').first.fill(value)
@property
def description(self):
"""
Return group configuration description.
"""
return self.get_text('.group-configuration-description')
@description.setter
def description(self, value):
"""
Set group configuration description.
"""
self.find_css('.group-configuration-description-input').first.fill(value)
@property
def groups(self):
"""
Return list of groups.
"""
def group_selector(group_index):
return self.get_selector('.group-{} '.format(group_index))
return [Group(self.page, group_selector(index)) for index, element in enumerate(self.find_css('.group'))]
@property
def delete_note(self):
"""
Return delete note for the group configuration.
"""
return self.find_css('.wrapper-delete-button').first.attrs('data-tooltip')[0]
@property
def details_error_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .fa-exclamation-circle').present
@property
def details_warning_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .fa-warning').present
@property
def details_message_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .group-configuration-validation-message').present
@property
def details_message_text(self):
return self.find_css('.wrapper-group-configuration-usages .group-configuration-validation-message').text[0]
@property
def edit_warning_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-validation .fa-warning').present
@property
def edit_warning_message_is_present(self):
return self.find_css('.wrapper-group-configuration-validation .group-configuration-validation-text').present
@property
def edit_warning_message_text(self):
return self.find_css('.wrapper-group-configuration-validation .group-configuration-validation-text').text[0]
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
class Group(object):
"""
Group wrapper.
"""
def __init__(self, page, prefix_selector):
self.page = page
self.prefix = prefix_selector
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.prefix + selector)
@property
def name(self):
"""
        Return the name of the group.
"""
css = '.group-name'
return self.find_css(css).first.text[0]
@name.setter
def name(self, value):
"""
Set the name for the group.
"""
css = '.group-name'
self.find_css(css).first.fill(value)
@property
def allocation(self):
"""
Return allocation for the group.
"""
css = '.group-allocation'
return self.find_css(css).first.text[0]
def remove(self):
"""
Remove the group.
"""
css = '.action-close'
return self.find_css(css).first.click()
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
|
agpl-3.0
|
ironbox360/django
|
django/core/checks/urls.py
|
110
|
2727
|
from __future__ import unicode_literals
from . import Tags, Warning, register
@register(Tags.urls)
def check_url_config(app_configs, **kwargs):
from django.core.urlresolvers import get_resolver
resolver = get_resolver()
return check_resolver(resolver)
def check_resolver(resolver):
"""
Recursively check the resolver.
"""
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
warnings = []
for pattern in resolver.url_patterns:
if isinstance(pattern, RegexURLResolver):
warnings.extend(check_include_trailing_dollar(pattern))
# Check resolver recursively
warnings.extend(check_resolver(pattern))
elif isinstance(pattern, RegexURLPattern):
warnings.extend(check_pattern_name(pattern))
warnings.extend(check_pattern_startswith_slash(pattern))
return warnings
def describe_pattern(pattern):
"""
Format the URL pattern for display in warning messages.
"""
description = "'{}'".format(pattern.regex.pattern)
if getattr(pattern, 'name', False):
description += " [name='{}']".format(pattern.name)
return description
def check_include_trailing_dollar(pattern):
"""
Check that include is not used with a regex ending with a dollar.
"""
regex_pattern = pattern.regex.pattern
if regex_pattern.endswith('$') and not regex_pattern.endswith('\$'):
warning = Warning(
"Your URL pattern {} uses include with a regex ending with a '$'. "
"Remove the dollar from the regex to avoid problems including "
"URLs.".format(describe_pattern(pattern)),
id="urls.W001",
)
return [warning]
else:
return []
def check_pattern_startswith_slash(pattern):
"""
Check that the pattern does not begin with a forward slash.
"""
regex_pattern = pattern.regex.pattern
if regex_pattern.startswith('/') or regex_pattern.startswith('^/'):
warning = Warning(
"Your URL pattern {} has a regex beginning with a '/'. "
"Remove this slash as it is unnecessary.".format(describe_pattern(pattern)),
id="urls.W002",
)
return [warning]
else:
return []
def check_pattern_name(pattern):
"""
Check that the pattern name does not contain a colon.
"""
if pattern.name is not None and ":" in pattern.name:
warning = Warning(
"Your URL pattern {} has a name including a ':'. Remove the colon, to "
"avoid ambiguous namespace references.".format(describe_pattern(pattern)),
id="urls.W003",
)
return [warning]
else:
return []
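
# --- Editor's note: illustrative sketch, not part of Django ---
# A hedged, commented-out example of URL patterns that would trigger each of
# the warnings produced above; `blog.urls` and the view callables are
# assumptions for demonstration only.
#
# from django.conf.urls import include, url
#
# urlpatterns = [
#     url(r'^blog/$', include('blog.urls')),              # urls.W001: include() behind a regex ending in '$'
#     url(r'^/articles/$', views.articles),               # urls.W002: regex begins with '/'
#     url(r'^about/$', views.about, name='site:about'),   # urls.W003: ':' in the pattern name
# ]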
|
bsd-3-clause
|
ghandiosm/Test
|
addons/l10n_be_invoice_bba/partner.py
|
47
|
1364
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
from openerp.osv import fields, osv
import time
from openerp.tools.translate import _
class res_partner(osv.osv):
""" add field to indicate default 'Communication Type' on customer invoices """
_inherit = 'res.partner'
def _get_comm_type(self, cr, uid, context=None):
res = self.pool.get('account.invoice')._get_reference_type(cr, uid,context=context)
return res
_columns = {
'out_inv_comm_type': fields.selection(_get_comm_type, 'Communication Type', change_default=True,
help='Select Default Communication Type for Outgoing Invoices.' ),
'out_inv_comm_algorithm': fields.selection([
('random','Random'),
('date','Date'),
('partner_ref','Customer Reference'),
], 'Communication Algorithm',
help='Select Algorithm to generate the Structured Communication on Outgoing Invoices.' ),
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + \
['out_inv_comm_type', 'out_inv_comm_algorithm']
    _defaults = {
'out_inv_comm_type': 'none',
}
|
gpl-3.0
|
iamgreaser/pysnip
|
pyspades/collision.py
|
8
|
1431
|
# Copyright (c) Mathias Kaerlev 2011-2012.
# This file is part of pyspades.
# pyspades is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyspades is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyspades. If not, see <http://www.gnu.org/licenses/>.
import math
def vector_collision(vec1, vec2, distance = 3):
return (math.fabs(vec1.x - vec2.x) < distance and
math.fabs(vec1.y - vec2.y) < distance and
math.fabs(vec1.z - vec2.z) < distance)
def collision_3d(x1, y1, z1, x2, y2, z2, distance = 3):
return (math.fabs(x1 - x2) < distance and
math.fabs(y1 - y2) < distance and
math.fabs(z1 - z2) < distance)
def distance_3d_vector(vector1, vector2):
xd = vector1.x - vector2.x
yd = vector1.y - vector2.y
zd = vector1.z - vector2.z
return math.sqrt(xd**2 + yd**2 + zd**2)
def distance_3d((x1, y1, z1), (x2, y2, z2)):
xd = x1 - x2
yd = y1 - y2
    zd = z1 - z2
return math.sqrt(xd**2 + yd**2 + zd**2)
|
gpl-3.0
|
cul-it/Invenio
|
modules/bibsort/web/admin/bibsortadmin.py
|
2
|
3766
|
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibSort Administrator Interface."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
from invenio.webpage import page, create_error_box
from invenio.config import CFG_SITE_URL, CFG_SITE_LANG, CFG_SITE_NAME
from invenio.dbquery import Error
from invenio.webuser import getUid, page_not_authorized
from invenio.urlutils import wash_url_argument
from invenio import bibsortadminlib as bsc
def index(req, ln=CFG_SITE_LANG, action='', bsrID='', sm_name='', sm_def_type='', sm_def_value='', sm_washer='', sm_locale=''):
"""
Display the initial(main) page
"""
navtrail_previous_links = bsc.getnavtrail()
try:
uid = getUid(req)
except Error:
return error_page(req)
auth = bsc.check_user(req,'cfgbibsort')
if not auth[0]:
action = wash_url_argument(action, 'str')
bsrID = wash_url_argument(bsrID, 'int')
sm_name = wash_url_argument(sm_name, 'str')
sm_def_type = wash_url_argument(sm_def_type, 'str')
sm_def_value = wash_url_argument(sm_def_value, 'str')
sm_washer = wash_url_argument(sm_washer, 'str')
sm_locale = wash_url_argument(sm_locale, 'str')
return page(title="BibSort Admin Interface",
body=bsc.perform_index(ln, action, bsrID, sm_name, sm_def_type, sm_def_value, sm_washer, sm_locale),
uid=uid,
language=ln,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__,
req=req)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifytranslations(req, ln=CFG_SITE_LANG, bsrID='', trans=None, confirm=0):
"""
Display the modify translations page
"""
navtrail_previous_links = bsc.getnavtrail()+ """> <a class="navtrail" href="%s/admin/bibsort/bibsortadmin.py/">BibSort Admin Interface</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except Error:
return error_page(req)
auth = bsc.check_user(req,'cfgbibsort')
if not auth[0]:
bsrID = wash_url_argument(bsrID, 'int')
confirm = wash_url_argument(confirm, 'int')
return page(title="Modify translations",
body=bsc.perform_modifytranslations(ln, bsrID, trans, confirm),
uid=uid,
language=ln,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__,
req=req)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def error_page(req, ln=CFG_SITE_LANG, verbose=1):
"""
Returns a default error page
"""
return page(title="Internal Error",
body = create_error_box(req, verbose=verbose, ln=ln),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
|
gpl-2.0
|