repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
pabigot/pyxb | tests/trac/test-trac-0181.py | 2 | 1998 | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:complexType name="tAny">
<xsd:all minOccurs="0">
<xsd:element type="xsd:int" name="a" minOccurs="1"/>
<xsd:element type="xsd:int" name="b" minOccurs="1"/>
<xsd:element type="xsd:int" name="c" minOccurs="1"/>
<xsd:element type="xsd:int" name="d" minOccurs="1"/>
</xsd:all>
</xsd:complexType>
<xsd:element name="eAny" type="tAny"/>
</xsd:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0181 (unittest.TestCase):
def getInstance (self):
instance = eAny()
vc = instance._validationConfig.copy()
instance._setValidationConfig(vc)
instance.a = 1
instance.d = 2
instance.c = 3
instance.b = 4
return instance
def testNEVER (self):
i = self.getInstance()
vc = i._validationConfig
vc._setContentInfluencesGeneration(vc.NEVER)
# Uses declaration order for sub-automata (alphabetic)
xmls = '<eAny><a>1</a><b>4</b><c>3</c><d>2</d></eAny>'
xmld = xmls.encode('utf-8')
self.assertEqual(i.toxml('utf-8', root_only=True), xmld)
def testALWAYS (self):
i = self.getInstance()
vc = i._validationConfig
vc._setContentInfluencesGeneration(vc.ALWAYS)
# Uses assignment order for sub-automata (numeric)
xmls = '<eAny><a>1</a><d>2</d><c>3</c><b>4</b></eAny>'
xmld = xmls.encode('utf-8')
self.assertEqual(i.toxml('utf-8', root_only=True), xmld)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -9,116,964,154,924,526,000 | 29.738462 | 64 | 0.611111 | false |
qiyuangong/leetcode | python/099_Recover_Binary_Search_Tree.py | 2 | 2329 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import sys
class Solution(object):
# def recoverTree(self, root):
# """
# :type root: TreeNode
# :rtype: void Do not return anything, modify root in-place instead.
# """
# # https://discuss.leetcode.com/topic/9305/detail-explain-about-how-morris-traversal-finds-two-incorrect-pointer/2
# pre = first = second = None
# while root is not None:
# if root.left is not None:
# temp = root.left
# while temp.right is not None and temp.right != root:
# temp = temp.right
# if temp.right is not None:
# if pre is not None and pre.val > root.val:
# if first is None:
# first = pre
# second = root
# pre = root
# temp.right = None
# root = root.right
# else:
# temp.right = root
# root = root.left
# else:
# if pre is not None and pre.val > root.val:
# if first is None:
# first = pre
# second = root
# pre = root
# root = root.right
# # only two elements are swapped
# if first is not None and second is not None:
# first.val, second.val = second.val, first.val
# https://discuss.leetcode.com/topic/3988/no-fancy-algorithm-just-simple-and-powerful-in-order-traversal/2
def __init__(self):
self.first = self.second = None
self.pre = TreeNode(-sys.maxint - 1)
def recoverTree(self, root):
self.traverse(root)
self.first.val, self.second.val = self.second.val, self.first.val
def traverse(self, root):
if root is None:
return
self.traverse(root.left)
if self.pre.val >= root.val:
if self.first is None:
self.first = self.pre
if self.first is not None:
self.second = root
self.pre = root
self.traverse(root.right)
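# Illustrative example (not part of the original file; assumes the judge's
# TreeNode class shown in the comment above): the tree [1, 3, None, None, 2]
# (root 1, left child 3, 3.right = 2) has the in-order sequence 3, 2, 1; the
# traversal records first = 3 and second = 1, and recoverTree swaps their
# values, restoring the valid BST [3, 1, None, None, 2]. Roughly:
#   root = TreeNode(1); root.left = TreeNode(3); root.left.right = TreeNode(2)
#   Solution().recoverTree(root)  # root.val is now 3, root.left.val is now 1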
| mit | -2,211,204,601,014,194,200 | 33.761194 | 123 | 0.490339 | false |
attritionorg/nessrest | setup.py | 3 | 2946 | '''
Build script for the nessrest module
'''
# Copyright (c) 2014-2015, Tenable Network Security, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Tenable Network Security, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, TITLE,
# NON-INFRINGEMENT, INTEGRATION, PERFORMANCE, AND ACCURACY AND ANY IMPLIED
# WARRANTIES ARISING FROM STATUTE, COURSE OF DEALING, COURSE OF PERFORMANCE, OR
# USAGE OF TRADE, ARE DISCLAIMED. IN NO EVENT SHALL TENABLE NETWORK SECURITY,
# INC., OR ANY SUCCESSOR-IN-INTEREST, BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup
from codecs import open
from os import path
import nessrest
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
with open(path.join(here, "LICENSE"), encoding="utf-8") as f:
license = f.read()
setup(name="nessrest",
version=nessrest.__version__,
packages=["nessrest"],
include_package_data=True,
exclude_package_data={'': ['*.pyc']},
author="Scott Walsh, Ben Bergman, Matthew Everson, Matthew Woelk",
author_email="[email protected]",
url="https://github.com/tenable/nessrest",
include_dirs=["."],
license=license,
description="An interface to the Nessus 6 REST API",
long_description=long_description,
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
],
keywords="nessus nessrest ness_rest",
install_requires=['argparse >= 1.0', 'requests >= 1.4'],
scripts=['nessrest/ness_rest'])
| bsd-3-clause | -1,643,282,323,444,939,800 | 45.03125 | 80 | 0.720978 | false |
qPCR4vir/evowarepy | evoware/fileutil.py | 1 | 7953 | ## evoware/py -- python modules for Evoware scripting
## Copyright 2014 Raik Gruenberg
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""General purpose methods; mostly about file handling"""
import os.path as osp
import os
import shutil, glob, sys
import logging
import util
class UtilError( Exception ):
pass
def absfile( filename, resolveLinks=1 ):
"""
Get absolute file path::
- expand ~ to user home
- expand ../../ to absolute path
- resolve links
- add working directory to unbound files ('ab.txt'->'/home/raik/ab.txt')
@param filename: name of file
@type filename: str
@param resolveLinks: eliminate any symbolic links (default: 1)
@type resolveLinks: 1|0
@return: absolute path or filename
@rtype: string
@raise ToolsError: if a ~user part does not translate to an existing path
"""
if not filename:
return filename
r = osp.abspath( osp.expanduser( filename ) )
if '~' in r:
raise UtilError, 'Could not expand user home in %s' % filename
if resolveLinks:
r = osp.realpath( r )
r = osp.normpath(r)
return r
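# Usage sketch (illustrative; mirrors the unit test at the bottom of this
# module): a user-anchored relative path is normalised to an absolute one, e.g.
#   absfile('~/nonexistent/../subfolder/file.txt')
#   # -> osp.join(osp.expanduser('~'), 'subfolder/file.txt')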
def projectRoot():
"""
Root folder of synbio project.
@return: absolute path of the root of current project::
i.e. '/home/raik/py/synbiolib/py/'
@rtype: string
"""
## import this module
from evoware import fileutil
## get location of this module
f = absfile(fileutil.__file__)
## extract path and assume it is 'project_root/synbio'
f = osp.join( osp.split( f )[0], '..' )
return absfile( f )
def testRoot( subfolder='' ):
"""
Root folder of synbio testdata.
This method assumes that the python module is located within
synbiolib/py/.
@param subfolder: str; sub-folder within testroot
@return: str; folder containing testdata
"""
return absfile( osp.join( projectRoot(), 'evoware', 'testdata', subfolder ) )
def stripFilename( filename ):
"""
Return filename without path and without ending.
@param filename: name of file
@type filename: str
@return: base filename
@rtype: str
"""
name = osp.basename( filename ) # remove path
try:
if name.find('.') != -1:
name = name[: name.rfind('.') ] # remove ending
except:
pass ## just in case there is no ending to start with...
return name
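# Usage sketch (illustrative): both the directory part and the extension are
# dropped, e.g. stripFilename('/home/raik/data/report.txt') -> 'report', and,
# as in the unit test below, stripFilename('~/nonexistent/../subfolder/file.txt')
# -> 'file'.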
def tryRemove(f, verbose=0, tree=0, wildcard=0 ):
"""
Remove file or folder::
remove(f [,verbose=0, tree=0]), remove if possible, otherwise do nothing
@param f: file path
@type f: str
@param verbose: report failure (default 0)
@type verbose: 0|1
@param tree: remove whole folder (default 0)
@type tree: 0|1
@param wildcard: filename contains wildcards (default 0)
@type wildcard: 0|1
@return: 1 if file was removed
@rtype: 1|0
"""
try:
f = absfile( f )
if osp.isdir(f):
if tree:
shutil.rmtree( f, ignore_errors=1 )
else:
logging.error('%r is directory - not removed.' % f)
return False
else:
if wildcard:
l = glob.glob( f )
for i in l:
os.remove( i )
else:
os.remove( f )
return True
except Exception, why:
if verbose: logging.warning( 'Cannot remove %r:\n%s' % (f,
util.lastError()) )
return False
## quick and dirty command line argument parsing... could be made more elegant
def get_cmdDict(lst_cmd, dic_default):
"""
Parse commandline options into dictionary of type C{ {<option> : <value>} }
Options are recognised by a leading '-'.
Error handling should be improved.
Option C{ -x |file_name| } is interpreted as file with additional options.
The key value pairs in lst_cmd replace key value pairs in the
-x file and in dic_default.
@param lst_cmd: list with the command line options::
e.g. ['-pdb', 'in1.pdb', 'in2.pdb', '-o', 'out.dat']
@type lst_cmd: [str]
@param dic_default: dictionary with default options::
e.g. {'psf':'in.psf'}
@type dic_default: {str : str}
@return: command dictionary::
ala {'pdb':['in1.pdb', 'in2.pdb'], 'psf':'in.psf', 'o':'out.dat'}
@rtype: {<option> : <value>}
"""
dic_cmd = {} # create return dictionary
try:
for cmd in lst_cmd:
if (cmd[0] == '-'): # this entry is new option
current_option = cmd[1:] # take all but leading "-"
dic_cmd[current_option] = "" # make sure key exists even
# w/o value
counter = 0 # number of values for this option
else: # this entry is value for latest option
if counter < 1:
dic_cmd[current_option] = cmd
# in case several values follow a "-xxx" option, convert the dictionary
# entry into a list and add all elements (until the next "-") to that list
else:
if counter == 1: # there is already a value assigned
# convert to list
dic_cmd[current_option] = [dic_cmd[current_option]]
# add value to list
dic_cmd[current_option] = dic_cmd[current_option] + [cmd]
counter = counter + 1
except (KeyError, UnboundLocalError), why:
raise UtilError, "Can't resolve command line options.\n \tError:"+\
str(why)
## get extra options from external file
try:
if dic_cmd.has_key('x'):
d = file2dic( dic_cmd['x'] )
d.update( dic_cmd )
dic_cmd = d
except IOError:
raise IOError, "Error opening %s."% dic_cmd['x']
## fill in missing default values
dic_default.update( dic_cmd )
dic_cmd = dic_default
return dic_cmd
def cmdDict( defaultDic={} ):
"""
Convenience implementation of L{get_cmdDict}. Take command line options
from sys.argv[1:] and convert them into dictionary.
Example::
'-o out.dat -in 1.pdb 2.pdb 3.pdb -d' will be converted to
{'o':'out.dat', 'in': ['1.pdb', '2.pdb', '3.pdb'], 'd':'' }
Option C{ -x |file_name| } is interpreted as file with additional options.
@param defaultDic: dic with default values.
@type defaultDic: dic
@return: command dictionary
@rtype: dic
"""
return get_cmdDict( sys.argv[1:], defaultDic )
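# Usage sketch (illustrative; the script name is hypothetical). Running
#   python myscript.py -o out.dat -in 1.pdb 2.pdb 3.pdb -d
# and calling cmdDict({'psf': 'in.psf'}) would return
#   {'o': 'out.dat', 'in': ['1.pdb', '2.pdb', '3.pdb'], 'd': '', 'psf': 'in.psf'}
# i.e. repeated values are collected into a list and defaults fill in any
# option not supplied on the command line.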
######################
### Module testing ###
import testing
class Test(testing.AutoTest):
"""Test MyModule"""
TAGS = [ testing.NORMAL ]
def prepare( self ):
self.fname1 = '~/nonexistent/../subfolder/file.txt'
def test_stripFilename( self ):
"""fileutil.stripFilename test"""
r = stripFilename( self.fname1 )
self.assertEqual( r, 'file', '%r != %r' % (r, 'file') )
def test_absfilename( self ):
"""fileutil.absfilename test"""
r = absfile( self.fname1 )
self.assertEqual( r,
osp.join( osp.expanduser('~'), 'subfolder/file.txt'))
if __name__ == '__main__':
testing.localTest()
| apache-2.0 | -30,888,078,753,680,924 | 29.588462 | 81 | 0.574626 | false |
hellotomfan/v8-coroutine | deps/v8/tools/push-to-trunk/auto_push.py | 36 | 5466 | #!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
import os
import re
import sys
import urllib
from common_includes import *
import push_to_trunk
PUSH_MESSAGE_RE = re.compile(r".* \(based on ([a-fA-F0-9]+)\)$")
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
self.InitialEnvironmentChecks(self.default_cwd)
self.CommonPrepare()
class CheckAutoPushSettings(Step):
MESSAGE = "Checking settings file."
def RunStep(self):
settings_file = os.path.realpath(self.Config("SETTINGS_LOCATION"))
if os.path.exists(settings_file):
settings_dict = json.loads(FileToText(settings_file))
if settings_dict.get("enable_auto_roll") is False:
self.Die("Push to trunk disabled by auto-roll settings file: %s"
% settings_file)
class CheckTreeStatus(Step):
MESSAGE = "Checking v8 tree status message."
def RunStep(self):
status_url = "https://v8-status.appspot.com/current?format=json"
status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
self["tree_message"] = json.loads(status_json)["message"]
if re.search(r"nopush|no push", self["tree_message"], flags=re.I):
self.Die("Push to trunk disabled by tree state: %s"
% self["tree_message"])
class FetchLKGR(Step):
MESSAGE = "Fetching V8 LKGR."
def RunStep(self):
lkgr_url = "https://v8-status.appspot.com/lkgr"
# Retry several times since app engine might have issues.
self["lkgr"] = self.ReadURL(lkgr_url, wait_plan=[5, 20, 300, 300])
class CheckLastPush(Step):
MESSAGE = "Checking last V8 push to trunk."
def RunStep(self):
last_push = self.FindLastTrunkPush()
# Retrieve the bleeding edge revision of the last push from the text in
# the push commit message.
last_push_title = self.GitLog(n=1, format="%s", git_hash=last_push)
last_push_be = PUSH_MESSAGE_RE.match(last_push_title).group(1)
if not last_push_be: # pragma: no cover
self.Die("Could not retrieve bleeding edge revision for trunk push %s"
% last_push)
if self["lkgr"] == last_push_be:
print "Already pushed current lkgr %s" % last_push_be
return True
class PushToCandidates(Step):
MESSAGE = "Pushing to candidates if specified."
def RunStep(self):
print "Pushing lkgr %s to candidates." % self["lkgr"]
args = [
"--author", self._options.author,
"--reviewer", self._options.reviewer,
"--revision", self["lkgr"],
"--force",
]
if self._options.svn:
args.extend(["--svn", self._options.svn])
if self._options.svn_config:
args.extend(["--svn-config", self._options.svn_config])
if self._options.vc_interface:
args.extend(["--vc-interface", self._options.vc_interface])
if self._options.work_dir:
args.extend(["--work-dir", self._options.work_dir])
# TODO(machenbach): Update the script before calling it.
if self._options.push:
self._side_effect_handler.Call(push_to_trunk.PushToTrunk().Run, args)
class AutoPush(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-p", "--push",
help="Push to trunk. Dry run if unspecified.",
default=False, action="store_true")
def _ProcessOptions(self, options):
if not options.author or not options.reviewer: # pragma: no cover
print "You need to specify author and reviewer."
return False
options.requires_editor = False
return True
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-auto-push-tempfile",
"SETTINGS_LOCATION": "~/.auto-roll",
}
def _Steps(self):
return [
Preparation,
CheckAutoPushSettings,
CheckTreeStatus,
FetchLKGR,
CheckLastPush,
PushToCandidates,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(AutoPush().Run())
| gpl-2.0 | -6,222,276,143,869,037,000 | 33.1625 | 76 | 0.682217 | false |
shifter/grr | gui/api_plugins/artifact.py | 1 | 4572 | #!/usr/bin/env python
"""API renderers for accessing artifacts."""
from grr.gui import api_call_renderer_base
from grr.gui import api_value_renderers
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_registry
from grr.lib import parsers
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import api_pb2
class ApiArtifactsRendererArgs(rdf_structs.RDFProtoStruct):
protobuf = api_pb2.ApiArtifactsRendererArgs
class ApiArtifactsRenderer(api_call_renderer_base.ApiCallRenderer):
"""Renders available artifacts definitions."""
args_type = ApiArtifactsRendererArgs
def RenderArtifacts(self, artifacts, custom_artifacts=None):
if custom_artifacts is None:
custom_artifacts = set()
result = []
for artifact_val in artifacts:
descriptor = artifact_registry.ArtifactDescriptor(
artifact=artifact_val,
artifact_source=artifact_val.ToPrettyJson(extended=True),
dependencies=sorted(artifact_val.GetArtifactDependencies()),
path_dependencies=sorted(artifact_val.GetArtifactPathDependencies()),
is_custom=artifact_val.name in custom_artifacts)
for processor in parsers.Parser.GetClassesByArtifact(artifact_val.name):
descriptor.processors.append(
artifact_registry.ArtifactProcessorDescriptor(
name=processor.__name__,
output_types=processor.output_types,
description=processor.GetDescription()))
result.append(api_value_renderers.RenderValue(descriptor))
return result
def Render(self, args, token=None):
"""Get available artifact information for rendering."""
# get custom artifacts from data store
artifact_urn = rdfvalue.RDFURN("aff4:/artifact_store")
try:
collection = aff4.FACTORY.Open(artifact_urn,
aff4_type="RDFValueCollection",
token=token)
except IOError:
collection = {}
custom_artifacts = set()
for artifact_val in collection:
custom_artifacts.add(artifact_val.name)
# Get all artifacts that aren't Bootstrap and aren't the base class.
artifacts = sorted(artifact_registry.REGISTRY.GetArtifacts(
reload_datastore_artifacts=True))
total_count = len(artifacts)
if args.count:
artifacts = artifacts[args.offset:args.offset + args.count]
else:
artifacts = artifacts[args.offset:]
rendered_artifacts = self.RenderArtifacts(artifacts,
custom_artifacts=custom_artifacts)
return dict(total_count=total_count,
offset=args.offset,
count=len(rendered_artifacts),
items=rendered_artifacts)
class ApiArtifactsUploadRendererArgs(rdf_structs.RDFProtoStruct):
protobuf = api_pb2.ApiArtifactsUploadRendererArgs
class ApiArtifactsUploadRenderer(api_call_renderer_base.ApiCallRenderer):
"""Handles artifact upload."""
args_type = ApiArtifactsUploadRendererArgs
def Render(self, args, token=None):
urn = artifact.UploadArtifactYamlFile(args.artifact, token=token)
return dict(status="OK", urn=utils.SmartStr(urn))
class ApiArtifactsDeleteRendererArgs(rdf_structs.RDFProtoStruct):
protobuf = api_pb2.ApiArtifactsDeleteRendererArgs
class ApiArtifactsDeleteRenderer(api_call_renderer_base.ApiCallRenderer):
"""Handles artifact deletion."""
args_type = ApiArtifactsDeleteRendererArgs
def Render(self, args, token=None):
with aff4.FACTORY.Create("aff4:/artifact_store", mode="r",
aff4_type="RDFValueCollection",
token=token) as store:
filtered_artifacts = [artifact_value for artifact_value in store
if artifact_value.name not in args.names]
# TODO(user): this is ugly and error- and race-condition- prone.
# We need to store artifacts not in an RDFValueCollection, which is an
# append-only object, but in some different way that allows easy
# deletion. Possible option - just store each artifact in a separate object
# in the same folder.
aff4.FACTORY.Delete("aff4:/artifact_store", token=token)
with aff4.FACTORY.Create("aff4:/artifact_store", mode="w",
aff4_type="RDFValueCollection",
token=token) as store:
for artifact_value in filtered_artifacts:
store.Add(artifact_value)
return dict(status="OK")
| apache-2.0 | -5,476,630,757,292,344,000 | 34.44186 | 80 | 0.687008 | false |
evildmp/Arkestra | vacancies_and_studentships/views.py | 2 | 5172 | import datetime
from django.utils.translation import ugettext as _
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import Http404
from arkestra_utilities.views import ArkestraGenericView
from contacts_and_people.models import Entity
from models import Studentship, Vacancy
from lister import VacanciesAndStudentshipsCurrentLister, \
VacanciesArchiveLister, VacanciesForthcomingLister, \
StudentshipsArchiveLister, StudentshipsForthcomingLister
from arkestra_utilities.settings import MULTIPLE_ENTITY_MODE
class VacanciesAndStudentshipsView(ArkestraGenericView, ):
auto_page_attribute = "auto_vacancies_page"
def get(self, request, *args, **kwargs):
self.get_entity()
self.lister = VacanciesAndStudentshipsCurrentLister(
entity=self.entity,
request=self.request
)
self.generic_lister_template = "arkestra/generic_lister.html"
self.meta = {"description": "Recent vacancies and forthcoming studentships",}
self.title = unicode(self.entity) + u" vacancies & studentships"
if MULTIPLE_ENTITY_MODE:
self.pagetitle = unicode(self.entity) + u" vacancies & studentships"
else:
self.pagetitle = "Vacancies & studentships"
return self.response(request)
class VacanciesCurrentView(ArkestraGenericView):
auto_page_attribute = "auto_vacancies_page"
def get(self, request, *args, **kwargs):
self.get_entity()
self.lister = VacanciesForthcomingLister(
entity=self.entity,
request=self.request
)
self.generic_lister_template = "arkestra/generic_filter_list.html"
self.meta = {"description": "Searchable list of forthcoming studentships",}
self.title = u"Forthcoming studentships for %s" % unicode(self.entity)
self.pagetitle = u"Forthcoming studentships for %s" % unicode(self.entity)
return self.response(request)
class VacanciesArchiveView(ArkestraGenericView):
auto_page_attribute = "auto_vacancies_page"
def get(self, request, *args, **kwargs):
self.get_entity()
self.lister = VacanciesArchiveLister(
entity=self.entity,
request=self.request
)
self.generic_lister_template = "arkestra/generic_filter_list.html"
self.meta = {"description": "Searchable archive of vacancies items",}
self.title = u"Vacancies archive for %s" % unicode(self.entity)
self.pagetitle = u"Vacancies archive for %s" % unicode(self.entity)
return self.response(request)
class StudentshipsArchiveView(ArkestraGenericView):
auto_page_attribute = "auto_vacancies_page"
def get(self, request, *args, **kwargs):
self.get_entity()
self.lister = StudentshipsArchiveLister(
entity=self.entity,
request=self.request
)
self.generic_lister_template = "arkestra/generic_filter_list.html"
self.meta = {"description": "Searchable archive of studentships",}
self.title = u"Studentships archive for %s" % unicode(self.entity)
self.pagetitle = u"Studentships archive for %s" % unicode(self.entity)
return self.response(request)
class StudentshipsForthcomingView(ArkestraGenericView):
auto_page_attribute = "auto_vacancies_page"
def get(self, request, *args, **kwargs):
self.get_entity()
self.lister = StudentshipsForthcomingLister(
entity=self.entity,
request=self.request
)
self.generic_lister_template = "arkestra/generic_filter_list.html"
self.meta = {"description": "Searchable list of forthcoming studentships",}
self.title = u"Forthcoming studentships for %s" % unicode(self.entity)
self.pagetitle = u"Forthcoming studentships for %s" % unicode(self.entity)
return self.response(request)
def vacancy(request, slug):
"""
Responsible for publishing vacancies
"""
if request.user.is_staff:
vacancy = get_object_or_404(Vacancy, slug=slug)
else:
vacancy = get_object_or_404(Vacancy, slug=slug, published=True, date__gte=datetime.datetime.now())
return render_to_response(
"vacancies_and_studentships/vacancy.html",
{
"vacancy":vacancy,
"entity": vacancy.get_hosted_by,
"meta": {"description": vacancy.summary,}
},
RequestContext(request),
)
def studentship(request, slug):
"""
Responsible for publishing an studentship
"""
if request.user.is_staff:
studentship = get_object_or_404(Studentship, slug=slug)
else:
studentship = get_object_or_404(Studentship, slug=slug, published=True, date__gte=datetime.datetime.now())
return render_to_response(
"vacancies_and_studentships/studentship.html",
{"studentship": studentship,
"entity": studentship.get_hosted_by,
"meta": {"description": studentship.summary,},
},
RequestContext(request),
)
| bsd-2-clause | 6,610,627,639,235,087,000 | 33.251656 | 114 | 0.66512 | false |
jolyonb/edx-platform | common/djangoapps/terrain/stubs/comments.py | 2 | 5708 | """
Stub implementation of cs_comments_service for acceptance tests
"""
from __future__ import absolute_import
import re
import six.moves.urllib.parse # pylint: disable=import-error
from .http import StubHttpRequestHandler, StubHttpService
class StubCommentsServiceHandler(StubHttpRequestHandler):
@property
def _params(self):
return six.moves.urllib.parse.parse_qs(six.moves.urllib.parse.urlparse(self.path).query)
def do_GET(self):
pattern_handlers = {
"/api/v1/users/(?P<user_id>\\d+)/active_threads$": self.do_user_profile,
"/api/v1/users/(?P<user_id>\\d+)$": self.do_user,
"/api/v1/search/threads$": self.do_search_threads,
"/api/v1/threads$": self.do_threads,
"/api/v1/threads/(?P<thread_id>\\w+)$": self.do_thread,
"/api/v1/comments/(?P<comment_id>\\w+)$": self.do_comment,
"/api/v1/(?P<commentable_id>\\w+)/threads$": self.do_commentable,
}
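# Illustrative note (not in the original stub): match_pattern() below tries
# each regex against the request path and dispatches on the first hit, e.g.
# GET /api/v1/users/7/active_threads matches the first entry and calls
# self.do_user_profile(user_id='7'), the named group arriving via
# match.groupdict() as a keyword argument.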
if self.match_pattern(pattern_handlers):
return
self.send_response(404, content="404 Not Found")
def match_pattern(self, pattern_handlers):
path = six.moves.urllib.parse.urlparse(self.path).path
for pattern in pattern_handlers:
match = re.match(pattern, path)
if match:
pattern_handlers[pattern](**match.groupdict())
return True
return None
def do_PUT(self):
if self.path.startswith('/set_config'):
return StubHttpRequestHandler.do_PUT(self)
pattern_handlers = {
"/api/v1/users/(?P<user_id>\\d+)$": self.do_put_user,
}
if self.match_pattern(pattern_handlers):
return
self.send_response(204, "")
def do_put_user(self, user_id):
self.server.config['default_sort_key'] = self.post_dict.get("default_sort_key", "date")
self.send_json_response({'username': self.post_dict.get("username"), 'external_id': self.post_dict.get("external_id")})
def do_DELETE(self):
pattern_handlers = {
"/api/v1/comments/(?P<comment_id>\\w+)$": self.do_delete_comment
}
if self.match_pattern(pattern_handlers):
return
self.send_json_response({})
def do_user(self, user_id):
response = {
"id": user_id,
"default_sort_key": self.server.config.get("default_sort_key", "date"),
"upvoted_ids": [],
"downvoted_ids": [],
"subscribed_thread_ids": [],
}
if 'course_id' in self._params:
response.update({
"threads_count": 1,
"comments_count": 2
})
self.send_json_response(response)
def do_user_profile(self, user_id):
if 'active_threads' in self.server.config:
user_threads = self.server.config['active_threads'][:]
params = self._params
page = int(params.get("page", ["1"])[0])
per_page = int(params.get("per_page", ["20"])[0])
num_pages = max(len(user_threads) - 1, 1) / per_page + 1
user_threads = user_threads[(page - 1) * per_page:page * per_page]
self.send_json_response({
"collection": user_threads,
"page": page,
"num_pages": num_pages
})
else:
self.send_response(404, content="404 Not Found")
def do_thread(self, thread_id):
if thread_id in self.server.config.get('threads', {}):
thread = self.server.config['threads'][thread_id].copy()
params = six.moves.urllib.parse.parse_qs(six.moves.urllib.parse.urlparse(self.path).query)
if "recursive" in params and params["recursive"][0] == "True":
thread.setdefault('children', [])
resp_total = thread.setdefault('resp_total', len(thread['children']))
resp_skip = int(params.get("resp_skip", ["0"])[0])
resp_limit = int(params.get("resp_limit", ["10000"])[0])
thread['children'] = thread['children'][resp_skip:(resp_skip + resp_limit)]
self.send_json_response(thread)
else:
self.send_response(404, content="404 Not Found")
def do_threads(self):
threads = self.server.config.get('threads', {})
threads_data = list(threads.values())
self.send_json_response({"collection": threads_data, "page": 1, "num_pages": 1})
def do_search_threads(self):
self.send_json_response(self.server.config.get('search_result', {}))
def do_comment(self, comment_id):
# django_comment_client calls GET comment before doing a DELETE, so that's what this is here to support.
if comment_id in self.server.config.get('comments', {}):
comment = self.server.config['comments'][comment_id]
self.send_json_response(comment)
def do_delete_comment(self, comment_id):
"""Handle comment deletion. Returns a JSON representation of the
deleted comment."""
if comment_id in self.server.config.get('comments', {}):
comment = self.server.config['comments'][comment_id]
self.send_json_response(comment)
def do_commentable(self, commentable_id):
self.send_json_response({
"collection": [
thread
for thread in self.server.config.get('threads', {}).values()
if thread.get('commentable_id') == commentable_id
],
"page": 1,
"num_pages": 1,
})
class StubCommentsService(StubHttpService):
HANDLER_CLASS = StubCommentsServiceHandler
| agpl-3.0 | 2,863,648,932,884,572,700 | 38.365517 | 127 | 0.574632 | false |
caseyrollins/osf.io | osf_tests/test_files.py | 8 | 3323 | import pytest
from django.contrib.contenttypes.models import ContentType
from addons.osfstorage.models import NodeSettings
from addons.osfstorage import settings as osfstorage_settings
from osf.models import BaseFileNode, Folder, File
from osf_tests.factories import (
UserFactory,
ProjectFactory,
RegionFactory
)
pytestmark = pytest.mark.django_db
@pytest.fixture()
def user():
return UserFactory()
@pytest.fixture()
def project(user):
return ProjectFactory(creator=user)
@pytest.fixture()
def create_test_file(fake):
# TODO: Copied from api_tests/utils.py. DRY this up.
def _create_test_file(target, user=None, filename=None, create_guid=True):
filename = filename or fake.file_name()
user = user or target.creator
osfstorage = target.get_addon('osfstorage')
root_node = osfstorage.get_root()
test_file = root_node.append_file(filename)
if create_guid:
test_file.get_guid(create=True)
test_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
return test_file
return _create_test_file
def test_active_manager_does_not_return_trashed_file_nodes(project, create_test_file):
create_test_file(target=project)
deleted_file = create_test_file(target=project)
deleted_file.delete(user=project.creator, save=True)
content_type_for_query = ContentType.objects.get_for_model(project)
# root folder + file + deleted_file = 3 BaseFileNodes
assert BaseFileNode.objects.filter(target_object_id=project.id, target_content_type=content_type_for_query).count() == 3
# root folder + file = 2 BaseFileNodes
assert BaseFileNode.active.filter(target_object_id=project.id, target_content_type=content_type_for_query).count() == 2
def test_folder_update_calls_folder_update_method(project, create_test_file):
file = create_test_file(target=project)
parent_folder = file.parent
# the folder update method should be the Folder.update method
assert parent_folder.__class__.update == Folder.update
# the folder update method should not be the File update method
assert parent_folder.__class__.update != File.update
# the file update method should be the File update method
assert file.__class__.update == File.update
def test_file_update_respects_region(project, user, create_test_file):
test_file = create_test_file(target=project)
version = test_file.versions.first()
original_region = project.osfstorage_region
assert version.region == original_region
# update the region on the project, ensure the new version has the new region
node_settings = NodeSettings.objects.get(owner=project.id)
new_region = RegionFactory()
node_settings.region = new_region
node_settings.save()
test_file.save()
new_version = test_file.create_version(
user, {
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
'object': '07d80a',
}, {
'sha256': 'existing',
}
)
assert new_region != original_region
assert new_version.region == new_region
| apache-2.0 | 4,214,935,528,451,518,500 | 34.351064 | 124 | 0.682516 | false |
IntelLabs/hpat | sdc/distributed_api.py | 1 | 23019 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
.. module:: distributed_api.py
Distributed (MPI-style) communication primitives used by the SDC distributed-execution pipeline: point-to-point send/recv, bcast, gather/gatherv, alltoall and reductions.
"""
import time
from enum import Enum
import llvmlite.binding as ll
import operator
import numpy as np
import numba
from numba import types
from numba.extending import models, overload, register_model
from numba.typing import signature
from numba.typing.templates import AbstractTemplate, infer_global
import sdc
from sdc.str_arr_ext import (string_array_type, num_total_chars, StringArray,
pre_alloc_string_array, get_offset_ptr,
get_data_ptr, convert_len_arr_to_offset)
from sdc.utilities.utils import (debug_prints, empty_like_type, _numba_to_c_type_map, unliteral_all)
from . import transport_seq as transport
ll.add_symbol('c_alltoall', transport.c_alltoall)
ll.add_symbol('c_gather_scalar', transport.c_gather_scalar)
ll.add_symbol('c_gatherv', transport.c_gatherv)
ll.add_symbol('c_bcast', transport.c_bcast)
ll.add_symbol('c_recv', transport.hpat_dist_recv)
ll.add_symbol('c_send', transport.hpat_dist_send)
# get size dynamically from C code (mpich 3.2 is 4 bytes but openmpi 1.6 is 8)
mpi_req_numba_type = getattr(types, "int" + str(8 * transport.mpi_req_num_bytes))
MPI_ROOT = 0
class Reduce_Type(Enum):
Sum = 0
Prod = 1
Min = 2
Max = 3
Argmin = 4
Argmax = 5
Or = 6
def get_type_enum(arr):
return np.int32(-1)
@overload(get_type_enum)
def get_type_enum_overload(arr):
dtype = arr.dtype
if isinstance(dtype, sdc.hiframes.pd_categorical_ext.PDCategoricalDtype):
dtype = sdc.hiframes.pd_categorical_ext.get_categories_int_type(dtype)
typ_val = _numba_to_c_type_map[dtype]
return lambda arr: np.int32(typ_val)
INT_MAX = np.iinfo(np.int32).max
_send = types.ExternalFunction("c_send", types.void(types.voidptr, types.int32, types.int32, types.int32, types.int32))
@numba.njit
def send(val, rank, tag):
# dummy array for val
send_arr = np.full(1, val)
type_enum = get_type_enum(send_arr)
_send(send_arr.ctypes, 1, type_enum, rank, tag)
_recv = types.ExternalFunction("c_recv", types.void(types.voidptr, types.int32, types.int32, types.int32, types.int32))
@numba.njit
def recv(dtype, rank, tag):
# dummy array for val
recv_arr = np.empty(1, dtype)
type_enum = get_type_enum(recv_arr)
_recv(recv_arr.ctypes, 1, type_enum, rank, tag)
return recv_arr[0]
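# Usage sketch (illustrative; assumes an MPI run with at least two ranks and
# a matching tag on both sides):
#   if get_rank() == 0:
#       send(np.int64(42), 1, 0)    # ship a scalar to rank 1 with tag 0
#   else:
#       val = recv(np.int64, 0, 0)  # receive it on rank 1 from rank 0
# Both helpers wrap the scalar in a one-element ndarray so the C transport
# layer only ever sees buffers.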
_alltoall = types.ExternalFunction("c_alltoall", types.void(types.voidptr, types.voidptr, types.int32, types.int32))
@numba.njit
def alltoall(send_arr, recv_arr, count):
# TODO: handle int64 counts
assert count < INT_MAX
type_enum = get_type_enum(send_arr)
_alltoall(send_arr.ctypes, recv_arr.ctypes, np.int32(count), type_enum)
def gather_scalar(data): # pragma: no cover
return np.ones(1)
c_gather_scalar = types.ExternalFunction("c_gather_scalar", types.void(types.voidptr, types.voidptr, types.int32))
# TODO: test
@overload(gather_scalar)
def gather_scalar_overload(val):
assert isinstance(val, (types.Integer, types.Float))
# TODO: other types like boolean
typ_val = _numba_to_c_type_map[val]
func_text = (
"def gather_scalar_impl(val):\n"
" n_pes = sdc.distributed_api.get_size()\n"
" rank = sdc.distributed_api.get_rank()\n"
" send = np.full(1, val, np.{})\n"
" res_size = n_pes if rank == {} else 0\n"
" res = np.empty(res_size, np.{})\n"
" c_gather_scalar(send.ctypes, res.ctypes, np.int32({}))\n"
" return res\n").format(val, MPI_ROOT, val, typ_val)
loc_vars = {}
exec(func_text, {'sdc': sdc, 'np': np, 'c_gather_scalar': c_gather_scalar}, loc_vars)
gather_impl = loc_vars['gather_scalar_impl']
return gather_impl
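# For reference (illustrative): for a types.int32 value the exec'd template
# above expands to roughly
#   def gather_scalar_impl(val):
#       n_pes = sdc.distributed_api.get_size()
#       rank = sdc.distributed_api.get_rank()
#       send = np.full(1, val, np.int32)
#       res_size = n_pes if rank == 0 else 0
#       res = np.empty(res_size, np.int32)
#       c_gather_scalar(send.ctypes, res.ctypes, np.int32(<int32 type enum>))
#       return res
# so only the MPI root (rank 0) gets a non-empty result array.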
# TODO: test
def gatherv(data): # pragma: no cover
return data
# sendbuf, sendcount, recvbuf, recv_counts, displs, dtype
c_gatherv = types.ExternalFunction(
"c_gatherv",
types.void(
types.voidptr,
types.int32,
types.voidptr,
types.voidptr,
types.voidptr,
types.int32))
@overload(gatherv)
def gatherv_overload(data):
if isinstance(data, types.Array):
# TODO: other types like boolean
typ_val = _numba_to_c_type_map[data.dtype]
def gatherv_impl(data):
rank = sdc.distributed_api.get_rank()
n_loc = len(data)
recv_counts = gather_scalar(np.int32(n_loc))
n_total = recv_counts.sum()
all_data = empty_like_type(n_total, data)
# displacements
displs = np.empty(1, np.int32)
if rank == MPI_ROOT:
displs = sdc.hiframes.join.calc_disp(recv_counts)
c_gatherv(
data.ctypes,
np.int32(n_loc),
all_data.ctypes,
recv_counts.ctypes,
displs.ctypes,
np.int32(typ_val))
return all_data
return gatherv_impl
if data == string_array_type:
int32_typ_enum = np.int32(_numba_to_c_type_map[types.int32])
char_typ_enum = np.int32(_numba_to_c_type_map[types.uint8])
def gatherv_str_arr_impl(data):
rank = sdc.distributed_api.get_rank()
n_loc = len(data)
n_all_chars = num_total_chars(data)
# allocate send lens arrays
send_arr_lens = np.empty(n_loc, np.uint32) # XXX offset type is uint32
send_data_ptr = get_data_ptr(data)
for i in range(n_loc):
_str = data[i]
send_arr_lens[i] = len(_str)
recv_counts = gather_scalar(np.int32(n_loc))
recv_counts_char = gather_scalar(np.int32(n_all_chars))
n_total = recv_counts.sum()
n_total_char = recv_counts_char.sum()
# displacements
all_data = StringArray(['']) # dummy arrays on non-root PEs
displs = np.empty(0, np.int32)
displs_char = np.empty(0, np.int32)
if rank == MPI_ROOT:
all_data = pre_alloc_string_array(n_total, n_total_char)
displs = sdc.hiframes.join.calc_disp(recv_counts)
displs_char = sdc.hiframes.join.calc_disp(recv_counts_char)
offset_ptr = get_offset_ptr(all_data)
data_ptr = get_data_ptr(all_data)
c_gatherv(
send_arr_lens.ctypes,
np.int32(n_loc),
offset_ptr,
recv_counts.ctypes,
displs.ctypes,
int32_typ_enum)
c_gatherv(
send_data_ptr,
np.int32(n_all_chars),
data_ptr,
recv_counts_char.ctypes,
displs_char.ctypes,
char_typ_enum)
convert_len_arr_to_offset(offset_ptr, n_total)
return all_data
return gatherv_str_arr_impl
# TODO: test
# TODO: large BCast
def bcast(data): # pragma: no cover
return
@overload(bcast)
def bcast_overload(data):
if isinstance(data, types.Array):
def bcast_impl(data):
typ_enum = get_type_enum(data)
count = len(data)
assert count < INT_MAX
c_bcast(data.ctypes, np.int32(count), typ_enum)
return
return bcast_impl
if data == string_array_type:
int32_typ_enum = np.int32(_numba_to_c_type_map[types.int32])
char_typ_enum = np.int32(_numba_to_c_type_map[types.uint8])
def bcast_str_impl(data):
rank = sdc.distributed_api.get_rank()
n_loc = len(data)
n_all_chars = num_total_chars(data)
assert n_loc < INT_MAX
assert n_all_chars < INT_MAX
offset_ptr = get_offset_ptr(data)
data_ptr = get_data_ptr(data)
if rank == MPI_ROOT:
send_arr_lens = np.empty(n_loc, np.uint32) # XXX offset type is uint32
for i in range(n_loc):
_str = data[i]
send_arr_lens[i] = len(_str)
c_bcast(send_arr_lens.ctypes, np.int32(n_loc), int32_typ_enum)
else:
c_bcast(offset_ptr, np.int32(n_loc), int32_typ_enum)
c_bcast(data_ptr, np.int32(n_all_chars), char_typ_enum)
if rank != MPI_ROOT:
convert_len_arr_to_offset(offset_ptr, n_loc)
return bcast_str_impl
# sendbuf, sendcount, dtype
c_bcast = types.ExternalFunction("c_bcast", types.void(types.voidptr, types.int32, types.int32))
def bcast_scalar(val): # pragma: no cover
return val
# TODO: test
@overload(bcast_scalar)
def bcast_scalar_overload(val):
assert isinstance(val, (types.Integer, types.Float))
# TODO: other types like boolean
typ_val = _numba_to_c_type_map[val]
# TODO: fix np.full and refactor
func_text = (
"def bcast_scalar_impl(val):\n"
" send = np.full(1, val, np.{})\n"
" c_bcast(send.ctypes, np.int32(1), np.int32({}))\n"
" return send[0]\n").format(val, typ_val)
loc_vars = {}
exec(func_text, {'sdc': sdc, 'np': np, 'c_bcast': c_bcast}, loc_vars)
bcast_scalar_impl = loc_vars['bcast_scalar_impl']
return bcast_scalar_impl
# if arr is string array, pre-allocate on non-root the same size as root
def prealloc_str_for_bcast(arr):
return arr
@overload(prealloc_str_for_bcast)
def prealloc_str_for_bcast_overload(arr):
if arr == string_array_type:
def prealloc_impl(arr):
rank = sdc.distributed_api.get_rank()
n_loc = bcast_scalar(len(arr))
n_all_char = bcast_scalar(np.int64(num_total_chars(arr)))
if rank != MPI_ROOT:
arr = pre_alloc_string_array(n_loc, n_all_char)
return arr
return prealloc_impl
return lambda arr: arr
# assuming start and step are None
def const_slice_getitem(arr, slice_index, start, count):
return arr[slice_index]
@overload(const_slice_getitem)
def const_slice_getitem_overload(arr, slice_index, start, count):
'''Provides parallel implementation of getting a const slice from arrays of different types
Arguments:
arr -- part of the input array processed by this processor
slice_index -- start and stop of the slice in the input array (same on all ranks)
start -- position of first arr element in the input array
count -- length of the part of the array processed by this processor
Return value:
Function providing an implementation based on arr type. The function should implement
the logic of fetching a const slice from an array distributed over multiple processes.
'''
# TODO: should this also handle slices not starting from zero?
if arr == string_array_type:
reduce_op = Reduce_Type.Sum.value
def getitem_str_impl(arr, slice_index, start, count):
rank = sdc.distributed_api.get_rank()
k = slice_index.stop
# get total characters for allocation
n_chars = np.uint64(0)
if k > start:
# if slice end is beyond the start of this subset we have to send our elements
my_end = min(count, k - start)
my_arr = arr[:my_end]
else:
my_arr = arr[:0]
# get the total number of chars in our array, then gather all arrays into one
# and compute total number of chars in all arrays
n_chars = num_total_chars(my_arr)
my_arr = sdc.distributed_api.gatherv(my_arr)
n_chars = sdc.distributed_api.dist_reduce(n_chars, np.int32(reduce_op))
if rank != 0:
out_arr = pre_alloc_string_array(k, n_chars)
else:
out_arr = my_arr
# actual communication
sdc.distributed_api.bcast(out_arr)
return out_arr
return getitem_str_impl
def getitem_impl(arr, slice_index, start, count):
rank = sdc.distributed_api.get_rank()
k = slice_index.stop
out_arr = np.empty(k, arr.dtype)
if k > start:
# if slice end is beyond the start of this subset we have to send our elements
my_end = min(count, k - start)
my_arr = arr[:my_end]
else:
my_arr = arr[:0]
# gather all subsets from all processors
my_arr = sdc.distributed_api.gatherv(my_arr)
if rank == 0:
out_arr = my_arr
# actual communication
sdc.distributed_api.bcast(out_arr)
return out_arr
return getitem_impl
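# Worked example (illustrative): with two ranks each holding 6 local elements
# (start=0 on rank 0, start=6 on rank 1, count=6) and slice_index = slice(None, 8),
# k is 8, so rank 0 contributes its first 6 elements and rank 1 its first 2;
# gatherv() assembles the 8-element prefix on the root and bcast() then copies
# it to every rank.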
@numba.njit
def local_len(A):
return len(A)
# send_data, recv_data, send_counts, recv_counts, send_disp, recv_disp, typ_enum
c_alltoallv = types.ExternalFunction(
"c_alltoallv",
types.void(
types.voidptr,
types.voidptr,
types.voidptr,
types.voidptr,
types.voidptr,
types.voidptr,
types.int32))
# TODO: test
# TODO: big alltoallv
@numba.njit
def alltoallv(send_data, out_data, send_counts, recv_counts, send_disp, recv_disp): # pragma: no cover
typ_enum = get_type_enum(send_data)
typ_enum_o = get_type_enum(out_data)
assert typ_enum == typ_enum_o
c_alltoallv(
send_data.ctypes,
out_data.ctypes,
send_counts.ctypes,
recv_counts.ctypes,
send_disp.ctypes,
recv_disp.ctypes,
typ_enum)
return
def alltoallv_tup(send_data, out_data, send_counts, recv_counts, send_disp, recv_disp): # pragma: no cover
return
@overload(alltoallv_tup)
def alltoallv_tup_overload(send_data, out_data, send_counts, recv_counts, send_disp, recv_disp):
count = send_data.count
assert out_data.count == count
func_text = "def f(send_data, out_data, send_counts, recv_counts, send_disp, recv_disp):\n"
for i in range(count):
func_text += " alltoallv(send_data[{}], out_data[{}],\n".format(i, i)
func_text += " send_counts, recv_counts, send_disp, recv_disp)\n"
func_text += " return\n"
loc_vars = {}
exec(func_text, {'alltoallv': alltoallv}, loc_vars)
a2a_impl = loc_vars['f']
return a2a_impl
def get_rank(): # pragma: no cover
"""dummy function for C mpi get_rank"""
return 0
def barrier(): # pragma: no cover
return 0
def get_size(): # pragma: no cover
"""dummy function for C mpi get_size"""
return 0
def get_start(total_size, pes, rank): # pragma: no cover
"""get end point of range for parfor division"""
return 0
def get_end(total_size, pes, rank): # pragma: no cover
"""get end point of range for parfor division"""
return 0
def get_node_portion(total_size, pes, rank): # pragma: no cover
"""get portion of size for alloc division"""
return 0
def dist_reduce(value, op): # pragma: no cover
"""dummy to implement simple reductions"""
return value
def dist_arr_reduce(arr): # pragma: no cover
"""dummy to implement array reductions"""
return -1
def dist_cumsum(arr): # pragma: no cover
"""dummy to implement cumsum"""
return arr
def dist_cumprod(arr): # pragma: no cover
"""dummy to implement cumprod"""
return arr
def dist_exscan(value): # pragma: no cover
"""dummy to implement simple exscan"""
return value
def dist_setitem(arr, index, val): # pragma: no cover
return 0
def allgather(arr, val): # pragma: no cover
arr[0] = val
def dist_time(): # pragma: no cover
return time.time()
def dist_return(A): # pragma: no cover
return A
def threaded_return(A): # pragma: no cover
return A
def rebalance_array(A):
return A
def rebalance_array_parallel(A):
return A
@overload(rebalance_array)
def dist_return_overload(A):
return dist_return
# TODO: move other funcs to old API?
@infer_global(threaded_return)
@infer_global(dist_return)
class ThreadedRetTyper(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 1 # array
return signature(args[0], *args)
@numba.njit
def parallel_print(s):
print(s)
def irecv(): # pragma: no cover
return 0
def isend(): # pragma: no cover
return 0
def wait(): # pragma: no cover
return 0
def waitall(): # pragma: no cover
return 0
@infer_global(allgather)
class DistAllgather(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 2 # array and val
return signature(types.none, *unliteral_all(args))
@infer_global(rebalance_array_parallel)
class DistRebalanceParallel(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 2 # array and count
return signature(args[0], *unliteral_all(args))
@infer_global(get_rank)
class DistRank(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 0
return signature(types.int32, *unliteral_all(args))
@infer_global(get_size)
class DistSize(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 0
return signature(types.int32, *unliteral_all(args))
@infer_global(get_start)
class DistStart(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 3
return signature(types.int64, *unliteral_all(args))
@infer_global(get_end)
class DistEnd(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 3
return signature(types.int64, *unliteral_all(args))
@infer_global(get_node_portion)
class DistPortion(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 3
return signature(types.int64, *unliteral_all(args))
@infer_global(dist_reduce)
class DistReduce(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 2 # value and reduce_op
return signature(args[0], *unliteral_all(args))
@infer_global(dist_exscan)
class DistExscan(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 1
return signature(args[0], *unliteral_all(args))
@infer_global(dist_arr_reduce)
class DistArrReduce(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 2 # value and reduce_op
return signature(types.int32, *unliteral_all(args))
@infer_global(time.time)
class DistTime(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 0
return signature(types.float64, *unliteral_all(args))
@infer_global(dist_time)
class DistDistTime(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 0
return signature(types.float64, *unliteral_all(args))
@infer_global(barrier)
class DistBarrier(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 0
return signature(types.int32, *unliteral_all(args))
@infer_global(dist_cumsum)
@infer_global(dist_cumprod)
class DistCumsumprod(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 2
return signature(types.int32, *unliteral_all(args))
@infer_global(irecv)
@infer_global(isend)
class DistIRecv(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) in [4, 5]
return signature(mpi_req_numba_type, *unliteral_all(args))
@infer_global(wait)
class DistWait(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 2
return signature(types.int32, *unliteral_all(args))
@infer_global(waitall)
class DistWaitAll(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 2 and args == (types.int32, req_array_type)
return signature(types.none, *unliteral_all(args))
# @infer_global(dist_setitem)
# class DistSetitem(AbstractTemplate):
# def generic(self, args, kws):
# assert not kws
# assert len(args)==5
# return signature(types.int32, *unliteral_all(args))
class ReqArrayType(types.Type):
def __init__(self):
super(ReqArrayType, self).__init__(
name='ReqArrayType()')
req_array_type = ReqArrayType()
register_model(ReqArrayType)(models.OpaqueModel)
def comm_req_alloc():
return 0
def comm_req_dealloc():
return 0
@infer_global(comm_req_alloc)
class DistCommReqAlloc(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 1 and args[0] == types.int32
return signature(req_array_type, *unliteral_all(args))
@infer_global(comm_req_dealloc)
class DistCommReqDeAlloc(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 1 and args[0] == req_array_type
return signature(types.none, *unliteral_all(args))
@infer_global(operator.setitem)
class SetItemReqArray(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[ary, idx, val] = args
if isinstance(ary, ReqArrayType) and idx == types.intp and val == mpi_req_numba_type:
return signature(types.none, *unliteral_all(args))
| bsd-2-clause | -8,176,314,561,692,909,000 | 28.027743 | 119 | 0.623485 | false |
cgstudiomap/cgstudiomap | main/parts/product-attribute/product_sequence/models/__init__.py | 4 | 1065 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2015 Domatix (http://domatix.com)
# Angel Moya <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import product_product
| agpl-3.0 | -4,610,172,880,151,221,000 | 45.304348 | 78 | 0.599061 | false |
petrutlucian94/nova | nova/tests/unit/api/openstack/compute/test_server_actions.py | 3 | 51004 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import uuid
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
from nova.api.openstack.compute import servers as servers_v2
from nova.compute import api as compute_api
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.image import glance
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
def return_server_not_found(*arg, **kwarg):
raise exception.InstanceNotFound(instance_id=FAKE_UUID)
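# DB-layer stubs: hand back a fake instance dict (merged with any updated
# values) instead of touching a real database.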
def instance_update_and_get_original(context, instance_uuid, values,
update_cells=True,
columns_to_join=None,
):
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
inst = dict(inst, **values)
return (inst, inst)
def instance_update(context, instance_uuid, kwargs, update_cells=True):
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
return inst
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance, password):
self.instance_id = instance['uuid']
self.password = password
class ServerActionsControllerTestV21(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_base_url = 'http://localhost:9292/images/'
image_href = image_base_url + '/' + image_uuid
servers = servers_v21
validation_error = exception.ValidationError
request_too_large_error = exception.ValidationError
image_url = None
def setUp(self):
super(ServerActionsControllerTestV21, self).setUp()
CONF.set_override('host', 'localhost', group='glance')
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
host='fake_host'))
self.stubs.Set(db, 'instance_update_and_get_original',
instance_update_and_get_original)
fakes.stub_out_nw_api(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
fake.stub_out_image_service(self.stubs)
self.flags(allow_instance_snapshots=True,
enable_instance_password=True)
self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
self.controller = self._get_controller()
self.compute_api = self.controller.compute_api
self.req = fakes.HTTPRequest.blank('')
self.context = self.req.environ['nova.context']
def _get_controller(self):
ext_info = plugins.LoadedExtensionInfo()
return self.servers.ServersController(extension_info=ext_info)
def _set_fake_extension(self):
pass
def _rebuild(self, context, image_ref, value=None):
if value is not None:
compute_api.API.rebuild(context, mox.IgnoreArg(), image_ref,
mox.IgnoreArg(), preserve_ephemeral=value)
else:
compute_api.API.rebuild(context, mox.IgnoreArg(), image_ref,
mox.IgnoreArg())
def _stub_instance_get(self, uuid=None):
self.mox.StubOutWithMock(compute_api.API, 'get')
if uuid is None:
uuid = uuidutils.generate_uuid()
instance = fake_instance.fake_db_instance(
id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), instance)
self.compute_api.get(self.context, uuid,
expected_attrs=['pci_devices', 'flavor'],
want_objects=True).AndReturn(instance)
return instance
def _test_locked_instance(self, action, method=None, body_map=None,
compute_api_args_map=None):
if body_map is None:
body_map = {}
if compute_api_args_map is None:
compute_api_args_map = {}
instance = self._stub_instance_get()
args, kwargs = compute_api_args_map.get(action, ((), {}))
getattr(compute_api.API, method)(self.context, instance,
*args, **kwargs).AndRaise(
exception.InstanceIsLocked(instance_uuid=instance['uuid']))
self.mox.ReplayAll()
controller_function = 'self.controller.' + action
self.assertRaises(webob.exc.HTTPConflict,
eval(controller_function),
self.req, instance['uuid'],
body=body_map.get(action))
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_actions_with_locked_instance(self):
actions = ['_action_resize', '_action_confirm_resize',
'_action_revert_resize', '_action_reboot',
'_action_rebuild']
method_translations = {'_action_resize': 'resize',
'_action_confirm_resize': 'confirm_resize',
'_action_revert_resize': 'revert_resize',
'_action_reboot': 'reboot',
'_action_rebuild': 'rebuild'}
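        # body_map holds the request body each action needs, and args_map the
        # arguments the corresponding compute API method should receive while
        # being made to raise InstanceIsLocked.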
body_map = {'_action_resize': {'resize': {'flavorRef': '2'}},
'_action_reboot': {'reboot': {'type': 'HARD'}},
'_action_rebuild': {'rebuild': {
'imageRef': self.image_uuid,
'adminPass': 'TNc53Dr8s7vw'}}}
args_map = {'_action_resize': (('2'), {}),
'_action_confirm_resize': ((), {}),
'_action_reboot': (('HARD',), {}),
'_action_rebuild': ((self.image_uuid,
'TNc53Dr8s7vw'), {})}
if self.servers == servers_v2:
args_map['_action_rebuild'] = ((self.image_uuid, 'TNc53Dr8s7vw'),
{'files_to_inject': None})
for action in actions:
method = method_translations.get(action)
self.mox.StubOutWithMock(compute_api.API, method)
self._test_locked_instance(action, method=method,
body_map=body_map,
compute_api_args_map=args_map)
def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_missing_type(self):
body = dict(reboot=dict())
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_none(self):
body = dict(reboot=dict(type=None))
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_not_found(self):
self.stubs.Set(db, 'instance_get_by_uuid',
return_server_not_found)
body = dict(reboot=dict(type="HARD"))
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
self.req, str(uuid.uuid4()), body=body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
body = dict(reboot=dict(type="HARD"))
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_hard_with_hard_in_progress(self):
body = dict(reboot=dict(type="HARD"))
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def _test_rebuild_preserve_ephemeral(self, value=None):
self._set_fake_extension()
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE,
host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
if value is not None:
body['rebuild']['preserve_ephemeral'] = value
self.mox.StubOutWithMock(compute_api.API, 'rebuild')
self._rebuild(self.context, self._image_href, value)
self.mox.ReplayAll()
self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
def test_rebuild_preserve_ephemeral_true(self):
self._test_rebuild_preserve_ephemeral(True)
def test_rebuild_preserve_ephemeral_false(self):
self._test_rebuild_preserve_ephemeral(False)
def test_rebuild_preserve_ephemeral_default(self):
self._test_rebuild_preserve_ephemeral()
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
robj = self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']),
CONF.password_length)
self.assertEqual(robj['location'], self_href)
def test_rebuild_instance_with_image_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
body = {
'rebuild': {
'imageRef': self.image_uuid,
},
}
self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
self.assertEqual(info['image_href_in_call'], self.image_uuid)
def test_rebuild_instance_with_image_href_uses_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
body = {
'rebuild': {
'imageRef': self.image_href,
},
}
self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
self.assertEqual(info['image_href_in_call'], self.image_uuid)
def test_rebuild_accepted_minimum_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
robj = self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertNotIn("adminPass", body['server'])
self.assertEqual(robj['location'], self_href)
def test_rebuild_raises_conflict_on_invalid_state(self):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
def fake_rebuild(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
return_server = fakes.fake_instance_get(metadata=metadata,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": metadata,
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertEqual(body['server']['metadata'], metadata)
def test_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": "stack",
},
}
self.assertRaises(self.validation_error,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_too_large_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": {
256 * "k": "value"
}
}
}
self.assertRaises(self.request_too_large_error,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=body)
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": self._image_href,
},
}
self.assertRaises(self.validation_error,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_admin_pass(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(body['server']['adminPass'], 'asdf')
def test_rebuild_admin_pass_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertNotIn('adminPass', body['server'])
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_bad_image(self):
body = {
"rebuild": {
"imageRef": "foo",
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_accessIP(self):
attributes = {
'access_ip_v4': '172.19.0.1',
'access_ip_v6': 'fe80::1',
}
body = {
"rebuild": {
"imageRef": self._image_href,
"accessIPv4": "172.19.0.1",
"accessIPv6": "fe80::1",
},
}
data = {'changes': {}}
orig_get = compute_api.API.get
def wrap_get(*args, **kwargs):
data['instance'] = orig_get(*args, **kwargs)
return data['instance']
def fake_save(context, **kwargs):
data['changes'].update(data['instance'].obj_get_changes())
self.stubs.Set(compute_api.API, 'get', wrap_get)
self.stubs.Set(objects.Instance, 'save', fake_save)
self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
self.assertEqual(self._image_href, data['changes']['image_ref'])
self.assertEqual("", data['changes']['kernel_id'])
self.assertEqual("", data['changes']['ramdisk_id'])
self.assertEqual(task_states.REBUILDING, data['changes']['task_state'])
self.assertEqual(0, data['changes']['progress'])
for attr, value in attributes.items():
self.assertEqual(value, str(data['changes'][attr]))
def test_rebuild_when_kernel_not_exists(self):
def return_image_meta(*args, **kwargs):
image_meta_table = {
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
body = {
"rebuild": {
"imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_proper_kernel_ram(self):
instance_meta = {'kernel_id': None, 'ramdisk_id': None}
orig_get = compute_api.API.get
def wrap_get(*args, **kwargs):
inst = orig_get(*args, **kwargs)
instance_meta['instance'] = inst
return inst
def fake_save(context, **kwargs):
instance = instance_meta['instance']
for key in instance_meta.keys():
if key in instance.obj_what_changed():
instance_meta[key] = instance[key]
def return_image_meta(*args, **kwargs):
image_meta_table = {
'1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
self.stubs.Set(compute_api.API, 'get', wrap_get)
self.stubs.Set(objects.Instance, 'save', fake_save)
body = {
"rebuild": {
"imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
self.controller._action_rebuild(self.req, FAKE_UUID, body=body).obj
self.assertEqual(instance_meta['kernel_id'], '1')
self.assertEqual(instance_meta['ramdisk_id'], '2')
@mock.patch.object(compute_api.API, 'rebuild')
def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
mock_rebuild.side_effect = exception.AutoDiskConfigDisabledByImage(
image='dummy')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_resize_server(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(compute_api.API, 'resize', resize_mock)
self.controller._action_resize(self.req, FAKE_UUID, body=body)
self.assertEqual(self.resize_called, True)
def test_resize_server_no_flavor(self):
body = dict(resize=dict())
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_no_flavor_ref(self):
body = dict(resize=dict(flavorRef=None))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_with_extra_arg(self):
body = dict(resize=dict(favorRef="http://localhost/3",
extra_arg="extra_arg"))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_invalid_flavor_ref(self):
body = dict(resize=dict(flavorRef=1.2))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_with_server_not_found(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.stubs.Set(compute_api.API, 'get', return_server_not_found)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_with_image_exceptions(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = 0
image_id = 'fake_image_id'
exceptions = [
(exception.ImageNotAuthorized(image_id=image_id),
webob.exc.HTTPUnauthorized),
(exception.ImageNotFound(image_id=image_id),
webob.exc.HTTPBadRequest),
(exception.Invalid, webob.exc.HTTPBadRequest),
(exception.NoValidHost(reason='Bad host'),
webob.exc.HTTPBadRequest),
(exception.AutoDiskConfigDisabledByImage(image=image_id),
webob.exc.HTTPBadRequest),
]
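        # Each fake resize call raises the next compute-layer exception; the
        # controller must translate it into the paired webob HTTP error.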
raised, expected = map(iter, zip(*exceptions))
def _fake_resize(obj, context, instance, flavor_id):
self.resize_called += 1
raise raised.next()
self.stubs.Set(compute_api.API, 'resize', _fake_resize)
for call_no in range(len(exceptions)):
next_exception = expected.next()
actual = self.assertRaises(next_exception,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
if (isinstance(exceptions[call_no][0],
exception.NoValidHost)):
self.assertEqual(actual.explanation,
'No valid host was found. Bad host')
elif (isinstance(exceptions[call_no][0],
exception.AutoDiskConfigDisabledByImage)):
self.assertEqual(actual.explanation,
'Requested image fake_image_id has automatic'
' disk resize disabled.')
self.assertEqual(self.resize_called, call_no + 1)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.CannotResizeDisk(reason=''))
def test_resize_raises_cannot_resize_disk(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.FlavorNotFound(reason='',
flavor_id='fake_id'))
def test_resize_raises_flavor_not_found(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_with_too_many_instances(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.TooManyInstances(message="TooManyInstance")
self.stubs.Set(compute_api.API, 'resize', fake_resize)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'resize', fake_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.NoValidHost(reason=''))
def test_resize_raises_no_valid_host(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'resize')
def test_resize_instance_raise_auto_disk_config_exc(self, mock_resize):
mock_resize.side_effect = exception.AutoDiskConfigDisabledByImage(
image='dummy')
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_confirm_resize_server(self):
body = dict(confirmResize=None)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
self.controller._action_confirm_resize(self.req, FAKE_UUID, body=body)
self.assertEqual(self.confirm_resize_called, True)
def test_confirm_resize_migration_not_found(self):
body = dict(confirmResize=None)
def confirm_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(compute_api.API,
'confirm_resize',
confirm_resize_mock)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_confirm_resize,
self.req, FAKE_UUID, body=body)
def test_confirm_resize_raises_conflict_on_invalid_state(self):
body = dict(confirmResize=None)
def fake_confirm_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'confirm_resize',
fake_confirm_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_confirm_resize,
self.req, FAKE_UUID, body=body)
def test_revert_resize_migration_not_found(self):
body = dict(revertResize=None)
def revert_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(compute_api.API,
'revert_resize',
revert_resize_mock)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_revert_resize,
self.req, FAKE_UUID, body=body)
def test_revert_resize_server_not_found(self):
body = dict(revertResize=None)
        self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_revert_resize,
self.req, "bad_server_id", body=body)
def test_revert_resize_server(self):
body = dict(revertResize=None)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
body = self.controller._action_revert_resize(self.req, FAKE_UUID,
body=body)
self.assertEqual(self.revert_resize_called, True)
def test_revert_resize_raises_conflict_on_invalid_state(self):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'revert_resize',
fake_revert_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_revert_resize,
self.req, FAKE_UUID, body=body)
def test_create_image(self):
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
response = self.controller._action_create_image(self.req, FAKE_UUID,
body=body)
location = response.headers['Location']
self.assertEqual(self.image_url + '123' if self.image_url else
glance.generate_image_url('123'),
location)
def test_create_image_name_too_long(self):
long_name = 'a' * 260
body = {
'createImage': {
'name': long_name,
},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image, self.req,
FAKE_UUID, body=body)
def _do_test_create_volume_backed_image(self, extra_properties):
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
body = dict(createImage=dict(name='snapshot_of_volume_backed'))
if extra_properties:
body['createImage']['metadata'] = extra_properties
image_service = glance.get_default_image_service()
bdm = [dict(volume_id=_fake_id('a'),
volume_size=1,
device_name='vda',
delete_on_termination=False)]
props = dict(kernel_id=_fake_id('b'),
ramdisk_id=_fake_id('c'),
root_device_name='/dev/vda',
block_device_mapping=bdm)
original_image = dict(properties=props,
container_format='ami',
status='active',
is_public=True)
image_service.create(None, original_image)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': _fake_id('a'),
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'snapshot_id': 1,
'boot_index': 0,
'delete_on_termination': False,
'no_device': None})]
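        # The instance is treated as volume-backed, so createImage snapshots
        # the mapped volume and records the snapshot in the new image's
        # block_device_mapping instead of uploading a root disk image.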
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(image_ref=original_image['id'],
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda')
self.stubs.Set(db, 'instance_get_by_uuid', instance)
self.mox.StubOutWithMock(self.controller.compute_api.compute_rpcapi,
'quiesce_instance')
self.controller.compute_api.compute_rpcapi.quiesce_instance(
mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
exception.InstanceQuiesceNotSupported(instance_id='fake',
reason='test'))
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake')
snapshot = dict(id=_fake_id('d'))
self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
volume_api = self.controller.compute_api.volume_api
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
self.mox.ReplayAll()
response = self.controller._action_create_image(self.req, FAKE_UUID,
body=body)
location = response.headers['Location']
image_id = location.replace(self.image_url or
glance.generate_image_url(''), '')
image = image_service.show(None, image_id)
self.assertEqual(image['name'], 'snapshot_of_volume_backed')
properties = image['properties']
self.assertEqual(properties['kernel_id'], _fake_id('b'))
self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
self.assertEqual(properties['root_device_name'], '/dev/vda')
self.assertEqual(properties['bdm_v2'], True)
bdms = properties['block_device_mapping']
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['boot_index'], 0)
self.assertEqual(bdms[0]['source_type'], 'snapshot')
self.assertEqual(bdms[0]['destination_type'], 'volume')
self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
for fld in ('connection_info', 'id',
'instance_uuid', 'device_name'):
self.assertNotIn(fld, bdms[0])
for k in extra_properties.keys():
self.assertEqual(properties[k], extra_properties[k])
def test_create_volume_backed_image_no_metadata(self):
self._do_test_create_volume_backed_image({})
def test_create_volume_backed_image_with_metadata(self):
self._do_test_create_volume_backed_image(dict(ImageType='Gold',
ImageVersion='2.0'))
def _test_create_volume_backed_image_with_metadata_from_volume(
self, extra_metadata=None):
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
body = dict(createImage=dict(name='snapshot_of_volume_backed'))
if extra_metadata:
body['createImage']['metadata'] = extra_metadata
image_service = glance.get_default_image_service()
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': _fake_id('a'),
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'snapshot_id': 1,
'boot_index': 0,
'delete_on_termination': False,
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(image_ref='',
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda')
self.stubs.Set(db, 'instance_get_by_uuid', instance)
self.mox.StubOutWithMock(self.controller.compute_api.compute_rpcapi,
'quiesce_instance')
self.controller.compute_api.compute_rpcapi.quiesce_instance(
mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
exception.InstanceQuiesceNotSupported(instance_id='fake',
reason='test'))
fake_metadata = {'test_key1': 'test_value1',
'test_key2': 'test_value2'}
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake',
volume_image_metadata=fake_metadata)
snapshot = dict(id=_fake_id('d'))
self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
volume_api = self.controller.compute_api.volume_api
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
self.mox.ReplayAll()
response = self.controller._action_create_image(self.req, FAKE_UUID,
body=body)
location = response.headers['Location']
image_id = location.replace(self.image_base_url, '')
image = image_service.show(None, image_id)
properties = image['properties']
self.assertEqual(properties['test_key1'], 'test_value1')
self.assertEqual(properties['test_key2'], 'test_value2')
if extra_metadata:
for key, val in extra_metadata.items():
self.assertEqual(properties[key], val)
def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume()
def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume(
extra_metadata={'a': 'b'})
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False)
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_with_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {'key': 'asdf'},
},
}
response = self.controller._action_create_image(self.req, FAKE_UUID,
body=body)
location = response.headers['Location']
self.assertEqual(self.image_url + '123' if self.image_url else
glance.generate_image_url('123'), location)
def test_create_image_with_too_much_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(CONF.quota_metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_no_name(self):
body = {
'createImage': {},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_blank_name(self):
body = {
'createImage': {
'name': '',
}
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_bad_metadata(self):
body = {
'createImage': {
'name': 'geoff',
'metadata': 'henry',
},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'snapshot', snapshot)
body = {
"createImage": {
"name": "test_snapshot",
},
}
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
class ServerActionsControllerTestV2(ServerActionsControllerTestV21):
image_base_url = 'http://localhost/v2/fake/images/'
servers = servers_v2
validation_error = webob.exc.HTTPBadRequest
request_too_large_error = webob.exc.HTTPRequestEntityTooLarge
image_url = 'http://localhost/v2/fake/images/'
def _get_controller(self):
class FakeExtManager(object):
def is_loaded(self, ext):
return False
return self.servers.Controller(ext_mgr=FakeExtManager())
def _set_fake_extension(self):
def fake_is_loaded(ext):
return ext == 'os-preserve-ephemeral-rebuild'
self.stubs.Set(self.controller.ext_mgr, 'is_loaded', fake_is_loaded)
def _rebuild(self, context, image_ref, value=None):
if value is not None:
compute_api.API.rebuild(context, mox.IgnoreArg(), image_ref,
mox.IgnoreArg(), preserve_ephemeral=value,
files_to_inject=None)
else:
compute_api.API.rebuild(context, mox.IgnoreArg(), image_ref,
mox.IgnoreArg(), files_to_inject=None)
def _stub_instance_get(self, uuid=None):
self.mox.StubOutWithMock(compute_api.API, 'get')
if uuid is None:
uuid = uuidutils.generate_uuid()
instance = fake_instance.fake_db_instance(
id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), instance)
self.compute_api.get(self.context, uuid,
expected_attrs=['flavor'],
want_objects=True).AndReturn(instance)
return instance
def test_rebuild_bad_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": "INVALID b64",
}]
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": base64.b64encode("Test String"),
}]
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertNotIn('personality', body['server'])
def test_resize_server_with_extra_arg(self):
# NOTE: v2.0 API cannot cover this case, skip this.
pass
def test_resize_server_invalid_flavor_ref(self):
# NOTE: v2.0 API cannot cover this case, skip this.
pass
def test_create_image_glance_link_prefix(self):
self.flags(osapi_glance_link_prefix='https://glancehost')
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
response = self.controller._action_create_image(self.req, FAKE_UUID,
body=body)
location = response.headers['Location']
self.assertEqual('https://glancehost/v2/fake/images/123', location)
def test_rebuild_preserve_ephemeral_is_ignored_when_ext_not_loaded(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE,
host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"preserve_ephemeral": False,
},
}
self.mox.StubOutWithMock(compute_api.API, 'rebuild')
compute_api.API.rebuild(self.context, mox.IgnoreArg(),
self._image_href,
mox.IgnoreArg(), files_to_inject=None)
self.mox.ReplayAll()
self.controller._action_rebuild(self.req, FAKE_UUID, body)
| apache-2.0 | 1,847,969,642,705,192,700 | 37.934351 | 79 | 0.544644 | false |
veger/ansible | lib/ansible/modules/windows/win_chocolatey_config.py | 28 | 1677 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_chocolatey_config
version_added: '2.7'
short_description: Manages Chocolatey config settings
description:
- Used to manage Chocolatey config settings as well as unset the values.
options:
name:
description:
- The name of the config setting to manage.
- See U(https://chocolatey.org/docs/chocolatey-configuration) for a list of
valid configuration settings that can be changed.
- Any config values that contain encrypted values like a password are not
idempotent as the plaintext value cannot be read.
required: yes
state:
description:
- When C(absent), it will ensure the setting is unset or blank.
- When C(present), it will ensure the setting is set to the value of
I(value).
choices:
- absent
- present
default: present
value:
description:
- Used when C(state=present) that contains the value to set for the config
setting.
- Cannot be null or an empty string, use C(state=absent) to unset a config
value instead.
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: set the cache location
win_chocolatey_config:
name: cacheLocation
state: present
value: D:\chocolatey_temp
- name: unset the cache location
win_chocolatey_config:
name: cacheLocation
state: absent
'''
RETURN = r'''
'''
| gpl-3.0 | 8,094,782,639,069,273,000 | 26.95 | 92 | 0.673226 | false |
citrix-openstack-build/trove | trove/tests/unittests/guestagent/test_api.py | 2 | 13947 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mockito import when
from mockito import any
from mockito import verify
from mockito import unstub
from mockito import mock
from mockito import verifyZeroInteractions
from mockito import never
import mockito.matchers
import testtools
from testtools.matchers import KeysEqual, Is
from trove.guestagent import models as agent_models
import trove.db.models as db_models
from trove.common import exception
from trove.guestagent import api
import trove.openstack.common.rpc as rpc
def _mock_call_pwd_change(cmd, users=None):
if users == 'dummy':
return True
else:
raise BaseException("Test Failed")
def _mock_call(cmd, timeout, username=None, hostname=None,
               database=None, databases=None):
    # Only get_user, list_access, grant_access and revoke_access are expected
    # to come through _call; anything else fails the test.
if cmd in ('get_user', 'list_access', 'grant_access', 'revoke_access'):
return True
else:
raise BaseException("Test Failed")
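# The two fakes above stand in for the guest agent RPC plumbing: _cast for
# fire-and-forget messages and _call for messages that wait on a reply.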
class ApiTest(testtools.TestCase):
def setUp(self):
super(ApiTest, self).setUp()
self.guest = api.API('mock_content', 0)
self.guest._cast = _mock_call_pwd_change
self.guest._call = _mock_call
self.FAKE_ID = 'instance-id-x23d2d'
self.api = api.API(mock(), self.FAKE_ID)
when(rpc).call(any(), any(), any(), any(int)).thenRaise(
ValueError('Unexpected Rpc Invocation'))
when(rpc).cast(any(), any(), any()).thenRaise(
ValueError('Unexpected Rpc Invocation'))
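        # Any rpc.call/cast that a test does not stub with an expected message
        # raises ValueError, so unexpected RPC traffic fails loudly.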
def tearDown(self):
super(ApiTest, self).tearDown()
unstub()
def test_delete_queue(self):
self.skipTest("find out if this delete_queue function is needed "
"anymore, Bug#1097482")
def test_change_passwords(self):
self.assertIsNone(self.guest.change_passwords("dummy"))
def test_get_user(self):
self.assertTrue(self.guest.get_user("dummyname", "dummyhost"))
def test_list_access(self):
self.assertTrue(self.guest.list_access("dummyname", "dummyhost"))
def test_grant_access(self):
self.assertTrue(self.guest.grant_access("dumname", "dumhost", "dumdb"))
def test_revoke_access(self):
self.assertTrue(self.guest.revoke_access("dumname", "dumhost",
"dumdb"))
def test_get_routing_key(self):
self.assertEqual('guestagent.' + self.FAKE_ID,
self.api._get_routing_key())
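    # Note: "_check_for_hearbeat" (sic) matches the spelling of the method on
    # the API class under test.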
def test_check_for_heartbeat_positive(self):
when(db_models.DatabaseModelBase).find_by(
instance_id=any()).thenReturn('agent')
when(agent_models.AgentHeartBeat).is_active('agent').thenReturn(True)
self.assertTrue(self.api._check_for_hearbeat())
def test_check_for_heartbeat_exception(self):
# TODO(juice): maybe it would be ok to extend the test to validate
# the is_active method on the heartbeat
when(db_models.DatabaseModelBase).find_by(instance_id=any()).thenRaise(
exception.ModelNotFoundError)
when(agent_models.AgentHeartBeat).is_active(any()).thenReturn(None)
self.assertRaises(exception.GuestTimeout, self.api._check_for_hearbeat)
verify(agent_models.AgentHeartBeat, times=0).is_active(any())
def test_check_for_heartbeat_negative(self):
# TODO(juice): maybe it would be ok to extend the test to validate
# the is_active method on the heartbeat
when(db_models.DatabaseModelBase).find_by(
instance_id=any()).thenReturn('agent')
when(agent_models.AgentHeartBeat).is_active(any()).thenReturn(False)
self.assertRaises(exception.GuestTimeout, self.api._check_for_hearbeat)
def test_create_user(self):
exp_msg = RpcMsgMatcher('create_user', 'users')
self._mock_rpc_cast(exp_msg)
self.api.create_user('test_user')
self._verify_rpc_cast(exp_msg)
def test_rpc_cast_exception(self):
exp_msg = RpcMsgMatcher('create_user', 'users')
when(rpc).cast(any(), any(), exp_msg).thenRaise(IOError('host down'))
with testtools.ExpectedException(exception.GuestError, '.* host down'):
self.api.create_user('test_user')
self._verify_rpc_cast(exp_msg)
def test_list_users(self):
exp_msg = RpcMsgMatcher('list_users', 'limit', 'marker',
'include_marker')
exp_resp = ['user1', 'user2', 'user3']
self._mock_rpc_call(exp_msg, exp_resp)
act_resp = self.api.list_users()
self.assertThat(act_resp, Is(exp_resp))
self._verify_rpc_call(exp_msg)
def test_rpc_call_exception(self):
exp_msg = RpcMsgMatcher('list_users', 'limit', 'marker',
'include_marker')
when(rpc).call(any(), any(), exp_msg, any(int)).thenRaise(
IOError('host down'))
with testtools.ExpectedException(exception.GuestError,
'An error occurred.*'):
self.api.list_users()
self._verify_rpc_call(exp_msg)
def test_delete_user(self):
exp_msg = RpcMsgMatcher('delete_user', 'user')
self._mock_rpc_cast(exp_msg)
self.api.delete_user('test_user')
self._mock_rpc_cast(exp_msg)
def test_create_database(self):
exp_msg = RpcMsgMatcher('create_database', 'databases')
self._mock_rpc_cast(exp_msg)
self.api.create_database(['db1', 'db2', 'db3'])
self._verify_rpc_cast(exp_msg)
def test_list_databases(self):
exp_msg = RpcMsgMatcher('list_databases', 'limit', 'marker',
'include_marker')
exp_resp = ['db1', 'db2', 'db3']
self._mock_rpc_call(exp_msg, exp_resp)
resp = self.api.list_databases(limit=1, marker=2,
include_marker=False)
self.assertThat(resp, Is(exp_resp))
self._verify_rpc_call(exp_msg)
def test_delete_database(self):
exp_msg = RpcMsgMatcher('delete_database', 'database')
self._mock_rpc_cast(exp_msg)
self.api.delete_database('test_database_name')
self._verify_rpc_cast(exp_msg)
def test_enable_root(self):
exp_msg = RpcMsgMatcher('enable_root')
self._mock_rpc_call(exp_msg, True)
self.assertThat(self.api.enable_root(), Is(True))
self._verify_rpc_call(exp_msg)
def test_disable_root(self):
exp_msg = RpcMsgMatcher('disable_root')
self._mock_rpc_call(exp_msg, True)
self.assertThat(self.api.disable_root(), Is(True))
self._verify_rpc_call(exp_msg)
def test_is_root_enabled(self):
exp_msg = RpcMsgMatcher('is_root_enabled')
self._mock_rpc_call(exp_msg, False)
self.assertThat(self.api.is_root_enabled(), Is(False))
self._verify_rpc_call(exp_msg)
def test_get_hwinfo(self):
exp_msg = RpcMsgMatcher('get_hwinfo')
self._mock_rpc_call(exp_msg)
self.api.get_hwinfo()
self._verify_rpc_call(exp_msg)
def test_get_diagnostics(self):
exp_msg = RpcMsgMatcher('get_diagnostics')
self._mock_rpc_call(exp_msg)
self.api.get_diagnostics()
self._verify_rpc_call(exp_msg)
def test_restart(self):
exp_msg = RpcMsgMatcher('restart')
self._mock_rpc_call(exp_msg)
self.api.restart()
self._verify_rpc_call(exp_msg)
def test_start_db_with_conf_changes(self):
exp_msg = RpcMsgMatcher('start_db_with_conf_changes',
'config_contents')
self._mock_rpc_call(exp_msg)
self.api.start_db_with_conf_changes(None)
self._verify_rpc_call(exp_msg)
def test_stop_db(self):
exp_msg = RpcMsgMatcher('stop_db', 'do_not_start_on_reboot')
self._mock_rpc_call(exp_msg)
self.api.stop_db(do_not_start_on_reboot=False)
self._verify_rpc_call(exp_msg)
def test_get_volume_info(self):
fake_resp = {'fake': 'resp'}
exp_msg = RpcMsgMatcher('get_filesystem_stats', 'fs_path')
self._mock_rpc_call(exp_msg, fake_resp)
self.assertThat(self.api.get_volume_info(), Is(fake_resp))
self._verify_rpc_call(exp_msg)
def test_update_guest(self):
exp_msg = RpcMsgMatcher('update_guest')
self._mock_rpc_call(exp_msg)
self.api.update_guest()
self._verify_rpc_call(exp_msg)
def test_create_backup(self):
exp_msg = RpcMsgMatcher('create_backup', 'backup_id')
self._mock_rpc_cast(exp_msg)
self.api.create_backup('123')
self._verify_rpc_cast(exp_msg)
def _verify_rpc_connection_and_cast(self, rpc, mock_conn, exp_msg):
verify(rpc).create_connection(new=True)
verify(mock_conn).create_consumer(self.api._get_routing_key(), None,
fanout=False)
verify(rpc).cast(any(), any(), exp_msg)
def test_prepare(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenReturn(mock_conn)
when(mock_conn).create_consumer(any(), any(), any()).thenReturn(None)
exp_msg = RpcMsgMatcher('prepare', 'memory_mb', 'databases', 'users',
'device_path', 'mount_point', 'backup_id',
'config_contents')
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
self.api.prepare('2048', 'db1', 'user1', '/dev/vdt', '/mnt/opt',
'bkup-1232', 'cont')
self._verify_rpc_connection_and_cast(rpc, mock_conn, exp_msg)
def test_prepare_with_backup(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenReturn(mock_conn)
when(mock_conn).create_consumer(any(), any(), any()).thenReturn(None)
exp_msg = RpcMsgMatcher('prepare', 'memory_mb', 'databases', 'users',
'device_path', 'mount_point', 'backup_id',
'config_contents')
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
self.api.prepare('2048', 'db1', 'user1', '/dev/vdt', '/mnt/opt',
'backup_id_123', 'cont')
self._verify_rpc_connection_and_cast(rpc, mock_conn, exp_msg)
def test_upgrade(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenReturn(mock_conn)
when(mock_conn).create_consumer(any(), any(), any()).thenReturn(None)
exp_msg = RpcMsgMatcher('upgrade')
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
self.api.upgrade()
self._verify_rpc_connection_and_cast(rpc, mock_conn, exp_msg)
def test_rpc_cast_with_consumer_exception(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenRaise(IOError('host down'))
exp_msg = RpcMsgMatcher('prepare', 'memory_mb', 'databases', 'users',
'device_path', 'mount_point')
with testtools.ExpectedException(exception.GuestError, '.* host down'):
self.api.prepare('2048', 'db1', 'user1', '/dev/vdt', '/mnt/opt')
verify(rpc).create_connection(new=True)
verifyZeroInteractions(mock_conn)
verify(rpc, never).cast(any(), any(), exp_msg)
def _mock_rpc_call(self, exp_msg, resp=None):
rpc.common = mock()
when(rpc).call(any(), any(), exp_msg, any(int)).thenReturn(resp)
def _verify_rpc_call(self, exp_msg):
verify(rpc).call(any(), any(), exp_msg, any(int))
def _mock_rpc_cast(self, exp_msg):
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
def _verify_rpc_cast(self, exp_msg):
verify(rpc).cast(any(), any(), exp_msg)
class CastWithConsumerTest(testtools.TestCase):
def setUp(self):
super(CastWithConsumerTest, self).setUp()
self.api = api.API(mock(), 'instance-id-x23d2d')
def tearDown(self):
super(CastWithConsumerTest, self).tearDown()
unstub()
def test__cast_with_consumer(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenReturn(mock_conn)
when(mock_conn).create_consumer(any(), any(), any()).thenReturn(None)
when(rpc).cast(any(), any(), any()).thenReturn(None)
self.api._cast_with_consumer('fake_method_name', fake_param=1)
verify(rpc).create_connection(new=True)
verify(mock_conn).create_consumer(any(), None, fanout=False)
verify(rpc).cast(any(), any(), any())
class RpcMsgMatcher(mockito.matchers.Matcher):
def __init__(self, method, *args_dict):
self.wanted_method = method
self.wanted_dict = KeysEqual('version', 'method', 'args', 'namespace')
self.args_dict = KeysEqual(*args_dict)
def matches(self, arg):
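        # testtools matchers return None on success and a Mismatch object on
        # failure, so a truthy result below means the keys did not line up.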
if self.wanted_method != arg['method']:
raise Exception("Method does not match: %s != %s" %
(self.wanted_method, arg['method']))
#return False
if self.wanted_dict.match(arg) or self.args_dict.match(arg['args']):
raise Exception("Args do not match: %s != %s" %
(self.args_dict, arg['args']))
#return False
return True
def __repr__(self):
return "<Dict: %s>" % self.wanted_dict
| apache-2.0 | 5,870,155,701,787,544,000 | 37.958101 | 79 | 0.607586 | false |
eharney/nova | nova/tests/scheduler/test_caching_scheduler.py | 12 | 6554 | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova.openstack.common import timeutils
from nova.scheduler import caching_scheduler
from nova.scheduler import host_manager
from nova.tests.scheduler import test_scheduler
ENABLE_PROFILER = False
class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Caching Scheduler."""
driver_cls = caching_scheduler.CachingScheduler
@mock.patch.object(caching_scheduler.CachingScheduler,
"_get_up_hosts")
def test_run_periodic_tasks_loads_hosts(self, mock_up_hosts):
mock_up_hosts.return_value = []
context = mock.Mock()
self.driver.run_periodic_tasks(context)
self.assertTrue(mock_up_hosts.called)
self.assertEqual([], self.driver.all_host_states)
context.elevated.assert_called_with()
@mock.patch.object(caching_scheduler.CachingScheduler,
"_get_up_hosts")
def test_get_all_host_states_returns_cached_value(self, mock_up_hosts):
self.driver.all_host_states = []
result = self.driver._get_all_host_states(self.context)
self.assertFalse(mock_up_hosts.called)
self.assertEqual([], self.driver.all_host_states)
@mock.patch.object(caching_scheduler.CachingScheduler,
"_get_up_hosts")
def test_get_all_host_states_loads_hosts(self, mock_up_hosts):
mock_up_hosts.return_value = ["asdf"]
result = self.driver._get_all_host_states(self.context)
self.assertTrue(mock_up_hosts.called)
self.assertEqual(["asdf"], self.driver.all_host_states)
self.assertEqual(["asdf"], result)
def test_get_up_hosts(self):
with mock.patch.object(self.driver.host_manager,
"get_all_host_states") as mock_get_hosts:
mock_get_hosts.return_value = ["asdf"]
result = self.driver._get_up_hosts(self.context)
self.assertTrue(mock_get_hosts.called)
self.assertEqual(mock_get_hosts.return_value, result)
def test_select_destination_raises_with_no_hosts(self):
fake_request_spec = self._get_fake_request_spec()
self.driver.all_host_states = []
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations,
self.context, fake_request_spec, {})
def test_select_destination_works(self):
fake_request_spec = self._get_fake_request_spec()
fake_host = self._get_fake_host_state()
self.driver.all_host_states = [fake_host]
result = self._test_select_destinations(fake_request_spec)
self.assertEqual(1, len(result))
self.assertEqual(result[0]["host"], fake_host.host)
def _test_select_destinations(self, request_spec):
return self.driver.select_destinations(
self.context, request_spec, {})
def _get_fake_request_spec(self):
flavor = {
"flavorid": "small",
"memory_mb": 512,
"root_gb": 1,
"ephemeral_gb": 1,
"vcpus": 1,
}
instance_properties = {
"os_type": "linux",
"project_id": "1234",
"memory_mb": 512,
"root_gb": 1,
"ephemeral_gb": 1,
"vcpus": 1,
}
request_spec = {
"instance_type": flavor,
"instance_properties": instance_properties,
"num_instances": 1,
}
return request_spec
def _get_fake_host_state(self, index=0):
host_state = host_manager.HostState(
'host_%s' % index,
'node_%s' % index)
host_state.free_ram_mb = 50000
host_state.service = {
"disabled": False,
"updated_at": timeutils.utcnow(),
"created_at": timeutils.utcnow(),
}
return host_state
def test_performance_check_select_destination(self):
hosts = 2
requests = 1
self.flags(service_down_time=240)
request_spec = self._get_fake_request_spec()
host_states = []
for x in xrange(hosts):
host_state = self._get_fake_host_state(x)
host_states.append(host_state)
self.driver.all_host_states = host_states
def run_test():
a = timeutils.utcnow()
for x in xrange(requests):
self.driver.select_destinations(
self.context, request_spec, {})
b = timeutils.utcnow()
c = b - a
            seconds = (c.days * 24 * 60 * 60 + c.seconds)
            milliseconds = seconds * 1000 + c.microseconds / 1000.0
            per_request_ms = milliseconds / requests
return per_request_ms
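        # When ENABLE_PROFILER is switched on (only in the __main__ block
        # below), the timing run is wrapped with pycallgraph to emit a call
        # graph image as scheduler.png.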
per_request_ms = None
if ENABLE_PROFILER:
import pycallgraph
from pycallgraph import output
config = pycallgraph.Config(max_depth=10)
config.trace_filter = pycallgraph.GlobbingFilter(exclude=[
'pycallgraph.*',
'unittest.*',
'nova.tests.*',
])
graphviz = output.GraphvizOutput(output_file='scheduler.png')
with pycallgraph.PyCallGraph(output=graphviz):
per_request_ms = run_test()
else:
per_request_ms = run_test()
# This has proved to be around 1 ms on a random dev box
# But this is here so you can do simply performance testing easily.
self.assertTrue(per_request_ms < 1000)
if __name__ == '__main__':
# A handy tool to help profile the schedulers performance
ENABLE_PROFILER = True
import unittest
suite = unittest.TestSuite()
test = "test_performance_check_select_destination"
test_case = CachingSchedulerTestCase(test)
suite.addTest(test_case)
runner = unittest.TextTestRunner()
runner.run(suite)
| apache-2.0 | 6,606,672,940,516,586,000 | 33.135417 | 78 | 0.603448 | false |
GiggleLiu/nrg_mapping | nrgmap/tests/test_mp.py | 1 | 1221 | from numpy import *
from numpy.testing import dec,assert_,assert_raises,assert_almost_equal,assert_allclose
import gmpy2
import time,pdb,sys
from ..mplib import *
tocomplex=vectorize(gmpy2.mpc)
assert_mpclose=lambda x,y:assert_(abs(x-y).sum()<1e-10)
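# numpy's allclose does not work reliably on object arrays of gmpy2 values,
# so closeness is checked by summing absolute differences instead.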
def test_conj():
print('Test conjugate.')
c1=tocomplex(1+2j)
assert_(mpconj(c1)==gmpy2.mpc(1-2j))
print('vectorized conjugate.')
arr=array([1-2j,0.1,6j])
carr=tocomplex(arr)
assert_mpclose(mpconj(carr),tocomplex(arr.conj()))
def test_qr():
for shp in [[3,3],[3,2],[2,3]]:
print('test qr for shape %s'%shp)
ai=random.random(shp)+1j*random.random(shp)
ai=tocomplex(ai)
if shp[0]<shp[1]:
assert_raises(NotImplementedError,mpqr,ai)
continue
Q,R=mpqr(ai)
        # Q*R must reproduce the input matrix.
        assert_mpclose(dot(Q,R),ai)
        # Q must have orthonormal columns.
        assert_mpclose(dot(mpconj(Q.T),Q),identity(Q.shape[1]))
        # The part of R below the diagonal must be zero.
for i in range(R.shape[0]):
for j in range(R.shape[1]):
if i>j:
assert_almost_equal(R[i,j],0)
def test_all():
test_conj()
test_qr()
if __name__=='__main__':
test_all()
| mit | -8,404,902,727,140,415,000 | 26.75 | 87 | 0.588862 | false |
cmptrgeekken/evething | thing/models/publiccontract.py | 1 | 4026 | # ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from django.db import models
from thing.models.character import Character
from thing.models.corporation import Corporation
from thing.models.station import Station
from thing.models.region import Region
class PublicContract(models.Model):
contract_id = models.IntegerField(db_index=True)
issuer_char = models.ForeignKey(Character, blank=True, null=True, related_name='+', on_delete=models.DO_NOTHING)
issuer_corp = models.ForeignKey(Corporation, related_name='+', on_delete=models.DO_NOTHING)
region = models.ForeignKey(Region, on_delete=models.DO_NOTHING)
start_station = models.ForeignKey(Station, blank=True, null=True, related_name='+', on_delete=models.DO_NOTHING)
end_station = models.ForeignKey(Station, blank=True, null=True, related_name='+', on_delete=models.DO_NOTHING)
type = models.CharField(max_length=16)
status = models.CharField(max_length=24)
title = models.CharField(max_length=64)
for_corp = models.BooleanField(default=False)
date_issued = models.DateTimeField()
date_expired = models.DateTimeField()
date_lastseen = models.DateTimeField()
num_days = models.IntegerField()
price = models.DecimalField(max_digits=20, decimal_places=2)
reward = models.DecimalField(max_digits=20, decimal_places=2)
collateral = models.DecimalField(max_digits=20, decimal_places=2)
buyout = models.DecimalField(max_digits=20, decimal_places=2)
volume = models.DecimalField(max_digits=20, decimal_places=4)
retrieved_items = models.BooleanField(default=False)
class Meta:
app_label = 'thing'
ordering = ('-date_issued',)
def __unicode__(self):
if self.type == 'Courier':
return '#%d (%s, %s -> %s)' % (self.contract_id, self.type, self.start_station.short_name, self.end_station.short_name)
else:
return '#%d (%s, %s)' % (self.contract_id, self.type, self.start_station.short_name)
def get_issuer_name(self):
if self.for_corp:
return self.issuer_corp.name
else:
return self.issuer_char.name
def get_items(self):
from thing.models import PublicContractItem
items = PublicContractItem.objects.filter(contract_id=self.id)
item_lookup = dict()
for i in items:
if i.item_id not in item_lookup:
item_lookup[i.item_id] = i
else:
item_lookup[i.item_id].quantity += i.quantity
return item_lookup.values()
| bsd-2-clause | -2,238,725,556,423,410,200 | 42.76087 | 131 | 0.68157 | false |
bitcoinec/bitcoinec | qa/rpc-tests/test_framework/mininode.py | 18 | 55213 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# mininode.py - Bitcoin P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
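#
# A minimal usage sketch (illustrative only; the address, port and message
# choice below are placeholders, not requirements of this module):
#
#     node = SingleNodeConnCB()
#     conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=node)
#     node.add_connection(conn)
#     NetworkThread().start()
#     node.wait_for_verack()
#     node.send_and_ping(msg_mempool())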
import struct
import socket
import asyncore
import time
import sys
import random
from .util import hex_str_to_bytes, bytes_to_hex_str
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
from test_framework.siphash import siphash256
BIP0031_VERSION = 60000
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
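#
# Illustrative pattern (test_node stands for any NodeConnCB instance used by a
# test; only the locking idiom matters here):
#
#     with mininode_lock:
#         assert test_node.verack_received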
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
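# For example, CBlock.serialize() below calls
# ser_vector(self.vtx, "serialize_with_witness") for witness blocks, while the
# default path simply uses each item's serialize() method.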
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
def get_merkle_root(self, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
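# Illustrative use (this mirrors HeaderAndShortIDs.initialize_from_block below):
#
#     [k0, k1] = header_and_shortids.get_siphash_keys()
#     shortid = calculate_shortid(k0, k1, tx.sha256)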
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
        if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
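    # Illustrative flow (assumes `block` is a solved CBlock whose transactions
    # have been hashed, and `conn` is a connected NodeConn):
    #
    #     comp = HeaderAndShortIDs()
    #     comp.initialize_from_block(block, use_witness=True)
    #     conn.send_message(msg_cmpctblock(comp.to_p2p()))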
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
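    # For example, absolute indexes [1, 5, 7] are stored differentially as
    # [1, 3, 1]; to_absolute() below reverses the encoding.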
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        # Use a fresh transaction per message; a mutable default argument
        # would be shared between all msg_tx instances.
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
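# Typical use (illustrative): block until our callback has seen a verack, e.g.
#     assert wait_until(lambda: test_node.verack_received, timeout=10)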
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_open(self, conn): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout=timeout)
self.ping_counter += 1
return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print('MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ str(dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
if self.state != "connected":
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
except Exception as e:
print('got_data:', repr(e))
# import traceback
# traceback.print_tb(sys.exc_info()[2])
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| mit | -6,504,394,193,462,515,000 | 29.555064 | 262 | 0.573361 | false |
michaeljoseph/cookiecutter | cookiecutter/find.py | 1 | 1034 | # -*- coding: utf-8 -*-
"""Functions for finding Cookiecutter templates and other components."""
import logging
import os
from cookiecutter.exceptions import NonTemplatedInputDirException
logger = logging.getLogger(__name__)
def find_template(repo_dir):
"""Determine which child directory of `repo_dir` is the project template.
:param repo_dir: Local directory of newly cloned repo.
:returns project_template: Relative path to project template.
"""
logger.debug('Searching %s for the project template.', repo_dir)
repo_dir_contents = os.listdir(repo_dir)
project_template = None
for item in repo_dir_contents:
if 'cookiecutter' in item and '{{' in item and '}}' in item:
project_template = item
break
if project_template:
project_template = os.path.join(repo_dir, project_template)
logger.debug('The project template appears to be %s', project_template)
return project_template
else:
raise NonTemplatedInputDirException
| bsd-3-clause | -4,415,473,042,776,241,000 | 29.411765 | 79 | 0.689555 | false |
dangra/scrapy-sci | setup.py | 1 | 1322 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='scrapy-sci',
version='0.1.0',
description='Improve your scrapy pipeline with machine learning',
long_description=readme + '\n\n' + history,
author='John Cadigan',
author_email='[email protected]',
url='https://github.com/johncadigan/scrapy-sci',
packages=[
'scrapy_sci',
"scrapy_sci.commands",
"scrapy_sci.templates",
],
package_dir={'scrapy_sci':
'scrapy_sci'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='machine learning',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
tests_require=test_requirements
)
| bsd-3-clause | -756,883,268,587,011,100 | 23.481481 | 69 | 0.626324 | false |
TiKeil/Master-thesis-LOD | python_files/tests/tests/MonteCarlo_Coefficient_4.py | 1 | 9096 | import os
import sys
import numpy as np
import scipy.sparse as sparse
import scipy.stats as stats
import random
import csv
import matplotlib.pyplot as plt
from visualize import drawCoefficient
from data import *
from gridlod import interp, coef, util, fem, world, linalg, femsolver
import pg_rand, femsolverCoarse, buildcoef2d
from gridlod.world import World
def result(pglod, world, CoefClass, A, f, MC=1, prob=100):
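    # Monte Carlo loop: for each sample, randomly perturb the coefficient,
    # compute a fine-scale reference solution, update the LOD correctors for
    # several update percentages, and accumulate error statistics with
    # 95% confidence intervals (the 1.96 factor below).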
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
plist = [0,5,10,20,30,100]
#### initial #####
xmLoda = np.zeros([MC,np.size(plist)])
xmVcLoda = np.zeros([MC,np.size(plist)])
xmLodVcLoda = np.zeros([MC,np.size(plist)])
ems = []
plottingx = np.zeros([MC-1,np.size(plist)])
plottingy = np.zeros([MC-1,np.size(plist)])
plottingz = np.zeros([MC-1,np.size(plist)])
plotting2x = np.zeros([MC-1,np.size(plist)])
plotting2y = np.zeros([MC-1,np.size(plist)])
plotting2z = np.zeros([MC-1,np.size(plist)])
plotting3x = np.zeros([MC-1,np.size(plist)])
plotting3y = np.zeros([MC-1,np.size(plist)])
plotting3z = np.zeros([MC-1,np.size(plist)])
for i in range(0,MC):
print '_____Sample__ ' + str(i+1) + '/' + str(MC) + ' ____'
R = CoefClass.RandomVanish( probfactor = prob,
PartlyVanish = None,
Original = True)
ANew = R.flatten()
###### Reference solution ######
f_fine = np.ones(NpFine)
uFineFem, AFine, MFine = femsolver.solveFine(world, ANew, f_fine, None, boundaryConditions)
Anew = coef.coefficientFine(NWorldCoarse, NCoarseElement, ANew)
###### tolerance = 0 without computing ######
vis, eps = pglod.updateCorrectors(Anew, 0, f, 1, clearFineQuantities=False, mc=True, Computing=None)
print 'Affected correctors: ' + str(np.sum(vis))
##### VCLOD ######
uVc = []
updated = 0
for p in plist:
print 'p = ' + str(p) + '%',
uVcLod, updated = VcLod(pglod, world, Anew, eps, updated, numberofcorrectors=p)
if p == 100:
uLod = uVcLod
pglod.CorrectorsToOrigin()
else:
uVc.append(uVcLod)
for k in range(0,np.shape(uVc)[0]):
uVcLod = uVc[k]
eVcLod = np.sqrt(np.dot(uFineFem - uVcLod, MFine*(uFineFem - uVcLod))) / np.sqrt(np.dot(uFineFem, MFine*uFineFem))
eLodVcLod = np.sqrt(np.dot(uVcLod - uLod, MFine*(uVcLod - uLod))) / np.sqrt(np.dot(uLod, MFine*uLod))
eLod = np.sqrt(np.dot(uFineFem - uLod, MFine*(uFineFem - uLod))) / np.sqrt(np.dot(uFineFem, MFine*uFineFem))
xmLoda[i,k] = eLod
xmVcLoda[i,k] = eVcLod
xmLodVcLoda[i,k] = eLodVcLod
if i == 0:
continue
ems.append(i+1)
for k in range(0,np.shape(uVc)[0]):
muLod = 0
muVcLod = 0
muLodVcLod = 0
for j in range(0,i+1):
muLod += xmLoda[j,k]
muVcLod += xmVcLoda[j,k]
muLodVcLod += xmLodVcLoda[j,k]
muLod /= i+1
muVcLod /= i+1
muLodVcLod /= i+1
sig2Lod = 0
sig2VcLod = 0
sig2LodVcLod = 0
for j in range(0,i+1):
sig2Lod += (xmLoda[j,k]-muLod)**(2)
sig2VcLod += (xmVcLoda[j,k]-muVcLod)**(2)
sig2LodVcLod += (xmLodVcLoda[j,k]-muLodVcLod)**(2)
sig2Lod /= i
sig2VcLod /= i
sig2LodVcLod /= i
a = [np.sqrt(sig2Lod)/np.sqrt(i+1)*1.96,np.sqrt(sig2VcLod)/np.sqrt(i+1)*1.96,np.sqrt(sig2LodVcLod)/np.sqrt(i+1)*1.96]
mum = [muLod,muVcLod,muLodVcLod]
plottingx[i-1,k] = mum[0]-a[0]
plottingy[i-1,k] = mum[0]
plottingz[i-1,k] = mum[0]+a[0]
plotting2x[i-1,k] = mum[1]-a[1]
plotting2y[i-1,k] = mum[1]
plotting2z[i-1,k] = mum[1]+a[1]
plotting3x[i-1,k] = mum[2]-a[2]
plotting3y[i-1,k] = mum[2]
plotting3z[i-1,k] = mum[2]+a[2]
Matrix = CoefClass.Matrix.flatten()
ROOT = '../test_data/MonteCarlo/Coef1/p' + str(100/prob) + '/' + str(plist[k])
safer(ROOT, mum, a, plottingx[:,k], plottingy[:,k], plottingz[:,k], plotting2x[:,k], plotting2y[:,k], plotting2z[:,k], plotting3x[:,k], plotting3y[:,k], plotting3z[:,k], ems, Matrix)
return a,mum
def VcLod(pglod, world, Anew, eps, updated = 0,
numberofcorrectors=5):
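    # Virtual-coefficient LOD solve: update only the `numberofcorrectors`
    # percent of correctors with the largest error indicators, then assemble
    # and solve the coarse Petrov-Galerkin system.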
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
##### tolerance = certain ######
eps = filter(lambda x: x!=0, eps)
eps.sort()
epssize = np.size(eps)
until = int(round((numberofcorrectors/100. * epssize) +0.49,0))
if epssize != 0:
until = int(round((until * 256./epssize)+0.49,0))
tolrev = []
for i in range(epssize-1,-1,-1):
tolrev.append(eps[i])
if epssize == 0:
print 'nothing to update'
else:
if until >= epssize:
tol = 0
else:
tol = tolrev[until]
vistol = pglod.updateCorrectors(Anew, tol, f, clearFineQuantities=False, mc=True, Testing=True)
updated += np.sum(vistol)
print 'Updated correctors: ' + str(updated)
KFull = pglod.assembleMsStiffnessMatrix()
MFull = fem.assemblePatchMatrix(NWorldCoarse, world.MLocCoarse)
free = util.interiorpIndexMap(NWorldCoarse)
bFull = MFull*f
KFree = KFull[free][:,free]
bFree = bFull[free]
xFree = sparse.linalg.spsolve(KFree, bFree)
basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
basisCorrectors = pglod.assembleBasisCorrectors()
modifiedBasis = basis - basisCorrectors
xFull = np.zeros(NpCoarse)
xFull[free] = xFree
uCoarse = xFull
uVcLod = modifiedBasis*xFull
return uVcLod, updated
#background
bg = 0.05
#values
val = 1
random.seed(20)
#fine World
NWorldFine = np.array([256, 256])
NpFine = np.prod(NWorldFine+1)
#coarse World
NWorldCoarse = np.array([16,16])
NpCoarse = np.prod(NWorldCoarse+1)
#ratio between Fine and Coarse
NCoarseElement = NWorldFine/NWorldCoarse
boundaryConditions = np.array([[0, 0],
[0, 0]])
world = World(NWorldCoarse, NCoarseElement, boundaryConditions)
#righthandside
f = np.ones(NpCoarse)
#Coefficient 4
CoefClass = buildcoef2d.Coefficient2d(NWorldFine,
bg = bg,
val = val,
thick = 1,
space = 0,
probfactor = 1,
right = 0,
down = 0,
diagr1 = 1,
diagr2 = 0,
diagl1 = 1,
diagl2 = 0,
LenSwitch = [4,5,6,7,8],
thickSwitch = None,
equidistant = None,
ChannelHorizontal = None,
ChannelVertical = None,
BoundarySpace = None)
A = CoefClass.BuildCoefficient()
ABase = A.flatten()
plt.figure("OriginalCoefficient")
drawCoefficient(NWorldFine, ABase)
plt.title('Original coefficient')
k = 4
###### precompute #######
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
#interpolant
IPatchGenerator = lambda i, N: interp.L2ProjectionPatchMatrix(i, N, NWorldCoarse, NCoarseElement, boundaryConditions)
#old Coefficient (need flatten form)
ABase = A.flatten()
Aold = coef.coefficientFine(NWorldCoarse, NCoarseElement, ABase)
pglod = pg_rand.VcPetrovGalerkinLOD(Aold, world, k, IPatchGenerator, 0)
pglod.originCorrectors(clearFineQuantities=False)
#Perturbations
print '_____________ 1% Perturbations __________'
prob = 100
MC = 100
a, mum = result(pglod, world, CoefClass, A, f, MC, prob)
#Perturbations
print '_____________ 2% Perturbations __________'
prob = 50
MC = 100
a, mum = result(pglod, world, CoefClass, A, f, MC, prob) | apache-2.0 | -2,205,497,332,147,814,400 | 31.841155 | 194 | 0.543865 | false |
proxysh/Safejumper-for-Desktop | buildlinux/env64/lib/python2.7/site-packages/twisted/cred/test/test_cred.py | 13 | 13589 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.cred}, now with 30% more starch.
"""
from __future__ import absolute_import, division
from zope.interface import implementer, Interface
from binascii import hexlify, unhexlify
from twisted.trial import unittest
from twisted.python.compat import nativeString, networkString
from twisted.python import components
from twisted.internet import defer
from twisted.cred import checkers, credentials, portal, error
try:
from crypt import crypt
except ImportError:
crypt = None
class ITestable(Interface):
"""
An interface for a theoretical protocol.
"""
pass
class TestAvatar(object):
"""
A test avatar.
"""
def __init__(self, name):
self.name = name
self.loggedIn = False
self.loggedOut = False
def login(self):
assert not self.loggedIn
self.loggedIn = True
def logout(self):
self.loggedOut = True
@implementer(ITestable)
class Testable(components.Adapter):
"""
A theoretical protocol for testing.
"""
pass
components.registerAdapter(Testable, TestAvatar, ITestable)
class IDerivedCredentials(credentials.IUsernamePassword):
pass
@implementer(IDerivedCredentials, ITestable)
class DerivedCredentials(object):
def __init__(self, username, password):
self.username = username
self.password = password
def checkPassword(self, password):
return password == self.password
@implementer(portal.IRealm)
class TestRealm(object):
"""
A basic test realm.
"""
def __init__(self):
self.avatars = {}
def requestAvatar(self, avatarId, mind, *interfaces):
if avatarId in self.avatars:
avatar = self.avatars[avatarId]
else:
avatar = TestAvatar(avatarId)
self.avatars[avatarId] = avatar
avatar.login()
return (interfaces[0], interfaces[0](avatar),
avatar.logout)
class CredTests(unittest.TestCase):
"""
Tests for the meat of L{twisted.cred} -- realms, portals, avatars, and
checkers.
"""
def setUp(self):
self.realm = TestRealm()
self.portal = portal.Portal(self.realm)
self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
self.checker.addUser(b"bob", b"hello")
self.portal.registerChecker(self.checker)
def test_listCheckers(self):
"""
The checkers in a portal can check only certain types of credentials.
Since this portal has
        L{checkers.InMemoryUsernamePasswordDatabaseDontUse} registered, it
        can check L{IUsernamePassword} and L{IUsernameHashedPassword}
        credentials.
        """
expected = [credentials.IUsernamePassword,
credentials.IUsernameHashedPassword]
got = self.portal.listCredentialsInterfaces()
self.assertEqual(sorted(got), sorted(expected))
def test_basicLogin(self):
"""
Calling C{login} on a portal with correct credentials and an interface
that the portal's realm supports works.
"""
login = self.successResultOf(self.portal.login(
credentials.UsernamePassword(b"bob", b"hello"), self, ITestable))
iface, impl, logout = login
# whitebox
self.assertEqual(iface, ITestable)
self.assertTrue(iface.providedBy(impl),
"%s does not implement %s" % (impl, iface))
# greybox
self.assertTrue(impl.original.loggedIn)
self.assertTrue(not impl.original.loggedOut)
logout()
self.assertTrue(impl.original.loggedOut)
def test_derivedInterface(self):
"""
Logging in with correct derived credentials and an interface
that the portal's realm supports works.
"""
login = self.successResultOf(self.portal.login(
DerivedCredentials(b"bob", b"hello"), self, ITestable))
iface, impl, logout = login
# whitebox
self.assertEqual(iface, ITestable)
self.assertTrue(iface.providedBy(impl),
"%s does not implement %s" % (impl, iface))
# greybox
self.assertTrue(impl.original.loggedIn)
self.assertTrue(not impl.original.loggedOut)
logout()
self.assertTrue(impl.original.loggedOut)
def test_failedLoginPassword(self):
"""
Calling C{login} with incorrect credentials (in this case a wrong
password) causes L{error.UnauthorizedLogin} to be raised.
"""
login = self.failureResultOf(self.portal.login(
credentials.UsernamePassword(b"bob", b"h3llo"), self, ITestable))
self.assertTrue(login)
self.assertEqual(error.UnauthorizedLogin, login.type)
def test_failedLoginName(self):
"""
Calling C{login} with incorrect credentials (in this case no known
user) causes L{error.UnauthorizedLogin} to be raised.
"""
login = self.failureResultOf(self.portal.login(
credentials.UsernamePassword(b"jay", b"hello"), self, ITestable))
self.assertTrue(login)
self.assertEqual(error.UnauthorizedLogin, login.type)
class OnDiskDatabaseTests(unittest.TestCase):
users = [
(b'user1', b'pass1'),
(b'user2', b'pass2'),
(b'user3', b'pass3'),
]
def setUp(self):
self.dbfile = self.mktemp()
with open(self.dbfile, 'wb') as f:
for (u, p) in self.users:
f.write(u + b":" + p + b"\n")
def test_getUserNonexistentDatabase(self):
"""
A missing db file will cause a permanent rejection of authorization
attempts.
"""
self.db = checkers.FilePasswordDB('test_thisbetternoteverexist.db')
self.assertRaises(error.UnauthorizedLogin, self.db.getUser, 'user')
def testUserLookup(self):
self.db = checkers.FilePasswordDB(self.dbfile)
for (u, p) in self.users:
self.assertRaises(KeyError, self.db.getUser, u.upper())
self.assertEqual(self.db.getUser(u), (u, p))
def testCaseInSensitivity(self):
self.db = checkers.FilePasswordDB(self.dbfile, caseSensitive=False)
for (u, p) in self.users:
self.assertEqual(self.db.getUser(u.upper()), (u, p))
def testRequestAvatarId(self):
self.db = checkers.FilePasswordDB(self.dbfile)
creds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults(
[defer.maybeDeferred(self.db.requestAvatarId, c) for c in creds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testRequestAvatarId_hashed(self):
self.db = checkers.FilePasswordDB(self.dbfile)
creds = [credentials.UsernameHashedPassword(u, p)
for u, p in self.users]
d = defer.gatherResults(
[defer.maybeDeferred(self.db.requestAvatarId, c) for c in creds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
class HashedPasswordOnDiskDatabaseTests(unittest.TestCase):
users = [
(b'user1', b'pass1'),
(b'user2', b'pass2'),
(b'user3', b'pass3'),
]
def setUp(self):
dbfile = self.mktemp()
self.db = checkers.FilePasswordDB(dbfile, hash=self.hash)
with open(dbfile, 'wb') as f:
for (u, p) in self.users:
f.write(u + b":" + self.hash(u, p, u[:2]) + b"\n")
r = TestRealm()
self.port = portal.Portal(r)
self.port.registerChecker(self.db)
def hash(self, u, p, s):
return networkString(crypt(nativeString(p), nativeString(s)))
def testGoodCredentials(self):
goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults([self.db.requestAvatarId(c)
for c in goodCreds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testGoodCredentials_login(self):
goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults([self.port.login(c, None, ITestable)
for c in goodCreds])
d.addCallback(lambda x: [a.original.name for i, a, l in x])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testBadCredentials(self):
badCreds = [credentials.UsernamePassword(u, 'wrong password')
for u, p in self.users]
d = defer.DeferredList([self.port.login(c, None, ITestable)
for c in badCreds], consumeErrors=True)
d.addCallback(self._assertFailures, error.UnauthorizedLogin)
return d
def testHashedCredentials(self):
hashedCreds = [credentials.UsernameHashedPassword(
u, self.hash(None, p, u[:2])) for u, p in self.users]
d = defer.DeferredList([self.port.login(c, None, ITestable)
for c in hashedCreds], consumeErrors=True)
d.addCallback(self._assertFailures, error.UnhandledCredentials)
return d
def _assertFailures(self, failures, *expectedFailures):
for flag, failure in failures:
self.assertEqual(flag, defer.FAILURE)
failure.trap(*expectedFailures)
return None
if crypt is None:
skip = "crypt module not available"
class CheckersMixin(object):
"""
L{unittest.TestCase} mixin for testing that some checkers accept
and deny specified credentials.
Subclasses must provide
- C{getCheckers} which returns a sequence of
L{checkers.ICredentialChecker}
- C{getGoodCredentials} which returns a list of 2-tuples of
credential to check and avaterId to expect.
- C{getBadCredentials} which returns a list of credentials
which are expected to be unauthorized.
"""
@defer.inlineCallbacks
def test_positive(self):
"""
The given credentials are accepted by all the checkers, and give
the expected C{avatarID}s
"""
for chk in self.getCheckers():
for (cred, avatarId) in self.getGoodCredentials():
r = yield chk.requestAvatarId(cred)
self.assertEqual(r, avatarId)
@defer.inlineCallbacks
def test_negative(self):
"""
The given credentials are rejected by all the checkers.
"""
for chk in self.getCheckers():
for cred in self.getBadCredentials():
d = chk.requestAvatarId(cred)
yield self.assertFailure(d, error.UnauthorizedLogin)
class HashlessFilePasswordDBMixin(object):
credClass = credentials.UsernamePassword
diskHash = None
networkHash = staticmethod(lambda x: x)
_validCredentials = [
(b'user1', b'password1'),
(b'user2', b'password2'),
(b'user3', b'password3')]
def getGoodCredentials(self):
for u, p in self._validCredentials:
yield self.credClass(u, self.networkHash(p)), u
def getBadCredentials(self):
for u, p in [(b'user1', b'password3'),
(b'user2', b'password1'),
(b'bloof', b'blarf')]:
yield self.credClass(u, self.networkHash(p))
def getCheckers(self):
diskHash = self.diskHash or (lambda x: x)
hashCheck = self.diskHash and (lambda username, password,
stored: self.diskHash(password))
for cache in True, False:
fn = self.mktemp()
with open(fn, 'wb') as fObj:
for u, p in self._validCredentials:
fObj.write(u + b":" + diskHash(p) + b"\n")
yield checkers.FilePasswordDB(fn, cache=cache, hash=hashCheck)
fn = self.mktemp()
with open(fn, 'wb') as fObj:
for u, p in self._validCredentials:
fObj.write(diskHash(p) + b' dingle dongle ' + u + b'\n')
yield checkers.FilePasswordDB(fn, b' ', 3, 0,
cache=cache, hash=hashCheck)
fn = self.mktemp()
with open(fn, 'wb') as fObj:
for u, p in self._validCredentials:
fObj.write(b'zip,zap,' + u.title() + b',zup,'\
+ diskHash(p) + b'\n',)
yield checkers.FilePasswordDB(fn, b',', 2, 4, False,
cache=cache, hash=hashCheck)
class LocallyHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
diskHash = staticmethod(lambda x: hexlify(x))
class NetworkHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
networkHash = staticmethod(lambda x: hexlify(x))
class credClass(credentials.UsernameHashedPassword):
def checkPassword(self, password):
return unhexlify(self.hashed) == password
class HashlessFilePasswordDBCheckerTests(HashlessFilePasswordDBMixin,
CheckersMixin, unittest.TestCase):
pass
class LocallyHashedFilePasswordDBCheckerTests(LocallyHashedFilePasswordDBMixin,
CheckersMixin,
unittest.TestCase):
pass
class NetworkHashedFilePasswordDBCheckerTests(NetworkHashedFilePasswordDBMixin,
CheckersMixin,
unittest.TestCase):
pass
| gpl-2.0 | 1,550,437,384,784,342,500 | 29.814059 | 79 | 0.609169 | false |
leezu/mxnet | tests/python/unittest/test_gluon_data.py | 3 | 23820 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tarfile
import tempfile
import unittest
import mxnet as mx
import numpy as np
import random
from mxnet import gluon
import platform
from mxnet.gluon.data import DataLoader
import mxnet.ndarray as nd
from mxnet import context
from mxnet.gluon.data.dataset import Dataset
from mxnet.gluon.data.dataset import ArrayDataset
import pytest
def test_array_dataset():
X = np.random.uniform(size=(10, 20))
Y = np.random.uniform(size=(10,))
dataset = gluon.data.ArrayDataset(X, Y)
loader = gluon.data.DataLoader(dataset, 2)
for i, (x, y) in enumerate(loader):
assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])
assert mx.test_utils.almost_equal(y.asnumpy(), Y[i*2:(i+1)*2])
dataset = gluon.data.ArrayDataset(X)
loader = gluon.data.DataLoader(dataset, 2)
for i, x in enumerate(loader):
assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])
@pytest.fixture(scope="session")
def prepare_record(tmpdir_factory):
test_images = tmpdir_factory.mktemp("test_images")
test_images_tar = test_images.join("test_images.tar.gz")
gluon.utils.download("https://repo.mxnet.io/gluon/dataset/test/test_images-9cebe48a.tar.gz", str(test_images_tar))
tarfile.open(test_images_tar).extractall(str(test_images))
imgs = os.listdir(str(test_images.join("test_images")))
record = mx.recordio.MXIndexedRecordIO(str(test_images.join("test.idx")), str(test_images.join("test.rec")), 'w')
for i, img in enumerate(imgs):
with open(str(test_images.join("test_images").join(img)), 'rb') as f:
str_img = f.read()
s = mx.recordio.pack((0, i, i, 0), str_img)
record.write_idx(i, s)
return str(test_images.join('test.rec'))
def test_recordimage_dataset(prepare_record):
recfile = prepare_record
fn = lambda x, y : (x, y)
dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(fn)
loader = gluon.data.DataLoader(dataset, 1)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
def test_recordimage_dataset_handle(prepare_record):
recfile = prepare_record
class TmpTransform(mx.gluon.HybridBlock):
def hybrid_forward(self, F, x):
return x
fn = TmpTransform()
dataset = gluon.data.vision.ImageRecordDataset(recfile).transform_first(fn).__mx_handle__()
loader = gluon.data.DataLoader(dataset, 1)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
def _dataset_transform_fn(x, y):
"""Named transform function since lambda function cannot be pickled."""
return x, y
def _dataset_transform_first_fn(x):
"""Named transform function since lambda function cannot be pickled."""
return x
def test_recordimage_dataset_with_data_loader_multiworker(prepare_record):
recfile = prepare_record
dataset = gluon.data.vision.ImageRecordDataset(recfile)
loader = gluon.data.DataLoader(dataset, 1, num_workers=5, try_nopython=False)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
# with transform
dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(_dataset_transform_fn)
loader = gluon.data.DataLoader(dataset, 1, num_workers=5, try_nopython=None)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
# with transform_first
dataset = gluon.data.vision.ImageRecordDataset(recfile).transform_first(_dataset_transform_first_fn)
loader = gluon.data.DataLoader(dataset, 1, num_workers=5, try_nopython=None)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
def test_sampler():
seq_sampler = gluon.data.SequentialSampler(10)
assert list(seq_sampler) == list(range(10))
rand_sampler = gluon.data.RandomSampler(10)
assert sorted(list(rand_sampler)) == list(range(10))
seq_batch_keep = gluon.data.BatchSampler(seq_sampler, 3, 'keep')
assert sum(list(seq_batch_keep), []) == list(range(10))
seq_batch_discard = gluon.data.BatchSampler(seq_sampler, 3, 'discard')
assert sum(list(seq_batch_discard), []) == list(range(9))
rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep')
assert sorted(sum(list(rand_batch_keep), [])) == list(range(10))
def test_datasets(tmpdir):
p = tmpdir.mkdir("test_datasets")
assert len(gluon.data.vision.MNIST(root=str(p.join('mnist')))) == 60000
assert len(gluon.data.vision.MNIST(root=str(p.join('mnist')), train=False)) == 10000
assert len(gluon.data.vision.FashionMNIST(root=str(p.join('fashion-mnist')))) == 60000
assert len(gluon.data.vision.FashionMNIST(root=str(p.join('fashion-mnist')), train=False)) == 10000
assert len(gluon.data.vision.CIFAR10(root=str(p.join('cifar10')))) == 50000
assert len(gluon.data.vision.CIFAR10(root=str(p.join('cifar10')), train=False)) == 10000
assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')))) == 50000
assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')), fine_label=True)) == 50000
assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')), train=False)) == 10000
def test_datasets_handles(tmpdir):
p = tmpdir.mkdir("test_datasets_handles")
assert len(gluon.data.vision.MNIST(root=str(p.join('mnist'))).__mx_handle__()) == 60000
assert len(gluon.data.vision.MNIST(root=str(p.join('mnist')), train=False).__mx_handle__()) == 10000
assert len(gluon.data.vision.FashionMNIST(root=str(p.join('fashion-mnist'))).__mx_handle__()) == 60000
assert len(gluon.data.vision.FashionMNIST(root=str(p.join('fashion-mnist')), train=False).__mx_handle__()) == 10000
assert len(gluon.data.vision.CIFAR10(root=str(p.join('cifar10'))).__mx_handle__()) == 50000
assert len(gluon.data.vision.CIFAR10(root=str(p.join('cifar10')), train=False).__mx_handle__()) == 10000
assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100'))).__mx_handle__()) == 50000
assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')), fine_label=True).__mx_handle__()) == 50000
assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')), train=False).__mx_handle__()) == 10000
def test_image_folder_dataset(prepare_record):
dataset = gluon.data.vision.ImageFolderDataset(os.path.dirname(prepare_record))
assert dataset.synsets == ['test_images']
assert len(dataset.items) == 16
def test_image_folder_dataset_handle(prepare_record):
dataset = gluon.data.vision.ImageFolderDataset(os.path.dirname(prepare_record))
hd = dataset.__mx_handle__()
assert len(hd) == 16
assert (hd[1][0] == dataset[1][0]).asnumpy().all()
assert hd[5][1] == dataset[5][1]
def test_image_list_dataset(prepare_record):
root = os.path.join(os.path.dirname(prepare_record), 'test_images')
imlist = os.listdir(root)
imglist = [(0, path) for i, path in enumerate(imlist)]
dataset = gluon.data.vision.ImageListDataset(root=root, imglist=imglist)
assert len(dataset) == 16, len(dataset)
img, label = dataset[0]
assert len(img.shape) == 3
assert label == 0
# save to file as *.lst
imglist = ['\t'.join((str(i), '0', path)) for i, path in enumerate(imlist)]
with tempfile.NamedTemporaryFile('wt', delete=False) as fp:
for line in imglist:
fp.write(line + '\n')
fp.close()
dataset = gluon.data.vision.ImageListDataset(root=root, imglist=fp.name)
assert len(dataset) == 16, len(dataset)
img, label = dataset[0]
assert len(img.shape) == 3
assert label == 0
def test_image_list_dataset_handle(prepare_record):
root = os.path.join(os.path.dirname(prepare_record), 'test_images')
imlist = os.listdir(root)
imglist = [(0, path) for i, path in enumerate(imlist)]
dataset = gluon.data.vision.ImageListDataset(root=root, imglist=imglist).__mx_handle__()
assert len(dataset) == 16, len(dataset)
img, label = dataset[0]
assert len(img.shape) == 3
assert label == 0
# save to file as *.lst
imglist = ['\t'.join((str(i), '0', path)) for i, path in enumerate(imlist)]
with tempfile.NamedTemporaryFile('wt', delete=False) as fp:
for line in imglist:
fp.write(line + '\n')
fp.close()
dataset = gluon.data.vision.ImageListDataset(root=root, imglist=fp.name).__mx_handle__()
assert len(dataset) == 16
img, label = dataset[0]
assert len(img.shape) == 3
assert label == 0
@pytest.mark.garbage_expected
def test_list_dataset():
for num_worker in range(0, 3):
data = mx.gluon.data.DataLoader([([1,2], 0), ([3, 4], 1)], batch_size=1, num_workers=num_worker)
for d, l in data:
pass
class _Dataset(gluon.data.Dataset):
def __len__(self):
return 100
def __getitem__(self, key):
return mx.nd.full((10,), key)
@pytest.mark.garbage_expected
def test_multi_worker():
data = _Dataset()
for thread_pool in [True, False]:
loader = gluon.data.DataLoader(data, batch_size=1, num_workers=5, thread_pool=thread_pool)
for i, batch in enumerate(loader):
assert (batch.asnumpy() == i).all()
def test_multi_worker_shape():
for thread_pool in [True, False]:
batch_size = 1024
shape = (batch_size+1, 11, 12)
data = ArrayDataset(np.ones(shape))
loader = gluon.data.DataLoader(
data, batch_size=batch_size, num_workers=5, last_batch='keep', thread_pool=thread_pool)
for batch in loader:
if shape[0] > batch_size:
assert batch.shape == (batch_size, shape[1], shape[2])
shape = (shape[0] - batch_size, shape[1], shape[2])
else:
assert batch.shape == shape
class _Dummy(Dataset):
"""Dummy dataset for randomized shape arrays."""
def __init__(self, random_shape):
self.random_shape = random_shape
def __getitem__(self, idx):
key = idx
if self.random_shape:
out = np.random.uniform(size=(random.randint(1000, 1100), 40))
labels = np.random.uniform(size=(random.randint(10, 15)))
else:
out = np.random.uniform(size=(1000, 40))
labels = np.random.uniform(size=(10))
return key, out, labels
def __len__(self):
return 50
def _batchify_list(data):
"""
return list of ndarray without stack/concat/pad
"""
if isinstance(data, (tuple, list)):
return list(data)
if isinstance(data, mx.nd.NDArray):
return [data]
return data
def _batchify(data):
"""
Collate data into batch. Use shared memory for stacking.
    :param data: a list of arrays, with layout of 'NTC'.
    :return: x and x's unpadded lengths if labels are not supplied, or
        x, x's unpadded lengths, y and y's unpadded lengths if labels are supplied.
"""
# input layout is NTC
keys, inputs, labels = [item[0] for item in data], [item[1] for item in data], \
[item[2] for item in data]
if len(data) > 1:
max_data_len = max([seq.shape[0] for seq in inputs])
max_labels_len = 0 if not labels else max([seq.shape[0] for seq in labels])
else:
max_data_len = inputs[0].shape[0]
max_labels_len = 0 if not labels else labels[0].shape[0]
x_lens = [item.shape[0] for item in inputs]
y_lens = [item.shape[0] for item in labels]
for i, seq in enumerate(inputs):
pad_len = max_data_len - seq.shape[0]
inputs[i] = np.pad(seq, ((0, pad_len), (0, 0)), 'constant', constant_values=0)
labels[i] = np.pad(labels[i], (0, max_labels_len - labels[i].shape[0]),
'constant', constant_values=-1)
inputs = np.asarray(inputs, dtype=np.float32)
if labels is not None:
labels = np.asarray(labels, dtype=np.float32)
inputs = inputs.transpose((1, 0, 2))
labels = labels.transpose((1, 0))
return (nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),
nd.array(x_lens, ctx=context.Context('cpu_shared', 0))) \
if labels is None else (
nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),
nd.array(x_lens, ctx=context.Context('cpu_shared', 0)),
nd.array(labels, dtype=labels.dtype, ctx=context.Context('cpu_shared', 0)),
nd.array(y_lens, ctx=context.Context('cpu_shared', 0)))
def test_multi_worker_forked_data_loader():
data = _Dummy(False)
loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2)
for epoch in range(1):
for i, data in enumerate(loader):
pass
data = _Dummy(True)
loader = DataLoader(data, batch_size=40, batchify_fn=_batchify_list, num_workers=2)
for epoch in range(1):
for i, data in enumerate(loader):
pass
def test_multi_worker_dataloader_release_pool():
# will trigger too many open file if pool is not released properly
if os.name == 'nt':
print('Skip for windows since spawn on windows is too expensive.')
return
for _ in range(10):
A = np.random.rand(999, 2000)
D = mx.gluon.data.DataLoader(A, batch_size=8, num_workers=8)
the_iter = iter(D)
next(the_iter)
del the_iter
del D
def test_dataloader_context():
X = np.random.uniform(size=(10, 20))
dataset = gluon.data.ArrayDataset(X)
default_dev_id = 0
custom_dev_id = 1
# use non-pinned memory
loader1 = gluon.data.DataLoader(dataset, 8)
for _, x in enumerate(loader1):
assert x.context == context.cpu(default_dev_id)
# use pinned memory with default device id
loader2 = gluon.data.DataLoader(dataset, 8, pin_memory=True)
for _, x in enumerate(loader2):
assert x.context == context.cpu_pinned(default_dev_id)
if mx.context.num_gpus() <= 1:
print('Bypassing custom_dev_id pinned mem test on system with < 2 gpus.')
else:
# use pinned memory with custom device id
loader3 = gluon.data.DataLoader(dataset, 8, pin_memory=True,
pin_device_id=custom_dev_id)
for _, x in enumerate(loader3):
assert x.context == context.cpu_pinned(custom_dev_id)
def batchify(a):
return a
def test_dataset_filter():
length = 100
a = mx.gluon.data.SimpleDataset([i for i in range(length)])
a_filtered = a.filter(lambda x: x % 10 == 0)
assert(len(a_filtered) == 10)
for idx, sample in enumerate(a_filtered):
assert sample % 10 == 0
a_xform_filtered = a.transform(lambda x: x + 1).filter(lambda x: x % 10 == 0)
assert(len(a_xform_filtered) == 10)
# the filtered data is already transformed
for idx, sample in enumerate(a_xform_filtered):
assert sample % 10 == 0
def test_dataset_filter_handle():
length = 100
a = mx.gluon.data.SimpleDataset(np.arange(length))
a_filtered = a.filter(lambda x: x % 10 == 0).__mx_handle__()
assert(len(a_filtered) == 10)
for idx, sample in enumerate(a_filtered):
assert sample % 10 == 0
a_xform_filtered = a.transform(lambda x: x + 1).filter(lambda x: x % 10 == 0)
assert(len(a_xform_filtered) == 10)
# the filtered data is already transformed
for idx, sample in enumerate(a_xform_filtered):
assert sample % 10 == 0
def test_dataset_shard():
length = 9
a = mx.gluon.data.SimpleDataset([i for i in range(length)])
shard_0 = a.shard(4, 0)
shard_1 = a.shard(4, 1)
shard_2 = a.shard(4, 2)
shard_3 = a.shard(4, 3)
assert len(shard_0) + len(shard_1) + len(shard_2) + len(shard_3) == length
assert len(shard_0) == 3
assert len(shard_1) == 2
assert len(shard_2) == 2
assert len(shard_3) == 2
total = 0
for shard in [shard_0, shard_1, shard_2, shard_3]:
for idx, sample in enumerate(shard):
total += sample
assert total == sum(a)
def test_dataset_shard_handle():
length = 9
a = mx.gluon.data.SimpleDataset(np.arange(length))
shard_0 = a.shard(4, 0).__mx_handle__()
shard_1 = a.shard(4, 1).__mx_handle__()
shard_2 = a.shard(4, 2).__mx_handle__()
shard_3 = a.shard(4, 3).__mx_handle__()
assert len(shard_0) + len(shard_1) + len(shard_2) + len(shard_3) == length
assert len(shard_0) == 3
assert len(shard_1) == 2
assert len(shard_2) == 2
assert len(shard_3) == 2
total = 0
for shard in [shard_0, shard_1, shard_2, shard_3]:
for idx, sample in enumerate(shard):
total += sample
assert total == sum(a)
def test_dataset_take():
length = 100
a = mx.gluon.data.SimpleDataset([i for i in range(length)])
a_take_full = a.take(1000)
assert len(a_take_full) == length
a_take_full = a.take(None)
assert len(a_take_full) == length
count = 10
a_take_10 = a.take(count)
assert len(a_take_10) == count
expected_total = sum([i for i in range(count)])
total = 0
for idx, sample in enumerate(a_take_10):
assert sample < count
total += sample
assert total == expected_total
a_xform_take_10 = a.transform(lambda x: x * 10).take(count)
assert len(a_xform_take_10) == count
expected_total = sum([i * 10 for i in range(count)])
total = 0
for idx, sample in enumerate(a_xform_take_10):
assert sample < count * 10
total += sample
assert total == expected_total
def test_dataset_take_handle():
length = 100
a = mx.gluon.data.SimpleDataset(np.arange(length))
a_take_full = a.take(1000).__mx_handle__()
assert len(a_take_full) == length
a_take_full = a.take(None).__mx_handle__()
assert len(a_take_full) == length
count = 10
a_take_10 = a.take(count).__mx_handle__()
assert len(a_take_10) == count
expected_total = sum([i for i in range(count)])
total = 0
for idx, sample in enumerate(a_take_10):
assert sample < count
total += sample
assert total == expected_total
a_xform_take_10 = a.take(count).__mx_handle__()
assert len(a_xform_take_10) == count
expected_total = sum([i for i in range(count)])
total = 0
for idx, sample in enumerate(a_xform_take_10):
assert sample < count
total += sample
assert total == expected_total
@pytest.mark.garbage_expected
def test_dataloader_scope():
"""
Bug: Gluon DataLoader terminates the process pool early while
_MultiWorkerIter is operating on the pool.
Tests that DataLoader is not garbage collected while the iterator is
in use.
"""
args = {'num_workers': 1, 'batch_size': 2}
dataset = nd.ones(5)
iterator = iter(DataLoader(
dataset,
batchify_fn=batchify,
**args
)
)
item = next(iterator)
assert item is not None
def test_mx_datasets_handle():
# _DownloadedDataset
mnist = mx.gluon.data.vision.MNIST(train=False).__mx_handle__()
assert len(mnist) == 10000
cifar10 = mx.gluon.data.vision.CIFAR10(train=False).__mx_handle__()
assert len(cifar10) == 10000
# _SampledDataset
s_mnist = mnist.take(100).__mx_handle__()
assert len(s_mnist) == 100
assert np.all(s_mnist[0][0].asnumpy() == mnist[0][0].asnumpy())
assert s_mnist[0][1] == mnist[0][1]
# ArrayDataset
mc = mx.gluon.data.ArrayDataset(mnist.take(100), cifar10.take(100)).__mx_handle__()
assert len(mc) == 100
assert len(mc[0]) == 4 # two from mnist, two from cifar10
assert mc[0][1] == mnist[0][1]
assert mc[0][3] == cifar10[0][1]
def test_mx_data_loader():
from mxnet.gluon.data.dataloader import DataLoader
dataset = mx.gluon.data.vision.MNIST(train=False)
dl = DataLoader(num_workers=0, dataset=dataset, batch_size=32)
for _ in dl:
pass
def test_mx_data_loader_nopython():
from mxnet.gluon.data.dataloader import DataLoader
from mxnet.gluon.data.vision.transforms import ToTensor
dataset = mx.gluon.data.vision.MNIST(train=False)
dl1 = DataLoader(dataset=dataset.transform_first(ToTensor()), batch_size=32, try_nopython=True, shuffle=False)
dl2 = DataLoader(dataset=dataset.transform_first(ToTensor()), batch_size=32, try_nopython=False, shuffle=False)
assert len(dl1) == len(dl2)
assert np.all(next(iter(dl1))[1].asnumpy() == next(iter(dl2))[1].asnumpy())
for _ in dl1:
pass
def test_batchify_stack():
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
b = np.array([[5, 6, 7, 8], [1, 2, 3, 4]])
bf = mx.gluon.data.batchify.Stack()
bf_handle = bf.__mx_handle__()
c = bf([a, b])
d = bf_handle([a, b])
assert c.shape == d.shape
assert mx.test_utils.almost_equal(c.asnumpy(), d.asnumpy())
assert mx.test_utils.almost_equal(c.asnumpy(), np.stack((a, b)))
def test_batchify_pad():
a = np.array([[1, 2, 3, 4], [11, 12, 13, 14]])
b = np.array([[4, 5, 6]])
c = np.array([[9, 10]])
bf = mx.gluon.data.batchify.Pad(val=-1)
bf_handle = bf.__mx_handle__()
d = bf([a, b, c])
e = bf_handle([a, b, c])
assert d.shape == e.shape
assert mx.test_utils.almost_equal(d.asnumpy(), e.asnumpy())
expected = np.array([[[ 1., 2., 3., 4.], [11., 12., 13., 14.]],
[[ 4., 5., 6., -1.], [-1., -1., -1., -1.]],
[[ 9., 10., -1., -1.], [-1., -1., -1., -1.]]])
assert mx.test_utils.almost_equal(d.asnumpy(), expected)
def test_batchify_group():
a = [np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), np.array([[1, 2, 3, 4], [11, 12, 13, 14]])]
b = [np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), np.array([[4, 5, 6]])]
c = [np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), np.array([[9, 10]])]
bf = mx.gluon.data.batchify.Group(mx.gluon.data.batchify.Stack(), mx.gluon.data.batchify.Pad(val=-1))
bf_handle = bf.__mx_handle__()
d = bf([a, b, c])
e = bf_handle([a, b, c])
assert d[0].shape == e[0].shape
assert d[1].shape == e[1].shape
print(d[0].asnumpy(), ',', e[0].asnumpy(), ',', e[1].asnumpy())
assert mx.test_utils.almost_equal(d[0].asnumpy(), e[0].asnumpy())
assert mx.test_utils.almost_equal(d[1].asnumpy(), e[1].asnumpy())
assert mx.test_utils.almost_equal(d[0].asnumpy(), np.stack((a[0], b[0], c[0])))
expected = np.array([[[ 1., 2., 3., 4.], [11., 12., 13., 14.]],
[[ 4., 5., 6., -1.], [-1., -1., -1., -1.]],
[[ 9., 10., -1., -1.], [-1., -1., -1., -1.]]])
assert mx.test_utils.almost_equal(d[1].asnumpy(), expected)
def test_sampler():
interval_sampler = mx.gluon.data.IntervalSampler(10, 3)
assert sorted(list(interval_sampler)) == list(range(10))
interval_sampler = mx.gluon.data.IntervalSampler(10, 3, rollover=False)
assert list(interval_sampler) == [0, 3, 6, 9]
| apache-2.0 | -8,165,386,192,729,708,000 | 38.568106 | 119 | 0.623426 | false |
jimallman/temporal-modelling | source/ResourceLibrary.py | 1 | 39272 | ##
## Copyright 2010 by the Rector and Visitors of the University of Virginia
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
# The Resource Library is a Zope object--a Folder with these
# external methods, plus internal storage areas for private and
# public resources (data models, etc)
#
# By calling the ResourceLibrary, these methods can be called
# using XML-RPC, or (within reason) directly via URL+querystring
#
# NOTE: Using external methods means we can handle XML-RPC
# arguments in a normal fashion (versus DTML clumsiness).
# Assume the following structured args in all methods:
#
# 'authInfo' is a dictionary in this format:
# {'name':'Joe Blow', 'password':'secretsauce'}
# This should always be optional, for normal HTML requests, but
# is available to support authentication from Flash XML-RPC (which
# doesn't support HTTP Basic Authentication)
#
# 'resourceID' is a string (NOT necessarily an int), a unique identifier
#
# 'resourceMetadata' is a dictionary in this format:
# {'type':'story', 'name':'Tale of Two Cities', 'description':'It was the best...'}
#
# 'resourceData' is an XML string
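#
# --- Illustrative sketch (not part of the Library itself) ---
# The snippet below shows roughly how a Python client might call these external
# methods over XML-RPC, using the argument formats described above. The server
# URL, application path, resource path, and credentials are assumptions made up
# for illustration only; substitute the values for your own Zope instance.
def _exampleXMLRPCClientUsage():
    import xmlrpclib
    # ASSUMPTION: the ResourceLibrary folder lives at this URL
    library = xmlrpclib.ServerProxy( 'http://localhost:8080/MyApp/ResourceLibrary' )
    authInfo = {'name':'Joe Blow', 'password':'secretsauce'}
    # ask for an XML index of everything this user may see
    indexXML = library.listResources( 'all', authInfo )
    # fetch one resource by its path (relative to ResourceLibrary); this path is made up
    resourceInfo = library.fetchResource( 'SharedData/story_12', authInfo )
    return indexXML, resourceInfo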
def userHasPermission( user, permission, object ):
import AccessControl.PermissionRole
# While this is similar to User.has_permission, NOTE that this
# really will check for permission on behalf of *any* User!
# First we get the roles they'll need to do what they want
neededRoles = AccessControl.PermissionRole.rolesForPermissionOn( permission, object )
if type(neededRoles) is type('a'): # make sure it's a list!
neededRoles = [neededRoles]
# Now test to see if this user *has* any of the needed roles
return user.allowed( object, neededRoles )
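# --- Illustrative sketch of how userHasPermission is typically used ---
# Unlike User.has_permission (which only answers for the currently authenticated
# user), this helper can be asked about any User object we can look up. The user
# name and target object below are assumptions for illustration only.
def _examplePermissionCheck( self, someUserName='billy' ):
    # look the user up in the app's main User Folder (the same one the methods below use)
    someUser = self.ResourceLibrary.aq_parent.acl_users.getUser( someUserName )
    if someUser is None:
        return "ERROR: no such user '%s'" % (someUserName)
    # ask whether that user could view the shared data area
    return userHasPermission( someUser, 'View', self.ResourceLibrary.SharedData )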
def listResources( self, whichType='all', authInfo=None ):
# Return an XML index of all resources (with IDs and metadata)
# that are visible to this user
#TODO: Why is this receiving 3 arguments? Who's sending the 3rd?!
theUser = getCurrentUser( self, authInfo )
# 'whichType' should be 'private', 'shared', or 'all' (default)
# Retrieve from appropriate storage containers in each case
if whichType == 'private':
privateFolder = getPrivateFolderForUser( self, theUser )
if privateFolder is None:
return "ERROR: No private folder for user '%s'!" % (theUser)
containerList = [privateFolder]
elif whichType == 'shared':
containerList = [self.SharedData]
else: # include models from both libraries (MY private, and shared)
privateFolder = getPrivateFolderForUser( self, theUser )
if privateFolder is None:
return "ERROR: No private folder for user '%s'!" % (theUser)
containerList = [self.SharedData, privateFolder]
### DIAGNOSTIC
# testOut = "found %s containers:\n" % len(containerList)
# for aContainer in containerList:
# testOut += " %s\n" % aContainer
# return testOut
listXML = """<xml>
<!-- Here's a list of %s resources on this server -->
<!-- current user is '%s' -->
<ResourceCollection>
""" % (escapeForXML(whichType), escapeForXML(theUser.getUserName()) )
# Buzz through all the listed containers, listing resources in each
for aContainer in containerList:
# Add an XML node for each Container (insert ID and Zope-path)
containerID = escapeForXML( aContainer.getId() )
containerPath = escapeForXML( aContainer.absolute_url( relative=self.ResourceLibrary ) )
listXML += """
<Container id="%s" path="%s">
""" % (containerID, containerPath)
for aModel in aContainer.objectValues('DTML Document'):
# For each model found, add an XML node describing its metadata
# (using a formatting string to build the XML)
listXML += """
<%s
ID="%s"
name="%s"
description="%s"
path="%s" />
""" % (
getattr(aModel, 'resourceType', "resource"),
escapeForXML( aModel.getId() ),
escapeForXML( aModel.title ),
escapeForXML( getattr(aModel, 'description', "No description") ),
escapeForXML( aModel.absolute_url( relative=self.ResourceLibrary ) )
)
#, aModel.getId(), aModel.title(), aModel.description(), aModel.absolute_url())
# Add closing Container tag
listXML += """
</Container>
"""
# Add closing collection tag
listXML += """
</ResourceCollection>
</xml>"""
return listXML
########### Managing resources (stored items in Library) ###############
def fetchResource( self, resourcePath='', authInfo=None ):
import AccessControl, AccessControl.PermissionRole
# Return a structure including XML data *and* library meta-data,
# or an error message (resource not found, or authorization failed, etc)
if resourcePath == '':
return "ERROR: No resourcePath provided!"
theUser = getCurrentUser( self, authInfo )
# Try to find the named resource
desiredResource = self.unrestrictedTraverse( path=resourcePath, default=None )
if not desiredResource:
return "ERROR: Resource at '%s' not found!" % (resourcePath)
#return theUser.has_permission( 'View', desiredResource )
# Hm, this returns 1 through the HTML view, 0 through XML-RPC!?
# OH, I see the problem. It stupidly assumes that we're only
# interested in the current authenticated user, and would never
# want to check on someone else's permissions. (D'oh!)
# If they have permission to view it, send the data
# NOTE: We'll use our own version of 'has_permission', which works
if userHasPermission( theUser, 'View', desiredResource ):
#return desiredResource.document_src()
resourceInfo = {
'resourceType': desiredResource.resourceType,
'name': desiredResource.title,
'description': desiredResource.description,
'folderPath': desiredResource.aq_parent.absolute_url(relative=self.ResourceLibrary),
'ID': desiredResource.getId(),
'ownerID': desiredResource.owner_info()['id'],
'data': desiredResource.document_src()
}
return resourceInfo
else:
return "ERROR: User '%s' not authorized to view the resource '%s'!" % (theUser, resourcePath)
def addOrUpdateResource( self, folderPath, ID, name, description, resourceData, type='', authInfo=None, requestor='XML-RPC' ):
# Add the data as a new resource, or (if it exists already) update its
# metadata or XML-data to reflect recent changes
# NOTE: 'folderPath' is a Zope path *relative* to 'ResourceLibrary'
# Let's try to traverse there now..
libRoot = self.ResourceLibrary
##return folderPath# == 'PrivateData/JeffersonHigh/3rdPeriod/Billy'
targetFolder = libRoot.unrestrictedTraverse( path=folderPath, default=None )
if (targetFolder != None):
        # keep going, we found the folder
        # (NOTE: the traversal result doesn't compare reliably with '==' or 'is',
        # so we keep the '!=' test and bail out in the else branch below)
        pass
else:
return "ERROR: Unable to find this Folder: '%s'!" % (folderPath)
# Check to see if the user has permission to add or update resources here
theUser = getCurrentUser( self, authInfo )
if not userHasPermission( theUser, 'Change DTML Documents', targetFolder ):
# never mind, they're not authorized to do this
return "ERROR: User '%s' is NOT allowed to add or update in folder '%s'!" % (theUser, targetFolder.getId())
if ID == '':
# It's a new resource, create it in the Folder at 'folderPath'
# Pull the next (serial) ID of the Library's stack
newIDnumber = _getNextAvailableID(self);
if type == '': # no type specified, just use generic ID "generic_123"
itsType = 'generic';
else:
itsType = type;
# reset the argument 'ID' to have its new ID
ID = "%s_%s" % (itsType, newIDnumber);
# OK, create the new resource. NOTE the funky intermediate object (ObjectManager?)
# that's necessary here...
targetFolder.manage_addProduct['OFSP'].manage_addDTMLDocument(id=ID, title=name);
#return targetFolder[ID].absolute_url()
# Now test to see if the object was added successfully
testObj = getattr(targetFolder, ID, None);
if testObj is None: # not found!
return "ERROR: Unable to add resource '%s/%s'!" % (folderPath, ID);
else: # found our new child object successfully
testObj.manage_addProperty('description', "", 'string');
# NOTE that this will be immediately over-written downstream..
testObj.manage_addProperty('resourceType', itsType, 'string')
# end of exceptional behavior for new resources
# Traverse to the target resource at "folderPath/ID"
#TODO: Confirm that it exists, and that we have permission
fullPathToResource = "%s/%s" % (folderPath, ID)
targetResource = libRoot.unrestrictedTraverse( path=fullPathToResource, default=None )
if targetResource is None: #TODO: Test for failure? Is this right?
return "ERROR:Unable to traverse to the target resource at '%s'!" % (fullPathToResource)
# Modify the object's attributes and data
targetResource.manage_edit(data=resourceData, title=name) # changes its resource-data and name
targetResource.manage_changeProperties( description = description )
# Now provide the appropriate return value based on who's asking
if requestor == 'XML-RPC': # respond with the updated resource (to update metadata)
return fetchResource(self, resourcePath=fullPathToResource, authInfo=authInfo)
else:
# we were called from the HTML interface, so render the target Folder's page
##return targetResource.aq_parent.index_html(self.REQUEST)
self.REQUEST.RESPONSE.redirect( targetResource.aq_parent.absolute_url() )
#self.REQUEST.RESPONSE.redirect( self.REQUEST.HTTP_REFERER )
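# --- Illustrative sketch (assumed values marked below) ---
# Creating a brand-new resource from an XML-RPC client might look roughly like
# this: pass an empty ID so the Library mints one from its serial counter, and a
# folderPath relative to ResourceLibrary. The server URL, folder path, and the
# sample story values are assumptions for illustration only.
def _exampleAddResourceFromClient():
    import xmlrpclib
    library = xmlrpclib.ServerProxy( 'http://localhost:8080/MyApp/ResourceLibrary' )
    resourceInfo = library.addOrUpdateResource(
        'SharedData',                       # folderPath, relative to ResourceLibrary
        '',                                 # empty ID means "create a new resource"
        'Tale of Two Cities',               # name
        'It was the best...',               # description
        '<story></story>',                  # resourceData (an XML string)
        'story',                            # type, used to build the new ID, e.g. "story_123"
        {'name':'Joe Blow', 'password':'secretsauce'} )   # authInfo
    # for XML-RPC callers the method answers with the stored resource's metadata and data
    return resourceInfo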
def deleteResource( self, resourcePath='', authInfo=None, requestor='XML-RPC', ignoreMe='BOGUS' ):
# Remove the resource at the specified path, IF this user
# has the necessary permission to do so; return result code (if XML request)
# or re-direct to its container (if HTML request)
# - 'resourcePath' should be relative to 'ResourceLibrary'
# Once again, we're receiving a mysterious third argument (None?) -- Why?!
##return "self=%s, resourcePath=%s, ignoreMe=%s" % (self, resourcePath, ignoreMe)
targetResource = self.ResourceLibrary.unrestrictedTraverse( path=resourcePath, default=None )
if not targetResource:
return "ERROR: No resource found at '%s'!" % (resourcePath)
# Check to see if the user has permission to delete this resource
theUser = getCurrentUser( self, authInfo )
if userHasPermission( theUser, 'Delete Objects', targetResource ):
targetID = targetResource.getId()
targetParent = targetResource.aq_parent
targetParent.manage_delObjects( [targetID] )
if requestor == 'XML-RPC':
return "<OK/>"
else: # we were probably called from HTML
self.REQUEST.RESPONSE.redirect( targetParent.absolute_url() )
else:
return "ERROR: User '%s' is NOT authorized to delete this resource! \n\n%s" % (theUser, targetResource.getId())
########### Managing user accounts (incl. a private storage folder) ###############
def addOrUpdateAccount( self, isNewAccount, loginName, folderName, parentFolderPath, newPassword, confirmPassword, canAddSubgroups=0, newFolderName=None, sizeAllowedKB="32", requestor='XML-RPC' ):
# Add (or modify) a user account with Library features:
# - We assume the loginName is valid for uniqueness (test client-side)
# - Each user has a private folder, which they manage (local role)
libRoot = self.ResourceLibrary
# Let's try to traverse to the group folder first
groupFolder = libRoot.unrestrictedTraverse( path=parentFolderPath, default=None )
if not groupFolder:
return "ERROR: group folder '%s' not found!" % (parentFolderPath)
# Oh, and grab our big user folder (for the entire app)
bigUserFolder = libRoot.aq_parent.acl_users
if not bigUserFolder:
return "ERROR: No main User Folder found for app '%s'!" % (libRoot.aq_parent.getId())
# Are we creating a new account, or modifying an old one?
if int(isNewAccount) == 1:
# Test to make sure it's a unique user-name
if bigUserFolder.getUser( loginName ):
# This account already exists, let's not mess it up
return "ERROR: Sorry, a user with the name '%s' already exists." % (loginName)
# Create the new user in our main User Folder
bigUserFolder._addUser(
loginName,
newPassword,
confirmPassword,
['Authenticated'],
domains=[])
# If no folder name was specified, use the specified userid as its name
if folderName == '':
folderName = loginName
# Create their private Folder inside current group folder
# Is there already a folder by this name? That would be a Bad Thing
testFolder = getattr(groupFolder, folderName, None)
if testFolder:
return "ERROR: There's already a user folder here: 's'" % (testFolder.absolute_url(relative=self.ResourceLibrary))
# OK, create the private folder.NOTE the funky intermediate object (ObjectManager?)
# that's necessary here...
groupFolder.manage_addProduct['OFSP'].manage_addFolder(id=folderName, title=folderName);
# Set up its local role (new user is Manager here)
privateFolder = getattr(groupFolder, folderName, None)
if not privateFolder:
return "ERROR: couldn't create the private folder!"
privateFolder.manage_setLocalRoles(userid=loginName, roles=['Manager'])
# Set up other special properties
privateFolder.manage_addProperty( id='subtype', value='MEMBER', type='string' )
privateFolder.manage_addProperty( id='canAddSubgroups', value=canAddSubgroups, type='boolean' )
privateFolder.manage_addProperty( id='sizeAllowed', value=sizeAllowedKB, type='int' )
else: # we're editing an existing account
# Test to make sure the account already exists
# grab the private folders
privateFolder = groupFolder[folderName]
if not privateFolder:
return "ERROR: Sorry, I can't find the private folder '%s'!" % ("%s/%s" % (parentFolderPath, folderName))
# and grab the assigned user
if not bigUserFolder.getUser( loginName ):
return "ERROR: Sorry, I can't find user '%s'." % (loginName)
# Now update the values of its special properties
privateFolder.manage_changeProperties(
{'canAddSubgroups':int(canAddSubgroups), 'sizeAllowed':int(sizeAllowedKB) }
)
# Rename the private folder?
if newFolderName != '' and newFolderName != None: # did they pass a new folder name?
if privateFolder.getId() != newFolderName: # has it changed?
groupFolder.manage_renameObject(id=(privateFolder.getId()), new_id=newFolderName)
privateFolder.manage_changeProperties( {'title':newFolderName} )
else: # if they've cleared the newFolderName field, then it should mirror the login name!
if not getattr(groupFolder, loginName, None): # sanity check for like-named folder..
groupFolder.manage_renameObject(id=(privateFolder.getId()), new_id=loginName)
privateFolder.manage_changeProperties( {'title':loginName} )
# Change this account's password?
if newPassword == confirmPassword:
if len(newPassword) > 0: # ignore this if field is blank
#bigUserFolder.userFolderEditUser(name=loginName, password=newPassword, roles=['Manager'])
bigUserFolder._changeUser(name=loginName,
password=newPassword,
confirm=newPassword,
roles=['Authenticated'],
domains=[]
)
else:
return "ERROR: Password didn't match, please try again."
# Now provide the appropriate return value based on who's asking
if requestor == 'XML-RPC': # respond with simple XML, including the resource's path
return '<OK>%s</OK>' % ( privateFolder.absolute_url() )
else:
# we were called from the HTML interface, so render the target Folder's page
self.REQUEST.RESPONSE.redirect( groupFolder.absolute_url() )
def deleteAccount( self, privateFolderPath='', ignoreMe='BOGUS' ):
# Remove the account folder at the specified path, IF this user
# has the necessary permission to do so. NOTE that this also requires
# that we remove its appointed Manager (see local roles) from the
# main User Folder, and destroy (or re-assign) their resources in
# the shared area.
#
# Perhaps there's an option 'deleteAllData=T/F'? If false, then we
# change ownership of all resources and folders to the acting
# manager (current user)
# Return result-fragment (if XML request), or re-direct to its parent
# container (a group folder) if it's an HTML request
# - 'privateFolderPath' should be relative to 'ResourceLibrary'
# Grab the private folder and its parent container
privateFolder = self.ResourceLibrary.unrestrictedTraverse( privateFolderPath, default=None )
if privateFolder is None:
return "ERROR: private folder '%s' not found!" % (privateFolderPath)
itsGroupFolder = privateFolder.aq_parent
# Fetch the current user, and test for needed permissions
currentUser = self.REQUEST.AUTHENTICATED_USER
if currentUser.has_permission( 'Delete Objects', itsGroupFolder ):
# identify the private folder's associated user (local Manager)
localManagers = privateFolder.users_with_local_role('Manager')
if len(localManagers) == 0:
return "ERROR: No local manager found in folder '%s'!" % (privateFolder.getId())
if len(localManagers) > 1:
return "ERROR: Multiple local managers found in folder '%s'!" % (privateFolder.getId())
localManagerName = localManagers[0]
# remove any of this user's resources in the shared area
doomedResourceIDs = []
sharedDataFolder = self.ResourceLibrary.SharedData
for aSharedResource in sharedDataFolder.objectValues('DTML Document'):
# get the name of the owner of this resource
itsOwnerName = aSharedResource.owner_info()['id']
if itsOwnerName == localManagerName:
# it's owned by the account we're deleting
doomedResourceIDs.append( aSharedResource.getId() )
if len(doomedResourceIDs) > 0:
sharedDataFolder.manage_delObjects( doomedResourceIDs )
# delete any groups defined inside (recurse until all contents are clear)
for aSubfolder in privateFolder.objectValues('Folder'):
# delete each sub-folder
self.deleteGroup( groupFolderPath=aSubfolder.absolute_url(relative=self.ResourceLibrary) )
# delete the private folder
targetID = privateFolder.getId()
itsGroupFolder.manage_delObjects( [targetID] )
# remove the associated user
bigUserFolder = self.ResourceLibrary.aq_parent.acl_users
bigUserFolder._delUsers( [localManagerName] )
if self.REQUEST: # we were probably called from HTML
# redirect to a view of its parent group (updated member list)
self.REQUEST.RESPONSE.redirect( itsGroupFolder.absolute_url() )
else: # we were probably called from script, or XML-RPC
return "<OK/>"
else:
return "ERROR: User '%s' is NOT authorized to delete this account!" % currentUser
########### Managing user groups (clusters of user accounts) ###############
def addOrUpdateGroup( self, isNewGroup, parentFolderPath, groupName, newGroupName=None, requestor='XML-RPC' ):
# Add (or modify) a user group (cluster of private data folders)
libRoot = self.ResourceLibrary
# Let's try to traverse to the parent folder first
parentFolder = libRoot.unrestrictedTraverse( path=parentFolderPath, default=None )
if not parentFolder:
return "ERROR: group's parent folder '%s' not found!" % (parentFolderPath)
# Are we creating a new group, or modifying an old one?
if int(isNewGroup) == 1:
# Create the group's Folder inside the parent folder
# Is there already a local group by this name? That would be a Bad Thing
testGroup = getattr(parentFolder, groupName, None)
if testGroup:
foundPath = testGroup.absolute_url()
return "ERROR: There's already a group folder here: \n\n'%s'" % (foundPath)
# OK, create the group folder. NOTE the funky intermediate object (ObjectManager?)
# that's necessary here...
parentFolder.manage_addProduct['OFSP'].manage_addFolder(id=groupName, title=groupName);
# make sure we can get the new group folder
groupFolder = getattr(parentFolder, groupName, None)
if not groupFolder:
return "ERROR: couldn't create the group folder!"
# Set up other special properties
groupFolder.manage_addProperty( id='subtype', value='GROUP', type='string' )
else: # we're editing an existing account
# Test to make sure the account already exists
# grab the group folder
groupFolder = getattr( parentFolder, groupName, None )
if not groupFolder:
return "ERROR: Sorry, I can't find the group folder '%s'!" % ("%s/%s" % (parentFolderPath, folderName))
# Rename the group folder?
if newGroupName: # did they pass a new group name?
if groupFolder.getId() != newGroupName: # has it changed?
parentFolder.manage_renameObject(id=(groupFolder.getId()), new_id=newGroupName)
groupFolder.manage_changeProperties( {'title':newGroupName} )
# Now provide the appropriate return value based on who's asking
if requestor == 'XML-RPC': # respond with simple XML, including the resource's path
return '<OK>%s</OK>' % ( groupFolder.absolute_url() )
else:
# we were called from the HTML interface, so render the target Folder's page
self.REQUEST.RESPONSE.redirect( parentFolder.absolute_url() )
def deleteGroup( self, groupFolderPath='', ignoreMe='BOGUS' ):
# Remove the group folder at the specified path, IF this user
# has the necessary permission to do so. NOTE that this also requires
# that we delete all of its member accounts (see child 'MEMBER' folders)
#
# Perhaps there's an option 'deleteAllData=T/F'? If false, then we
# change ownership of all resources and folders to the acting
# manager (current user)
# Return result-fragment (if XML request), or re-direct to its parent
# container (the administrative account folder) if it's an HTML request
# - 'groupFolderPath' should be relative to 'ResourceLibrary'
# Grab the group folder and its parent container
groupFolder = self.ResourceLibrary.unrestrictedTraverse( groupFolderPath, default=None )
if not groupFolder:
return "ERROR: group folder '%s' not found!" % (privateFolderPath)
parentFolder = groupFolder.aq_parent
# Fetch the current user, and test for needed permissions
currentUser = self.REQUEST.AUTHENTICATED_USER
if currentUser.has_permission( 'Delete Objects', parentFolder ):
# remove any of this group's member accounts (should destroy their shared resources, too)
for aSubfolder in groupFolder.objectValues('Folder'):
# delete each sub-folder
self.deleteAccount( privateFolderPath=aSubfolder.absolute_url(relative=self.ResourceLibrary) )
# delete the group folder
targetID = groupFolder.getId()
parentFolder.manage_delObjects( [targetID] )
if self.REQUEST: # we were probably called from HTML
# redirect to a view of its parent group (updated member list)
self.REQUEST.RESPONSE.redirect( parentFolder.absolute_url() )
else: # we were probably called from script, or XML-RPC
return "<OK/>"
else:
return "ERROR: User '%s' is NOT authorized to delete this group!" % currentUser
##### Get lists of sibling names (userids, private folder names, group folder names,
##### or resource names) to prompt users for unique input
def getListOfNames( nameType, parentContainer, skipName=None ):
import string
    # 'nameType' should be 'user', 'folder' (a private or group folder), or 'resource'
# 'parentContainer' should be whatever object holds the items we're listing
# 'skipName' is optional; use to omit the name of an item whose name we're editing
if nameType == 'user':
# parentContainer should be a UserFolder, just get its names
nameList = parentContainer.getUserNames()
elif nameType == 'resource':
# parentContainer should be a Folder, get its DTML Documents
docList = parentContainer.objectValues( 'DTML Document' )
# if a skip-name was specified, leave it out!
# (NOTE: We're replacing all single quotes with escaped versions
# for safe quoting in Javascript)
nameList = [string.replace(aDoc.title, "\'", "\\\'") for aDoc in docList if (aDoc.title != skipName)]
elif nameType == 'folder': # private or group folder
# parentContainer should be a Folder, get its DTML Documents
folderList = parentContainer.objectValues( 'Folder' )
# if a skip-name was specified, leave it out!
nameList = [aFolder.title for aFolder in folderList] # if (aFolder.title != skipName)]
else:
return "ERROR: I don't know this nameType: '%s'" % (nameType)
return nameList
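# --- Illustrative sketch of calling getListOfNames ---
# This helper is meant to be called (e.g. from a page template) to pre-load the
# sibling names a form needs for client-side uniqueness checks. The folder path
# and the skipName value below are assumptions for illustration only.
def _exampleSiblingNames( self ):
    groupFolder = self.ResourceLibrary.unrestrictedTraverse( path='PrivateData/JeffersonHigh', default=None )
    if groupFolder is None:
        return []
    # every resource title in this folder except the one currently being renamed
    return getListOfNames( 'resource', groupFolder, skipName='My Old Title' )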
##### Here are some internal (private) methods that support the API methods
##### above. NOTE that these require or return complex Zope objects, so
##### they won't respond well to XML-RPC requests!
def _getResourceObject( resourceID ):
    # Retrieve the resource (Zope object) with the specified ID, for closer inspection
return 0
def _isAllowed( objResource, objUser ):
# ?? Detailed confirmation (by whatever method) that the current user has permission
# to read/write/share/revoke/delete the specified resource?
#
# Perhaps this is better done through inline permission checks, eg.
# canWrite = AUTHENTICATED_USER.getBlah()
return 0
def getPrivateFolderForUser( self, objUser, searchFolder=None ):
import AccessControl.SpecialUsers
# Crawl the PrivateData area for this user's private Folder, and return it (or None
# if no matching Folder can be found). NOTE that this function is recursive; it
# searches the children of the main 'PrivateData' folder (by default), or the
# specified Folder object if we're digging through children..
# Test 'objUser' to make sure it's User object (check for method)
if not getattr(objUser, 'getUserName', None): # it's not a User of any kind..
return "ERROR: 'objUser' is not a User! (it's a '%s')" % (type(objUser))
# Test 'searchFolder' to make sure it's an object, and a Folder
if searchFolder is None:
searchFolder = self.PrivateData
# Test the ACL Users Folder, looking for the specified objUser;
# if they're found, return searchFolder!
isLocalManager = objUser.has_role( ['Manager'], object=searchFolder )
if isLocalManager:
return searchFolder
userFolders = searchFolder.objectValues("User Folder")
##return "found %s user folders in '%s'" % (len(userFolders), searchFolder.getId())
    if len(userFolders) != 0:
        # Found a User Folder here!  Let's take a look..
        localUserFolder = userFolders[0]
        # Search for the objUser; are they listed as a Manager here?
        targetName = objUser.getUserName()
        testUser = localUserFolder.getUser( targetName )
        if testUser != None: # found 'em! this is their private folder..
            return searchFolder
# Didn't find the User listed here? Then loop through this Folder's
# children and recursively search any Folders found. If any of them
# returns the user's private Folder, return it to my caller!
for subFolder in searchFolder.objectValues("Folder"):
foundFolder = getPrivateFolderForUser( self, objUser, searchFolder=subFolder )
if foundFolder != None: # We found it! return to my caller
return foundFolder
# Apparently it's not in here, return the bad news
return None
def _getNextAvailableID( self ):
# Read the next available ID from the ResourceLibrary object, then increment it;
# return the found ID for use by the caller
###return self.id();
libRoot = self.ResourceLibrary
nextID = libRoot.nextAvailableID;
libRoot.manage_changeProperties( REQUEST={'nextAvailableID':libRoot.nextAvailableID + 1} );
return nextID;
def testID( self ):
theID = _getNextAvailableID( self );
return theID;
def getRawInput(self, REQUEST):
# Return the complete content of REQUEST from FlashXML.sendAndLoad()
meth = REQUEST.environ.get('REQUEST_METHOD','GET');
# NOTE: Flash has a broken .sendAndLoad() method on Windows.. so we need to
# force a "POST" response rather than handle "GET" differently.
if meth != 'GET': # leave this here in case Flash is fixed someday.
# presumably the method is 'POST' (let's assume this for Flash)
REQUEST.stdin.seek(0);
rawResult = REQUEST.stdin.read();
else:
# presumably the method is 'GET'
rawResult = REQUEST.environ.get('QUERY_STRING','');
###result = unicode(rawResult, 'application/x-www-form-urlencoded' ); #, 'x-www-form-urlencoded' );
# TODO: Find the right encoding to do this?
result = rawResult;
return result;
def assertXMLRPCContentType( self, REQUEST ):
    # Attempt to reset the Content-Type of this request
REQUEST.CONTENT_TYPE = 'text/xml';
# REQUEST.environ.set('CONTENT_TYPE','text/xml');
# REQUEST.set('CONTENT_TYPE','text/xml');
# REQUEST.setHeader('Content-Type', 'text/xml');
return;
def readFullRequest( self ):
self.REQUEST.stdin.seek(0);
rawInput = self.REQUEST.stdin.read();
decodedInput = (rawInput);
    return decodedInput;
def challengeUser( self ):
# Try to provoke an authentication challenge on the current user
# (should *always* work, we call it on any Anonymous User)
###self.AccessControl.SecurityManager.validate(None, None, None, None, None)
return
def debug( ):
# Activates the step-trace debugger; effectively, this should
# give us the ability to invoke debugging from DTML, thus:
# <dtml-call "debug"> <!--calls external method 'debug()'-->
#
# WARNING: This uses Python's 'pdb' debugging module, which means:
# - we have to be looking at the console running Zope to use it!
# - it will STOP Zope (like under hypnosis) until you're done!
import pdb
pdb.set_trace( )
return
def moveResource( self, resourcePath, toContainerPath, redirectURL=None ):
# Attempt to move the specified resource, e.g. from this user's
# private folder to the shared-data folder. Do NOT change
# ownership of the resource, just its location!
# Expecting 2 (maybe 3) arguments. NOTE that all the 'path' arguments
# are assumed to be relative URLs within this ResourceLibrary!
# 'resourcePath'
# 'toContainerPath'
# 'redirectURL' [optional *absolute* URL to which we'll bounce when finished]
targetResource = self.ResourceLibrary.unrestrictedTraverse( resourcePath, default=None )
if targetResource is None:
return "ERROR: Invalid target resource path!"
fromContainer = targetResource.aq_parent
toContainer = self.ResourceLibrary.unrestrictedTraverse( toContainerPath, default=None )
    if toContainer is None:
return "ERROR: Invalid to-container path!"
# Make sure the user has permissions
currentUser = self.REQUEST.AUTHENTICATED_USER
if currentUser.has_permission( 'Delete objects', fromContainer ):
if currentUser.has_permission( 'Add Documents, Images, and Files', toContainer ):
# OK, looks like this operation is legit.. Let's move it!
targetID = targetResource.getId()
clipboardData = fromContainer.manage_cutObjects( ids=[targetID] )
toContainer.manage_pasteObjects( clipboardData )
else:
return "ERROR: User not authorized to add to '%s'!" % (toContainer.getId())
else:
return "ERROR: User not authorized to delete from '%s'!" % (fromContainer.getId() )
if redirectURL is None:
# presumably called from XML-RPC
return "<OK>It's moved!</OK>"
else:
# called from HTML, bounce to the requested URL (eg. user's private area)
self.REQUEST.RESPONSE.redirect( redirectURL )
def cloneResource( self, resourcePath, newName ):
# Make a copy of an existing library resource, and save it with the
# new name in this user's private folder. NOTE that we'll also need
# a fresh, serialized unique ID--in fact, only the description and data
# from the resource will be copied.
#
# If this request came from an HTML page, redirect to the user's private
# folder (where the new clone will be waiting). If it's from XML or
# called by script, just send a simple XML response (new resource path)
targetResource = self.ResourceLibrary.unrestrictedTraverse( resourcePath, default=None )
if targetResource is None:
return "ERROR: Invalid target resource path!"
# find this user's private folder
currentUser = self.REQUEST.AUTHENTICATED_USER
privateFolder = getPrivateFolderForUser( self, currentUser )
if not privateFolder:
return "ERROR: Unable to locate private folder for user '%s'" % currentUser
# make a new resource in the private folder, and transfer data from the old
result = addOrUpdateResource( self,
folderPath = privateFolder.absolute_url(relative = self.ResourceLibrary),
ID = '',
name = newName,
description = getattr( targetResource, 'description', '' ),
resourceData = targetResource.document_src(),
type = getattr( targetResource, 'resourceType', 'generic' )
)
    # look in the result for the new resource's ID; if it's missing, the add failed
    if not (isinstance(result, dict) and result.get('ID', None)):
        return "ERROR: Unable to add new resource!"
else:
newPath = "%s/%s" % (result['folderPath'], result['ID'])
theClone = self.ResourceLibrary.unrestrictedTraverse( newPath )
if self.REQUEST: # we were probably called from HTML
        # redirect to a view of the user's private folder (where the new clone is waiting)
self.REQUEST.RESPONSE.redirect( privateFolder.absolute_url() )
else:
# presumably called from XML-RPC
return "<OK>%s</OK>" % ( theClone.absolute_url(relative = self.ResourceLibrary) )
def getCurrentUser( self, authInfo=None, ignoreMe=None, meToo=None ):
import AccessControl.SpecialUsers
# Based on expected 'authInfo' { 'name':'joe', 'password':'123' },
# verify and return a matching User object, if any.
# IF authInfo is None, or the name or password don't check out, then
# either the auth-info is bad, or this user is truly anonymous, or
# we're in a call-chain from HTML pages.
if authInfo: # TODO: Determine that it's a dictionary?
# Carefully look for supplied info
#testName = getattr( authInfo, 'name', None )
testName = authInfo.get('name', None)
testPassword = authInfo.get('password', None)
if testName and testPassword: # values found in both
# test the supplied name and password against our app's user folder
bigUserFolder = self.ResourceLibrary.aq_parent.acl_users
if not bigUserFolder:
return "ERROR: No main User Folder found for app '%s'!" % (libRoot.aq_parent.getId())
testUser = bigUserFolder.getUser( testName ) # returns None if not found
if testUser:
# check their password against the one supplied
if testUser.authenticate( testPassword, request=None ):
return testUser
# Still here? Then look for the authenticated user in REQUEST and
# return it (since we may have been called from an HTML situation,
# with proper authentication). If there is no request, return the
# Anonymous User
if self.REQUEST: # WAS: if self.REQUEST.AUTHENTICATED_USER:
return self.REQUEST.AUTHENTICATED_USER
else:
# Return the Anonymous User (defined in 'User.py')
return AccessControl.SpecialUsers.nobody
def whoAmI( self, authInfo=None, ignoreMe=None, meToo=None ):
# A support method that returns just the userid to an XML-RPC caller;
# this allows us to authenticate the name+password in authInfo. If
# there's no matching user, returns 'Anonymous User'
theUser = getCurrentUser( self, authInfo )
return theUser.getUserName()
def ping( self, authInfo=None ):
# Slightly enhanced form of whoAmI(), also returns an unambiguous
# status message
theUser = getCurrentUser( self, authInfo )
return "OK:%s" % ( theUser.getUserName() )
def escapeForXML(s):
"""Replace special characters '&', "'", '<', '>' and '"' by XML entities."""
s = s.replace("&", "&") # Must be done first!
s = s.replace("'", "'")
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace('"', """)
return s
# TODO:
# - Use proper XMLDOM manipulation script to build XML trees, rather
# than string manipulation
#
# Remember that we can call other external ResourceLibrary methods thus:
# msg=self.sayHello()
#
# We can also get a pointer to my parent container in Zope
# myParentContainer = self.aq_parent
# return myParentContainer.getId()
| apache-2.0 | -8,110,495,114,696,879,000 | 44.420804 | 196 | 0.666327 | false |
scootergrisen/virtaal | virtaal/plugins/tm/models/tinytm.py | 4 | 5263 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2011 Zuza Software Foundation
# Copyright 2014 F Wolff
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import gtk
import logging
from virtaal.common import pan_app
from basetmmodel import BaseTMModel
from virtaal.controllers.baseplugin import PluginUnsupported
MAX_ERRORS = 5
class TMModel(BaseTMModel):
"""This is a TinyTM translation memory model.
    Built according to the l{protocol<http://tinytm.org/en/technology/protocol.html>} defined
by the TinyTM project.
"""
__gtype_name__ = 'TinyTmTMModel'
display_name = _('TinyTM')
description = _('A TinyTM translation memory server')
default_config = {
"server": "localhost",
"username": "postgres",
"password": "",
"database": "tinytm",
"port": "5432",
}
# INITIALIZERS #
def __init__(self, internal_name, controller):
self.internal_name = internal_name
self.load_config()
try:
import psycopg2
self.psycopg2 = psycopg2
except ImportError:
raise PluginUnsupported("The psycopg2 package is required for TinyTM")
# We count errors so that we can disable the plugin if it experiences
# multiple problems. If still negative, it means we were never able to
# connect, so we can disable the plugin completely.
self._errors = -1
self._db_con = self.psycopg2.connect(
database=self.config["database"],
user=self.config["username"],
password=self.config["password"],
host=self.config["server"],
async=1,
port=self.config["port"],
)
self.wait()
self._errors = 0
super(TMModel, self).__init__(controller)
# METHODS #
def query(self, tmcontroller, unit):
if self._db_con.closed or self._db_con.isexecuting():
# Two cursors can't execute concurrently on an asynchronous
# connection. We could try to cancel the old one, but if it hasn't
# finished yet, it might be busy. So let's rather not pile on
# another query to avoid overloading the server.
return
query_str = unit.source
matches = []
cursor = self._db_con.cursor()
try:
cursor.execute(
"""SELECT * FROM tinytm_get_fuzzy_matches(%s, %s, %s, '', '')""",
(self.source_lang, self.target_lang, query_str.encode('utf-8'))
)
# You can connect to any postgres database and use this for basic
# testing:
#cursor.execute("""select pg_sleep(2); SELECT 99, 'source', 'target';""")
# Uncomment this if you don't trust the results
#cursor.execute("""SELECT * FROM tinytm_get_fuzzy_matches('en', 'de', 'THE EUROPEAN ECONOMIC COMMUNITY', '', '')""")
except self.psycopg2.Error, e:
self.error(e)
self.wait()
for result in cursor.fetchall():
quality, source, target = result[:3]
if not isinstance(target, unicode):
target = unicode(target, 'utf-8')
matches.append({
'source': source,
'target': target,
'quality': quality,
'tmsource': self.display_name,
})
self.emit('match-found', query_str, matches)
def wait(self):
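        # Poll the asynchronous psycopg2 connection until the pending operation
        # completes, pumping queued GTK events so the UI stays responsive while we wait.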
import select
while 1:
while gtk.events_pending():
gtk.main_iteration()
try:
state = self._db_con.poll()
except self.psycopg2.Error, e:
self.error(e)
if state == self.psycopg2.extensions.POLL_OK:
break
elif state == self.psycopg2.extensions.POLL_WRITE:
select.select([], [self._db_con.fileno()], [], 0.05)
elif state == self.psycopg2.extensions.POLL_READ:
select.select([self._db_con.fileno()], [], [], 0.05)
else:
self.error()
raise self.psycopg2.OperationalError("poll() returned %s" % state)
def error(self, e=None):
if self._errors < 0:
# We're still busy initialising
raise PluginUnsupported("Unable to connect to the TinyTM server.")
if e:
logging.error("[%s] %s" % (e.pgcode, e.pgerror))
self._errors += 1
if self._errors > MAX_ERRORS:
self._db_con.close()
def destroy(self):
super(TMModel, self).destroy()
self._db_con.close()
| gpl-2.0 | 1,360,874,650,982,526,700 | 33.175325 | 128 | 0.583698 | false |
olivierdalang/QGIS | tests/code_layout/doxygen_parser.py | 6 | 24239 | # -*- coding: utf-8 -*-
"""
***************************************************************************
mocked
---------------------
Date : May 2017
Copyright : (C) 2017 by Denis Rouzaud
Email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Denis Rouzaud'
__date__ = 'May 2017'
__copyright__ = '(C) 2017, Denis Rouzaud'
import re
import glob
import os
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class DoxygenParser():
"""
Parses the XML files generated by Doxygen which describe the API docs
"""
def __init__(self, path, acceptable_missing={}, acceptable_missing_added_note=[], acceptable_missing_brief=[]):
"""
Initializes the parser.
:param path: Path to Doxygen XML output
"""
self.acceptable_missing = acceptable_missing
self.acceptable_missing_added_note = acceptable_missing_added_note
self.acceptable_missing_brief = acceptable_missing_brief
self.documentable_members = 0
self.documented_members = 0
self.undocumented_members = {}
self.noncompliant_members = {}
self.broken_links = {}
self.bindable_members = []
self.groups = {}
self.classes_missing_group = []
self.classes_missing_brief = []
self.classes_missing_version_added = []
# for some reason the Doxygen generation on Travis refuses to assign these classes to groups
self.acceptable_missing_group = ['QgsOgcUtils::LayerProperties',
'QgsSQLStatement::Node',
'QgsSQLStatement::NodeBinaryOperator',
'QgsSQLStatement::NodeColumnRef',
'QgsSQLStatement::NodeFunction',
'QgsSQLStatement::NodeInOperator',
'QgsSQLStatement::NodeList',
'QgsSQLStatement::NodeLiteral',
'QgsSQLStatement::NodeUnaryOperator',
'QgsRuleBasedLabeling::Rule',
'QgsSQLStatement::Visitor']
self.version_regex = re.compile(r'QGIS [\d\.]+.*')
self.parseFiles(path)
def parseFiles(self, path):
""" Parses all the Doxygen XML files in a folder
:param path: Path to Doxygen XML output
"""
found = False
# find groups
for f in glob.glob(os.path.join(path, 'group__*.xml')):
found = True
group, members = self.parseGroup(f)
self.groups[group] = members
assert found, "Could not find doxygen groups xml"
found = False
# parse docs
for f in glob.glob(os.path.join(path, '*.xml')):
found = True
self.parseFile(f)
assert found, "Could not find doxygen files xml"
def parseGroup(self, f):
""" Parses a single Doxygen Group XML file
:param f: XML file path
"""
name = None
members = []
# Wrap everything in a try, as sometimes Doxygen XML is malformed
try:
for event, elem in ET.iterparse(f):
if event == 'end' and elem.tag == 'compoundname':
name = elem.text
if event == 'end' and elem.tag == 'innerclass':
members.append(elem.text)
except:
pass
return name, members
def hasGroup(self, class_name):
""" Returns true if a class has been assigned to a group
:param class_name class name to test
"""
for g in self.groups:
if class_name in self.groups[g]:
return True
return False
@staticmethod
def standardize_signature(signature):
"""
Standardizes a method's signature for comparison
"""
return signature.lower().replace('* >', '*>').replace('< ', '<')
def parseFile(self, f):
""" Parses a single Doxygen XML file
:param f: XML file path
"""
documentable_members = 0
documented_members = 0
# Wrap everything in a try, as sometimes Doxygen XML is malformed
try:
for event, elem in ET.iterparse(f):
if event == 'end' and elem.tag == 'compounddef':
if self.elemIsPublicClass(elem):
# store documentation status
members, documented, undocumented, noncompliant, bindable, has_brief_description, found_version_added, broken_links = self.parseClassElem(
elem)
documentable_members += members
documented_members += documented
class_name = elem.find('compoundname').text
acceptable_missing = self.acceptable_missing.get(class_name, [])
if not self.hasGroup(class_name) and class_name not in self.acceptable_missing_group:
self.classes_missing_group.append(class_name)
if class_name not in self.acceptable_missing_brief and not has_brief_description:
self.classes_missing_brief.append(class_name)
if class_name not in self.acceptable_missing_added_note and not found_version_added:
self.classes_missing_version_added.append(class_name)
# GEN LIST
# if len(undocumented) > 0:
# print('"%s": [%s],' % (class_name, ", ".join(['"%s"' % e.replace('"', '\\"') for e in undocumented])))
unacceptable_undocumented = undocumented - set(acceptable_missing)
# do a case insensitive check too
unacceptable_undocumented_insensitive = set(
[DoxygenParser.standardize_signature(u) for u in undocumented]) - set(
[DoxygenParser.standardize_signature(u) for u in acceptable_missing])
if len(unacceptable_undocumented_insensitive) > 0:
self.undocumented_members[class_name] = {}
self.undocumented_members[class_name]['documented'] = documented
self.undocumented_members[class_name]['members'] = members
self.undocumented_members[class_name]['missing_members'] = unacceptable_undocumented
if len(noncompliant) > 0:
self.noncompliant_members[class_name] = noncompliant
if broken_links:
self.broken_links[class_name] = broken_links
# store bindable members
if self.classElemIsBindable(elem):
for m in bindable:
self.bindable_members.append(m)
elem.clear()
except ET.ParseError as e:
# sometimes Doxygen generates malformed xml (e.g., for < and > operators)
line_num, col = e.position
with open(f, 'r') as xml_file:
for i, l in enumerate(xml_file):
if i == line_num - 1:
line = l
break
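            # Pad the '^' out to the error column with '=' so it lines up under the
            # offending character when printed below the source line.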
caret = '{:=>{}}'.format('^', col)
print(('ParseError in {}\n{}\n{}\n{}'.format(f, e, line, caret)))
self.documentable_members += documentable_members
self.documented_members += documented_members
def elemIsPublicClass(self, elem):
""" Tests whether an XML element corresponds to a public (or protected) class
:param elem: XML element
"""
# only looking for classes
if not elem.get('kind') == 'class':
return False
# only looking for public or protected classes
return elem.get('prot') in ('public', 'protected')
def classElemIsBindable(self, elem):
""" Tests whether a class should have SIP bindings
:param elem: XML element corresponding to a class
"""
try:
# check for classes with special python doc notes (probably 'not available' or renamed classes, either way
# they should be safe to ignore as obviously some consideration has been given to Python bindings)
detailed_sec = elem.find('detaileddescription')
for p in detailed_sec.iter('para'):
for s in p.iter('simplesect'):
for ps in s.iter('para'):
if ps.text and 'python' in ps.text.lower():
return False
return True
except:
return True
def parseClassElem(self, e):
""" Parses an XML element corresponding to a Doxygen class
:param e: XML element
"""
documentable_members = 0
documented_members = 0
undocumented_members = set()
noncompliant_members = []
bindable_members = []
broken_links = {}
# loop through all members
for m in e.iter('memberdef'):
signature = self.memberSignature(m)
if signature is None:
continue
if self.elemIsBindableMember(m):
bindable_member = [e.find('compoundname').text, m.find('name').text]
if bindable_member not in bindable_members:
bindable_members.append(bindable_member)
if self.elemIsDocumentableMember(m):
documentable_members += 1
if self.memberIsDocumented(m):
documented_members += 1
error = self.memberDocIsNonCompliant(m)
if error:
noncompliant_members.append({m.find('name').text: error})
else:
undocumented_members.add(signature)
broken_see_also_links = self.checkForBrokenSeeAlsoLinks(m)
if broken_see_also_links:
broken_links[m.find('name').text] = broken_see_also_links
# test for brief description
d = e.find('briefdescription')
has_brief_description = False
if d:
has_brief_description = True
for para in d.iter('para'):
if para.text and re.search(r'\btodo\b', para.text.lower()) is not None:
noncompliant_members.append({'Brief description': 'Don\'t add TODO comments to public doxygen documentation. Leave these as c++ code comments only.'})
break
# test for "added in QGIS xxx" string
d = e.find('detaileddescription')
found_version_added = False
for para in d.iter('para'):
for s in para.iter('simplesect'):
if s.get('kind') == 'since':
for p in s.iter('para'):
if self.version_regex.match(p.text):
found_version_added = True
break
if para.text and re.search(r'\btodo\b', para.text.lower()) is not None:
noncompliant_members.append({
'Detailed description': 'Don\'t add TODO comments to public doxygen documentation. Leave these as c++ code comments only.'})
return documentable_members, documented_members, undocumented_members, noncompliant_members, bindable_members, has_brief_description, found_version_added, broken_links
def memberSignature(self, elem):
""" Returns the signature for a member
:param elem: XML element for a class member
"""
a = elem.find('argsstring')
try:
if a is not None:
signature = elem.find('name').text + a.text
else:
signature = elem.find('name').text
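            # Doxygen appends a trailing "= default" to the argsstring of defaulted members;
            # strip it so they compare equal to their plain declarations.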
if signature.endswith('= default'):
signature = signature[:-len('= default')]
return signature.strip()
except:
return None
def elemIsBindableMember(self, elem):
""" Tests whether an member should be included in SIP bindings
:param elem: XML element for a class member
"""
# only public or protected members are bindable
if not self.visibility(elem) in ('public', 'protected'):
return False
# property themselves are not bound, only getters and setters
if self.isProperty(elem):
return False
# ignore friend classes
if self.isFriendClass(elem):
return False
# ignore typedefs (can't test for them)
if self.isTypeDef(elem):
return False
if self.isVariable(elem) and self.visibility(elem) == 'protected':
# protected variables can't be bound in SIP
return False
# check for members with special python doc notes (probably 'not available' or renamed methods, either way
# they should be safe to ignore as obviously some consideration has been given to Python bindings)
try:
detailed_sec = elem.find('detaileddescription')
for p in detailed_sec.iter('para'):
for s in p.iter('simplesect'):
for ps in s.iter('para'):
if ps.text and 'python' in ps.text.lower():
return False
except:
pass
# ignore constructors and destructor, can't test for these
if self.isDestructor(elem) or self.isConstructor(elem):
return False
# ignore operators, also can't test
if self.isOperator(elem):
return False
# ignore deprecated members
if self.isDeprecated(elem):
return False
return True
def elemIsDocumentableMember(self, elem):
""" Tests whether an member should be included in Doxygen docs
:param elem: XML element for a class member
"""
# ignore variables (for now, eventually public/protected variables should be documented)
if self.isVariable(elem):
return False
# only public or protected members should be documented
if not self.visibility(elem) in ('public', 'protected'):
return False
# ignore reimplemented methods
if self.isReimplementation(elem):
return False
# ignore friend classes
if self.isFriendClass(elem):
return False
# ignore destructor
if self.isDestructor(elem):
return False
# ignore constructors with no arguments
if self.isConstructor(elem):
try:
if elem.find('argsstring').text == '()':
return False
except:
pass
name = elem.find('name')
# ignore certain obvious operators
try:
if name.text in ('operator=', 'operator==', 'operator!=', 'Q_ENUM'):
return False
except:
pass
# ignore on_* slots
try:
if name.text.startswith('on_'):
return False
except:
pass
# ignore deprecated members
if self.isDeprecated(elem):
return False
return True
def visibility(self, elem):
""" Returns the visibility of a class or member
:param elem: XML element for a class or member
"""
try:
return elem.get('prot')
except:
return ''
def isVariable(self, member_elem):
""" Tests whether an member is a variable
:param member_elem: XML element for a class member
"""
try:
if member_elem.get('kind') == 'variable':
return True
except:
pass
return False
def isProperty(self, member_elem):
""" Tests whether an member is a property
:param member_elem: XML element for a class member
"""
try:
if member_elem.get('kind') == 'property':
return True
except:
pass
return False
def isDestructor(self, member_elem):
""" Tests whether an member is a destructor
:param member_elem: XML element for a class member
"""
try:
name = member_elem.find('name').text
if name.startswith('~'):
# destructor
return True
except:
pass
return False
def isConstructor(self, member_elem):
""" Tests whether an member is a constructor
:param member_elem: XML element for a class member
"""
try:
definition = member_elem.find('definition').text
name = member_elem.find('name').text
if '{}::{}'.format(name, name) in definition:
return True
except:
pass
return False
def isOperator(self, member_elem):
""" Tests whether an member is an operator
:param member_elem: XML element for a class member
"""
try:
name = member_elem.find('name').text
if re.match(r'^operator\W.*', name):
return True
except:
pass
return False
def isFriendClass(self, member_elem):
""" Tests whether an member is a friend class
:param member_elem: XML element for a class member
"""
try:
if member_elem.get('kind') == 'friend':
return True
except:
pass
return False
def isTypeDef(self, member_elem):
""" Tests whether an member is a type def
:param member_elem: XML element for a class member
"""
try:
if member_elem.get('kind') == 'typedef':
return True
except:
pass
return False
def isReimplementation(self, member_elem):
""" Tests whether an member is a reimplementation
:param member_elem: XML element for a class member
"""
# use two different tests, as Doxygen will not detect reimplemented Qt methods
try:
if member_elem.find('reimplements') is not None:
return True
if ' override' in member_elem.find('argsstring').text:
return True
except:
pass
return False
def isDeprecated(self, member_elem):
""" Tests whether an member is deprecated
:param member_elem: XML element for a class member
"""
# look for both Q_DECL_DEPRECATED and Doxygen deprecated tag
decl_deprecated = False
type_elem = member_elem.find('type')
try:
if 'Q_DECL_DEPRECATED' in type_elem.text:
decl_deprecated = True
except:
pass
doxy_deprecated = False
has_description = True
try:
for p in member_elem.find('detaileddescription').iter('para'):
for s in p.iter('xrefsect'):
if s.find('xreftitle') is not None and 'Deprecated' in s.find('xreftitle').text:
doxy_deprecated = True
if s.find('xrefdescription') is None or s.find('xrefdescription').find('para') is None:
has_description = False
break
except:
assert 0, member_elem.find('definition').text
if not decl_deprecated and not doxy_deprecated:
return False
if doxy_deprecated and not has_description:
assert has_description, 'Error: Missing description for deprecated method {}'.format(
member_elem.find('definition').text)
# only functions for now, but in future this should also apply for enums and variables
if member_elem.get('kind') in ('function', 'variable'):
assert decl_deprecated, 'Error: Missing Q_DECL_DEPRECATED for {}'.format(
member_elem.find('definition').text)
assert doxy_deprecated, 'Error: Missing Doxygen deprecated tag for {}'.format(
member_elem.find('definition').text)
return True
def memberIsDocumented(self, member_elem):
""" Tests whether an member has documentation
:param member_elem: XML element for a class member
"""
for doc_type in ('inbodydescription', 'briefdescription', 'detaileddescription'):
doc = member_elem.find(doc_type)
if doc is not None and list(doc):
return True
return False
def memberDocIsNonCompliant(self, member_elem):
""" Tests whether an member's documentation is non-compliant
:param member_elem: XML element for a class member
"""
def _check_compliance(elem):
for para in elem.iter('para'):
for sect in para.iter('simplesect'):
res = _check_compliance(sect)
if res:
return res
for t in para.itertext():
if doc_type == 'briefdescription':
if t.strip().lower().startswith('getter'):
return 'Use "Returns the..." instead of "getter"'
if t.strip().lower().startswith('get '):
return 'Use "Gets..." (or better, "Returns ...") instead of "get ..."'
elif t.strip().lower().startswith('setter'):
return 'Use "Sets the..." instead of "setter"'
elif t.strip().lower().startswith('mutator'):
return 'Use "Sets the..." instead of "mutator for..."'
elif t.strip().lower().startswith('accessor'):
return 'Use "Returns the..." instead of "accessor for..."'
elif t.strip().lower().startswith('return '):
return 'Use "Returns the..." instead of "return ..."'
if re.search(r'\btodo\b', t.lower()) is not None:
return 'Don\'t add TODO comments to public doxygen documentation. Leave these as c++ code comments only.'
for doc_type in ['briefdescription', 'detaileddescription']:
doc = member_elem.find(doc_type)
if doc is not None:
res = _check_compliance(doc)
if res:
return res
return False
def checkForBrokenSeeAlsoLinks(self, elem):
"""
Checks for any broken 'see also' links
"""
broken = []
detailed_sec = elem.find('detaileddescription')
for p in detailed_sec.iter('para'):
for s in p.iter('simplesect'):
if s.get('kind') != 'see':
continue
para = list(s.iter())[1]
if para.find('ref') is None and para.text and (
not para.text.startswith('Q') or para.text.startswith('Qgs')):
broken.append(para.text)
return broken
| gpl-2.0 | 2,371,958,975,442,173,000 | 37.658692 | 175 | 0.526136 | false |
rosarior/mayan | apps/checkouts/events.py | 2 | 1228 | from django.utils.translation import ugettext_lazy as _
HISTORY_DOCUMENT_CHECKED_OUT = {
'namespace': 'checkouts', 'name': 'document_checked_out',
'label': _(u'Document checked out'),
'summary': _(u'Document "%(document)s" checked out by %(fullname)s.'),
'expressions': {'fullname': 'user.get_full_name() if user.get_full_name() else user'}
}
HISTORY_DOCUMENT_CHECKED_IN = {
'namespace': 'checkouts', 'name': 'document_checked_in',
'label': _(u'Document checked in'),
'summary': _(u'Document "%(document)s" checked in by %(fullname)s.'),
'expressions': {'fullname': 'user.get_full_name() if user.get_full_name() else user'}
}
HISTORY_DOCUMENT_AUTO_CHECKED_IN = {
'namespace': 'checkouts', 'name': 'document_auto_checked_in',
'label': _(u'Document automatically checked in'),
'summary': _(u'Document "%(document)s" automatically checked in.'),
}
HISTORY_DOCUMENT_FORCEFUL_CHECK_IN = {
'namespace': 'checkouts', 'name': 'document_forefull_check_in',
'label': _(u'Document forcefully checked in'),
'summary': _(u'Document "%(document)s" forcefully checked in by %(fullname)s.'),
'expressions': {'fullname': 'user.get_full_name() if user.get_full_name() else user'}
}
| gpl-3.0 | 8,579,461,176,201,736,000 | 42.857143 | 89 | 0.656352 | false |
Pointedstick/ReplicatorG | skein_engines/skeinforge-40/skeinforge_application/skeinforge_plugins/analyze_plugins/skeinlayer.py | 2 | 29183 | """
This page is in the table of contents.
Skeinlayer is a script to display each layer of a gcode file.
The skeinlayer manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Skeinlayer
Skeinlayer is derived from Nophead's preview script. The extruded lines are in the resistor colors red, orange, yellow, green, blue, purple & brown. When the extruder is off, the travel line is grey. Skeinlayer is useful for a detailed view of the extrusion, skeiniso is better to see the orientation of the shape. To get an initial overview of the skein, when the skeinlayer display window appears, click the Soar button (double right arrow button beside the layer field).
==Operation==
The default 'Activate Skeinlayer' checkbox is on. When it is on, the functions described below will work when called from the skeinforge toolchain, when it is off, the functions will not be called from the toolchain. The functions will still be called, whether or not the 'Activate Skeinlayer' checkbox is on, when skeinlayer is run directly. Skeinlayer has trouble separating the layers when it reads gcode without comments.
==Settings==
===Animation===
====Animation Line Quickening====
Default is one.
The quickness of the tool animation over the quickness of the actual tool.
====Animation Slide Show Rate====
Default is two layers per second.
The rate, in layers per second, at which the layer changes when the soar or dive button is pressed.
===Draw Arrows===
Default is on.
When selected, arrows will be drawn at the end of each line segment.
===Export Menu===
When the submenu in the export menu item in the file menu is clicked, an export canvas dialog will be displayed, which can export the canvas to a file.
===Go Around Extruder Off Travel===
Default is off.
When selected, the display will include the travel when the extruder is off, which means it will include the nozzle wipe path if any.
===Layers===
====Layer====
Default is zero.
On the display window, the Up button increases the 'Layer' by one, and the Down button decreases the layer by one.  When the layer displayed in the layer spin box is changed and then <Return> is hit, the layer shown will be set to the spin box, to a minimum of zero and to a maximum of the highest index layer.  The Soar button increases the layer at the 'Animation Slide Show Rate', and the Dive (double left arrow button beside the layer field) button decreases the layer at the slide show rate.
====Layer Extra Span====
Default is zero.
The viewer will draw the layers in the range including the 'Layer' index and the 'Layer' index plus the 'Layer Extra Span'. If the 'Layer Extra Span' is negative, the layers viewed will start at the 'Layer' index, plus the 'Layer Extra Span', and go up to and include the 'Layer' index. If the 'Layer Extra Span' is zero, only the 'Layer' index layer will be displayed. If the 'Layer Extra Span' is positive, the layers viewed will start at the 'Layer' index, and go up to and include the 'Layer' index plus the 'Layer Extra Span'.
===Line===
Default is zero.
The index of the selected line on the layer that is highlighted when the 'Display Line' mouse tool is chosen.  The line spin box up button increases the 'Line' by one.  If the line index of the layer goes over the index of the last line, the layer index will be increased by one and the new line index will be zero.  The down button decreases the line index by one.  If the line index goes below the index of the first line, the layer index will be decreased by one and the new line index will be at the last line.  When the line displayed in the line field is changed and then <Return> is hit, the line shown will be set to the line field, to a minimum of zero and to a maximum of the highest index line.  The Soar button increases the line at the speed at which the extruder would move, times the 'Animation Line Quickening' ratio, and the Dive (double left arrow button beside the line field) button decreases the line at the animation line quickening ratio.
===Mouse Mode===
Default is 'Display Line'.
The mouse tool can be changed from the 'Mouse Mode' menu button or picture button. The mouse tools listen to the arrow keys when the canvas has the focus. Clicking in the canvas gives the canvas the focus, and when the canvas has the focus a thick black border is drawn around the canvas.
====Display Line====
The 'Display Line' tool will highlight the selected line, and display the file line count, counting from one, and the gcode line itself.  When the 'Display Line' tool is active, clicking the canvas will select the nearest line to the mouse click.
====Viewpoint Move====
The 'Viewpoint Move' tool will move the viewpoint in the xy plane when the mouse is clicked and dragged on the canvas.
===Numeric Pointer===
Default is on.
When selected, the distance along the ruler of the arrow pointers will be drawn next to the pointers.
===Scale===
Default is ten.
The scale setting is the scale of the image in pixels per millimeter, the higher the number, the greater the size of the display.
The zoom in mouse tool will zoom in the display at the point where the mouse was clicked, increasing the scale by a factor of two. The zoom out tool will zoom out the display at the point where the mouse was clicked, decreasing the scale by a factor of two.
===Screen Inset===
====Screen Horizontal Inset====
Default is one hundred.
The "Screen Horizontal Inset" determines how much the canvas will be inset in the horizontal direction from the edge of screen, the higher the number the more it will be inset and the smaller it will be.
====Screen Vertical Inset====
Default is two hundred and twenty.
The "Screen Vertical Inset" determines how much the canvas will be inset in the vertical direction from the edge of screen, the higher the number the more it will be inset and the smaller it will be.
===Width===
The width of each type of thread and of each axis can be changed. If the width is set to zero, the thread will not be visible.
====Width of Extrusion Thread====
Default is three.
The "Width of Extrusion Thread" sets the width of the extrusion threads.
====Width of Selection Thread====
Default is six.
The "Width of Selection Thread" sets the width of the selected line.
====Width of Travel Thread====
Default is one.
The "Width of Travel Thread" sets the width of the grey extruder off travel threads.
==Icons==
The dive, soar and zoom icons are from Mark James' soarSilk icon set 1.3 at:
http://www.famfamfam.com/lab/icons/silk/
==Gcodes==
An explanation of the gcodes is at:
http://reprap.org/bin/view/Main/Arduino_GCode_Interpreter
and at:
http://reprap.org/bin/view/Main/MCodeReference
A gode example is at:
http://forums.reprap.org/file.php?12,file=565
==Examples==
Below are examples of skeinlayer being used. These examples are run in a terminal in the folder which contains Screw Holder_penultimate.gcode and skeinlayer.py.
> python skeinlayer.py
This brings up the skeinlayer dialog.
> python skeinlayer.py Screw Holder_penultimate.gcode
This brings up the skeinlayer viewer to view each layer of a gcode file.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import display_line
from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import tableau
from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import view_move
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getNewRepository():
'Get new repository.'
return SkeinlayerRepository()
def getRankIndex( rulingSeparationWidthMillimeters, screenOrdinate ):
"Get rank index."
return int( round( screenOrdinate / rulingSeparationWidthMillimeters ) )
def getWindowAnalyzeFile(fileName):
"Display a gcode file in a skeinlayer window."
gcodeText = archive.getFileText(fileName)
return getWindowAnalyzeFileGivenText(fileName, gcodeText)
def getWindowAnalyzeFileGivenText( fileName, gcodeText, repository=None):
"Display a gcode file in a skeinlayer window given the text."
if gcodeText == '':
return None
if repository == None:
repository = settings.getReadRepository( SkeinlayerRepository() )
skeinWindow = getWindowGivenTextRepository( fileName, gcodeText, repository )
skeinWindow.updateDeiconify()
return skeinWindow
def getWindowGivenTextRepository( fileName, gcodeText, repository ):
"Display a gcode file in a skeinlayer window given the text and settings."
skein = SkeinlayerSkein()
skein.parseGcode( fileName, gcodeText, repository )
return SkeinWindow( repository, skein )
def writeOutput( fileName, fileNameSuffix, gcodeText = ''):
"Display a skeinlayered gcode file for a skeinforge gcode file, if 'Activate Skeinlayer' is selected."
repository = settings.getReadRepository( SkeinlayerRepository() )
if repository.activateSkeinlayer.value:
gcodeText = archive.getTextIfEmpty( fileNameSuffix, gcodeText )
return getWindowAnalyzeFileGivenText( fileNameSuffix, gcodeText, repository )
class SkeinlayerRepository( tableau.TableauRepository ):
"A class to handle the skeinlayer settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.analyze_plugins.skeinlayer.html', self)
self.baseNameSynonym = 'skeinview.csv'
self.fileNameInput = settings.FileNameInput().getFromFileName( [ ('Gcode text files', '*.gcode') ], 'Open File for Skeinlayer', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Skeinlayer')
self.activateSkeinlayer = settings.BooleanSetting().getFromValue('Activate Skeinlayer', self, True )
self.addAnimation()
self.drawArrows = settings.BooleanSetting().getFromValue('Draw Arrows', self, True )
self.goAroundExtruderOffTravel = settings.BooleanSetting().getFromValue('Go Around Extruder Off Travel', self, False )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Layers -', self )
self.layer = settings.IntSpinNotOnMenu().getSingleIncrementFromValue( 0, 'Layer (index):', self, 912345678, 0 )
self.layerExtraSpan = settings.IntSpinUpdate().getSingleIncrementFromValue( - 3, 'Layer Extra Span (integer):', self, 3, 0 )
settings.LabelSeparator().getFromRepository(self)
self.line = settings.IntSpinNotOnMenu().getSingleIncrementFromValue( 0, 'Line (index):', self, 912345678, 0 )
self.mouseMode = settings.MenuButtonDisplay().getFromName('Mouse Mode:', self )
self.displayLine = settings.MenuRadio().getFromMenuButtonDisplay( self.mouseMode, 'Display Line', self, True )
self.viewMove = settings.MenuRadio().getFromMenuButtonDisplay( self.mouseMode, 'View Move', self, False )
self.addScaleScreenSlide()
self.showPosition = settings.BooleanSetting().getFromValue('Show Position', self, True )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Width -', self )
self.widthOfExtrusionThread = settings.IntSpinUpdate().getSingleIncrementFromValue( 0, 'Width of Extrusion Thread (pixels):', self, 5, 3 )
self.widthOfSelectionThread = settings.IntSpinUpdate().getSingleIncrementFromValue( 0, 'Width of Selection Thread (pixels):', self, 10, 6 )
self.widthOfTravelThread = settings.IntSpinUpdate().getSingleIncrementFromValue( 0, 'Width of Travel Thread (pixels):', self, 5, 1 )
self.executeTitle = 'Skeinlayer'
def execute(self):
"Write button has been clicked."
fileNames = skeinforge_polyfile.getFileOrGcodeDirectory( self.fileNameInput.value, self.fileNameInput.wasCancelled )
for fileName in fileNames:
getWindowAnalyzeFile(fileName)
class SkeinlayerSkein:
"A class to write a get a scalable vector graphics text for a gcode skein."
def __init__(self):
'Initialize.'
self.extrusionNumber = 0
self.feedRateMinute = 960.1
self.isThereALayerStartWord = False
self.layerCount = settings.LayerCount()
self.oldZ = - 999987654321.0
self.skeinPane = None
self.skeinPanes = []
def addToPath( self, line, location ):
"Add a point to travel and maybe extrusion."
if self.oldLocation == None:
return
colorName = 'gray'
locationComplex = location.dropAxis()
oldLocationComplex = self.oldLocation.dropAxis()
begin = self.getScreenCoordinates( oldLocationComplex )
end = self.getScreenCoordinates( locationComplex )
if self.extruderActive:
colorName = self.colorNames[ self.extrusionNumber % len( self.colorNames ) ]
displayString = '%s %s' % ( self.lineIndex + 1, line )
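        # Encode this line's index within the current pane and the pane (layer) index;
        # the string becomes the Tkinter canvas tag for the drawn line.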
tagString = 'colored_line_index: %s %s' % ( len( self.skeinPane ), len( self.skeinPanes ) - 1 )
coloredLine = tableau.ColoredLine( begin, colorName, displayString, end, tagString )
coloredLine.isExtrusionThread = self.extruderActive
self.skeinPane.append( coloredLine )
def getModelCoordinates( self, screenCoordinates ):
"Get the model coordinates."
modelCoordinates = ( screenCoordinates + self.marginCornerLow ) / self.scale
return complex( modelCoordinates.real, self.cornerImaginaryTotal - modelCoordinates.imag )
def getScreenCoordinates( self, pointComplex ):
"Get the screen coordinates."
pointComplex = complex( pointComplex.real, self.cornerImaginaryTotal - pointComplex.imag )
return self.scale * pointComplex - self.marginCornerLow
def initializeActiveLocation(self):
"Set variables to default."
self.extruderActive = False
self.oldLocation = None
def linearCorner( self, splitLine ):
"Update the bounding corners."
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
if self.extruderActive or self.repository.goAroundExtruderOffTravel.value:
self.cornerMaximum.maximize(location)
self.cornerMinimum.minimize(location)
self.oldLocation = location
def linearMove( self, line, location ):
"Get statistics for a linear move."
if self.skeinPane != None:
self.addToPath( line, location )
def parseCorner(self, line):
"Parse a gcode line and use the location to update the bounding corners."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
self.linearCorner(splitLine)
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
def parseGcode( self, fileName, gcodeText, repository ):
"Parse gcode text and store the vector output."
self.fileName = fileName
self.gcodeText = gcodeText
self.repository = repository
self.initializeActiveLocation()
self.cornerMaximum = Vector3(-987654321.0, -987654321.0, -987654321.0)
self.cornerMinimum = Vector3(987654321.0, 987654321.0, 987654321.0)
self.lines = archive.getTextLines(gcodeText)
self.isThereALayerStartWord = (gcodec.getFirstWordIndexReverse('(<layer>', self.lines, 1) > -1)
self.parseInitialization()
for line in self.lines[self.lineIndex :]:
self.parseCorner(line)
self.cornerMaximumComplex = self.cornerMaximum.dropAxis()
self.cornerMinimumComplex = self.cornerMinimum.dropAxis()
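        # Scale the model bounding corners into pixels and add a 10 pixel margin
        # on each side to get the drawing area for the canvas.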
self.scale = repository.scale.value
self.scaleCornerHigh = self.scale * self.cornerMaximumComplex
self.scaleCornerLow = self.scale * self.cornerMinimumComplex
self.cornerImaginaryTotal = self.cornerMaximum.y + self.cornerMinimum.y
self.margin = complex( 10.0, 10.0 )
self.marginCornerHigh = self.scaleCornerHigh + self.margin
self.marginCornerLow = self.scaleCornerLow - self.margin
self.screenSize = self.marginCornerHigh - self.marginCornerLow
self.initializeActiveLocation()
self.colorNames = ['brown', 'red', 'orange', 'yellow', 'green', 'blue', 'purple']
for self.lineIndex in xrange( self.lineIndex, len(self.lines) ):
line = self.lines[self.lineIndex]
self.parseLine(line)
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == '(</extruderInitialization>)':
return
elif firstWord == '(<operatingFeedRatePerSecond>':
self.feedRateMinute = 60.0 * float(splitLine[1])
self.lineIndex = 0
def parseLine(self, line):
"Parse a gcode line and add it to the vector output."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if tableau.getIsLayerStart(firstWord, self, splitLine):
self.extrusionNumber = 0
self.layerCount.printProgressIncrement('skeinlayer')
self.skeinPane = []
self.skeinPanes.append( self.skeinPane )
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.linearMove( line, location )
self.oldLocation = location
elif firstWord == 'M101':
self.extruderActive = True
self.extrusionNumber += 1
elif firstWord == 'M103':
self.extruderActive = False
if firstWord == 'G2' or firstWord == 'G3':
relativeLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
relativeLocation.z = 0.0
location = self.oldLocation + relativeLocation
self.linearMove( line, location )
self.oldLocation = location
class SkeinWindow( tableau.TableauWindow ):
def __init__(self, repository, skein):
"Initialize the skein window.setWindowNewMouseTool"
self.addCanvasMenuRootScrollSkein(repository, skein, '_skeinlayer', 'Skeinlayer')
horizontalRulerBoundingBox = (0, 0, int( skein.screenSize.real ), self.rulingExtent)
self.horizontalRulerCanvas = settings.Tkinter.Canvas(self.root, width = self.canvasWidth, height = self.rulingExtent, scrollregion=horizontalRulerBoundingBox)
self.horizontalRulerCanvas.grid(row=1, column=2, columnspan=96, sticky=settings.Tkinter.E+settings.Tkinter.W)
self.horizontalRulerCanvas['xscrollcommand'] = self.xScrollbar.set
verticalRulerBoundingBox = (0, 0, self.rulingExtent, int(skein.screenSize.imag))
self.verticalRulerCanvas = settings.Tkinter.Canvas(self.root, width=self.rulingExtent, height=self.canvasHeight, scrollregion=verticalRulerBoundingBox)
self.verticalRulerCanvas.grid(row=2, rowspan=96, column=1, sticky=settings.Tkinter.N+settings.Tkinter.S)
self.verticalRulerCanvas['yscrollcommand'] = self.yScrollbar.set
self.xStringVar = settings.Tkinter.StringVar(self.root)
self.xLabel = settings.Tkinter.Label(self.root, textvariable=self.xStringVar)
self.xLabel.grid(row=0, column=3, sticky=settings.Tkinter.W)
self.yStringVar = settings.Tkinter.StringVar(self.root)
self.yLabel = settings.Tkinter.Label(self.root, textvariable=self.yStringVar)
self.yLabel.grid(row=0, column=4, sticky=settings.Tkinter.W)
self.setWindowNewMouseTool(display_line.getNewMouseTool, repository.displayLine)
self.setWindowNewMouseTool(view_move.getNewMouseTool, repository.viewMove)
repository.showPosition.setUpdateFunction(self.setWindowToDisplaySaveUpdate)
repository.widthOfExtrusionThread.setUpdateFunction(self.setWindowToDisplaySaveUpdate)
self.addMouseToolsBind()
self.createRulers()
def addHorizontalRulerRuling( self, xMillimeters ):
"Add a ruling to the horizontal ruler."
xPixel = self.skein.getScreenCoordinates( complex( xMillimeters, 0.0 ) ).real
self.createVerticalLine( 0.0, xPixel )
self.horizontalRulerCanvas.create_text( xPixel + 2, 0, anchor = settings.Tkinter.NW, text = self.getRoundedRulingText( 1, xMillimeters ) )
cumulativeDistance = xMillimeters
self.createVerticalLine( self.rulingExtentTiny, self.skein.getScreenCoordinates( complex( xMillimeters + self.separationWidthMillimetersTenth, 0.0 ) ).real )
for subRulingIndex in xrange(4):
cumulativeDistance += self.separationWidthMillimetersFifth
self.createVerticalLine( self.rulingExtentShort, self.skein.getScreenCoordinates( complex( cumulativeDistance, 0.0 ) ).real )
self.createVerticalLine( self.rulingExtentTiny, self.skein.getScreenCoordinates( complex( cumulativeDistance + self.separationWidthMillimetersTenth, 0.0 ) ).real )
def addVerticalRulerRuling( self, yMillimeters ):
"Add a ruling to the vertical ruler."
fontHeight = 12
yPixel = self.skein.getScreenCoordinates( complex( 0.0, yMillimeters ) ).imag
self.createHorizontalLine( 0.0, yPixel )
yPixel += 2
roundedRulingText = self.getRoundedRulingText( 1, yMillimeters )
effectiveRulingTextLength = len( roundedRulingText )
if roundedRulingText.find('.') != - 1:
effectiveRulingTextLength -= 1
cumulativeDistance = yMillimeters
self.createHorizontalLine( self.rulingExtentTiny, self.skein.getScreenCoordinates( complex( 0.0, yMillimeters + self.separationWidthMillimetersTenth ) ).imag )
for subRulingIndex in xrange(4):
cumulativeDistance += self.separationWidthMillimetersFifth
self.createHorizontalLine( self.rulingExtentShort, self.skein.getScreenCoordinates( complex( 0.0, cumulativeDistance ) ).imag )
self.createHorizontalLine( self.rulingExtentTiny, self.skein.getScreenCoordinates( complex( 0.0, cumulativeDistance + self.separationWidthMillimetersTenth ) ).imag )
if effectiveRulingTextLength < 4:
self.verticalRulerCanvas.create_text( 0, yPixel, anchor = settings.Tkinter.NW, text = roundedRulingText )
return
for character in roundedRulingText:
if character == '.':
yPixel -= fontHeight * 2 / 3
self.verticalRulerCanvas.create_text( 0, yPixel, anchor = settings.Tkinter.NW, text = character )
yPixel += fontHeight
def createHorizontalLine( self, begin, yPixel ):
"Create a horizontal line for the horizontal ruler."
self.verticalRulerCanvas.create_line( begin, yPixel, self.rulingExtent, yPixel, fill = 'black')
def createRulers(self):
"Create the rulers.."
self.rulingExtentShort = 0.382 * self.rulingExtent
self.rulingExtentTiny = 0.764 * self.rulingExtent
self.rulingExtentPointer = 0.5 * ( self.rulingExtentShort + self.rulingExtentTiny )
self.rulingPointerRadius = self.rulingExtent - self.rulingExtentPointer
self.textBoxHeight = int( 0.8 * self.rulingExtent )
self.textBoxWidth = int( 2.5 * self.rulingExtent )
self.separationWidthMillimetersFifth = 0.2 * self.rulingSeparationWidthMillimeters
self.separationWidthMillimetersTenth = 0.1 * self.rulingSeparationWidthMillimeters
rulingSeparationWidthPixels = self.getRulingSeparationWidthPixels( self.rank )
marginOverScale = self.skein.margin / self.skein.scale
cornerMaximumMargin = self.skein.cornerMaximumComplex + marginOverScale
cornerMinimumMargin = self.skein.cornerMinimumComplex - marginOverScale
xRankIndexHigh = getRankIndex( self.rulingSeparationWidthMillimeters, cornerMaximumMargin.real )
xRankIndexLow = getRankIndex( self.rulingSeparationWidthMillimeters, cornerMinimumMargin.real )
for xRankIndex in xrange( xRankIndexLow - 2, xRankIndexHigh + 2 ): # 1 is enough, 2 is to be on the safe side
self.addHorizontalRulerRuling( xRankIndex * self.rulingSeparationWidthMillimeters )
yRankIndexHigh = getRankIndex( self.rulingSeparationWidthMillimeters, cornerMaximumMargin.imag )
yRankIndexLow = getRankIndex( self.rulingSeparationWidthMillimeters, cornerMinimumMargin.imag )
for yRankIndex in xrange( yRankIndexLow - 2, yRankIndexHigh + 2 ): # 1 is enough, 2 is to be on the safe side
self.addVerticalRulerRuling( yRankIndex * self.rulingSeparationWidthMillimeters )
def createVerticalLine( self, begin, xPixel ):
"Create a vertical line for the horizontal ruler."
self.horizontalRulerCanvas.create_line( xPixel, begin, xPixel, self.rulingExtent, fill = 'black')
def getColoredLines(self):
"Get the colored lines from the skein pane."
return self.skeinPanes[self.repository.layer.value]
def getCopy(self):
"Get a copy of this window."
return SkeinWindow(self.repository, self.skein)
def getCopyWithNewSkein(self):
"Get a copy of this window with a new skein."
return getWindowGivenTextRepository( self.skein.fileName, self.skein.gcodeText, self.repository )
def getDrawnColoredLine( self, coloredLine, tags, width ):
"Get the drawn colored line."
return self.canvas.create_line(
coloredLine.begin.real,
coloredLine.begin.imag,
coloredLine.end.real,
coloredLine.end.imag,
fill = coloredLine.colorName,
arrow = self.arrowType,
tags = tags,
width = width )
def getDrawnColoredLineIfThick( self, coloredLine, width ):
"Get the drawn colored line if it has a positive thickness."
if width > 0:
return self.getDrawnColoredLine( coloredLine, coloredLine.tagString, width )
def getDrawnSelectedColoredLine(self, coloredLine):
"Get the drawn selected colored line."
return self.getDrawnColoredLine(coloredLine, 'selection_line', self.repository.widthOfSelectionThread.value)
def motion(self, event):
"The mouse moved."
self.mouseTool.motion(event)
xString = ''
yString = ''
x = self.canvas.canvasx( event.x )
y = self.canvas.canvasy( event.y )
self.horizontalRulerCanvas.delete('pointer')
self.horizontalRulerCanvas.create_polygon( x - self.rulingPointerRadius, self.rulingExtentPointer, x + self.rulingPointerRadius, self.rulingExtentPointer, x, self.rulingExtent, tag = 'pointer')
self.verticalRulerCanvas.delete('pointer')
self.verticalRulerCanvas.create_polygon( self.rulingExtentPointer, y - self.rulingPointerRadius, self.rulingExtentPointer, y + self.rulingPointerRadius, self.rulingExtent, y, tag = 'pointer')
if self.repository.showPosition.value:
motionCoordinate = complex(x, y)
modelCoordinates = self.skein.getModelCoordinates( motionCoordinate )
roundedXText = self.getRoundedRulingText(3, modelCoordinates.real)
roundedYText = self.getRoundedRulingText(3, modelCoordinates.imag)
xString = 'X: ' + roundedXText
yString = 'Y: ' + roundedYText
self.xStringVar.set(xString)
self.yStringVar.set(yString)
def qqqmotion(self, event):
"The mouse moved."
self.mouseTool.motion(event)
x = self.canvas.canvasx( event.x )
y = self.canvas.canvasy( event.y )
self.horizontalRulerCanvas.delete('pointer')
self.horizontalRulerCanvas.create_polygon( x - self.rulingPointerRadius, self.rulingExtentPointer, x + self.rulingPointerRadius, self.rulingExtentPointer, x, self.rulingExtent, tag = 'pointer')
self.verticalRulerCanvas.delete('pointer')
self.verticalRulerCanvas.create_polygon( self.rulingExtentPointer, y - self.rulingPointerRadius, self.rulingExtentPointer, y + self.rulingPointerRadius, self.rulingExtent, y, tag = 'pointer')
if not self.repository.numericPointer.value:
return
motionCoordinate = complex(x, y)
modelCoordinates = self.skein.getModelCoordinates( motionCoordinate )
roundedXText = self.getRoundedRulingText( 3, modelCoordinates.real )
yStart = self.canvas.canvasy( 0 )
self.canvas.create_rectangle( x - 2, yStart, x + self.textBoxWidth, yStart + self.textBoxHeight + 5, fill = self.canvas['background'], tag = 'pointer')
self.canvas.create_text( x, yStart + 5, anchor = settings.Tkinter.NW, tag = 'pointer', text = roundedXText )
roundedYText = self.getRoundedRulingText( 3, modelCoordinates.imag )
xStart = self.canvas.canvasx( 0 )
self.canvas.create_rectangle( xStart, y - 2, xStart + self.textBoxWidth + 5, y + self.textBoxHeight, fill = self.canvas['background'], tag = 'pointer')
self.canvas.create_text( xStart + 5, y, anchor = settings.Tkinter.NW, tag = 'pointer', text = roundedYText )
xString = ''
xString = 'X: ' + roundedXText
self.xStringVar.set(xString)
def relayXview( self, *args ):
"Relay xview changes."
self.canvas.xview( *args )
self.horizontalRulerCanvas.xview( *args )
def relayYview( self, *args ):
"Relay yview changes."
self.canvas.yview( *args )
self.verticalRulerCanvas.yview( *args )
def update(self):
"Update the window."
if len( self.skeinPanes ) < 1:
return
self.limitIndexSetArrowMouseDeleteCanvas()
for coloredLines in self.getUpdateSkeinPanes():
for coloredLine in coloredLines:
if coloredLine.isExtrusionThread:
self.getDrawnColoredLineIfThick( coloredLine, self.repository.widthOfExtrusionThread.value )
else:
self.getDrawnColoredLineIfThick( coloredLine, self.repository.widthOfTravelThread.value )
self.setDisplayLayerIndex()
def main():
"Display the skeinlayer dialog."
if len(sys.argv) > 1:
settings.startMainLoopFromWindow(getWindowAnalyzeFile(' '.join(sys.argv[1 :])))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
| gpl-2.0 | -7,832,921,377,551,226,000 | 50.198246 | 957 | 0.769283 | false |
libvirt/libvirt-test-API | libvirttestapi/repos/domain/domain_name.py | 1 | 1810 | # Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# To test "virsh domname" command
from libvirttestapi.utils import process
required_params = ()
optional_params = {}
VIRSH_DOMNAME = "virsh domname"
VIRSH_IDS = "virsh --quiet list |awk '{print $1}'"
VIRSH_DOMS = "virsh --quiet list |awk '{print $2}'"
def get_output(logger, command):
"""execute shell command
"""
ret = process.run(command, shell=True, ignore_status=True)
if ret.exit_status:
logger.error("executing " + "\"" + command + "\"" + " failed")
logger.error(ret.stderr)
return ret.exit_status, ret.stdout
def domain_name(params):
"""check virsh domname command
"""
logger = params['logger']
ids = []
if 'domainid' in params:
ids.append(params['domainid'])
else:
status, id_ret = get_output(logger, VIRSH_IDS)
if not status:
ids = id_ret.split('\n')
else:
return 1
status, ids_ret = get_output(logger, VIRSH_IDS)
if not status:
ids_list = ids_ret.split('\n')
else:
return 1
status, doms_ret = get_output(logger, VIRSH_DOMS)
if not status:
doms_list = doms_ret.split('\n')
else:
return 1
id_domname = {}
for id in ids_list:
index = ids_list.index(id)
id_domname[id] = doms_list[index]
for id in ids:
status, domname_ret = get_output(logger, VIRSH_DOMNAME + " %s" % id)
if status:
return 1
domname = domname_ret[:-1]
if id_domname[id] == domname:
logger.info("domid %s corresponds to guest %s" % (id, domname))
else:
logger.error("domid %s fails to match to guest %s" % (id, domname))
return 1
return 0
| gpl-2.0 | 2,441,313,483,463,076,400 | 25.617647 | 79 | 0.578453 | false |
javierrodriguezcuevas/git-cola | cola/models/prefs.py | 1 | 6045 | from __future__ import division, absolute_import, unicode_literals
import sys
from .. import core
from .. import hidpi
from .. import observable
from .. import utils
from ..cmd import Command
BACKGROUND_EDITOR = 'cola.backgroundeditor'
BLAME_VIEWER = 'cola.blameviewer'
BOLD_HEADERS = 'cola.boldheaders'
CHECKCONFLICTS = 'cola.checkconflicts'
COMMENT_CHAR = 'core.commentchar'
DIFFCONTEXT = 'gui.diffcontext'
DIFFTOOL = 'diff.tool'
DISPLAY_UNTRACKED = 'gui.displayuntracked'
EDITOR = 'gui.editor'
FONTDIFF = 'cola.fontdiff'
HISTORY_BROWSER = 'gui.historybrowser'
LINEBREAK = 'cola.linebreak'
MAXRECENT = 'cola.maxrecent'
MERGE_DIFFSTAT = 'merge.diffstat'
MERGE_KEEPBACKUP = 'merge.keepbackup'
MERGE_SUMMARY = 'merge.summary'
MERGE_VERBOSITY = 'merge.verbosity'
MERGETOOL = 'merge.tool'
EXPANDTAB = 'cola.expandtab'
SAVEWINDOWSETTINGS = 'cola.savewindowsettings'
SORT_BOOKMARKS = 'cola.sortbookmarks'
STATUS_INDENT = 'cola.statusindent'
STATUS_SHOW_TOTALS = 'cola.statusshowtotals'
TABWIDTH = 'cola.tabwidth'
TEXTWIDTH = 'cola.textwidth'
USER_EMAIL = 'user.email'
USER_NAME = 'user.name'
SAFE_MODE = 'cola.safemode'
SHOW_PATH = 'cola.showpath'
SPELL_CHECK = 'cola.spellcheck'
THEME = 'cola.theme'
HIDPI = 'cola.hidpi'
class Defaults(object):
"""Read-only class for holding defaults that get overridden"""
# These should match Git's defaults for git-defined values.
blame_viewer = 'git gui blame'
bold_headers = False
check_conflicts = True
comment_char = '#'
display_untracked = True
diff_context = 5
difftool = 'xxdiff'
editor = 'gvim'
expandtab = False
history_browser = 'gitk'
linebreak = True
maxrecent = 8
mergetool = difftool
merge_diffstat = True
merge_keep_backup = True
merge_summary = True
merge_verbosity = 4
save_window_settings = True
safe_mode = False
show_path = True
sort_bookmarks = True
spellcheck = False
tabwidth = 8
textwidth = 72
theme = 'default'
hidpi = hidpi.EChoice.AUTO
status_indent = False
status_show_totals = False
def blame_viewer(context):
default = Defaults.blame_viewer
return context.cfg.get(BLAME_VIEWER, default=default)
def bold_headers(context):
return context.cfg.get(BOLD_HEADERS, default=Defaults.bold_headers)
def check_conflicts(context):
return context.cfg.get(CHECKCONFLICTS, default=Defaults.check_conflicts)
def display_untracked(context):
return context.cfg.get(DISPLAY_UNTRACKED,
default=Defaults.display_untracked)
def editor(context):
app = context.cfg.get(EDITOR, default=Defaults.editor)
return _remap_editor(app)
def background_editor(context):
app = context.cfg.get(BACKGROUND_EDITOR, default=editor(context))
return _remap_editor(app)
def _remap_editor(app):
return {'vim': 'gvim -f'}.get(app, app)
def comment_char(context):
return context.cfg.get(COMMENT_CHAR, default=Defaults.comment_char)
def default_history_browser():
if utils.is_win32():
# On Windows, a sensible default is "python git-cola dag"
# which is different than `gitk` below, but is preferred
# because we don't have to guess paths.
git_cola = sys.argv[0].replace('\\', '/')
python = sys.executable.replace('\\', '/')
cwd = core.getcwd().replace('\\', '/')
argv = [python, git_cola, 'dag', '--repo', cwd]
argv = core.prep_for_subprocess(argv)
default = core.list2cmdline(argv)
else:
# The `gitk` script can be launched as-is on unix
default = Defaults.history_browser
return default
def history_browser(context):
default = default_history_browser()
return context.cfg.get(HISTORY_BROWSER, default=default)
def linebreak(context):
return context.cfg.get(LINEBREAK, default=Defaults.linebreak)
def maxrecent(context):
value = Defaults.maxrecent
if context:
value = context.cfg.get(MAXRECENT, default=value)
return value
def spellcheck(context):
return context.cfg.get(SPELL_CHECK, default=Defaults.spellcheck)
def expandtab(context):
return context.cfg.get(EXPANDTAB, default=Defaults.expandtab)
def sort_bookmarks(context):
return context.cfg.get(SORT_BOOKMARKS, default=Defaults.sort_bookmarks)
def tabwidth(context):
return context.cfg.get(TABWIDTH, default=Defaults.tabwidth)
def textwidth(context):
return context.cfg.get(TEXTWIDTH, default=Defaults.textwidth)
def status_indent(context):
return context.cfg.get(STATUS_INDENT, default=Defaults.status_indent)
def status_show_totals(context):
return context.cfg.get(STATUS_SHOW_TOTALS,
default=Defaults.status_show_totals)
class PreferencesModel(observable.Observable):
message_config_updated = 'config_updated'
def __init__(self, context):
observable.Observable.__init__(self)
self.context = context
self.config = context.cfg
def set_config(self, source, config, value):
if source == 'repo':
self.config.set_repo(config, value)
else:
self.config.set_user(config, value)
message = self.message_config_updated
self.notify_observers(message, source, config, value)
def get_config(self, source, config):
if source == 'repo':
value = self.config.get_repo(config)
else:
value = self.config.get(config)
return value
class SetConfig(Command):
"""Store a gitconfig value"""
UNDOABLE = True
def __init__(self, model, source, config, value):
self.source = source
self.config = config
self.value = value
self.old_value = None
self.model = model
def do(self):
self.old_value = self.model.get_config(self.source, self.config)
self.model.set_config(self.source, self.config, self.value)
def undo(self):
if self.old_value is None:
return
self.model.set_config(self.source, self.config, self.old_value)
| gpl-2.0 | -2,676,137,785,137,438,700 | 26.477273 | 76 | 0.67957 | false |
debugger06/MiroX | tv/osx/plat/frontends/widgets/drawingwidgets.py | 3 | 3025 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
"""drawingviews.py -- views that support custom drawing."""
from miro.plat.frontends.widgets import wrappermap
from miro.plat.frontends.widgets import drawing
from miro.plat.frontends.widgets.base import Widget, SimpleBin, FlippedView
from miro.plat.frontends.widgets.layoutmanager import LayoutManager
class DrawingView(FlippedView):
def init(self):
self = super(DrawingView, self).init()
self.layout_manager = LayoutManager()
return self
def isOpaque(self):
return wrappermap.wrapper(self).is_opaque()
def drawRect_(self, rect):
context = drawing.DrawingContext(self, self.bounds(), rect)
context.style = drawing.DrawingStyle()
wrappermap.wrapper(self).draw(context, self.layout_manager)
class DrawingArea(drawing.DrawingMixin, Widget):
"""See https://develop.participatoryculture.org/index.php/WidgetAPI for a description of the API for this class."""
def __init__(self):
Widget.__init__(self)
self.view = DrawingView.alloc().init()
class Background(drawing.DrawingMixin, SimpleBin):
"""See https://develop.participatoryculture.org/index.php/WidgetAPI for a description of the API for this class."""
def __init__(self):
SimpleBin.__init__(self)
self.view = DrawingView.alloc().init()
def calc_size_request(self):
drawing_size = drawing.DrawingMixin.calc_size_request(self)
container_size = SimpleBin.calc_size_request(self)
return (max(container_size[0], drawing_size[0]),
max(container_size[1], drawing_size[1]))
| gpl-2.0 | 4,768,326,888,688,978,000 | 44.149254 | 119 | 0.728264 | false |
openstack/neutron | neutron/objects/quota.py | 2 | 5331 | # Copyright (c) 2016 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.objects import common_types
from oslo_versionedobjects import fields as obj_fields
import sqlalchemy as sa
from sqlalchemy import sql
from sqlalchemy import types as sqltypes
from neutron.db.quota import models
from neutron.objects import base
@base.NeutronObjectRegistry.register
class ResourceDelta(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models.ResourceDelta
primary_keys = ['resource', 'reservation_id']
foreign_keys = {'Reservation': {'reservation_id': 'id'}}
fields = {
'resource': obj_fields.StringField(),
'reservation_id': common_types.UUIDField(),
'amount': obj_fields.IntegerField(nullable=True),
}
@base.NeutronObjectRegistry.register
class Reservation(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models.Reservation
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(nullable=True),
'expiration': obj_fields.DateTimeField(tzinfo_aware=False,
nullable=True),
'resource_deltas': obj_fields.ListOfObjectsField(
ResourceDelta.__name__, nullable=True),
}
synthetic_fields = ['resource_deltas']
def create(self):
deltas = self.resource_deltas
with self.db_context_writer(self.obj_context):
super(Reservation, self).create()
if deltas:
for delta in deltas:
delta.reservation_id = self.id
delta.create()
self.resource_deltas.append(delta)
self.obj_reset_changes(['resource_deltas'])
@classmethod
def delete_expired(cls, context, now, project_id):
resv_query = context.session.query(models.Reservation)
if project_id:
project_expr = (models.Reservation.project_id == project_id)
else:
project_expr = sql.true()
# TODO(manjeets) Fetch and delete objects using
# object/db/api.py once comparison operations are
# supported
resv_query = resv_query.filter(sa.and_(
project_expr, models.Reservation.expiration < now))
return resv_query.delete()
@classmethod
def get_total_reservations_map(cls, context, now, project_id,
resources, expired):
if not resources:
return
resv_query = context.session.query(
models.ResourceDelta.resource,
models.Reservation.expiration,
sql.func.cast(
sql.func.sum(models.ResourceDelta.amount),
sqltypes.Integer)).join(
models.Reservation)
if expired:
exp_expr = (models.Reservation.expiration < now)
else:
exp_expr = (models.Reservation.expiration >= now)
resv_query = resv_query.filter(sa.and_(
models.Reservation.project_id == project_id,
models.ResourceDelta.resource.in_(resources),
exp_expr)).group_by(
models.ResourceDelta.resource,
models.Reservation.expiration)
return dict((resource, total_reserved)
for (resource, exp, total_reserved) in resv_query)
@base.NeutronObjectRegistry.register
class Quota(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models.Quota
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(nullable=True),
'resource': obj_fields.StringField(nullable=True),
'limit': obj_fields.IntegerField(nullable=True),
}
@base.NeutronObjectRegistry.register
class QuotaUsage(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models.QuotaUsage
primary_keys = ['resource', 'project_id']
fields = {
'resource': obj_fields.StringField(),
'project_id': obj_fields.StringField(),
'dirty': obj_fields.BooleanField(default=False),
'in_use': obj_fields.IntegerField(default=0),
'reserved': obj_fields.IntegerField(default=0),
}
@classmethod
def get_object_dirty_protected(cls, context, **kwargs):
query = context.session.query(cls.db_model)
query = query.filter_by(**cls.modify_fields_to_db(kwargs))
# NOTE(manjeets) as lock mode was just for protecting dirty bits
# an update on dirty will prevent the race.
query.filter_by(dirty=True).update({'dirty': True})
res = query.first()
if res:
return cls._load_object(context, res)
| apache-2.0 | 7,260,780,459,108,543,000 | 33.843137 | 78 | 0.633089 | false |
Gazzonyx/samba | third_party/waf/wafadmin/Task.py | 32 | 34311 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
Running tasks in parallel is a simple problem, but in practice it is more complicated:
* dependencies discovered during the build (dynamic task creation)
* dependencies discovered after files are compiled
* the amount of tasks and dependencies (graph size) can be huge
This is why the dependency management is split on three different levels:
1. groups of tasks that run all after another group of tasks
2. groups of tasks that can be run in parallel
3. tasks that can run in parallel, but with possible unknown ad-hoc dependencies
The point #1 represents a strict sequential order between groups of tasks, for example a compiler is produced
and used to compile the rest, whereas #2 and #3 represent partial order constraints where #2 applies to the kind of task
and #3 applies to the task instances.
#1 is held by the task manager: ordered list of TaskGroups (see bld.add_group)
#2 is held by the task groups and the task types: precedence after/before (topological sort),
and the constraints extracted from file extensions
#3 is held by the tasks individually (attribute run_after),
and the scheduler (Runner.py) use Task::runnable_status to reorder the tasks
--
To try, use something like this in your code:
import Constants, Task
Task.algotype = Constants.MAXPARALLEL
--
There are two concepts with the tasks (individual units of change):
* dependency (if 1 is recompiled, recompile 2)
* order (run 2 after 1)
example 1: if t1 depends on t2 and t2 depends on t3 it is not necessary to make t1 depend on t3 (dependency is transitive)
example 2: if t1 depends on a node produced by t2, it is not immediately obvious that t1 must run after t2 (order is not obvious)
The role of the Task Manager is to give the tasks in order (groups of task that may be run in parallel one after the other)
"""
import os, shutil, sys, re, random, datetime, tempfile, shlex
from Utils import md5
import Build, Runner, Utils, Node, Logs, Options
from Logs import debug, warn, error
from Constants import *
algotype = NORMAL
#algotype = JOBCONTROL
#algotype = MAXPARALLEL
COMPILE_TEMPLATE_SHELL = '''
def f(task):
env = task.env
wd = getattr(task, 'cwd', None)
p = env.get_flat
cmd = \'\'\' %s \'\'\' % s
return task.exec_command(cmd, cwd=wd)
'''
COMPILE_TEMPLATE_NOSHELL = '''
def f(task):
env = task.env
wd = getattr(task, 'cwd', None)
def to_list(xx):
if isinstance(xx, str): return [xx]
return xx
lst = []
%s
lst = [x for x in lst if x]
return task.exec_command(lst, cwd=wd)
'''
"""
Enable different kind of dependency algorithms:
1 make groups: first compile all cpps and then compile all links (NORMAL)
2 parallelize all (each link task run after its dependencies) (MAXPARALLEL)
3 like 1 but provide additional constraints for the parallelization (MAXJOBS)
In theory 1. will be faster than 2 for waf, but might be slower for builds
The scheme 2 will not allow for running tasks one by one so it can cause disk thrashing on huge builds
"""
file_deps = Utils.nada
"""
Additional dependency pre-check may be added by replacing the function file_deps.
e.g. extract_outputs, extract_deps below.
"""
class TaskManager(object):
"""The manager is attached to the build object, it holds a list of TaskGroup"""
def __init__(self):
self.groups = []
self.tasks_done = []
self.current_group = 0
self.groups_names = {}
def group_name(self, g):
"""name for the group g (utility)"""
if not isinstance(g, TaskGroup):
g = self.groups[g]
for x in self.groups_names:
if id(self.groups_names[x]) == id(g):
return x
return ''
def group_idx(self, tg):
"""group the task generator tg is in"""
se = id(tg)
for i in range(len(self.groups)):
g = self.groups[i]
for t in g.tasks_gen:
if id(t) == se:
return i
return None
def get_next_set(self):
"""return the next set of tasks to execute
the first parameter is the maximum amount of parallelization that may occur"""
ret = None
while not ret and self.current_group < len(self.groups):
ret = self.groups[self.current_group].get_next_set()
if ret: return ret
else:
self.groups[self.current_group].process_install()
self.current_group += 1
return (None, None)
def add_group(self, name=None, set=True):
#if self.groups and not self.groups[0].tasks:
# error('add_group: an empty group is already present')
g = TaskGroup()
if name and name in self.groups_names:
error('add_group: name %s already present' % name)
self.groups_names[name] = g
self.groups.append(g)
if set:
self.current_group = len(self.groups) - 1
def set_group(self, idx):
if isinstance(idx, str):
g = self.groups_names[idx]
for x in xrange(len(self.groups)):
if id(g) == id(self.groups[x]):
self.current_group = x
else:
self.current_group = idx
def add_task_gen(self, tgen):
if not self.groups: self.add_group()
self.groups[self.current_group].tasks_gen.append(tgen)
def add_task(self, task):
if not self.groups: self.add_group()
self.groups[self.current_group].tasks.append(task)
def total(self):
total = 0
if not self.groups: return 0
for group in self.groups:
total += len(group.tasks)
return total
def add_finished(self, tsk):
self.tasks_done.append(tsk)
bld = tsk.generator.bld
if bld.is_install:
f = None
if 'install' in tsk.__dict__:
f = tsk.__dict__['install']
# install=0 to prevent installation
if f: f(tsk)
else:
tsk.install()
class TaskGroup(object):
"the compilation of one group does not begin until the previous group has finished (in the manager)"
def __init__(self):
self.tasks = [] # this list will be consumed
self.tasks_gen = []
self.cstr_groups = Utils.DefaultDict(list) # tasks having equivalent constraints
self.cstr_order = Utils.DefaultDict(set) # partial order between the cstr groups
self.temp_tasks = [] # tasks put on hold
self.ready = 0
self.post_funs = []
def reset(self):
"clears the state of the object (put back the tasks into self.tasks)"
for x in self.cstr_groups:
self.tasks += self.cstr_groups[x]
self.tasks = self.temp_tasks + self.tasks
self.temp_tasks = []
self.cstr_groups = Utils.DefaultDict(list)
self.cstr_order = Utils.DefaultDict(set)
self.ready = 0
def process_install(self):
for (f, k, kw) in self.post_funs:
f(*k, **kw)
def prepare(self):
"prepare the scheduling"
self.ready = 1
file_deps(self.tasks)
self.make_cstr_groups()
self.extract_constraints()
def get_next_set(self):
"next list of tasks to execute using max job settings, returns (maxjobs, task_list)"
global algotype
if algotype == NORMAL:
tasks = self.tasks_in_parallel()
maxj = MAXJOBS
elif algotype == JOBCONTROL:
(maxj, tasks) = self.tasks_by_max_jobs()
elif algotype == MAXPARALLEL:
tasks = self.tasks_with_inner_constraints()
maxj = MAXJOBS
else:
raise Utils.WafError("unknown algorithm type %s" % (algotype))
if not tasks: return ()
return (maxj, tasks)
def make_cstr_groups(self):
"unite the tasks that have similar constraints"
self.cstr_groups = Utils.DefaultDict(list)
for x in self.tasks:
h = x.hash_constraints()
self.cstr_groups[h].append(x)
def set_order(self, a, b):
self.cstr_order[a].add(b)
def compare_exts(self, t1, t2):
"extension production"
x = "ext_in"
y = "ext_out"
in_ = t1.attr(x, ())
out_ = t2.attr(y, ())
for k in in_:
if k in out_:
return -1
in_ = t2.attr(x, ())
out_ = t1.attr(y, ())
for k in in_:
if k in out_:
return 1
return 0
def compare_partial(self, t1, t2):
"partial relations after/before"
m = "after"
n = "before"
name = t2.__class__.__name__
if name in Utils.to_list(t1.attr(m, ())): return -1
elif name in Utils.to_list(t1.attr(n, ())): return 1
name = t1.__class__.__name__
if name in Utils.to_list(t2.attr(m, ())): return 1
elif name in Utils.to_list(t2.attr(n, ())): return -1
return 0
def extract_constraints(self):
"extract the parallelization constraints from the tasks with different constraints"
keys = self.cstr_groups.keys()
max = len(keys)
# hopefully the length of this list is short
for i in xrange(max):
t1 = self.cstr_groups[keys[i]][0]
for j in xrange(i + 1, max):
t2 = self.cstr_groups[keys[j]][0]
# add the constraints based on the comparisons
val = (self.compare_exts(t1, t2)
or self.compare_partial(t1, t2)
)
if val > 0:
self.set_order(keys[i], keys[j])
elif val < 0:
self.set_order(keys[j], keys[i])
def tasks_in_parallel(self):
"(NORMAL) next list of tasks that may be executed in parallel"
if not self.ready: self.prepare()
keys = self.cstr_groups.keys()
unconnected = []
remainder = []
for u in keys:
for k in self.cstr_order.values():
if u in k:
remainder.append(u)
break
else:
unconnected.append(u)
toreturn = []
for y in unconnected:
toreturn.extend(self.cstr_groups[y])
# remove stuff only after
for y in unconnected:
try: self.cstr_order.__delitem__(y)
except KeyError: pass
self.cstr_groups.__delitem__(y)
if not toreturn and remainder:
raise Utils.WafError("circular order constraint detected %r" % remainder)
return toreturn
def tasks_by_max_jobs(self):
"(JOBCONTROL) returns the tasks that can run in parallel with the max amount of jobs"
if not self.ready: self.prepare()
if not self.temp_tasks: self.temp_tasks = self.tasks_in_parallel()
if not self.temp_tasks: return (None, None)
maxjobs = MAXJOBS
ret = []
remaining = []
for t in self.temp_tasks:
m = getattr(t, "maxjobs", getattr(self.__class__, "maxjobs", MAXJOBS))
if m > maxjobs:
remaining.append(t)
elif m < maxjobs:
remaining += ret
ret = [t]
maxjobs = m
else:
ret.append(t)
self.temp_tasks = remaining
return (maxjobs, ret)
def tasks_with_inner_constraints(self):
"""(MAXPARALLEL) returns all tasks in this group, but add the constraints on each task instance
as an optimization, it might be desirable to discard the tasks which do not have to run"""
if not self.ready: self.prepare()
if getattr(self, "done", None): return None
for p in self.cstr_order:
for v in self.cstr_order[p]:
for m in self.cstr_groups[p]:
for n in self.cstr_groups[v]:
n.set_run_after(m)
self.cstr_order = Utils.DefaultDict(set)
self.cstr_groups = Utils.DefaultDict(list)
self.done = 1
return self.tasks[:] # make a copy
class store_task_type(type):
"store the task types that have a name ending in _task into a map (remember the existing task types)"
def __init__(cls, name, bases, dict):
super(store_task_type, cls).__init__(name, bases, dict)
name = cls.__name__
if name.endswith('_task'):
name = name.replace('_task', '')
if name != 'TaskBase':
TaskBase.classes[name] = cls
class TaskBase(object):
"""Base class for all Waf tasks
The most important methods are (by usual order of call):
1 runnable_status: ask the task if it should be run, skipped, or if we have to ask later
2 __str__: string to display to the user
3 run: execute the task
4 post_run: after the task is run, update the cache about the task
This class should be seen as an interface, it provides the very minimum necessary for the scheduler
so it does not do much.
For illustration purposes, TaskBase instances try to execute self.fun (if provided)
"""
__metaclass__ = store_task_type
color = "GREEN"
maxjobs = MAXJOBS
classes = {}
stat = None
def __init__(self, *k, **kw):
self.hasrun = NOT_RUN
try:
self.generator = kw['generator']
except KeyError:
self.generator = self
self.bld = Build.bld
if kw.get('normal', 1):
self.generator.bld.task_manager.add_task(self)
def __repr__(self):
"used for debugging"
return '\n\t{task: %s %s}' % (self.__class__.__name__, str(getattr(self, "fun", "")))
def __str__(self):
"string to display to the user"
if hasattr(self, 'fun'):
return 'executing: %s\n' % self.fun.__name__
return self.__class__.__name__ + '\n'
def exec_command(self, *k, **kw):
"use this for executing commands from tasks"
# TODO in waf 1.6, eliminate bld.exec_command, and move the cwd processing to here
if self.env['env']:
kw['env'] = self.env['env']
return self.generator.bld.exec_command(*k, **kw)
def runnable_status(self):
"RUN_ME SKIP_ME or ASK_LATER"
return RUN_ME
def can_retrieve_cache(self):
return False
def call_run(self):
if self.can_retrieve_cache():
return 0
return self.run()
def run(self):
"called if the task must run"
if hasattr(self, 'fun'):
return self.fun(self)
return 0
def post_run(self):
"update the dependency tree (node stats)"
pass
def display(self):
"print either the description (using __str__) or the progress bar or the ide output"
col1 = Logs.colors(self.color)
col2 = Logs.colors.NORMAL
if Options.options.progress_bar == 1:
return self.generator.bld.progress_line(self.position[0], self.position[1], col1, col2)
if Options.options.progress_bar == 2:
ela = Utils.get_elapsed_time(self.generator.bld.ini)
try:
ins = ','.join([n.name for n in self.inputs])
except AttributeError:
ins = ''
try:
outs = ','.join([n.name for n in self.outputs])
except AttributeError:
outs = ''
return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (self.position[1], self.position[0], ins, outs, ela)
total = self.position[1]
n = len(str(total))
fs = '[%%%dd/%%%dd] %%s%%s%%s' % (n, n)
return fs % (self.position[0], self.position[1], col1, str(self), col2)
def attr(self, att, default=None):
"retrieve an attribute from the instance or from the class (microoptimization here)"
ret = getattr(self, att, self)
if ret is self: return getattr(self.__class__, att, default)
return ret
def hash_constraints(self):
"identify a task type for all the constraints relevant for the scheduler: precedence, file production"
a = self.attr
sum = hash((self.__class__.__name__,
str(a('before', '')),
str(a('after', '')),
str(a('ext_in', '')),
str(a('ext_out', '')),
self.__class__.maxjobs))
return sum
def format_error(self):
"error message to display to the user (when a build fails)"
if getattr(self, "err_msg", None):
return self.err_msg
elif self.hasrun == CRASHED:
try:
return " -> task failed (err #%d): %r" % (self.err_code, self)
except AttributeError:
return " -> task failed: %r" % self
elif self.hasrun == MISSING:
return " -> missing files: %r" % self
else:
return ''
def install(self):
"""
installation is performed by looking at the task attributes:
* install_path: installation path like "${PREFIX}/bin"
* filename: install the first node in the outputs as a file with a particular name, be certain to give os.sep
* chmod: permissions
"""
bld = self.generator.bld
d = self.attr('install')
if self.attr('install_path'):
lst = [a.relpath_gen(bld.srcnode) for a in self.outputs]
perm = self.attr('chmod', O644)
if self.attr('src'):
# if src is given, install the sources too
lst += [a.relpath_gen(bld.srcnode) for a in self.inputs]
if self.attr('filename'):
dir = self.install_path.rstrip(os.sep) + os.sep + self.attr('filename')
bld.install_as(dir, lst[0], self.env, perm)
else:
bld.install_files(self.install_path, lst, self.env, perm)
class Task(TaskBase):
"""The parent class is quite limited, in this version:
* file system interaction: input and output nodes
* persistence: do not re-execute tasks that have already run
* caching: same files can be saved and retrieved from a cache directory
* dependencies:
implicit, like .c files depending on .h files
explicit, like the input nodes or the dep_nodes
environment variables, like the CXXFLAGS in self.env
"""
vars = []
def __init__(self, env, **kw):
TaskBase.__init__(self, **kw)
self.env = env
# inputs and outputs are nodes
# use setters when possible
self.inputs = []
self.outputs = []
self.dep_nodes = []
self.run_after = []
# Additionally, you may define the following
#self.dep_vars = 'PREFIX DATADIR'
def __str__(self):
"string to display to the user"
env = self.env
src_str = ' '.join([a.nice_path(env) for a in self.inputs])
tgt_str = ' '.join([a.nice_path(env) for a in self.outputs])
if self.outputs: sep = ' -> '
else: sep = ''
return '%s: %s%s%s\n' % (self.__class__.__name__.replace('_task', ''), src_str, sep, tgt_str)
def __repr__(self):
return "".join(['\n\t{task: ', self.__class__.__name__, " ", ",".join([x.name for x in self.inputs]), " -> ", ",".join([x.name for x in self.outputs]), '}'])
def unique_id(self):
"get a unique id: hash the node paths, the variant, the class, the function"
try:
return self.uid
except AttributeError:
"this is not a real hot zone, but we want to avoid surprizes here"
m = md5()
up = m.update
up(self.__class__.__name__)
up(self.env.variant())
p = None
for x in self.inputs + self.outputs:
if p != x.parent.id:
p = x.parent.id
up(x.parent.abspath())
up(x.name)
self.uid = m.digest()
return self.uid
def set_inputs(self, inp):
if isinstance(inp, list): self.inputs += inp
else: self.inputs.append(inp)
def set_outputs(self, out):
if isinstance(out, list): self.outputs += out
else: self.outputs.append(out)
def set_run_after(self, task):
"set (scheduler) order on another task"
# TODO: handle list or object
assert isinstance(task, TaskBase)
self.run_after.append(task)
def add_file_dependency(self, filename):
"TODO user-provided file dependencies"
node = self.generator.bld.path.find_resource(filename)
self.dep_nodes.append(node)
def signature(self):
# compute the result one time, and suppose the scan_signature will give the good result
try: return self.cache_sig[0]
except AttributeError: pass
self.m = md5()
# explicit deps
exp_sig = self.sig_explicit_deps()
# env vars
var_sig = self.sig_vars()
# implicit deps
imp_sig = SIG_NIL
if self.scan:
try:
imp_sig = self.sig_implicit_deps()
except ValueError:
return self.signature()
# we now have the signature (first element) and the details (for debugging)
ret = self.m.digest()
self.cache_sig = (ret, exp_sig, imp_sig, var_sig)
return ret
def runnable_status(self):
"SKIP_ME RUN_ME or ASK_LATER"
#return 0 # benchmarking
if self.inputs and (not self.outputs):
if not getattr(self.__class__, 'quiet', None):
warn("invalid task (no inputs OR outputs): override in a Task subclass or set the attribute 'quiet' %r" % self)
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
env = self.env
bld = self.generator.bld
# first compute the signature
new_sig = self.signature()
# compare the signature to a signature computed previously
key = self.unique_id()
try:
prev_sig = bld.task_sigs[key][0]
except KeyError:
debug("task: task %r must run as it was never run before or the task code changed", self)
return RUN_ME
# compare the signatures of the outputs
for node in self.outputs:
variant = node.variant(env)
try:
if bld.node_sigs[variant][node.id] != new_sig:
return RUN_ME
except KeyError:
debug("task: task %r must run as the output nodes do not exist", self)
return RUN_ME
# debug if asked to
if Logs.verbose: self.debug_why(bld.task_sigs[key])
if new_sig != prev_sig:
return RUN_ME
return SKIP_ME
def post_run(self):
"called after a successful task run"
bld = self.generator.bld
env = self.env
sig = self.signature()
ssig = sig.encode('hex')
variant = env.variant()
for node in self.outputs:
# check if the node exists ..
try:
os.stat(node.abspath(env))
except OSError:
self.hasrun = MISSING
self.err_msg = '-> missing file: %r' % node.abspath(env)
raise Utils.WafError
# important, store the signature for the next run
bld.node_sigs[variant][node.id] = sig
bld.task_sigs[self.unique_id()] = self.cache_sig
# file caching, if possible
# try to avoid data corruption as much as possible
if not Options.cache_global or Options.options.nocache or not self.outputs:
return None
if getattr(self, 'cached', None):
return None
dname = os.path.join(Options.cache_global, ssig)
tmpdir = tempfile.mkdtemp(prefix=Options.cache_global + os.sep + 'waf')
try:
shutil.rmtree(dname)
except:
pass
try:
i = 0
for node in self.outputs:
variant = node.variant(env)
dest = os.path.join(tmpdir, str(i) + node.name)
shutil.copy2(node.abspath(env), dest)
i += 1
except (OSError, IOError):
try:
shutil.rmtree(tmpdir)
except:
pass
else:
try:
os.rename(tmpdir, dname)
except OSError:
try:
shutil.rmtree(tmpdir)
except:
pass
else:
try:
os.chmod(dname, O755)
except:
pass
def can_retrieve_cache(self):
"""
Retrieve build nodes from the cache
update the file timestamps to help cleaning the least used entries from the cache
additionally, set an attribute 'cached' to avoid re-creating the same cache files
suppose there are files in cache/dir1/file1 and cache/dir2/file2
first, read the timestamp of dir1
then try to copy the files
then look at the timestamp again, if it has changed, the data may have been corrupt (cache update by another process)
should an exception occur, ignore the data
"""
if not Options.cache_global or Options.options.nocache or not self.outputs:
return None
env = self.env
sig = self.signature()
ssig = sig.encode('hex')
# first try to access the cache folder for the task
dname = os.path.join(Options.cache_global, ssig)
try:
t1 = os.stat(dname).st_mtime
except OSError:
return None
i = 0
for node in self.outputs:
variant = node.variant(env)
orig = os.path.join(dname, str(i) + node.name)
try:
shutil.copy2(orig, node.abspath(env))
# mark the cache file as used recently (modified)
os.utime(orig, None)
except (OSError, IOError):
debug('task: failed retrieving file')
return None
i += 1
# is it the same folder?
try:
t2 = os.stat(dname).st_mtime
except OSError:
return None
if t1 != t2:
return None
for node in self.outputs:
self.generator.bld.node_sigs[variant][node.id] = sig
if Options.options.progress_bar < 1:
self.generator.bld.printout('restoring from cache %r\n' % node.bldpath(env))
self.cached = True
return 1
def debug_why(self, old_sigs):
"explains why a task is run"
new_sigs = self.cache_sig
def v(x):
return x.encode('hex')
debug("Task %r", self)
msgs = ['Task must run', '* Source file or manual dependency', '* Implicit dependency', '* Environment variable']
tmp = 'task: -> %s: %s %s'
for x in xrange(len(msgs)):
if (new_sigs[x] != old_sigs[x]):
debug(tmp, msgs[x], v(old_sigs[x]), v(new_sigs[x]))
def sig_explicit_deps(self):
bld = self.generator.bld
up = self.m.update
# the inputs
for x in self.inputs + getattr(self, 'dep_nodes', []):
if not x.parent.id in bld.cache_scanned_folders:
bld.rescan(x.parent)
variant = x.variant(self.env)
try:
up(bld.node_sigs[variant][x.id])
except KeyError:
raise Utils.WafError('Missing node signature for %r (required by %r)' % (x, self))
# manual dependencies, they can slow down the builds
if bld.deps_man:
additional_deps = bld.deps_man
for x in self.inputs + self.outputs:
try:
d = additional_deps[x.id]
except KeyError:
continue
for v in d:
if isinstance(v, Node.Node):
bld.rescan(v.parent)
variant = v.variant(self.env)
try:
v = bld.node_sigs[variant][v.id]
except KeyError:
raise Utils.WafError('Missing node signature for %r (required by %r)' % (v, self))
elif hasattr(v, '__call__'):
v = v() # dependency is a function, call it
up(v)
for x in self.dep_nodes:
v = bld.node_sigs[x.variant(self.env)][x.id]
up(v)
return self.m.digest()
def sig_vars(self):
bld = self.generator.bld
env = self.env
# dependencies on the environment vars
act_sig = bld.hash_env_vars(env, self.__class__.vars)
self.m.update(act_sig)
# additional variable dependencies, if provided
dep_vars = getattr(self, 'dep_vars', None)
if dep_vars:
self.m.update(bld.hash_env_vars(env, dep_vars))
return self.m.digest()
#def scan(self, node):
# """this method returns a tuple containing:
# * a list of nodes corresponding to real files
# * a list of names for files not found in path_lst
# the input parameters may have more parameters that the ones used below
# """
# return ((), ())
scan = None
# compute the signature, recompute it if there is no match in the cache
def sig_implicit_deps(self):
"the signature obtained may not be the one if the files have changed, we do it in two steps"
bld = self.generator.bld
# get the task signatures from previous runs
key = self.unique_id()
prev_sigs = bld.task_sigs.get(key, ())
if prev_sigs:
try:
# for issue #379
if prev_sigs[2] == self.compute_sig_implicit_deps():
return prev_sigs[2]
except (KeyError, OSError):
pass
del bld.task_sigs[key]
raise ValueError('rescan')
# no previous run or the signature of the dependencies has changed, rescan the dependencies
(nodes, names) = self.scan()
if Logs.verbose:
debug('deps: scanner for %s returned %s %s', str(self), str(nodes), str(names))
# store the dependencies in the cache
bld.node_deps[key] = nodes
bld.raw_deps[key] = names
# recompute the signature and return it
try:
sig = self.compute_sig_implicit_deps()
except KeyError:
try:
nodes = []
for k in bld.node_deps.get(self.unique_id(), []):
if k.id & 3 == 2: # Node.FILE:
if not k.id in bld.node_sigs[0]:
nodes.append(k)
else:
if not k.id in bld.node_sigs[self.env.variant()]:
nodes.append(k)
except:
nodes = '?'
raise Utils.WafError('Missing node signature for %r (for implicit dependencies %r)' % (nodes, self))
return sig
def compute_sig_implicit_deps(self):
"""it is intended for .cpp and inferred .h files
there is a single list (no tree traversal)
this is the hot spot so ... do not touch"""
upd = self.m.update
bld = self.generator.bld
tstamp = bld.node_sigs
env = self.env
for k in bld.node_deps.get(self.unique_id(), []):
# unlikely but necessary if it happens
if not k.parent.id in bld.cache_scanned_folders:
# if the parent folder is removed, an OSError may be thrown
bld.rescan(k.parent)
# if the parent folder is removed, a KeyError will be thrown
if k.id & 3 == 2: # Node.FILE:
upd(tstamp[0][k.id])
else:
upd(tstamp[env.variant()][k.id])
return self.m.digest()
def funex(c):
dc = {}
exec(c, dc)
return dc['f']
reg_act = re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})", re.M)
def compile_fun_shell(name, line):
"""Compiles a string (once) into a function, eg:
simple_task_type('c++', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}')
The env variables (CXX, ..) on the task must not hold dicts (order)
The reserved keywords TGT and SRC represent the task input and output nodes
quick test:
bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"')
"""
extr = []
def repl(match):
g = match.group
if g('dollar'): return "$"
elif g('backslash'): return '\\\\'
elif g('subst'): extr.append((g('var'), g('code'))); return "%s"
return None
line = reg_act.sub(repl, line) or line
parm = []
dvars = []
app = parm.append
for (var, meth) in extr:
if var == 'SRC':
if meth: app('task.inputs%s' % meth)
else: app('" ".join([a.srcpath(env) for a in task.inputs])')
elif var == 'TGT':
if meth: app('task.outputs%s' % meth)
else: app('" ".join([a.bldpath(env) for a in task.outputs])')
else:
if not var in dvars: dvars.append(var)
app("p('%s')" % var)
if parm: parm = "%% (%s) " % (',\n\t\t'.join(parm))
else: parm = ''
c = COMPILE_TEMPLATE_SHELL % (line, parm)
debug('action: %s', c)
return (funex(c), dvars)
def compile_fun_noshell(name, line):
extr = []
def repl(match):
g = match.group
if g('dollar'): return "$"
elif g('subst'): extr.append((g('var'), g('code'))); return "<<|@|>>"
return None
line2 = reg_act.sub(repl, line)
params = line2.split('<<|@|>>')
buf = []
dvars = []
app = buf.append
for x in xrange(len(extr)):
params[x] = params[x].strip()
if params[x]:
app("lst.extend(%r)" % params[x].split())
(var, meth) = extr[x]
if var == 'SRC':
if meth: app('lst.append(task.inputs%s)' % meth)
else: app("lst.extend([a.srcpath(env) for a in task.inputs])")
elif var == 'TGT':
if meth: app('lst.append(task.outputs%s)' % meth)
else: app("lst.extend([a.bldpath(env) for a in task.outputs])")
else:
app('lst.extend(to_list(env[%r]))' % var)
if not var in dvars: dvars.append(var)
if params[-1]:
app("lst.extend(%r)" % shlex.split(params[-1]))
fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf)
debug('action: %s', fun)
return (funex(fun), dvars)
def compile_fun(name, line, shell=None):
"commands can be launched by the shell or not"
if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0:
shell = True
#else:
# shell = False
if shell is None:
if sys.platform == 'win32':
shell = False
else:
shell = True
if shell:
return compile_fun_shell(name, line)
else:
return compile_fun_noshell(name, line)
def simple_task_type(name, line, color='GREEN', vars=[], ext_in=[], ext_out=[], before=[], after=[], shell=None):
"""return a new Task subclass with the function run compiled from the line given"""
(fun, dvars) = compile_fun(name, line, shell)
fun.code = line
return task_type_from_func(name, fun, vars or dvars, color, ext_in, ext_out, before, after)
def task_type_from_func(name, func, vars=[], color='GREEN', ext_in=[], ext_out=[], before=[], after=[]):
"""return a new Task subclass with the function run compiled from the line given"""
params = {
'run': func,
'vars': vars,
'color': color,
'name': name,
'ext_in': Utils.to_list(ext_in),
'ext_out': Utils.to_list(ext_out),
'before': Utils.to_list(before),
'after': Utils.to_list(after),
}
cls = type(Task)(name, (Task,), params)
TaskBase.classes[name] = cls
return cls
def always_run(cls):
"""Set all task instances of this class to be executed whenever a build is started
The task signature is calculated, but the result of the comparation between
task signatures is bypassed
"""
old = cls.runnable_status
def always(self):
ret = old(self)
if ret == SKIP_ME:
return RUN_ME
return ret
cls.runnable_status = always
def update_outputs(cls):
"""When a command is always run, it is possible that the output only change
sometimes. By default the build node have as a hash the signature of the task
which may not change. With this, the output nodes (produced) are hashed,
and the hashes are set to the build nodes
This may avoid unnecessary recompilations, but it uses more resources
(hashing the output files) so it is not used by default
"""
old_post_run = cls.post_run
def post_run(self):
old_post_run(self)
bld = self.generator.bld
for output in self.outputs:
bld.node_sigs[self.env.variant()][output.id] = Utils.h_file(output.abspath(self.env))
bld.task_sigs[output.id] = self.unique_id()
cls.post_run = post_run
old_runnable_status = cls.runnable_status
def runnable_status(self):
status = old_runnable_status(self)
if status != RUN_ME:
return status
uid = self.unique_id()
try:
bld = self.outputs[0].__class__.bld
new_sig = self.signature()
prev_sig = bld.task_sigs[uid][0]
if prev_sig == new_sig:
for x in self.outputs:
if not x.id in bld.node_sigs[self.env.variant()]:
return RUN_ME
if bld.task_sigs[x.id] != uid: # ensure the outputs are associated with *this* task
return RUN_ME
return SKIP_ME
except KeyError:
pass
except IndexError:
pass
return RUN_ME
cls.runnable_status = runnable_status
def extract_outputs(tasks):
"""file_deps: Infer additional dependencies from task input and output nodes
"""
v = {}
for x in tasks:
try:
(ins, outs) = v[x.env.variant()]
except KeyError:
ins = {}
outs = {}
v[x.env.variant()] = (ins, outs)
for a in getattr(x, 'inputs', []):
try: ins[a.id].append(x)
except KeyError: ins[a.id] = [x]
for a in getattr(x, 'outputs', []):
try: outs[a.id].append(x)
except KeyError: outs[a.id] = [x]
for (ins, outs) in v.values():
links = set(ins.iterkeys()).intersection(outs.iterkeys())
for k in links:
for a in ins[k]:
for b in outs[k]:
a.set_run_after(b)
def extract_deps(tasks):
"""file_deps: Infer additional dependencies from task input and output nodes and from implicit dependencies
returned by the scanners - that will only work if all tasks are created
this is aimed at people who have pathological builds and who do not care enough
to implement the build dependencies properly
with two loops over the list of tasks, do not expect this to be really fast
"""
# first reuse the function above
extract_outputs(tasks)
# map the output nodes to the tasks producing them
out_to_task = {}
for x in tasks:
v = x.env.variant()
try:
lst = x.outputs
except AttributeError:
pass
else:
for node in lst:
out_to_task[(v, node.id)] = x
# map the dependencies found to the tasks compiled
dep_to_task = {}
for x in tasks:
try:
x.signature()
except: # this is on purpose
pass
v = x.env.variant()
key = x.unique_id()
for k in x.generator.bld.node_deps.get(x.unique_id(), []):
try: dep_to_task[(v, k.id)].append(x)
except KeyError: dep_to_task[(v, k.id)] = [x]
# now get the intersection
deps = set(dep_to_task.keys()).intersection(set(out_to_task.keys()))
# and add the dependencies from task to task
for idx in deps:
for k in dep_to_task[idx]:
k.set_run_after(out_to_task[idx])
# cleanup, remove the signatures
for x in tasks:
try:
delattr(x, 'cache_sig')
except AttributeError:
pass
| gpl-3.0 | 2,687,789,399,678,374,400 | 27.616347 | 159 | 0.663228 | false |
rue89-tech/edx-analytics-dashboard | analytics_dashboard/courses/tests/test_views/test_engagement.py | 3 | 12401 | from ddt import ddt
import mock
from mock import patch, Mock
import httpretty
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import analyticsclient.constants.activity_type as AT
from courses.tests.factories import CourseEngagementDataFactory
from courses.tests.test_views import (
DEMO_COURSE_ID,
CourseViewTestMixin,
PatchMixin,
CourseStructureViewMixin,
CourseAPIMixin)
from courses.tests import utils
class CourseEngagementViewTestMixin(PatchMixin, CourseAPIMixin): # pylint: disable=abstract-method
api_method = 'analyticsclient.course.Course.activity'
active_secondary_nav_label = None
def setUp(self):
super(CourseEngagementViewTestMixin, self).setUp()
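        # The video engagement pages are gated behind a feature switch; enable it for these tests.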
self.toggle_switch('enable_engagement_videos_pages', True)
def get_mock_data(self, course_id):
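        """Return canned course activity data standing in for the Analytics API response."""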
return utils.mock_course_activity(course_id)
def assertPrimaryNav(self, nav, course_id):
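        """Assert the primary nav item is Engagement and links to the content page."""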
expected = {
'icon': 'fa-bar-chart',
'href': reverse('courses:engagement:content', kwargs={'course_id': course_id}),
'label': _('Engagement'),
'name': 'engagement'
}
self.assertDictEqual(nav, expected)
def get_expected_secondary_nav(self, _course_id):
# override for each secondary page
return [
{
'active': True,
'href': '#',
'name': 'content',
'label': _('Content'),
},
{
'active': True,
'href': '#',
'name': 'videos',
'label': _('Videos'),
},
]
def assertSecondaryNavs(self, nav, course_id):
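        """Assert the secondary nav matches the expected Content and Videos entries."""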
expected = self.get_expected_secondary_nav(course_id)
self.assertListEqual(nav, expected)
@ddt
class CourseEngagementContentViewTests(CourseViewTestMixin, CourseEngagementViewTestMixin, TestCase):
viewname = 'courses:engagement:content'
presenter_method = 'courses.presenters.engagement.CourseEngagementActivityPresenter.get_summary_and_trend_data'
active_secondary_nav_label = 'Content'
def get_expected_secondary_nav(self, course_id):
expected = super(CourseEngagementContentViewTests, self).get_expected_secondary_nav(course_id)
expected[1].update({
'href': reverse('courses:engagement:videos', kwargs={'course_id': course_id}),
'active': False
})
return expected
def assertViewIsValid(self, course_id):
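        """Verify the content page renders the expected summary, trends, and navigation."""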
rv = utils.mock_engagement_activity_summary_and_trend_data()
with mock.patch(self.presenter_method, mock.Mock(return_value=rv)):
response = self.client.get(self.path(course_id=course_id))
# make sure that we get a 200
self.assertEqual(response.status_code, 200)
# check page title
self.assertEqual(response.context['page_title'], 'Engagement Content')
# make sure the summary numbers are correct
self.assertEqual(response.context['summary'][AT.ANY], 100)
self.assertEqual(response.context['summary'][AT.ATTEMPTED_PROBLEM], 301)
self.assertEqual(response.context['summary'][AT.PLAYED_VIDEO], 1000)
self.assertEqual(response.context['summary'][AT.POSTED_FORUM], 0)
# check to make sure the activity trends are correct
trends = response.context['js_data']['course']['engagementTrends']
self.assertEqual(len(trends), 2)
expected = {
'weekEnding': '2013-01-08',
AT.ANY: 100,
AT.ATTEMPTED_PROBLEM: 301,
AT.PLAYED_VIDEO: 1000,
AT.POSTED_FORUM: 0
}
self.assertDictEqual(trends[0], expected)
expected = {
'weekEnding': '2013-01-01',
AT.ANY: 1000,
AT.ATTEMPTED_PROBLEM: 0,
AT.PLAYED_VIDEO: 10000,
AT.POSTED_FORUM: 45
}
self.assertDictEqual(trends[1], expected)
self.assertPrimaryNav(response.context['primary_nav_item'], course_id)
self.assertSecondaryNavs(response.context['secondary_nav_items'], course_id)
self.assertValidCourseName(course_id, response.context)
def assertValidMissingDataContext(self, context):
        # summary and engagementTrends should evaluate to falsy values, which the
        # template uses to render error messages
self.assertIsNone(context['summary'])
self.assertIsNone(context['js_data']['course']['engagementTrends'])
@ddt
class CourseEngagementVideoMixin(CourseEngagementViewTestMixin, CourseStructureViewMixin):
active_secondary_nav_label = 'Video'
sections = None
def get_expected_secondary_nav(self, course_id):
expected = super(CourseEngagementVideoMixin, self).get_expected_secondary_nav(course_id)
expected[0].update({
'href': reverse('courses:engagement:content', kwargs={'course_id': course_id}),
'active': False
})
return expected
@httpretty.activate
def test_invalid_course(self):
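        """Run the shared invalid-course check against the course structure endpoint."""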
self._test_invalid_course('course_structures/{}/')
def setUp(self):
super(CourseEngagementVideoMixin, self).setUp()
self.factory = CourseEngagementDataFactory()
self.sections = self.factory.presented_sections
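        # Stub the video presenter methods with factory data so the views never hit the Analytics API.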
self._patch('courses.presenters.engagement.CourseEngagementVideoPresenter.sections',
return_value=self.sections)
self._patch('courses.presenters.engagement.CourseEngagementVideoPresenter.section',
return_value=self.sections[0])
self._patch('courses.presenters.engagement.CourseEngagementVideoPresenter.subsections',
return_value=self.sections[0]['children'])
self._patch('courses.presenters.engagement.CourseEngagementVideoPresenter.subsection',
return_value=self.sections[0]['children'][0])
self._patch('courses.presenters.engagement.CourseEngagementVideoPresenter.subsection_children',
return_value=self.sections[0]['children'][0]['children'])
self._patch('courses.presenters.engagement.CourseEngagementVideoPresenter.get_video_timeline',
return_value=self.factory.get_presented_video_timeline())
self._patch('courses.presenters.engagement.CourseEngagementVideoPresenter.block',
return_value=self.sections[0]['children'][0]['children'][0])
self._patch('courses.presenters.engagement.CourseEngagementVideoPresenter.subsection_child',
return_value=self.sections[0]['children'][0]['children'][0])
self.start_patching()
def assertValidContext(self, context):
expected = {
'sections': self.sections,
}
self.assertDictContainsSubset(expected, context)
@httpretty.activate
@patch('courses.presenters.engagement.CourseEngagementVideoPresenter.sections', Mock(return_value=None))
def test_missing_sections(self):
""" Every video page will use sections and will return 200 if sections aren't available. """
self.mock_course_detail(DEMO_COURSE_ID)
response = self.client.get(self.path(course_id=DEMO_COURSE_ID))
        # the base page should still return a 200 even if no sections are found
self.assertEqual(response.status_code, 200)
class EngagementVideoCourseTest(CourseEngagementVideoMixin, TestCase):
viewname = 'courses:engagement:videos'
class EngagementVideoCourseSectionTest(CourseEngagementVideoMixin, TestCase):
viewname = 'courses:engagement:video_section'
def path(self, **kwargs):
# Use default kwargs for tests that don't necessarily care about the specific argument values.
default_kwargs = {
'section_id': self.sections[0]['id'],
}
default_kwargs.update(kwargs)
kwargs = default_kwargs
return super(EngagementVideoCourseSectionTest, self).path(**kwargs)
def assertValidContext(self, context):
super(EngagementVideoCourseSectionTest, self).assertValidContext(context)
self.assertEqual(self.sections[0], context['section'])
self.assertListEqual(self.sections[0]['children'], context['subsections'])
@httpretty.activate
@patch('courses.presenters.engagement.CourseEngagementVideoPresenter.section', Mock(return_value=None))
def test_missing_section(self):
self.mock_course_detail(DEMO_COURSE_ID)
response = self.client.get(self.path(course_id=DEMO_COURSE_ID, section_id='Invalid'))
self.assertEqual(response.status_code, 404)
class EngagementVideoCourseSubsectionTest(CourseEngagementVideoMixin, TestCase):
viewname = 'courses:engagement:video_subsection'
def path(self, **kwargs):
# Use default kwargs for tests that don't necessarily care about the specific argument values.
default_kwargs = {
'section_id': self.sections[0]['id'],
'subsection_id': self.sections[0]['children'][0]['id'],
}
default_kwargs.update(kwargs)
kwargs = default_kwargs
return super(EngagementVideoCourseSubsectionTest, self).path(**kwargs)
def assertValidContext(self, context):
super(EngagementVideoCourseSubsectionTest, self).assertValidContext(context)
section = self.sections[0]
self.assertEqual(section, context['section'])
self.assertListEqual(section['children'], context['subsections'])
self.assertEqual(section['children'][0], context['subsection'])
@httpretty.activate
@patch('courses.presenters.engagement.CourseEngagementVideoPresenter.subsection', Mock(return_value=None))
def test_missing_subsection(self):
self.mock_course_detail(DEMO_COURSE_ID)
response = self.client.get(self.path(course_id=DEMO_COURSE_ID, section_id='Invalid', subsection_id='Nope'))
self.assertEqual(response.status_code, 404)
class EngagementVideoCourseTimelineTest(CourseEngagementVideoMixin, TestCase):
viewname = 'courses:engagement:video_timeline'
def path(self, **kwargs):
# Use default kwargs for tests that don't necessarily care about the specific argument values.
default_kwargs = {
'section_id': self.sections[0]['id'],
'subsection_id': self.sections[0]['children'][0]['id'],
'video_id': self.sections[0]['children'][0]['children'][0]['id']
}
default_kwargs.update(kwargs)
kwargs = default_kwargs
return super(EngagementVideoCourseTimelineTest, self).path(**kwargs)
def assertValidContext(self, context):
super(EngagementVideoCourseTimelineTest, self).assertValidContext(context)
section = self.sections[0]
self.assertEqual(section, context['section'])
self.assertListEqual(section['children'], context['subsections'])
self.assertEqual(section['children'][0], context['subsection'])
self.assertEqual(section['children'][0]['children'], context['subsection_children'])
self.assertEqual(section['children'][0]['children'][0], context['summary_metrics'])
self.assertListEqual(self.factory.get_presented_video_timeline(),
context['js_data']['course']['videoTimeline'])
@httpretty.activate
@patch('courses.presenters.engagement.CourseEngagementVideoPresenter.subsection_child', Mock(return_value=None))
def test_missing_video_module(self):
""" Every video page will use sections and will return 200 if sections aren't available. """
self.mock_course_detail(DEMO_COURSE_ID)
response = self.client.get(self.path(course_id=DEMO_COURSE_ID))
        # with the video block patched away there is nothing to render, so expect a 404
self.assertEqual(response.status_code, 404)
@httpretty.activate
@patch('courses.presenters.engagement.CourseEngagementVideoPresenter.get_video_timeline', Mock(return_value=None))
def test_missing_video_data(self):
self.mock_course_detail(DEMO_COURSE_ID)
response = self.client.get(self.path(course_id=DEMO_COURSE_ID))
# page will still be displayed, but with error messages
self.assertEqual(response.status_code, 200)
| agpl-3.0 | -7,601,700,950,755,360,000 | 42.975177 | 118 | 0.66535 | false |
realsobek/freeipa | ipatests/test_webui/test_pwpolicy.py | 5 | 1697 | # Authors:
# Petr Vobornik <[email protected]>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Password policy tests
"""
from ipatests.test_webui.ui_driver import UI_driver
from ipatests.test_webui.ui_driver import screenshot
import pytest
ENTITY = 'pwpolicy'
DATA = {
'pkey': 'admins',
'add': [
('combobox', 'cn', 'admins'),
('textbox', 'cospriority', '364'),
],
'mod': [
('textbox', 'krbmaxpwdlife', '3000'),
('textbox', 'krbminpwdlife', '1'),
('textbox', 'krbpwdhistorylength', '0'),
('textbox', 'krbpwdmindiffchars', '2'),
('textbox', 'krbpwdminlength', '2'),
('textbox', 'krbpwdmaxfailure', '15'),
('textbox', 'krbpwdfailurecountinterval', '5'),
('textbox', 'krbpwdlockoutduration', '3600'),
],
}
@pytest.mark.tier1
class test_pwpolicy(UI_driver):
@screenshot
def test_crud(self):
"""
Basic CRUD: pwpolicy
"""
self.init_app()
self.basic_crud(ENTITY, DATA)
| gpl-3.0 | 3,548,179,279,743,340,500 | 28.77193 | 71 | 0.649381 | false |
infoxchange/lettuce | tests/unit/test_server.py | 17 | 1228 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import lettuce
import os
import commands
import sys
from nose.tools import assert_equals
from lettuce.fs import FileSystem
current_directory = FileSystem.dirname(__file__)
def test_server_threading():
"""
Test django httpd threading
"""
FileSystem.pushd(current_directory, "django", "coconut")
status, out = commands.getstatusoutput(
"python manage.py harvest --verbosity=1")
assert_equals(status, 0, out)
| gpl-3.0 | -8,531,463,943,411,658,000 | 36.181818 | 71 | 0.737571 | false |
raccoongang/edx-platform | lms/djangoapps/instructor/enrollment_report.py | 24 | 3296 | """
Defines abstract class for the Enrollment Reports.
"""
import abc
import collections
import json
from django.contrib.auth.models import User
from student.models import UserProfile
class AbstractEnrollmentReportProvider(object):
"""
Abstract interface for Detailed Enrollment Report Provider
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_enrollment_info(self, user, course_id):
"""
Returns the User Enrollment information.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_user_profile(self, user_id):
"""
Returns the UserProfile information.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_payment_info(self, user, course_id):
"""
Returns the User Payment information.
"""
raise NotImplementedError()
class BaseAbstractEnrollmentReportProvider(AbstractEnrollmentReportProvider):
"""
    The base abstract class for all Enrollment Reports that can support multiple
    backends such as MySQL/Django-ORM.
    This class must not be instantiated directly; it must be subclassed.
"""
def get_user_profile(self, user_id):
"""
Returns the UserProfile information.
"""
user_info = User.objects.select_related('profile').get(id=user_id)
# extended user profile fields are stored in the user_profile meta column
meta = {}
if user_info.profile.meta:
meta = json.loads(user_info.profile.meta)
user_data = collections.OrderedDict()
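        # Build the profile as an ordered dict so the report columns come out
        # in a stable, predictable sequence.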
user_data['User ID'] = user_info.id
user_data['Username'] = user_info.username
user_data['Email'] = user_info.email
user_data['Full Name'] = user_info.profile.name
user_data['First Name'] = meta.get('first-name', '')
user_data['Last Name'] = meta.get('last-name', '')
user_data['Company Name'] = meta.get('company', '')
user_data['Title'] = meta.get('title', '')
user_data['Language'] = user_info.profile.language
user_data['Country'] = user_info.profile.country
user_data['Year of Birth'] = user_info.profile.year_of_birth
user_data['Gender'] = None
gender = user_info.profile.gender
for _gender in UserProfile.GENDER_CHOICES:
if gender == _gender[0]:
user_data['Gender'] = _gender[1]
break
user_data['Level of Education'] = None
level_of_education = user_info.profile.level_of_education
for _loe in UserProfile.LEVEL_OF_EDUCATION_CHOICES:
if level_of_education == _loe[0]:
user_data['Level of Education'] = _loe[1]
user_data['Mailing Address'] = user_info.profile.mailing_address
user_data['Goals'] = user_info.profile.goals
user_data['City'] = user_info.profile.city
user_data['Country'] = user_info.profile.country
return user_data
def get_enrollment_info(self, user, course_id):
"""
Returns the User Enrollment information.
"""
raise NotImplementedError()
def get_payment_info(self, user, course_id):
"""
Returns the User Payment information.
"""
raise NotImplementedError()
| agpl-3.0 | 1,205,670,733,924,950,300 | 31.633663 | 81 | 0.622573 | false |
yajiedesign/mxnet | python/mxnet/optimizer/utils.py | 9 | 1777 | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Optimizer utility functions."""
from __future__ import absolute_import
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
def _as_classic(a, allow_np):
# TODO(junwu): This is a temp solution for allowing converting
# np.ndarray to mx.nd.NDArray to be fed into the optimizer since
# users may have custom optimizers implemented using mx.nd.NDArray ops.
from ..numpy import ndarray as np_ndarray
if isinstance(a, (tuple, list)):
if any(isinstance(x, np_ndarray) for x in a):
if allow_np:
return [x.as_nd_ndarray() for x in a]
else:
raise ValueError('Converting np.ndarray to mx.nd.NDArray is not allowed')
else:
if isinstance(a, np_ndarray):
if allow_np:
return a.as_nd_ndarray()
else:
raise ValueError('Converting np.ndarray to mx.nd.NDArray is not allowed')
return a
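# Illustrative only (these names are not part of this module): optimizer code
# typically wraps incoming arrays before handing them to legacy NDArray-based
# update logic, along the lines of
#
#   weight = _as_classic(weight, allow_np=self.allow_np_array)
#   grad = _as_classic(grad, allow_np=self.allow_np_array)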
| apache-2.0 | 1,184,993,605,800,009,000 | 40.325581 | 89 | 0.691052 | false |
stefanv/aandete | app/lib/paste/debug/profile.py | 50 | 7607 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Middleware that profiles the request and displays profiling
information at the bottom of each page.
"""
import sys
import os
import hotshot
import hotshot.stats
import threading
import cgi
import six
import time
from cStringIO import StringIO
from paste import response
__all__ = ['ProfileMiddleware', 'profile_decorator']
class ProfileMiddleware(object):
"""
Middleware that profiles all requests.
All HTML pages will have profiling information appended to them.
The data is isolated to that single request, and does not include
data from previous requests.
This uses the ``hotshot`` module, which affects performance of the
application. It also runs in a single-threaded mode, so it is
only usable in development environments.
"""
style = ('clear: both; background-color: #ff9; color: #000; '
'border: 2px solid #000; padding: 5px;')
def __init__(self, app, global_conf=None,
log_filename='profile.log.tmp',
limit=40):
self.app = app
self.lock = threading.Lock()
self.log_filename = log_filename
self.limit = limit
def __call__(self, environ, start_response):
catch_response = []
body = []
def replace_start_response(status, headers, exc_info=None):
catch_response.extend([status, headers])
start_response(status, headers, exc_info)
return body.append
def run_app():
app_iter = self.app(environ, replace_start_response)
try:
body.extend(app_iter)
finally:
if hasattr(app_iter, 'close'):
app_iter.close()
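        # hotshot is not thread-safe, so the profiling run and its log file are
        # guarded by a lock, effectively serialising requests (development use only).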
self.lock.acquire()
try:
prof = hotshot.Profile(self.log_filename)
prof.addinfo('URL', environ.get('PATH_INFO', ''))
try:
prof.runcall(run_app)
finally:
prof.close()
body = ''.join(body)
headers = catch_response[1]
content_type = response.header_value(headers, 'content-type')
if content_type is None or not content_type.startswith('text/html'):
# We can't add info to non-HTML output
return [body]
stats = hotshot.stats.load(self.log_filename)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
output = capture_output(stats.print_stats, self.limit)
output_callers = capture_output(
stats.print_callers, self.limit)
body += '<pre style="%s">%s\n%s</pre>' % (
self.style, cgi.escape(output), cgi.escape(output_callers))
return [body]
finally:
self.lock.release()
def capture_output(func, *args, **kw):
# Not threadsafe! (that's okay when ProfileMiddleware uses it,
# though, since it synchronizes itself.)
out = StringIO()
old_stdout = sys.stdout
sys.stdout = out
try:
func(*args, **kw)
finally:
sys.stdout = old_stdout
return out.getvalue()
def profile_decorator(**options):
"""
Profile a single function call.
Used around a function, like::
@profile_decorator(options...)
def ...
All calls to the function will be profiled. The options are
all keywords, and are:
log_file:
The filename to log to (or ``'stdout'`` or ``'stderr'``).
Default: stderr.
display_limit:
Only show the top N items, default: 20.
sort_stats:
A list of string-attributes to sort on. Default
``('time', 'calls')``.
strip_dirs:
Strip directories/module names from files? Default True.
add_info:
If given, this info will be added to the report (for your
own tracking). Default: none.
log_filename:
The temporary filename to log profiling data to. Default;
``./profile_data.log.tmp``
no_profile:
If true, then don't actually profile anything. Useful for
conditional profiling.
"""
if options.get('no_profile'):
def decorator(func):
return func
return decorator
def decorator(func):
def replacement(*args, **kw):
return DecoratedProfile(func, **options)(*args, **kw)
return replacement
return decorator
class DecoratedProfile(object):
lock = threading.Lock()
def __init__(self, func, **options):
self.func = func
self.options = options
def __call__(self, *args, **kw):
self.lock.acquire()
try:
return self.profile(self.func, *args, **kw)
finally:
self.lock.release()
def profile(self, func, *args, **kw):
ops = self.options
prof_filename = ops.get('log_filename', 'profile_data.log.tmp')
prof = hotshot.Profile(prof_filename)
prof.addinfo('Function Call',
self.format_function(func, *args, **kw))
if ops.get('add_info'):
prof.addinfo('Extra info', ops['add_info'])
exc_info = None
try:
start_time = time.time()
try:
result = prof.runcall(func, *args, **kw)
except:
exc_info = sys.exc_info()
end_time = time.time()
finally:
prof.close()
stats = hotshot.stats.load(prof_filename)
os.unlink(prof_filename)
if ops.get('strip_dirs', True):
stats.strip_dirs()
stats.sort_stats(*ops.get('sort_stats', ('time', 'calls')))
display_limit = ops.get('display_limit', 20)
output = capture_output(stats.print_stats, display_limit)
output_callers = capture_output(
stats.print_callers, display_limit)
output_file = ops.get('log_file')
if output_file in (None, 'stderr'):
f = sys.stderr
elif output_file in ('-', 'stdout'):
f = sys.stdout
else:
f = open(output_file, 'a')
f.write('\n%s\n' % ('-'*60))
f.write('Date: %s\n' % time.strftime('%c'))
f.write('Function call: %s\n'
% self.format_function(func, *args, **kw))
f.write('Wall time: %0.2f seconds\n'
% (end_time - start_time))
f.write(output)
f.write(output_callers)
if output_file not in (None, '-', 'stdout', 'stderr'):
f.close()
if exc_info:
# We captured an exception earlier, now we re-raise it
six.reraise(exc_info[0], exc_info[1], exc_info[2])
return result
def format_function(self, func, *args, **kw):
args = map(repr, args)
args.extend(
['%s=%r' % (k, v) for k, v in kw.items()])
return '%s(%s)' % (func.__name__, ', '.join(args))
def make_profile_middleware(
app, global_conf,
log_filename='profile.log.tmp',
limit=40):
"""
Wrap the application in a component that will profile each
request. The profiling data is then appended to the output
of each page.
Note that this serializes all requests (i.e., removing
concurrency). Therefore never use this in production.
"""
limit = int(limit)
return ProfileMiddleware(
app, log_filename=log_filename, limit=limit)
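# A minimal sketch of wiring the middleware by hand (assuming an existing WSGI
# callable named `application`); deployments normally configure this through
# PasteDeploy instead:
#
#   from paste.debug.profile import ProfileMiddleware
#   application = ProfileMiddleware(application,
#                                   log_filename='profile.log.tmp', limit=40)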
| bsd-3-clause | -7,894,136,446,469,683,000 | 32.364035 | 84 | 0.568424 | false |
dwavesystems/dimod | tests/test_polytruncatecomposite.py | 1 | 2368 | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import dimod.testing as dtest
from dimod import BinaryQuadraticModel, HigherOrderComposite
from dimod import PolyTruncateComposite, ExactSolver
class TestConstruction(unittest.TestCase):
def test_10(self):
sampler = PolyTruncateComposite(HigherOrderComposite(ExactSolver()), 10)
dtest.assert_composite_api(sampler)
self.assertEqual(sampler.parameters, sampler.child.parameters)
def test_0(self):
with self.assertRaises(ValueError):
PolyTruncateComposite(HigherOrderComposite(ExactSolver()), 0)
class TestSample(unittest.TestCase):
def test_sampleset_shorter(self):
h = {'a': -4.0, 'b': -4.0, 'c': 0}
J = {('a', 'b'): 3.2}
sampler = PolyTruncateComposite(HigherOrderComposite(ExactSolver()), 10)
with self.assertWarns(DeprecationWarning):
samples = sampler.sample_ising(h, J)
# we should see 2**n < 10 rows
self.assertEqual(len(samples), 8)
def test_sampleset_trim(self):
h = {'a': -4.0, 'b': -4.0, 'c': 0}
J = {('a', 'b'): 3.2}
sampler = PolyTruncateComposite(HigherOrderComposite(ExactSolver()), 6)
with self.assertWarns(DeprecationWarning):
samples = sampler.sample_ising(h, J)
self.assertEqual(len(samples), 6)
    def test_with_aggregation(self):
# this is actually just a smoke test, needs better testing in the
# future...
h = {'a': -4.0, 'b': -4.0, 'c': 0}
J = {('a', 'b'): 3.2}
sampler = PolyTruncateComposite(HigherOrderComposite(ExactSolver()), 6, aggregate=True)
with self.assertWarns(DeprecationWarning):
samples = sampler.sample_ising(h, J)
self.assertEqual(len(samples), 6)
| apache-2.0 | -3,437,323,772,655,248,000 | 34.878788 | 95 | 0.660051 | false |
subodhchhabra/airflow | tests/contrib/operators/test_segment_track_event_operator.py | 42 | 2088 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import mock
import unittest
from airflow import configuration, AirflowException
from airflow.contrib.hooks.segment_hook import SegmentHook
TEST_CONN_ID = 'test_segment'
WRITE_KEY = 'foo'
class TestSegmentHook(unittest.TestCase):
def setUp(self):
super(TestSegmentHook, self).setUp()
configuration.load_test_config()
self.conn = conn = mock.MagicMock()
conn.write_key = WRITE_KEY
self.expected_write_key = WRITE_KEY
self.conn.extra_dejson = {'write_key': self.expected_write_key}
class UnitTestSegmentHook(SegmentHook):
def get_conn(self):
return conn
def get_connection(self, connection_id):
return conn
self.test_hook = UnitTestSegmentHook(segment_conn_id=TEST_CONN_ID)
def test_get_conn(self):
expected_connection = self.test_hook.get_conn()
self.assertEqual(expected_connection, self.conn)
self.assertIsNotNone(expected_connection.write_key)
self.assertEqual(expected_connection.write_key, self.expected_write_key)
def test_on_error(self):
with self.assertRaises(AirflowException):
self.test_hook.on_error('error', ['items'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,119,401,674,845,474,000 | 31.625 | 80 | 0.697318 | false |
ZuluPro/abdallah | abdallah/rest/viewsets.py | 1 | 1371 | from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from abdallah.models import Project, Build, Job
from abdallah.rest import serializers
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all()
serializer_class = serializers.ProjectSerializer
authentication_classes = ()
@detail_route(methods=['post'])
def run_build(self, request, pk=None):
"""Run a build for a project and a commit."""
queryset = Project.objects.all()
project = get_object_or_404(queryset, pk=pk)
build = project.launch_build(commit=request.data.get('commit', 'master'))
serializer = serializers.BuildSerializer(build)
return Response(serializer.data)
class BuildViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Build.objects.all()
serializer_class = serializers.BuildSerializer
authentication_classes = ()
class JobViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Job.objects.all()
serializer_class = serializers.JobSerializer
authentication_classes = ()
@detail_route(methods=['get'])
def logs(self, request, pk=None):
queryset = Job.objects.all()
job = get_object_or_404(queryset, pk=pk)
return Response(job.logs)
| bsd-3-clause | 4,440,280,404,797,743,000 | 34.153846 | 81 | 0.718454 | false |
sonaht/ansible | lib/ansible/modules/network/nxos/nxos_vtp_version.py | 46 | 5735 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vtp_version
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP version configuration.
description:
- Manages VTP version configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP version.
- Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_version)
to fully manage VTP operations.
options:
version:
description:
- VTP version number.
required: true
choices: ['1', '2']
'''
EXAMPLES = '''
# ENSURE VTP VERSION IS 2
- nxos_vtp_version:
version: 2
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"version": "2"}
existing:
description:
- k/v pairs of existing vtp
returned: always
type: dict
sample: {"domain": "testing", "version": "1", "vtp_password": "\"}
end_state:
description: k/v pairs of vtp after module execution
returned: always
type: dict
sample: {"domain": "testing", "version": "2", "vtp_password": "\"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp version 2"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'status' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_vtp_config(module):
command = 'show vtp status'
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
vtp_parsed = {}
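    # 'show vtp status' is fetched as raw ASCII, so the running VTP version and
    # domain name are scraped out with regexes rather than read from JSON.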
if body:
version_regex = '.*VTP version running\s+:\s+(?P<version>\d).*'
domain_regex = '.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
try:
match_version = re.match(version_regex, body, re.DOTALL)
version = match_version.groupdict()['version']
except AttributeError:
version = ''
try:
match_domain = re.match(domain_regex, body, re.DOTALL)
domain = match_domain.groupdict()['domain']
except AttributeError:
domain = ''
if domain and version:
vtp_parsed['domain'] = domain
vtp_parsed['version'] = version
vtp_parsed['vtp_password'] = get_vtp_password(module)
return vtp_parsed
def get_vtp_password(module):
command = 'show vtp password'
body = execute_show_command(command, module)[0]
password = body['passwd']
if password:
return str(password)
else:
return ""
def main():
argument_spec = dict(
version=dict(type='str', choices=['1', '2'], required=True),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
version = module.params['version']
existing = get_vtp_config(module)
end_state = existing
args = dict(version=version)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
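    # Only build a command when the requested version differs from the running
    # configuration; in check mode the commands are reported but never applied.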
if delta:
commands.append(['vtp version {0}'.format(version)])
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_vtp_config(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,200,573,338,479,495,700 | 26.309524 | 79 | 0.623888 | false |
bbc/kamaelia | Code/Python/Kamaelia/Kamaelia/Visualisation/PhysicsGraph/__init__.py | 3 | 3471 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Simple topography viewer server - takes textual commands from a single socket
# and renders the appropriate graph
import pygame
from pygame.locals import *
import random, time, re, sys
from Axon.Scheduler import scheduler as _scheduler
import Axon as _Axon
# import Kamaelia.Physics
from Kamaelia.Support.Particles import Particle as BaseParticle
from Kamaelia.UI.MH import PyGameApp, DragHandler
component = _Axon.Component.component
# from GridRenderer import GridRenderer
# from RenderingParticle import RenderingParticle
# from ParticleDragger import ParticleDragger
# from TopologyViewer import TopologyViewer
# from lines_to_tokenlists import lines_to_tokenlists
# from chunks_to_lines import chunks_to_lines
from .TopologyViewerServer import TopologyViewerServer
def parseArgs(argv, extraShortArgs="", extraLongArgs=[]):
import getopt
shortargs = "fh" + extraShortArgs
longargs = ["help","fullscreen","resolution=","port="] + extraLongArgs
optlist, remargs = getopt.getopt(argv, shortargs, longargs)
dictArgs = {}
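    # Map the recognised switches onto the keyword arguments understood by
    # TopologyViewerServer (help, fullscreen, screensize, serverPort).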
for o,a in optlist:
if o in ("-h","--help"):
dictArgs['help'] = "Arguments:\n" + \
" -h, --help\n" + \
" This help message\n\n" + \
" -f, --fullscreen\n" + \
" Full screen mode\n\n" + \
" --resolution=WxH\n" + \
" Set window size to W by H pixels\n\n" + \
" --port=N\n" + \
" Listen on port N (default is 1500)\n\n"
elif o in ("-f","--fullscreen"):
dictArgs['fullscreen'] = True
elif o in ("--resolution"):
match = re.match(r"^(\d+)[x,-](\d+)$", a)
x=int(match.group(1))
y=int(match.group(2))
dictArgs['screensize'] = (x,y)
elif o in ("--port"):
dictArgs['serverPort'] = int(a)
return dictArgs, optlist, remargs
if __name__=="__main__":
import sys
print ("X1", sys.argv)
print ("X2", sys.argv[1:])
print ("X3", parseArgs(sys.argv[1:]))
dictArgs, remargs, junk = parseArgs(sys.argv[1:])
if "help" in dictArgs:
print (dictArgs["help"])
else:
app = TopologyViewerServer(**dictArgs)
app.activate()
_scheduler.run.runThreads(slowmo=0)
| apache-2.0 | 6,212,373,356,702,578,000 | 34.060606 | 79 | 0.581389 | false |
sajuptpm/magnum | magnum/common/pythonk8sclient/client/models/V1beta3_Lifecycle.py | 15 | 1913 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_Lifecycle(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'postStart': 'V1beta3_Handler',
'preStop': 'V1beta3_Handler'
}
self.attributeMap = {
'postStart': 'postStart',
'preStop': 'preStop'
}
#called immediately after a container is started; if the handler fails, the container is terminated and restarted according to its restart policy; other management of the container blocks until the hook completes
self.postStart = None # V1beta3_Handler
#called before a container is terminated; the container is terminated after the handler completes; other management of the container blocks until the hook completes
self.preStop = None # V1beta3_Handler
| apache-2.0 | -1,163,022,930,754,847,200 | 34.425926 | 220 | 0.634083 | false |
arborworkflows/ArborWebApps | treeExplorer/service/listItemsInCollection.py | 5 | 1433 |
import json
import string
import requests
import tangelo
def run(collectionName=None):
# Create an empty response object.
response = {}
collectionNames = []
    # find the named collection in the Girder instance backing this local Arbor and remember its id
girderlocation = 'http://localhost:9000'
resp = requests.get(girderlocation+'/api/v1/collection')
for coll in resp.json():
if (coll['name'] == collectionName):
collectionId = coll['_id']
print "found collectionID:",collectionId
break
# get a list of all the folders inside this collection
datafolderresp = requests.get(girderlocation+'/api/v1/folder?parentType=collection&parentId='+collectionId)
print "found folder:", datafolderresp.text
# find Data folder inside named collection
for folder in datafolderresp.json():
if (folder['name'] == 'Data'):
folderId = folder['_id']
print "found folderID:",collectionId
break
# loop through the folder
dataitemsresp = requests.get(girderlocation+'/api/v1/item?folderId='+folderId)
for item in dataitemsresp.json():
collectionNames.append(item['name'])
# Pack the results into the response object, and return it.
response['result'] = collectionNames
# Return the response object.
tangelo.log(str(response))
return json.dumps(response)
| apache-2.0 | -5,685,020,270,118,348,000 | 30.152174 | 114 | 0.671319 | false |
cosim/zerorpc-python | tests/test_middleware_client.py | 102 | 12192 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gevent
import zerorpc
from testutils import random_ipc_endpoint
class EchoModule(object):
def __init__(self, trigger=None):
self.last_msg = None
self._trigger = trigger
def echo(self, msg):
self.last_msg = "echo: " + msg
if self._trigger:
self._trigger.set()
return self.last_msg
@zerorpc.stream
def echoes(self, msg):
self.last_msg = "echo: " + msg
for i in xrange(0, 3):
yield self.last_msg
def crash(self, msg):
try:
self.last_msg = "raise: " + msg
raise RuntimeError("BrokenEchoModule")
finally:
if self._trigger:
self._trigger.set()
@zerorpc.stream
def echoes_crash(self, msg):
self.crash(msg)
def timeout(self, msg):
self.last_msg = "timeout: " + msg
gevent.sleep(2)
def test_hook_client_before_request():
class ClientBeforeRequestMiddleware(object):
def __init__(self):
self.called = False
def client_before_request(self, event):
self.called = True
self.method = event.name
zero_ctx = zerorpc.Context()
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(context=zero_ctx)
test_client.connect(endpoint)
assert test_client.echo("test") == "echo: test"
test_middleware = ClientBeforeRequestMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
assert test_client.echo("test") == "echo: test"
assert test_middleware.called == True
assert test_middleware.method == 'echo'
test_server.stop()
test_server_task.join()
class ClientAfterRequestMiddleware(object):
def __init__(self):
self.called = False
def client_after_request(self, req_event, rep_event, exception):
self.called = True
assert req_event is not None
assert req_event.name == "echo" or req_event.name == "echoes"
self.retcode = rep_event.name
assert exception is None
def test_hook_client_after_request():
zero_ctx = zerorpc.Context()
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(context=zero_ctx)
test_client.connect(endpoint)
assert test_client.echo("test") == "echo: test"
test_middleware = ClientAfterRequestMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
assert test_client.echo("test") == "echo: test"
assert test_middleware.called == True
assert test_middleware.retcode == 'OK'
test_server.stop()
test_server_task.join()
def test_hook_client_after_request_stream():
zero_ctx = zerorpc.Context()
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(context=zero_ctx)
test_client.connect(endpoint)
it = test_client.echoes("test")
assert next(it) == "echo: test"
for echo in it:
assert echo == "echo: test"
test_middleware = ClientAfterRequestMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
it = test_client.echoes("test")
assert next(it) == "echo: test"
assert test_middleware.called == False
for echo in it:
assert echo == "echo: test"
assert test_middleware.called == True
assert test_middleware.retcode == 'STREAM_DONE'
test_server.stop()
test_server_task.join()
def test_hook_client_after_request_timeout():
class ClientAfterRequestMiddleware(object):
def __init__(self):
self.called = False
def client_after_request(self, req_event, rep_event, exception):
self.called = True
assert req_event is not None
assert req_event.name == "timeout"
assert rep_event is None
zero_ctx = zerorpc.Context()
test_middleware = ClientAfterRequestMiddleware()
zero_ctx.register_middleware(test_middleware)
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(timeout=1, context=zero_ctx)
test_client.connect(endpoint)
assert test_middleware.called == False
try:
test_client.timeout("test")
except zerorpc.TimeoutExpired as ex:
assert test_middleware.called == True
assert "timeout" in ex.args[0]
test_server.stop()
test_server_task.join()
class ClientAfterFailedRequestMiddleware(object):
def __init__(self):
self.called = False
def client_after_request(self, req_event, rep_event, exception):
assert req_event is not None
assert req_event.name == "crash" or req_event.name == "echoes_crash"
self.called = True
assert isinstance(exception, zerorpc.RemoteError)
assert exception.name == 'RuntimeError'
assert 'BrokenEchoModule' in exception.msg
assert rep_event.name == 'ERR'
def test_hook_client_after_request_remote_error():
zero_ctx = zerorpc.Context()
test_middleware = ClientAfterFailedRequestMiddleware()
zero_ctx.register_middleware(test_middleware)
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(timeout=1, context=zero_ctx)
test_client.connect(endpoint)
assert test_middleware.called == False
try:
test_client.crash("test")
except zerorpc.RemoteError:
assert test_middleware.called == True
test_server.stop()
test_server_task.join()
def test_hook_client_after_request_remote_error_stream():
zero_ctx = zerorpc.Context()
test_middleware = ClientAfterFailedRequestMiddleware()
zero_ctx.register_middleware(test_middleware)
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(timeout=1, context=zero_ctx)
test_client.connect(endpoint)
assert test_middleware.called == False
try:
test_client.echoes_crash("test")
except zerorpc.RemoteError:
assert test_middleware.called == True
test_server.stop()
test_server_task.join()
def test_hook_client_handle_remote_error_inspect():
class ClientHandleRemoteErrorMiddleware(object):
def __init__(self):
self.called = False
def client_handle_remote_error(self, event):
self.called = True
test_middleware = ClientHandleRemoteErrorMiddleware()
zero_ctx = zerorpc.Context()
zero_ctx.register_middleware(test_middleware)
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(context=zero_ctx)
test_client.connect(endpoint)
assert test_middleware.called == False
try:
test_client.crash("test")
except zerorpc.RemoteError as ex:
assert test_middleware.called == True
assert ex.name == "RuntimeError"
test_server.stop()
test_server_task.join()
# This is a seriously broken idea, but possible nonetheless
class ClientEvalRemoteErrorMiddleware(object):
def __init__(self):
self.called = False
def client_handle_remote_error(self, event):
self.called = True
name, msg, tb = event.args
etype = eval(name)
e = etype(tb)
return e
def test_hook_client_handle_remote_error_eval():
test_middleware = ClientEvalRemoteErrorMiddleware()
zero_ctx = zerorpc.Context()
zero_ctx.register_middleware(test_middleware)
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(context=zero_ctx)
test_client.connect(endpoint)
assert test_middleware.called == False
try:
test_client.crash("test")
except RuntimeError as ex:
assert test_middleware.called == True
assert "BrokenEchoModule" in ex.args[0]
test_server.stop()
test_server_task.join()
def test_hook_client_handle_remote_error_eval_stream():
test_middleware = ClientEvalRemoteErrorMiddleware()
zero_ctx = zerorpc.Context()
zero_ctx.register_middleware(test_middleware)
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(context=zero_ctx)
test_client.connect(endpoint)
assert test_middleware.called == False
try:
test_client.echoes_crash("test")
except RuntimeError as ex:
assert test_middleware.called == True
assert "BrokenEchoModule" in ex.args[0]
test_server.stop()
test_server_task.join()
def test_hook_client_after_request_custom_error():
# This is a seriously broken idea, but possible nonetheless
class ClientEvalInspectRemoteErrorMiddleware(object):
def __init__(self):
self.called = False
def client_handle_remote_error(self, event):
name, msg, tb = event.args
etype = eval(name)
e = etype(tb)
return e
def client_after_request(self, req_event, rep_event, exception):
assert req_event is not None
assert req_event.name == "crash"
self.called = True
assert isinstance(exception, RuntimeError)
test_middleware = ClientEvalInspectRemoteErrorMiddleware()
zero_ctx = zerorpc.Context()
zero_ctx.register_middleware(test_middleware)
endpoint = random_ipc_endpoint()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client(context=zero_ctx)
test_client.connect(endpoint)
assert test_middleware.called == False
try:
test_client.crash("test")
except RuntimeError as ex:
assert test_middleware.called == True
assert "BrokenEchoModule" in ex.args[0]
test_server.stop()
test_server_task.join()
| mit | 5,091,339,135,524,437,000 | 31.686327 | 81 | 0.674459 | false |
akretion/connector | connector/checkpoint/checkpoint.py | 1 | 9460 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The checkpoint is a model containing records to be reviewed by the end
users. The connectors register records to verify so the user can check
them and flag them as reviewed.
A concrete use case is the import of new products from Magento. Once
they are imported, the user have to configure things like the supplier,
so they appears in this list.
"""
from openerp.osv import orm, fields
from openerp.tools.translate import _
class connector_checkpoint(orm.Model):
_name = 'connector.checkpoint'
_description = 'Connector Checkpoint'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _get_models(self, cr, uid, context=None):
""" All models are allowed as reference, anyway the
fields.reference are readonly. """
model_obj = self.pool.get('ir.model')
model_ids = model_obj.search(cr, uid, [], context=context)
models = model_obj.read(cr, uid, model_ids,
['model', 'name'], context=context)
return [(m['model'], m['name']) for m in models]
def _get_ref(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for check in self.browse(cr, uid, ids, context=context):
res[check.id] = check.model_id.model + ',' + str(check.record_id)
return res
def _get_record_name(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for check in self.browse(cr, uid, ids, context=context):
model_obj = self.pool.get(check.model_id.model)
res[check.id] = model_obj.name_get(cr, uid, check.record_id,
context=context)[0][1]
return res
def _search_record(self, cr, uid, obj, name, args, context=None):
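        # Checkpoints only store (model_id, record_id) pairs, so searching by
        # record name means running a name_search on every model that has
        # checkpoints and collecting the matching checkpoint ids.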
ids = set()
model_obj = self.pool.get('ir.model')
sql = "SELECT DISTINCT model_id FROM connector_checkpoint"
cr.execute(sql)
model_ids = [row[0] for row in cr.fetchall()]
models = model_obj.read(cr, uid, model_ids,
['model'], context=context)
for criteria in args:
__, operator, value = criteria
for model in models:
model_id = model['id']
model_name = model['model']
model_obj = self.pool.get(model_name)
results = model_obj.name_search(cr, uid,
name=value,
operator=operator,
context=context)
res_ids = [res[0] for res in results]
check_ids = self.search(cr, uid,
[('model_id', '=', model_id),
('record_id', 'in', res_ids)],
context=context)
ids.update(check_ids)
if not ids:
return [('id', '=', '0')]
return [('id', 'in', tuple(ids))]
_columns = {
'record': fields.function(
_get_ref,
type='reference',
string='Record',
selection=_get_models,
help="The record to review.",
size=128,
readonly=True),
'name': fields.function(
_get_record_name,
fnct_search=_search_record,
type='char',
string='Record Name',
help="Name of the record to review",
readonly=True),
'record_id': fields.integer('Record ID',
required=True,
readonly=True),
'model_id': fields.many2one('ir.model',
string='Model',
required=True,
readonly=True),
'backend_id': fields.reference(
'Imported from',
selection=_get_models,
size=128,
readonly=True,
required=True,
help="The record has been imported from this backend",
select=1),
'state': fields.selection(
[('need_review', 'Need Review'),
('reviewed', 'Reviewed')],
'Status',
required=True,
readonly=True),
}
_defaults = {
'state': 'need_review',
}
def reviewed(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids,
{'state': 'reviewed'},
context=context)
def _subscribe_users(self, cr, uid, ids, context=None):
""" Subscribe all users having the 'Connector Manager' group """
group_ref = self.pool.get('ir.model.data').get_object_reference(
cr, uid, 'connector', 'group_connector_manager')
if not group_ref:
return
group_id = group_ref[1]
user_ids = self.pool.get('res.users').search(
cr, uid, [('groups_id', '=', group_id)], context=context)
self.message_subscribe_users(cr, uid, ids,
user_ids=user_ids,
context=context)
def create(self, cr, uid, vals, context=None):
obj_id = super(connector_checkpoint, self).create(
cr, uid, vals, context=context)
self._subscribe_users(cr, uid, [obj_id], context=context)
cp = self.browse(cr, uid, obj_id, context=context)
msg = _('A %s needs a review.') % cp.model_id.name
self.message_post(cr, uid, obj_id, body=msg,
subtype='mail.mt_comment',
context=context)
return obj_id
def create_from_name(self, cr, uid, model_name, record_id,
backend_model_name, backend_id, context=None):
model_obj = self.pool.get('ir.model')
model_ids = model_obj.search(cr, uid,
[('model', '=', model_name)],
context=context)
assert model_ids, "The model %s does not exist" % model_name
backend = backend_model_name + ',' + str(backend_id)
return self.create(cr, uid,
{'model_id': model_ids[0],
'record_id': record_id,
'backend_id': backend},
context=context)
def _needaction_domain_get(self, cr, uid, context=None):
""" Returns the domain to filter records that require an action
:return: domain or False is no action
"""
return [('state', '=', 'need_review')]
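# Convenience entry point for connectors: given a connector session, register a
# checkpoint so the freshly imported record shows up in the review list.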
def add_checkpoint(session, model_name, record_id,
backend_model_name, backend_id):
cr, uid, context = session.cr, session.uid, session.context
checkpoint_obj = session.pool['connector.checkpoint']
return checkpoint_obj.create_from_name(cr, uid, model_name, record_id,
backend_model_name, backend_id,
context=context)
class connector_checkpoint_review(orm.TransientModel):
_name = 'connector.checkpoint.review'
_description = 'Checkpoints Review'
def _get_checkpoint_ids(self, cr, uid, context=None):
if context is None:
context = {}
res = False
if (context.get('active_model') == 'connector.checkpoint' and
context.get('active_ids')):
res = context['active_ids']
return res
_columns = {
'checkpoint_ids': fields.many2many('connector.checkpoint',
'connector_checkpoint_review_rel',
'review_id', 'checkpoint_id',
string='Checkpoints',
domain="[('state', '=', 'need_review')]"),
}
_defaults = {
'checkpoint_ids': _get_checkpoint_ids,
}
def review(self, cr, uid, ids, context=None):
if isinstance(ids, (tuple, list)):
assert len(ids) == 1, "One ID expected"
ids = ids[0]
form = self.browse(cr, uid, ids, context=context)
checkpoint_ids = [checkpoint.id for checkpoint in form.checkpoint_ids]
checkpoint_obj = self.pool['connector.checkpoint']
checkpoint_obj.reviewed(cr, uid, checkpoint_ids, context=context)
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 | 8,207,099,585,244,588,000 | 40.130435 | 85 | 0.518182 | false |
gg7/sentry | src/sentry/interfaces/exception.py | 12 | 11093 | """
sentry.interfaces.exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('Exception',)
from django.conf import settings
from sentry.interfaces.base import Interface
from sentry.interfaces.stacktrace import Stacktrace, is_newest_frame_first
from sentry.utils.safe import trim
from sentry.web.helpers import render_to_string
class SingleException(Interface):
"""
A standard exception with a ``type`` and value argument, and an optional
``module`` argument describing the exception class type and
module namespace. Either ``type`` or ``value`` must be present.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``sentry.interfaces.Stacktrace``.
>>> {
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__"
>>> "stacktrace": {
>>> # see sentry.interfaces.Stacktrace
>>> }
>>> }
"""
score = 900
display_score = 1200
@classmethod
def to_python(cls, data):
assert data.get('type') or data.get('value')
if data.get('stacktrace'):
stacktrace = Stacktrace.to_python(data['stacktrace'])
else:
stacktrace = None
kwargs = {
'type': trim(data.get('type'), 128),
'value': trim(data.get('value'), 4096),
'module': trim(data.get('module'), 128),
'stacktrace': stacktrace,
}
return cls(**kwargs)
def to_json(self):
if self.stacktrace:
stacktrace = self.stacktrace.to_json()
else:
stacktrace = None
return {
'type': self.type,
'value': self.value,
'module': self.module,
'stacktrace': stacktrace,
}
def get_api_context(self):
if self.stacktrace:
stacktrace = self.stacktrace.get_api_context()
else:
stacktrace = None
return {
'type': self.type,
'value': self.value,
'module': self.module,
'stacktrace': stacktrace,
}
def get_alias(self):
return 'exception'
def get_path(self):
return 'sentry.interfaces.Exception'
def get_hash(self):
output = None
if self.stacktrace:
output = self.stacktrace.get_hash()
if output and self.type:
output.append(self.type)
if not output:
output = filter(bool, [self.type, self.value])
return output
def get_context(self, event, is_public=False, **kwargs):
last_frame = None
interface = event.interfaces.get('sentry.interfaces.Stacktrace')
if interface is not None and interface.frames:
last_frame = interface.frames[-1]
e_module = self.module
e_type = self.type
e_value = self.value
if self.module:
fullname = '%s.%s' % (e_module, e_type)
else:
fullname = e_type
if e_value and not e_type:
e_type = e_value
e_value = None
return {
'is_public': is_public,
'event': event,
'exception_type': e_type,
'exception_value': e_value,
'exception_module': e_module,
'fullname': fullname,
'last_frame': last_frame,
}
class Exception(Interface):
"""
An exception consists of a list of values. In most cases, this list
contains a single exception, with an optional stacktrace interface.
Each exception has a mandatory ``value`` argument and optional ``type`` and
``module`` arguments describing the exception class type and module
namespace.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``sentry.interfaces.Stacktrace``.
>>> {
>>> "values": [{
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__"
>>> "stacktrace": {
>>> # see sentry.interfaces.Stacktrace
>>> }
>>> }]
>>> }
Values should be sent oldest to newest, this includes both the stacktrace
and the exception itself.
.. note:: This interface can be passed as the 'exception' key in addition
to the full interface path.
"""
score = 2000
@classmethod
def to_python(cls, data):
if 'values' not in data:
data = {'values': [data]}
assert data['values']
trim_exceptions(data)
kwargs = {
'values': [
SingleException.to_python(v)
for v in data['values']
],
}
if data.get('exc_omitted'):
assert len(data['exc_omitted']) == 2
kwargs['exc_omitted'] = data['exc_omitted']
else:
kwargs['exc_omitted'] = None
return cls(**kwargs)
def to_json(self):
return {
'values': [v.to_json() for v in self.values],
'exc_omitted': self.exc_omitted,
}
def get_api_context(self):
return {
'values': [v.get_api_context() for v in self.values],
'excOmitted': self.exc_omitted,
}
def __getitem__(self, key):
return self.values[key]
def __iter__(self):
return iter(self.values)
def __len__(self):
return len(self.values)
def get_alias(self):
return 'exception'
def get_path(self):
return 'sentry.interfaces.Exception'
def compute_hashes(self, platform):
system_hash = self.get_hash(system_frames=True)
if not system_hash:
return []
app_hash = self.get_hash(system_frames=False)
if system_hash == app_hash or not app_hash:
return [system_hash]
return [system_hash, app_hash]
def get_hash(self, system_frames=True):
# optimize around the fact that some exceptions might have stacktraces
# while others may not and we ALWAYS want stacktraces over values
output = []
for value in self.values:
if not value.stacktrace:
continue
stack_hash = value.stacktrace.get_hash(
system_frames=system_frames,
)
if stack_hash:
output.extend(stack_hash)
output.append(value.type)
if not output:
for value in self.values:
output.extend(value.get_hash())
return output
def get_context(self, event, is_public=False, **kwargs):
newest_first = is_newest_frame_first(event)
system_frames = 0
app_frames = 0
unknown_frames = 0
for exc in self.values:
if not exc.stacktrace:
continue
for frame in exc.stacktrace.frames:
if frame.in_app is False:
system_frames += 1
elif frame.in_app is True:
app_frames += 1
else:
unknown_frames += 1
# TODO(dcramer): this should happen in normalize
# We need to ensure that implicit values for in_app are handled
# appropriately
if unknown_frames and (app_frames or system_frames):
for exc in self.values:
if not exc.stacktrace:
continue
for frame in exc.stacktrace.frames:
if frame.in_app is None:
frame.in_app = bool(system_frames)
if frame.in_app:
app_frames += 1
else:
system_frames += 1
# if there is a mix of frame styles then we indicate that system frames
# are present and should be represented as a split
has_system_frames = app_frames and system_frames
context_kwargs = {
'event': event,
'is_public': is_public,
'newest_first': newest_first,
'has_system_frames': has_system_frames,
}
exceptions = []
last = len(self.values) - 1
for num, e in enumerate(self.values):
context = e.get_context(**context_kwargs)
if e.stacktrace:
context['stacktrace'] = e.stacktrace.get_context(
with_stacktrace=False, **context_kwargs)
else:
context['stacktrace'] = {}
context['stack_id'] = 'exception_%d' % (num,)
context['is_root'] = num == last
exceptions.append(context)
if newest_first:
exceptions.reverse()
if self.exc_omitted:
first_exc_omitted, last_exc_omitted = self.exc_omitted
else:
first_exc_omitted, last_exc_omitted = None, None
return {
'newest_first': newest_first,
'system_frames': system_frames if has_system_frames else 0,
'exceptions': exceptions,
'stacktrace': self.get_stacktrace(event, newest_first=newest_first),
'first_exc_omitted': first_exc_omitted,
'last_exc_omitted': last_exc_omitted,
}
def to_html(self, event, **kwargs):
if not self.values:
return ''
if len(self.values) == 1 and not self.values[0].stacktrace:
exception = self.values[0]
context = exception.get_context(event=event, **kwargs)
return render_to_string('sentry/partial/interfaces/exception.html', context)
context = self.get_context(event=event, **kwargs)
return render_to_string('sentry/partial/interfaces/chained_exception.html', context)
def to_string(self, event, is_public=False, **kwargs):
if not self.values:
return ''
output = []
for exc in self.values:
output.append(u'{0}: {1}\n'.format(exc.type, exc.value))
if exc.stacktrace:
output.append(exc.stacktrace.get_stacktrace(
event, system_frames=False, max_frames=5,
header=False) + '\n\n')
return (''.join(output)).strip()
def get_stacktrace(self, *args, **kwargs):
exc = self.values[0]
if exc.stacktrace:
return exc.stacktrace.get_stacktrace(*args, **kwargs)
return ''
def trim_exceptions(data, max_values=settings.SENTRY_MAX_EXCEPTIONS):
    # TODO: this doesn't account for cases where the client has already omitted
# exceptions
values = data['values']
exc_len = len(values)
if exc_len <= max_values:
return
half_max = max_values / 2
data['exc_omitted'] = (half_max, exc_len - half_max)
for n in xrange(half_max, exc_len - half_max):
del values[half_max]
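# Illustrative sketch of the trimming behaviour above (added for clarity; it is
# never called by the module). The chain of 25 fake "exceptions" and the
# explicit max_values=10 are hypothetical values chosen instead of relying on
# the configured SENTRY_MAX_EXCEPTIONS.
def _trim_exceptions_example():
    data = {'values': list(range(25))}
    trim_exceptions(data, max_values=10)
    # The first and last five entries survive, the fifteen in the middle are
    # dropped, and exc_omitted records the omitted index range.
    assert data['values'] == [0, 1, 2, 3, 4, 20, 21, 22, 23, 24]
    assert data['exc_omitted'] == (5, 20)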
| bsd-3-clause | 4,577,838,370,213,464,000 | 29.226158 | 92 | 0.547913 | false |
agileblaze/OpenStackTwoFactorAuthentication | openstack_dashboard/dashboards/project/data_processing/data_sources/views.py | 18 | 2196 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from horizon import workflows
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing.data_sources. \
tables as ds_tables
import openstack_dashboard.dashboards.project.data_processing.data_sources. \
tabs as _tabs
import openstack_dashboard.dashboards.project.data_processing.data_sources. \
workflows.create as create_flow
LOG = logging.getLogger(__name__)
class DataSourcesView(tables.DataTableView):
table_class = ds_tables.DataSourcesTable
template_name = 'project/data_processing.data_sources/data_sources.html'
page_title = _("Data Sources")
def get_data(self):
try:
data_sources = saharaclient.data_source_list(self.request)
except Exception:
data_sources = []
exceptions.handle(self.request,
_("Unable to fetch data sources."))
return data_sources
class CreateDataSourceView(workflows.WorkflowView):
workflow_class = create_flow.CreateDataSource
success_url = \
"horizon:project:data_processing.data-sources:create-data-source"
classes = ("ajax-modal",)
template_name = "project/data_processing.data_sources/create.html"
page_title = _("Create Data Source")
class DataSourceDetailsView(tabs.TabView):
tab_group_class = _tabs.DataSourceDetailsTabs
template_name = 'project/data_processing.data_sources/details.html'
page_title = _("Data Source Details")
| apache-2.0 | -3,622,193,572,903,820,300 | 34.419355 | 77 | 0.732696 | false |
PHOTOX/fuase | ase/ase/data/tmgmjbp04n.py | 11 | 19755 | # Generated: 2012-01-11
# doi:10.1063/1.1788656 neutral dimers only
from numpy import array
data = {
'Sc': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.0],
'name': 'Sc',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Sc'},
'Ti': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.0],
'name': 'Ti',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Ti'},
'V': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [3.0],
'name': 'V',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'V'},
'Cr': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [6.0],
'name': 'Cr',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Cr'},
'Mn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [5.0],
'name': 'Mn',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Mn'},
'Fe': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [4.0],
'name': 'Fe',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Fe'},
'Co': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [3.0],
'name': 'Co',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Co'},
'Ni': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.0],
'name': 'Ni',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Ni'},
'Cu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.0],
'name': 'Cu',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Cu'},
'Zn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.0],
'name': 'Zn',
'positions': array([[ 0., 0., 0.]]),
'symbols': 'Zn'},
#
'Sc2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.0, 2.0],
'name': 'Sc2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.63],
]),
'symbols': 'ScSc'},
'ScTi': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.5, 3.5],
'name': 'ScTi',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.29],
]),
'symbols': 'ScTi'},
'ScV': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.0, 5.0],
'name': 'ScV',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.51],
]),
'symbols': 'ScV'},
'ScCr': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.1, 2.9],
'name': 'ScCr',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.99],
]),
'symbols': 'ScCr'},
'ScMn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.1, 1.9],
'name': 'ScMn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.94],
]),
'symbols': 'ScMn'},
'ScFe': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.1, 0.9],
'name': 'ScFe',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.85],
]),
'symbols': 'ScFe'},
'ScCo': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.0, 0.0],
'name': 'ScCo',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.80],
]),
'symbols': 'ScCo'},
'ScNi': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.5, 0.5],
'name': 'ScNi',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.05],
]),
'symbols': 'ScNi'},
'ScCu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.9, 0.1],
'name': 'ScCu',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.54],
]),
'symbols': 'ScCu'},
'ScZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.9, 0.1],
'name': 'ScZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.71],
]),
'symbols': 'ScZn'},
#
'Ti2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.0, 1.0],
'name': 'Ti2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.89],
]),
'symbols': 'TiTi'},
'TiV': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.0, 2.0],
'name': 'TiV',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.78],
]),
'symbols': 'TiV'},
'TiCr': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.1, 1.9],
'name': 'TiCr',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.79],
]),
'symbols': 'TiCr'},
'TiMn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.1, 0.9],
'name': 'TiMn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.76],
]),
'symbols': 'TiMn'},
'TiFe': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.0, 0.0],
'name': 'TiFe',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.67],
]),
'symbols': 'TiFe'},
'TiCo': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.9, 0.1],
'name': 'TiCo',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.88],
]),
'symbols': 'TiCo'},
'TiNi': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.9, 0.1],
'name': 'TiNi',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.06],
]),
'symbols': 'TiNi'},
'TiCu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.9, 0.1],
'name': 'TiCu',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.45],
]),
'symbols': 'TiCu'},
'TiZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [3.9, 0.1],
'name': 'TiZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.74],
]),
'symbols': 'TiZn'},
#
'V2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.0, 1.0],
'name': 'V2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.74],
]),
'symbols': 'VV'},
'VCr': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.1, 0.9],
'name': 'VCr',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.72],
]),
'symbols': 'VCr'},
'VMn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.0, 0.0],
'name': 'VMn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.69],
]),
'symbols': 'VMn'},
'VFe': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.9, 0.1],
'name': 'VFe',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.74],
]),
'symbols': 'VFe'},
'VCo': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.9, 0.1],
'name': 'VCo',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.81],
]),
'symbols': 'VCo'},
'VNi': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.9, 0.1],
'name': 'VNi',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.11],
]),
'symbols': 'VNi'},
'VCu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [3.9, 0.1],
'name': 'VCu',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.42],
]),
'symbols': 'VCu'},
'VZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [4.9, 0.1],
'name': 'VZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.71],
]),
'symbols': 'VZn'},
#
'Cr2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.0, 0.0],
'name': 'Cr2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.75],
]),
'symbols': 'CrCr'},
'CrMn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.1, 0.9],
'name': 'CrMn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.46],
]),
'symbols': 'CrMn'},
'CrFe': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.1, 1.9],
'name': 'CrFe',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.37],
]),
'symbols': 'CrFe'},
'CrCo': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.9, 0.1],
'name': 'CrCo',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.34],
]),
'symbols': 'CrCo'},
'CrNi': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [3.9, 0.1],
'name': 'CrNi',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.25],
]),
'symbols': 'CrNi'},
'CrCu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [4.9, 0.1],
'name': 'CrCu',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.42],
]),
'symbols': 'CrCu'},
'CrZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [5.9, 0.1],
'name': 'CrZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.75],
]),
'symbols': 'CrZn'},
#
'Mn2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [5.0, 5.0],
'name': 'Mn2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.62],
]),
'symbols': 'MnMn'},
'MnFe': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [5.0, 4.0],
'name': 'MnFe',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.42],
]),
'symbols': 'MnFe'},
'MnCo': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [4.0, 2.0],
'name': 'MnCo',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.09],
]),
'symbols': 'MnCo'},
'MnNi': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [4.0, 1.0],
'name': 'MnNi',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.09],
]),
'symbols': 'MnNi'},
'MnCu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [5.0, 1.0],
'name': 'MnCu',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.37],
]),
'symbols': 'MnCu'},
'MnZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [4.9, 0.1],
'name': 'MnZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.99],
]),
'symbols': 'MnZn'},
#
'Fe2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [3.0, 3.0],
'name': 'Fe2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.01],
]),
'symbols': 'FeFe'},
'FeCo': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [4.0, 1.0],
'name': 'FeCo',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.96],
]),
'symbols': 'FeCo'},
'FeNi': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [3.0, 1.0],
'name': 'FeNi',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.07],
]),
'symbols': 'FeNi'},
'FeCu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.9, 0.1],
'name': 'FeCu',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.31],
]),
'symbols': 'FeCu'},
'FeZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [3.9, 0.1],
'name': 'FeZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.53],
]),
'symbols': 'FeZn'},
#
'Co2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.0, 2.0],
'name': 'Co2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 1.98],
]),
'symbols': 'CoCo'},
'CoNi': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.0, 1.0],
'name': 'CoNi',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.10],
]),
'symbols': 'CoNi'},
'CoCu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.9, 0.1],
'name': 'CoCu',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.26],
]),
'symbols': 'CoCu'},
'CoZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [2.9, 0.1],
'name': 'CoZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.44],
]),
'symbols': 'CoZn'},
#
'Ni2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.0, 1.0],
'name': 'Ni2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.11],
]),
'symbols': 'NiNi'},
'NiCu': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.9, 0.1],
'name': 'NiCu',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.25],
]),
'symbols': 'NiCu'},
'NiZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [1.9, 0.1],
'name': 'NiZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.39],
]),
'symbols': 'NiZn'},
#
'Cu2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.0, 0.0],
'name': 'Cu2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.25],
]),
'symbols': 'CuCu'},
'CuZn': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.5, 0.5],
'name': 'CuZn',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 2.40],
]),
'symbols': 'CuZn'},
#
'Zn2': {'charges': None,
'database': 'TMGMJBP04N',
'magmoms': [0.0, 0.0],
'name': 'Zn2',
'positions': array([
[ 0., 0., 0.],
[ 0., 0., 3.27],
]),
'symbols': 'ZnZn'},
}
| gpl-2.0 | 8,439,975,580,856,800,000 | 33.657895 | 49 | 0.274158 | false |
jupitercoin/jupitercoin-v1.1--diff-update- | qa/rpc-tests/test_framework/scrypt_auxpow.py | 2 | 3043 | #!/usr/bin/env python
# Copyright (c) 2014 Daniel Kraft
# Copyright (c) 2015 The Jupitercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# General code for scrypt auxpow testing. This includes routines to
# solve an auxpow header and to generate auxpow blocks with scrypt.
# extends and modifies auxpow module by Daniel Kraft.
# This module requires a built and installed version of the ltc_scrypt
# package, which can be downloaded from:
# https://pypi.python.org/packages/source/l/ltc_scrypt/ltc_scrypt-1.0.tar.gz
import binascii
import hashlib
import auxpow
import ltc_scrypt
def computeAuxpowWithChainId (block, target, chainid, ok):
"""
Build an auxpow object (serialised as hex string) that solves the
block, for a given chain id.
"""
# Start by building the merge-mining coinbase. The merkle tree
# consists only of the block hash as root.
coinbase = "fabe" + binascii.hexlify ("m" * 2)
coinbase += block
coinbase += "01000000" + ("00" * 4)
# Construct "vector" of transaction inputs.
vin = "01"
vin += ("00" * 32) + ("ff" * 4)
vin += ("%02x" % (len (coinbase) / 2)) + coinbase
vin += ("ff" * 4)
# Build up the full coinbase transaction. It consists only
# of the input and has no outputs.
tx = "01000000" + vin + "00" + ("00" * 4)
txHash = auxpow.doubleHashHex (tx)
# Construct the parent block header. It need not be valid, just good
# enough for auxpow purposes.
header = "0100" + chainid + "00"
header += "00" * 32
header += auxpow.reverseHex (txHash)
header += "00" * 4
header += "00" * 4
header += "00" * 4
# Mine the block.
(header, blockhash) = mineScryptBlock (header, target, ok)
# Build the MerkleTx part of the auxpow.
output = tx
output += blockhash
output += "00"
output += "00" * 4
# Extend to full auxpow.
output += "00"
output += "00" * 4
output += header
return output
# for now, just offer hashes to rpc until it matches the work we need
def mineScryptAux (node, chainid, ok):
"""
Mine an auxpow block on the given RPC connection.
"""
auxblock = node.getauxblock ()
target = auxpow.reverseHex (auxblock['target'])
apow = computeAuxpowWithChainId (auxblock['hash'], target, chainid, ok)
res = node.getauxblock (auxblock['hash'], apow)
def mineScryptBlock (header, target, ok):
"""
Given a block header, update the nonce until it is ok (or not)
for the given target.
"""
data = bytearray (binascii.unhexlify (header))
while True:
assert data[79] < 255
data[79] += 1
hexData = binascii.hexlify (data)
scrypt = getScryptPoW(hexData)
if (ok and scrypt < target) or ((not ok) and scrypt > target):
break
blockhash = auxpow.doubleHashHex (hexData)
return (hexData, blockhash)
def getScryptPoW(hexData):
"""
Actual scrypt pow calculation
"""
data = binascii.unhexlify(hexData)
return auxpow.reverseHex(binascii.hexlify(ltc_scrypt.getPoWHash(data)))
| mit | -7,610,289,672,699,377,000 | 27.980952 | 76 | 0.681564 | false |
zuphilip/ocropy | ocrolib/extras/cairoextras.py | 9 | 2346 | import ctypes
import cairo
# from: http://cairographics.org/freetypepython/
class PycairoContext(ctypes.Structure):
_fields_ = [("PyObject_HEAD", ctypes.c_byte * object.__basicsize__),
("ctx", ctypes.c_void_p),
("base", ctypes.c_void_p)]
_initialized = False
def create_cairo_font_face_for_file(filename, faceindex=0, loadoptions=0):
global _initialized
global _freetype_so
global _cairo_so
global _ft_lib
global _surface
CAIRO_STATUS_SUCCESS = 0
FT_Err_Ok = 0
if not _initialized:
# find shared objects
_freetype_so = ctypes.CDLL("libfreetype.so.6")
_cairo_so = ctypes.CDLL("libcairo.so.2")
# initialize freetype
_ft_lib = ctypes.c_void_p()
if FT_Err_Ok != _freetype_so.FT_Init_FreeType(ctypes.byref(_ft_lib)):
raise OSError("Error initialising FreeType library.")
_surface = cairo.ImageSurface(cairo.FORMAT_A8, 0, 0)
_initialized = True
# create freetype face
ft_face = ctypes.c_void_p()
cairo_ctx = cairo.Context(_surface)
cairo_t = PycairoContext.from_address(id(cairo_ctx)).ctx
_cairo_so.cairo_ft_font_face_create_for_ft_face.restype = ctypes.c_void_p
if FT_Err_Ok != _freetype_so.FT_New_Face(_ft_lib, filename, faceindex, ctypes.byref(ft_face)):
raise Exception("Error creating FreeType font face for " + filename)
# create cairo font face for freetype face
cr_face = _cairo_so.cairo_ft_font_face_create_for_ft_face(ft_face, loadoptions)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_font_face_status(cr_face):
raise Exception("Error creating cairo font face for " + filename)
_cairo_so.cairo_set_font_face(cairo_t, cr_face)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_status(cairo_t):
raise Exception("Error creating cairo font face for " + filename)
face = cairo_ctx.get_font_face()
return face
if __name__ == '__main__':
face = create_cairo_font_face_for_file ("/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 0)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 128, 128)
ctx = cairo.Context(surface)
ctx.set_font_face(face)
ctx.set_font_size(30)
ctx.move_to(0, 44)
ctx.show_text("Hello,")
ctx.move_to(30, 74)
ctx.show_text("world!")
del ctx
surface.write_to_png("hello.png")
| apache-2.0 | -3,221,845,798,708,706,300 | 38.1 | 107 | 0.657289 | false |
bhupennewalkar1337/erpnext | erpnext/config/projects.py | 3 | 1509 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Projects"),
"icon": "icon-star",
"items": [
{
"type": "doctype",
"name": "Project",
"description": _("Project master."),
},
{
"type": "doctype",
"name": "Task",
"description": _("Project activity / task."),
},
{
"type": "report",
"route": "List/Task/Gantt",
"doctype": "Task",
"name": "Gantt Chart",
"description": _("Gantt chart of all tasks.")
},
]
},
{
"label": _("Time Tracking"),
"items": [
{
"type": "doctype",
"name": "Timesheet",
"description": _("Timesheet for tasks."),
},
{
"type": "doctype",
"name": "Activity Type",
"description": _("Types of activities for Time Logs"),
},
{
"type": "doctype",
"name": "Activity Cost",
"description": _("Cost of various activities"),
},
]
},
{
"label": _("Reports"),
"icon": "icon-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Daily Timesheet Summary",
"doctype": "Timesheet"
},
{
"type": "report",
"is_query_report": True,
"name": "Project wise Stock Tracking",
"doctype": "Project"
},
]
},
{
"label": _("Help"),
"icon": "icon-facetime-video",
"items": [
{
"type": "help",
"label": _("Managing Projects"),
"youtube_id": "egxIGwtoKI4"
},
]
},
]
| gpl-3.0 | -3,666,984,021,390,086,000 | 18.346154 | 59 | 0.479125 | false |
flaviovdf/tribeflow | tribeflow/sorting/tests/test_sort.py | 2 | 1369 | #-*- coding: utf8
from __future__ import division, print_function
from tribeflow.sorting.introsort import _sort
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
import numpy as np
def test__sort():
data = np.array([], dtype='d', order='C')
_sort(data)
assert_array_equal([], data)
data = np.array([1], dtype='d', order='C')
_sort(data)
assert_array_equal([1], data)
data = np.array([2, 1], dtype='d', order='C')
_sort(data)
assert_array_equal([1, 2], data)
data = np.array([2, 2, 0, 1], dtype='d', order='C')
_sort(data)
assert_array_equal([0, 1, 2, 2], data)
data = np.array([0, 2, 2, 1], dtype='d', order='C')
_sort(data)
assert_array_equal([0, 1, 2, 2], data)
data = np.array([2, 0, 1, 2], dtype='d', order='C')
_sort(data)
assert_array_equal([0, 1, 2, 2], data)
correct = np.arange(100)
data = np.asarray(correct, dtype='d', order='C')
_sort(data)
assert_array_equal(correct, data)
correct = np.arange(100)
data = np.asarray(correct.copy()[::-1], dtype='d', order='C')
_sort(data)
assert_array_equal(correct, data)
correct = np.arange(100)
data = np.asarray(correct.copy(), dtype='d', order='C')
np.random.shuffle(data)
_sort(data)
assert_array_equal(correct, data)
| bsd-3-clause | 2,400,989,831,708,676,600 | 26.38 | 65 | 0.585829 | false |
adeshmukh/learntosolveit | languages/python/algorithm_spelling.py | 7 | 1031 | import re, collections
def words(text): return re.findall('[a-z]+', text.lower())
def train(features):
model = collections.defaultdict(lambda: 1)
for f in features:
model[f] += 1
return model
NWORDS = train(words(file('big.txt').read()))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]
inserts = [a + c + b for a, b in splits for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(word):
return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)
def known(words): return set(w for w in words if w in NWORDS)
def correct(word):
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
return max(candidates, key=NWORDS.get)
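# Usage sketch (added for illustration, not part of the original script): the
# corrections depend entirely on the word frequencies trained from big.txt
# above, so the outputs noted below are only what Norvig's corpus typically
# yields. Candidates are preferred in order: the word itself if known, known
# words one edit away, known words two edits away, then the input unchanged,
# with ties broken by corpus frequency via NWORDS.
if __name__ == '__main__':
    print correct('speling')    # e.g. 'spelling' (one edit away)
    print correct('korrecter')  # e.g. 'corrector' (two edits away)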
| bsd-3-clause | -3,281,640,367,960,298,000 | 33.366667 | 85 | 0.636275 | false |
arruda/mediana_aa | tests/test_select_mediana.py | 1 | 1189 | # -*- coding: utf-8 -*-
import unittest
from mediana import select_mediana
class TestSelectMediana(unittest.TestCase):
def test_media_em_exemplo_5_elementos(self):
s = [1, 2, 4, 10, 13]
        k = (len(s) + 1) / 2 #: 3
mediana = select_mediana(s, k)
self.assertEquals(mediana, 4)
def test_media_em_exemplo_5_elementos_media(self):
s = [1, 3, 5, 7, 9]
k = (len(s) + 1) / 2 #: 3
mediana = select_mediana(s, k)
self.assertEquals(mediana, 5)
def test_media_em_exemplo_6_elementos(self):
        # for an even-length list it picks the element just before what would be the middle
s = [1, 2, 8, 9, 4, 10]
        k = (len(s) + 1) / 2 #: 3
mediana = select_mediana(s, k)
self.assertEquals(mediana, 4)
def test_media_em_exemplo_7_elementos(self):
s = [15, 1, 3, 5, 7, 9, 10]
        k = (len(s) + 1) / 2 #: 4
mediana = select_mediana(s, k)
self.assertEquals(mediana, 7)
def test_media_em_exemplo_livro_11_elementos(self):
s = [2, 36, 21, 5, 8, 13, 11, 20, 5, 4, 1]
k = (len(s) + 1) / 2 #: 6
mediana = select_mediana(s, k)
self.assertEquals(mediana, 8)
| mit | 1,922,591,476,289,863,200 | 25.422222 | 59 | 0.534903 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/GRDO/Tools/i386/analyzeMFT.py | 1 | 32632 | #!/usr/bin/env python
noGUI = True
unicodeHack = True
import struct, sys, ctypes, re, time, unicodedata, csv, binascii, os, platform
from datetime import date, datetime, timedelta
from optparse import OptionParser
# Globals
SIAttributeSizeXP = 72
SIAttributeSizeNT = 48
if noGUI == False:
if platform.system() == "Windows":
import win32gui
from Tkinter import *
import Tkinter as tk
import tkCommonDialog
import tkFileDialog
class WindowsTime:
"Convert the Windows time in 100 nanosecond intervals since Jan 1, 1601 to time in seconds since Jan 1, 1970"
def __init__(self, low, high):
self.low = long(low)
self.high = long(high)
if (low == 0) and (high == 0):
self.dt = 0
self.dtstr = "Not defined"
self.unixtime = 0
return
# Windows NT time is specified as the number of 100 nanosecond intervals since January 1, 1601.
# UNIX time is specified as the number of seconds since January 1, 1970.
# There are 134,774 days (or 11,644,473,600 seconds) between these dates.
self.unixtime = self.GetUnixTime()
try:
self.dt = datetime.fromtimestamp(self.unixtime)
# Pass isoformat a delimiter if you don't like the default "T".
self.dtstr = self.dt.isoformat(' ')
except:
self.dt = 0
self.dtstr = "Invalid timestamp"
self.unixtime = 0
def GetUnixTime(self):
t=float(self.high)*2**32 + self.low
return (t*1e-7 - 11644473600)
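# Illustrative helper (a sketch added for clarity; nothing in the script calls
# it). The FILETIME value for the Unix epoch, 1970-01-01, is 11,644,473,600
# seconds expressed in 100ns ticks, i.e. 116444736000000000, which splits into
# the 32-bit low/high halves used below.
def _windows_time_epoch_example():
    wt = WindowsTime(3577643008, 27111902)  # low, high halves of 116444736000000000
    return int(round(wt.unixtime))          # 0, i.e. the Unix epoch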
def decodeMFTmagic(s):
if s == 0x454c4946:
return "Good"
elif s == 0x44414142:
return 'Bad'
elif s == 0x00000000:
return 'Zero'
else:
return 'Unknown'
# decodeMFTisactive and decodeMFTrecordtype both look at the flags field in the MFT header.
# The first bit indicates if the record is active or inactive. The second bit indicates if it
# is a file or a folder.
#
# I had this coded incorrectly initially. Spencer Lynch identified and fixed the code. Many thanks!
def decodeMFTisactive(s):
if s & 0x0001:
return 'Active'
else:
return 'Inactive'
def decodeMFTrecordtype(s):
tmpBuffer = s
if s & 0x0002:
tmpBuffer = 'Folder'
else:
tmpBuffer = 'File'
if s & 0x0004:
tmpBuffer = "%s %s" % (tmpBuffer, '+ Unknown1')
if s & 0x0008:
tmpBuffer = "%s %s" % (tmpBuffer, '+ Unknown2')
return tmpBuffer
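# Small sanity-check sketch for the two flag helpers above (added for
# illustration and never called; the flag values are typical examples, not
# taken from any real MFT record).
def _flag_decoding_example():
    assert decodeMFTisactive(0x0001) == 'Active'    # bit 0 set: record in use
    assert decodeMFTisactive(0x0000) == 'Inactive'  # bit 0 clear: deleted entry
    assert decodeMFTrecordtype(0x0003) == 'Folder'  # bit 1 set: directory
    assert decodeMFTrecordtype(0x0001) == 'File'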
def addNote(s):
if 'notes' in MFTR:
# MFTR['notes'] = "%s | %s |" % (MFTR['notes'], s)
MFTR['notes'] = "%s | %s |" % (MFTR['notes'], s)
else:
MFTR['notes'] = "%s" % s
def decodeMFTHeader(s):
d = {}
d['magic'] = struct.unpack("<I", s[:4])[0]
d['upd_off'] = struct.unpack("<H",s[4:6])[0]
d['upd_cnt'] = struct.unpack("<H",s[6:8])[0]
d['lsn'] = struct.unpack("<d",s[8:16])[0]
d['seq'] = struct.unpack("<H",s[16:18])[0]
d['link'] = struct.unpack("<H",s[18:20])[0]
d['attr_off'] = struct.unpack("<H",s[20:22])[0]
d['flags'] = struct.unpack("<H", s[22:24])[0]
d['size'] = struct.unpack("<I",s[24:28])[0]
d['alloc_sizef'] = struct.unpack("<I",s[28:32])[0]
d['base_ref'] = struct.unpack("<Lxx",s[32:38])[0]
d['base_seq'] = struct.unpack("<H",s[38:40])[0]
d['next_attrid'] = struct.unpack("<H",s[40:42])[0]
d['f1'] = s[42:44]
d['entry'] = s[44:48]
d['fncnt'] = 0 # Counter for number of FN attributes
d['si'] = -1
d['file_size'] = 0
return d
def decodeATRHeader(s):
d = {}
d['type'] = struct.unpack("<L",s[:4])[0]
if d['type'] == 0xffffffff:
return d
d['len'] = struct.unpack("<L",s[4:8])[0]
d['res'] = struct.unpack("B",s[8])[0]
d['nlen'] = struct.unpack("B",s[9])[0] # This name is the name of the ADS, I think.
d['name_off'] = struct.unpack("<H",s[10:12])[0]
d['flags'] = struct.unpack("<H",s[12:14])[0]
d['id'] = struct.unpack("<H",s[14:16])[0]
if d['res'] == 0:
d['ssize'] = struct.unpack("<L",s[16:20])[0]
d['soff'] = struct.unpack("<H",s[20:22])[0]
d['idxflag'] = struct.unpack("<H",s[22:24])[0]
else:
d['start_vcn'] = struct.unpack("<d",s[16:24])[0]
d['last_vcn'] = struct.unpack("<d",s[24:32])[0]
d['run_off'] = struct.unpack("<H",s[32:34])[0]
d['compusize'] = struct.unpack("<H",s[34:36])[0]
d['f1'] = struct.unpack("<I",s[36:40])[0]
d['alen'] = struct.unpack("<d",s[40:48])[0]
d['ssize'] = struct.unpack("<d",s[48:56])[0]
d['initsize'] = struct.unpack("<d",s[56:64])[0]
return d
def decodeSIAttribute(s):
d = {}
d['crtime'] = WindowsTime(struct.unpack("<L",s[:4])[0],struct.unpack("<L",s[4:8])[0])
d['mtime'] = WindowsTime(struct.unpack("<L",s[8:12])[0],struct.unpack("<L",s[12:16])[0])
d['ctime'] = WindowsTime(struct.unpack("<L",s[16:20])[0],struct.unpack("<L",s[20:24])[0])
d['atime'] = WindowsTime(struct.unpack("<L",s[24:28])[0],struct.unpack("<L",s[28:32])[0])
d['dos'] = struct.unpack("<I",s[32:36])[0] # 4
d['maxver'] = struct.unpack("<I",s[36:40])[0] # 4
d['ver'] = struct.unpack("<I",s[40:44])[0] # 4
d['class_id'] = struct.unpack("<I",s[44:48])[0] # 4
d['own_id'] = struct.unpack("<I",s[48:52])[0] # 4
d['sec_id'] = struct.unpack("<I",s[52:56])[0] # 4
d['quota'] = struct.unpack("<d",s[56:64])[0] # 8
d['usn'] = struct.unpack("<d",s[64:72])[0] # 8 - end of date to here is 40
return d
def decodeFNAttribute(s):
hexFlag = False
# File name attributes can have null dates.
d = {}
d['par_ref'] = struct.unpack("<Lxx", s[:6])[0] # Parent reference nummber
d['par_seq'] = struct.unpack("<H",s[6:8])[0] # Parent sequence number
d['crtime'] = WindowsTime(struct.unpack("<L",s[8:12])[0],struct.unpack("<L",s[12:16])[0])
d['mtime'] = WindowsTime(struct.unpack("<L",s[16:20])[0],struct.unpack("<L",s[20:24])[0])
d['ctime'] = WindowsTime(struct.unpack("<L",s[24:28])[0],struct.unpack("<L",s[28:32])[0])
d['atime'] = WindowsTime(struct.unpack("<L",s[32:36])[0],struct.unpack("<L",s[36:40])[0])
d['alloc_fsize'] = struct.unpack("<q",s[40:48])[0]
d['real_fsize'] = struct.unpack("<q",s[48:56])[0]
d['flags'] = struct.unpack("<d",s[56:64])[0] # 0x01=NTFS, 0x02=DOS
d['nlen'] = struct.unpack("B",s[64])[0]
d['nspace'] = struct.unpack("B",s[65])[0]
# The $MFT string is stored as \x24\x00\x4D\x00\x46\x00\x54. Ie, the first character is a single
# byte and the remaining characters are two bytes with the first byte a null.
# Note: Actually, it can be stored in several ways and the nspace field tells me which way.
#
# I found the following:
#
# NTFS allows any sequence of 16-bit values for name encoding (file names, stream names, index names,
# etc.). This means UTF-16 codepoints are supported, but the file system does not check whether a
# sequence is valid UTF-16 (it allows any sequence of short values, not restricted to those in the
# Unicode standard).
#
# If true, lovely. But that would explain what I am seeing.
#
# I just ran across an example of "any sequence of ..." - filenames with backspaces and newlines
# in them. Thus, the "isalpha" check. I really need to figure out how to handle Unicode better.
if (unicodeHack):
d['name'] = ''
for i in range(66, 66 + d['nlen']*2):
if s[i] != '\x00': # Just skip over nulls
if s[i] > '\x1F' and s[i] < '\x80': # If it is printable, add it to the string
d['name'] = d['name'] + s[i]
else:
d['name'] = "%s0x%02s" % (d['name'], s[i].encode("hex"))
hexFlag = True
# This statement produces a valid unicode string, I just cannot get it to print correctly
# so I'm temporarily hacking it with the if (unicodeHack) above.
else:
d['name'] = s[66:66+d['nlen']*2]
# This didn't work
# d['name'] = struct.pack("\u
# for i in range(0, d['nlen']*2, 2):
# d['name']=d['name'] + struct.unpack("<H",s[66+i:66+i+1])
# What follows is ugly. I'm trying to deal with the filename in Unicode and not doing well.
# This solution works, though it is printing nulls between the characters. It'll do for now.
# d['name'] = struct.unpack("<%dH" % (int(d['nlen'])*2),s[66:66+(d['nlen']*2)])
# d['name'] = s[66:66+(d['nlen']*2)]
# d['decname'] = unicodedata.normalize('NFKD', d['name']).encode('ASCII','ignore')
# d['decname'] = unicode(d['name'],'iso-8859-1','ignore')
if hexFlag:
addNote('Filename - chars converted to hex')
return d
def decodeAttributeList(s):
hexFlag = False
d = {}
d['type'] = struct.unpack("<I",s[:4])[0] # 4
d['len'] = struct.unpack("<H",s[4:6])[0] # 2
d['nlen'] = struct.unpack("B",s[6])[0] # 1
d['f1'] = struct.unpack("B",s[7])[0] # 1
d['start_vcn'] = struct.unpack("<d",s[8:16])[0] # 8
d['file_ref'] = struct.unpack("<Lxx",s[16:22])[0] # 6
d['seq'] = struct.unpack("<H",s[22:24])[0] # 2
d['id'] = struct.unpack("<H",s[24:26])[0] # 4
if (unicodeHack):
d['name'] = ''
for i in range(26, 26 + d['nlen']*2):
if s[i] != '\x00': # Just skip over nulls
if s[i] > '\x1F' and s[i] < '\x80': # If it is printable, add it to the string
d['name'] = d['name'] + s[i]
else:
d['name'] = "%s0x%02s" % (d['name'], s[i].encode("hex"))
hexFlag = True
else:
d['name'] = s[26:26+d['nlen']*2]
if hexFlag:
addNote('Filename - chars converted to hex')
return d
def decodeVolumeInfo(s):
d = {}
d['f1'] = struct.unpack("<d",s[:8])[0] # 8
d['maj_ver'] = struct.unpack("B",s[8])[0] # 1
d['min_ver'] = struct.unpack("B",s[9])[0] # 1
d['flags'] = struct.unpack("<H",s[10:12])[0] # 2
d['f2'] = struct.unpack("<I",s[12:16])[0] # 4
if (options.debug):
print "+Volume Info"
print "++F1%d" % d['f1']
print "++Major Version: %d" % d['maj_ver']
print "++Minor Version: %d" % d['min_ver']
print "++Flags: %d" % d['flags']
print "++F2: %d" % d['f2']
return d
class ObjectID:
def __init__(self, s):
self.objid = s
if s == 0:
self.objstr = 'Undefined'
else:
self.objstr = self.FmtObjectID()
def FmtObjectID(self):
string = "%s-%s-%s-%s-%s" % (binascii.hexlify(self.objid[0:4]),binascii.hexlify(self.objid[4:6]),
binascii.hexlify(self.objid[6:8]),binascii.hexlify(self.objid[8:10]),binascii.hexlify(self.objid[10:16]))
return string
def decodeObjectID(s):
d = {}
d['objid'] = ObjectID(s[0:16])
d['orig_volid'] = ObjectID(s[16:32])
d['orig_objid'] = ObjectID(s[32:48])
d['orig_domid'] = ObjectID(s[48:64])
return d
def getfilepath(files,recordNumber):
if not(files.has_key(recordNumber)):
return -1
else:
if(-1 == files[recordNumber]):
return "No FN Record"
outstring = "GETFILEPATH" + "_" + str(recordNumber) + "_" + str(files[recordNumber]['par_ref'])
workingrecordnumber = recordNumber
path = files[recordNumber]['name']
if((files[recordNumber]['fileflags'] & 0x2)):
path += '\\'
if(not(files[recordNumber]['fileflags'] & 0x1)):
path += '(deleted)'
i=0
while (files[workingrecordnumber]['par_ref']<>5 and i<30):
workingrecordnumber = files[workingrecordnumber]['par_ref']
if(workingrecordnumber>len(files)):
return "Invalid parent___" + "\\" + path
if(files.has_key(workingrecordnumber)):
if(files[workingrecordnumber]<>-1):
path = files[workingrecordnumber]['name'] + "\\" + path
else:
return "PATHERROR_" + str(workingrecordnumber) + "\\" + path
else:
return "PATHERROR2" + "\\" + path
i+=1
return path
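# Illustrative sketch of how getfilepath() walks parent references (added for
# clarity and never called; the record numbers and names are made up, and kept
# small because of the len(files) sanity check above). MFT record 5 is the
# root directory, so a top-level entry simply has par_ref == 5.
def _getfilepath_example():
    files = {
        1: {'name': 'Windows', 'par_ref': 5, 'fileflags': 0x3},      # active folder
        2: {'name': 'notepad.exe', 'par_ref': 1, 'fileflags': 0x1},  # active file
    }
    return getfilepath(files, 2)  # returns the joined path Windows\notepad.exe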
def anomalyDetect(files,recordNumber):
# Check for STD create times that are before the FN create times
if MFTR['fncnt'] > 0:
try:
if (MFTR['fn', 0]['crtime'].dt == 0) or (MFTR['si']['crtime'].dt < MFTR['fn', 0]['crtime'].dt):
MFTR['stf-fn-shift'] = True
if MFTR['fn', 0]['crtime'].dt > searchdate:
print getfilepath(files,recordNumber)
except:
MFTR['stf-fn-shift'] = True
# Check for STD create times with a nanosecond value of '0'
if MFTR['fn',0]['crtime'].dt != 0:
if MFTR['fn',0]['crtime'].dt.microsecond == 0:
MFTR['usec-zero'] = True
if MFTR['fn',0]['crtime'].dt != 0:
if MFTR['fn',0]['crtime'].dt == MFTR['fn',0]['mtime'].dt:
MFTR['FN_cr_mod_match'] = True
def writeCSVFile():
mftBuffer = ''
tmpBuffer = ''
filenameBuffer = ''
if recordNumber == -1:
# Write headers
csvOutFile.writerow(['Record Number', 'Good', 'Active', 'Record type',
'$Logfile Seq. Num.',
'Sequence Number', 'Parent File Rec. #', 'Parent File Rec. Seq. #','Size',
'Filename #1', 'Std Info Creation date', 'Std Info Modification date',
'Std Info Access date', 'Std Info Entry date', 'FN Info Creation date',
'FN Info Modification date','FN Info Access date', 'FN Info Entry date',
'Object ID', 'Birth Volume ID', 'Birth Object ID', 'Birth Domain ID',
'Filename #2', 'FN Info Creation date', 'FN Info Modify date',
'FN Info Access date', 'FN Info Entry date', 'Filename #3', 'FN Info Creation date',
'FN Info Modify date', 'FN Info Access date', 'FN Info Entry date', 'Filename #4',
'FN Info Creation date', 'FN Info Modify date', 'FN Info Access date',
'FN Info Entry date', 'Standard Information', 'Attribute List', 'Filename',
'Object ID', 'Volume Name', 'Volume Info', 'Data', 'Sec_Desc', 'Index Root',
'Index Allocation', 'Bitmap', 'Reparse Point', 'EA Information', 'EA',
'Property Set', 'Logged Utility Stream', 'Log/Notes', 'STF FN Shift', 'uSec Zero', 'uniq_st_entry'])
elif 'baad' in MFTR:
csvOutFile.writerow(["%s" % recordNumber,"BAAD MFT Record"])
else:
mftBuffer = [recordNumber, decodeMFTmagic(MFTR['magic']), decodeMFTisactive(MFTR['flags']),
decodeMFTrecordtype(int(MFTR['flags']))]
tmpBuffer = ["%d" % MFTR['seq']]
mftBuffer.extend(tmpBuffer)
if MFTR['fncnt'] > 0:
mftBuffer.extend([str(MFTR['fn',0]['par_ref']), str(MFTR['fn',0]['par_seq'])])
else:
mftBuffer.extend(['NoParent', 'NoParent'])
mftBuffer.extend([MFTR['file_size']])
if MFTR['fncnt'] > 0:
filenameBuffer = [FNrecord['name'], "'"+str(SIrecord['crtime'].dtstr),
"'"+SIrecord['mtime'].dtstr, "'"+SIrecord['atime'].dtstr, "'"+SIrecord['ctime'].dtstr,
"'"+MFTR['fn',0]['crtime'].dtstr, "'"+MFTR['fn',0]['mtime'].dtstr,
"'"+MFTR['fn',0]['atime'].dtstr, "'"+MFTR['fn',0]['ctime'].dtstr]
elif 'si' in MFTR:
# Should replace SIrecord with MFTR['si']
filenameBuffer = ['NoFNRecord', "'"+str(SIrecord['crtime'].dtstr),
"'"+SIrecord['mtime'].dtstr, "'"+SIrecord['atime'].dtstr, "'"+SIrecord['ctime'].dtstr,
'NoFNRecord', 'NoFNRecord', 'NoFNRecord','NoFNRecord']
else:
filenameBuffer = ['NoFNRecord', 'NoSIRecord',
'NoSIRecord', 'NoSIRecord', 'NoSIRecord',
'NoFNRecord', 'NoFNRecord', 'NoFNRecord','NoFNRecord']
mftBuffer.extend(filenameBuffer)
if 'objid' in MFTR:
objidBuffer = [MFTR['objid']['objid'].objstr, MFTR['objid']['orig_volid'].objstr,
MFTR['objid']['orig_objid'].objstr, MFTR['objid']['orig_domid'].objstr]
else:
objidBuffer = ['','','','']
mftBuffer.extend(objidBuffer)
# If this goes above four FN attributes, the number of columns will exceed the headers
for i in range(1, MFTR['fncnt']):
filenameBuffer = [MFTR['fn',i]['name'], "'"+MFTR['fn',i]['crtime'].dtstr, "'"+MFTR['fn',i]['mtime'].dtstr,
"'"+MFTR['fn',i]['atime'].dtstr, "'"+MFTR['fn',i]['ctime'].dtstr]
mftBuffer.extend(filenameBuffer)
filenameBuffer = ''
# Pad out the remaining FN columns
if MFTR['fncnt'] < 2:
tmpBuffer = ['','','','','','','','','','','','','','','']
elif MFTR['fncnt'] == 2:
tmpBuffer = ['','','','','','','','','','']
elif MFTR['fncnt'] == 3:
tmpBuffer = ['','','','','']
mftBuffer.extend(tmpBuffer)
# One darned big if statement, alas.
mftBuffer.append('True') if 'si' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'al' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if MFTR['fncnt'] > 0 else mftBuffer.append('False')
mftBuffer.append('True') if 'objid' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'volname' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'volinfo' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'data' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'sd' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'indexroot' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'indexallocation' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'bitmap' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'reparse' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'eainfo' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'ea' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'propertyset' in MFTR else mftBuffer.append('False')
mftBuffer.append('True') if 'loggedutility' in MFTR else mftBuffer.append('False')
if 'notes' in MFTR: # Log of abnormal activity related to this record
mftBuffer.append(MFTR['notes'])
else:
mftBuffer.append('None')
if 'stf-fn-shift' in MFTR:
mftBuffer.append('Y')
else:
mftBuffer.append('N')
if 'usec-zero' in MFTR:
mftBuffer.append('Y')
else:
mftBuffer.append('N')
if 'uniq_st_entry' in MFTR:
mftBuffer.append(MFTR['uniq_st_entry'])
else:
mftBuffer.append('N')
if 'FN_cr_mod_match' in MFTR:
mftBuffer.append('Y')
else:
mftBuffer.append('N')
csvOutFile.writerow(mftBuffer)
# Get command line options
parser = OptionParser()
parser.set_defaults(debug=False,UseLocalTimezone=False,UseGUI=False)
parser.add_option("-f", "--file", dest="filename",
help="read MFT from FILE", metavar="FILE")
parser.add_option("-o", "--output", dest="output",
help="write results to FILE", metavar="FILE")
parser.add_option("-t", "--time", dest="time",
help="time to search back", metavar="FILE")
parser.add_option("-a", "--anomaly",
action="store_true", dest="anomaly",
help="turn on anomaly detection")
parser.add_option("-m", "--mactimes", dest="mactimes",
help="write sorted MAC times to file", metavar="FILE")
if noGUI == False:
parser.add_option("-g", "--gui",
action="store_true", dest="UseGUI",
help="Use GUI for file selection")
parser.add_option("-d", "--debug",
action="store_true", dest="debug",
help="turn on debugging output")
(options, args) = parser.parse_args()
# Start reading file
if (options.time):
searchdate = datetime.now()-timedelta(days=int(options.time))
else:
searchdate = datetime.now()-timedelta(days=7)
if (options.UseGUI):
# Hide root tK window
root = tk.Tk()
root.withdraw()
options.filename = tkFileDialog.askopenfilename(title='MFT file to open',filetypes=[("all files", "*")])
options.output = tkFileDialog.asksaveasfilename(title='Output file')
if options.mactimes != None:
options.mactimes = tkFileDialog.asksaveasfilename(title='mactimes file')
else:
if options.filename == None:
print "-f <filename> required."
sys.exit()
if options.output == None:
print "-o <filename> required."
sys.exit()
try:
F = open(options.filename, 'rb')
except:
print "Unable to open file: %s" % options.filename
sys.exit()
try:
outFile = open(options.output, 'wb')
csvOutFile = csv.writer(outFile, dialect=csv.excel,quoting=1)
except (IOError, TypeError):
print "Unable to open file: %s" % options.output
sys.exit()
if options.mactimes != None:
try:
mactimesfile = open(options.mactimes, 'w')
except:
print "Unable to open file: %s" % options.mactimes
sys.exit()
# Write the headers to the output file
recordNumber = -1
MFTR = -1
writeCSVFile()
recordNumber = 0
print "Processing MFT data and writing csv data"
file_std_ctimes = {}
files = {}
file_times = {}
recordnum = 0
record = F.read(1024)
while record != "":
MFTR = decodeMFTHeader(record);
FNrecord = -1
if options.debug: print '-->Record number: %d\n\tMagic: %s Attribute offset: %d Flags: %s Size:%d' % (recordNumber, MFTR['magic'], MFTR['attr_off'], hex(int(MFTR['flags'])), MFTR['size'])
if MFTR['magic'] == 0x44414142:
if options.debug: print "BAAD MFT Record " + str(recordNumber)
MFTR['baad'] = True
else:
ReadPtr = MFTR['attr_off']
while (ReadPtr < 1024):
ATRrecord = decodeATRHeader(record[ReadPtr:])
if ATRrecord['type'] == 0xffffffff: # End of attributes
break
if ATRrecord['type'] == 0x10: # Standard Information
if options.debug: print "Stardard Information:\n++Type: %s Length: %d Resident: %s Name Len:%d Name Offset: %d" % (hex(int(ATRrecord['type'])),ATRrecord['len'],ATRrecord['res'],ATRrecord['nlen'],ATRrecord['name_off'])
SIrecord = decodeSIAttribute(record[ReadPtr+ATRrecord['soff']:])
MFTR['si'] = SIrecord
if (file_std_ctimes.has_key(SIrecord['ctime'].dtstr)):
file_std_ctimes[SIrecord['ctime'].dtstr] += 1
else:
file_std_ctimes[SIrecord['ctime'].dtstr] = 1
if options.debug: print "++CRTime: %s\n++MTime: %s\n++ATime: %s\n++EntryTime: %s" % (SIrecord['crtime'].dtstr, SIrecord['mtime'].dtstr, SIrecord['atime'].dtstr, SIrecord['ctime'].dtstr)
elif ATRrecord['type'] == 0x20: # Attribute list
if options.debug: print "Attribute list"
if ATRrecord['res'] == 0:
ALrecord = decodeAttributeList(record[ReadPtr+ATRrecord['soff']:])
MFTR['al'] = ALrecord
if options.debug: print "Name: %s"% (ALrecord['name'])
else:
if options.debug: print "Non-resident Attribute List?"
MFTR['al'] = None
elif ATRrecord['type'] == 0x30: # File name
if options.debug: print "File name record"
FNrecord = decodeFNAttribute(record[ReadPtr+ATRrecord['soff']:])
if(MFTR['file_size']==0):
MFTR['file_size']=FNrecord['real_fsize']
MFTR['fn',MFTR['fncnt']] = FNrecord
MFTR['fncnt'] = MFTR['fncnt'] + 1
if options.debug: print "Name: %s" % (FNrecord['name'])
if FNrecord['crtime'] != 0:
if options.debug: print "\tCRTime: %s MTime: %s ATime: %s EntryTime: %s" % (FNrecord['crtime'].dtstr, FNrecord['mtime'].dtstr, FNrecord['atime'].dtstr, FNrecord['ctime'].dtstr)
elif ATRrecord['type'] == 0x40: #Object ID
ObjectIDRecord = decodeObjectID(record[ReadPtr+ATRrecord['soff']:])
MFTR['objid'] = ObjectIDRecord
if options.debug: print "Object ID"
elif ATRrecord['type'] == 0x50: # Security descriptor
MFTR['sd'] = True
if options.debug: print "Security descriptor"
elif ATRrecord['type'] == 0x60: # Volume name
MFTR['volname'] = True
if options.debug: print "Volume name"
elif ATRrecord['type'] == 0x70: # Volume information
if options.debug: print "Volume info attribute"
VolumeInfoRecord = decodeVolumeInfo(record[ReadPtr+ATRrecord['soff']:])
MFTR['volinfo'] = VolumeInfoRecord
elif ATRrecord['type'] == 0x80: # Data
MFTR['data'] = True
if options.debug: print "Data attribute"
elif ATRrecord['type'] == 0x90: # Index root
MFTR['indexroot'] = True
if options.debug: print "Index root"
elif ATRrecord['type'] == 0xA0: # Index allocation
MFTR['indexallocation'] = True
if options.debug: print "Index allocation"
elif ATRrecord['type'] == 0xB0: # Bitmap
MFTR['bitmap'] = True
if options.debug: print "Bitmap"
elif ATRrecord['type'] == 0xC0: # Reparse point
MFTR['reparsepoint'] = True
if options.debug: print "Reparse point"
elif ATRrecord['type'] == 0xD0: # EA Information
MFTR['eainfo'] = True
if options.debug: print "EA Information"
elif ATRrecord['type'] == 0xE0: # EA
MFTR['ea'] = True
if options.debug: print "EA"
elif ATRrecord['type'] == 0xF0: # Property set
MFTR['propertyset'] = True
if options.debug: print "Property set"
elif ATRrecord['type'] == 0x100: # Logged utility stream
MFTR['loggedutility'] = True
if options.debug: print "Logged utility stream"
else:
if options.debug: print "Found an unknown attribute"
if ATRrecord['len'] > 0:
ReadPtr = ReadPtr + ATRrecord['len']
else:
if options.debug: print "ATRrecord->len < 0, exiting loop"
break
if(FNrecord<>-1):
FNrecord['fileflags'] = MFTR['flags']
files[recordNumber]=FNrecord
record = F.read(1024)
writeCSVFile()
recordNumber = recordNumber + 1
if(recordNumber % 100000 == 0):
print "processing recordNumber - " + str(recordNumber)
print "Starting run 2 for anamoly detection and MAC times"
F.seek(0)
recordNumber = 0;
# 1024 is valid for current version of Windows but should really get this value from somewhere
record = F.read(1024)
while record != "":
MFTR = decodeMFTHeader(record);
MFTR['file_size'] = 0
if options.debug: print '-->Record number: %d\n\tMagic: %s Attribute offset: %d Flags: %s Size:%d' %(recordNumber, MFTR['magic'], MFTR['attr_off'], hex(int(MFTR['flags'])), MFTR['size'])
if MFTR['magic'] == 0x44414142:
if options.debug: print "BAAD MFT Record"
MFTR['baad'] = True
else:
ReadPtr = MFTR['attr_off']
while (ReadPtr < 1024):
ATRrecord = decodeATRHeader(record[ReadPtr:])
if ATRrecord['type'] == 0xffffffff: # End of attributes
break
if options.debug: print "Attribute type: %x Length: %d Res: %x" % (ATRrecord['type'], ATRrecord['len'], ATRrecord['res'])
if ATRrecord['type'] == 0x10: # Standard Information
if options.debug: print "Stardard Information:\n++Type: %s Length: %d Resident: %s Name Len:%d Name Offset: %d" % (hex(int(ATRrecord['type'])),ATRrecord['len'],ATRrecord['res'],ATRrecord['nlen'],ATRrecord['name_off'])
SIrecord = decodeSIAttribute(record[ReadPtr+ATRrecord['soff']:])
MFTR['si'] = SIrecord
if options.debug: print "++CRTime: %s\n++MTime: %s\n++ATime: %s\n++EntryTime: %s" % (SIrecord['crtime'].dtstr, SIrecord['mtime'].dtstr, SIrecord['atime'].dtstr, SIrecord['ctime'].dtstr)
elif ATRrecord['type'] == 0x20: # Attribute list
if options.debug: print "Attribute list"
if ATRrecord['res'] == 0:
ALrecord = decodeAttributeList(record[ReadPtr+ATRrecord['soff']:])
MFTR['al'] = ALrecord
if options.debug: print "Name: %s"% (ALrecord['name'])
else:
if options.debug: print "Non-resident Attribute List?"
MFTR['al'] = None
elif ATRrecord['type'] == 0x30: # File name
if options.debug: print "File name record"
FNrecord = decodeFNAttribute(record[ReadPtr+ATRrecord['soff']:])
if(MFTR['file_size']==0):
MFTR['file_size']=FNrecord['real_fsize']
MFTR['fn',MFTR['fncnt']] = FNrecord
MFTR['fncnt'] = MFTR['fncnt'] + 1
if options.debug: print "Name: %s" % (FNrecord['name'])
if FNrecord['crtime'] != 0:
if options.debug: print "\tCRTime: %s MTime: %s ATime: %s EntryTime: %s" % (FNrecord['crtime'].dtstr, FNrecord['mtime'].dtstr, FNrecord['atime'].dtstr, FNrecord['ctime'].dtstr)
elif ATRrecord['type'] == 0x40: #Object ID
ObjectIDRecord = decodeObjectID(record[ReadPtr+ATRrecord['soff']:])
MFTR['objid'] = ObjectIDRecord
if options.debug: print "Object ID"
elif ATRrecord['type'] == 0x50: # Security descriptor
MFTR['sd'] = True
if options.debug: print "Security descriptor"
elif ATRrecord['type'] == 0x60: # Volume name
MFTR['volname'] = True
if options.debug: print "Volume name"
elif ATRrecord['type'] == 0x70: # Volume information
if options.debug: print "Volume info attribute"
VolumeInfoRecord = decodeVolumeInfo(record[ReadPtr+ATRrecord['soff']:])
MFTR['volinfo'] = VolumeInfoRecord
elif ATRrecord['type'] == 0x80: # Data
MFTR['data'] = True
if options.debug: print "Data attribute"
elif ATRrecord['type'] == 0x90: # Index root
MFTR['indexroot'] = True
if options.debug: print "Index root"
elif ATRrecord['type'] == 0xA0: # Index allocation
MFTR['indexallocation'] = True
if options.debug: print "Index allocation"
elif ATRrecord['type'] == 0xB0: # Bitmap
MFTR['bitmap'] = True
if options.debug: print "Bitmap"
elif ATRrecord['type'] == 0xC0: # Reparse point
MFTR['reparsepoint'] = True
if options.debug: print "Reparse point"
elif ATRrecord['type'] == 0xD0: # EA Information
MFTR['eainfo'] = True
if options.debug: print "EA Information"
elif ATRrecord['type'] == 0xE0: # EA
MFTR['ea'] = True
if options.debug: print "EA"
elif ATRrecord['type'] == 0xF0: # Property set
MFTR['propertyset'] = True
if options.debug: print "Property set"
elif ATRrecord['type'] == 0x100: # Logged utility stream
MFTR['loggedutility'] = True
if options.debug: print "Logged utility stream"
else:
if options.debug: print "Found an unknown attribute"
if ATRrecord['len'] > 0:
ReadPtr = ReadPtr + ATRrecord['len']
else:
if options.debug: print "ATRrecord->len < 0, exiting loop"
break
record = F.read(1024)
if(MFTR<>-1):
filename_with_path = getfilepath(files,recordNumber)
if(filename_with_path<>-1):
entry = [filename_with_path,str(MFTR['file_size'])]
if(MFTR['si']<>-1):
entry.extend([MFTR['si']['mtime'].dtstr,MFTR['si']['atime'].dtstr,MFTR['si']['crtime'].dtstr,MFTR['si']['ctime'].dtstr])
if(MFTR['fncnt']>0):
entry.extend([MFTR['fn',0]['mtime'].dtstr,MFTR['fn',0]['atime'].dtstr,MFTR['fn',0]['crtime'].dtstr,MFTR['fn',0]['ctime'].dtstr])
else:
entry.extend(['','','',''])
entry.extend([recordNumber])
timeIndex = MFTR['si']['mtime'].dtstr
file_times[timeIndex + ' ' + filename_with_path] = [timeIndex]
file_times[timeIndex + ' ' + filename_with_path].extend(entry)
timeIndex = MFTR['si']['atime'].dtstr
file_times[timeIndex + ' ' + filename_with_path] = [timeIndex]
file_times[timeIndex + ' ' + filename_with_path].extend(entry)
timeIndex = MFTR['si']['ctime'].dtstr
file_times[timeIndex + ' ' + filename_with_path] = [timeIndex]
file_times[timeIndex + ' ' + filename_with_path].extend(entry)
timeIndex = MFTR['si']['crtime'].dtstr
file_times[timeIndex + ' ' + filename_with_path] = [timeIndex]
file_times[timeIndex + ' ' + filename_with_path].extend(entry)
if(MFTR['fncnt']>0):
timeIndex = MFTR['fn',0]['mtime'].dtstr
file_times[timeIndex + ' ' + filename_with_path] = [timeIndex]
file_times[timeIndex + ' ' + filename_with_path].extend(entry)
timeIndex = MFTR['fn',0]['atime'].dtstr
file_times[timeIndex + ' ' + filename_with_path] = [timeIndex]
file_times[timeIndex + ' ' + filename_with_path].extend(entry)
timeIndex = MFTR['fn',0]['ctime'].dtstr
file_times[timeIndex + ' ' + filename_with_path] = [timeIndex]
file_times[timeIndex + ' ' + filename_with_path].extend(entry)
timeIndex = MFTR['fn',0]['crtime'].dtstr
file_times[timeIndex + ' ' + filename_with_path] = [timeIndex]
file_times[timeIndex + ' ' + filename_with_path].extend(entry)
if(recordNumber % 100000 == 0):
                print "processing recordNumber - " + str(recordNumber)
#break;
if options.anomaly and 'baad' not in MFTR:
anomalyDetect(files,recordNumber)
recordNumber = recordNumber + 1
counter = 0
if options.mactimes:
print "Processing MAC times for " + str(len(file_times)) + " entries from " + str(recordNumber) + " files"
mactimesfile.write("filename,size,MACEmace,datetime,MFTRecIndex"+ "\n")
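        # Build a MACEmace flag string per row: uppercase letters mark which
        # $STANDARD_INFORMATION times (Modified, Accessed, Created, MFT Entry
        # changed) match the row's timestamp, lowercase letters do the same
        # for the $FILE_NAME times.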
for file_entry in sorted(file_times.keys()):
MAC_string = ""
if(file_times[file_entry][0]==file_times[file_entry][3]):
MAC_string += 'M'
else:
MAC_string += '_'
if(file_times[file_entry][0]==file_times[file_entry][4]):
MAC_string += 'A'
else:
MAC_string += '_'
if(file_times[file_entry][0]==file_times[file_entry][5]):
MAC_string += 'C'
else:
MAC_string += '_'
if(file_times[file_entry][0]==file_times[file_entry][6]):
MAC_string += 'E'
else:
MAC_string += '_'
if(file_times[file_entry][0]==file_times[file_entry][7]):
MAC_string += 'm'
else:
MAC_string += '_'
if(file_times[file_entry][0]==file_times[file_entry][8]):
MAC_string += 'a'
else:
MAC_string += '_'
if(file_times[file_entry][0]==file_times[file_entry][9]):
MAC_string += 'c'
else:
MAC_string += '_'
if(file_times[file_entry][0]==file_times[file_entry][10]):
MAC_string += 'e'
else:
MAC_string += '_'
outline_string = ""
outline_string = file_times[file_entry][1]+ ',' + file_times[file_entry][2] + ',' + MAC_string+ ',\'' + file_times[file_entry][0] + ',' + str(file_times[file_entry][11])
if options.mactimes != None:
mactimesfile.write(outline_string+ "\n")
mactimesfile.close()
outFile.close()
| unlicense | -5,490,572,374,061,385,000 | 31.566866 | 221 | 0.621445 | false |
HybridF5/tempest | tempest/api/compute/admin/test_aggregates.py | 6 | 10648 | # Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class AggregatesAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Aggregates API that require admin privileges"""
_host_key = 'OS-EXT-SRV-ATTR:host'
@classmethod
def setup_clients(cls):
super(AggregatesAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.aggregates_client
@classmethod
def resource_setup(cls):
super(AggregatesAdminTestJSON, cls).resource_setup()
cls.aggregate_name_prefix = 'test_aggregate'
cls.az_name_prefix = 'test_az'
hosts_all = cls.os_adm.hosts_client.list_hosts()['hosts']
hosts = map(lambda x: x['host_name'],
filter(lambda y: y['service'] == 'compute', hosts_all))
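        # All tests in this class operate on the first compute host reported
        # by the hosts API.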
cls.host = hosts[0]
def _try_delete_aggregate(self, aggregate_id):
# delete aggregate, if it exists
try:
self.client.delete_aggregate(aggregate_id)
        # if the aggregate is not found, it was already deleted in the test
except lib_exc.NotFound:
pass
@test.idempotent_id('0d148aa3-d54c-4317-aa8d-42040a475e20')
def test_aggregate_create_delete(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertIsNone(aggregate['availability_zone'])
self.client.delete_aggregate(aggregate['id'])
self.client.wait_for_resource_deletion(aggregate['id'])
@test.idempotent_id('5873a6f8-671a-43ff-8838-7ce430bb6d0b')
def test_aggregate_create_delete_with_az(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)['aggregate']
self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
self.client.delete_aggregate(aggregate['id'])
self.client.wait_for_resource_deletion(aggregate['id'])
@test.idempotent_id('68089c38-04b1-4758-bdf0-cf0daec4defd')
def test_aggregate_create_verify_entry_in_list(self):
# Create an aggregate and ensure it is listed.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
aggregates = self.client.list_aggregates()['aggregates']
self.assertIn((aggregate['id'], aggregate['availability_zone']),
map(lambda x: (x['id'], x['availability_zone']),
aggregates))
@test.idempotent_id('36ec92ca-7a73-43bc-b920-7531809e8540')
def test_aggregate_create_update_metadata_get_details(self):
# Create an aggregate and ensure its details are returned.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
body = self.client.show_aggregate(aggregate['id'])['aggregate']
self.assertEqual(aggregate['name'], body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertEqual({}, body["metadata"])
# set the metadata of the aggregate
meta = {"key": "value"}
body = self.client.set_metadata(aggregate['id'], metadata=meta)
self.assertEqual(meta, body['aggregate']["metadata"])
# verify the metadata has been set
body = self.client.show_aggregate(aggregate['id'])['aggregate']
self.assertEqual(meta, body["metadata"])
@test.idempotent_id('4d2b2004-40fa-40a1-aab2-66f4dab81beb')
def test_aggregate_create_update_with_az(self):
# Update an aggregate and ensure properties are updated correctly
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)['aggregate']
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
self.assertIsNotNone(aggregate['id'])
aggregate_id = aggregate['id']
new_aggregate_name = aggregate_name + '_new'
new_az_name = az_name + '_new'
resp_aggregate = self.client.update_aggregate(
aggregate_id,
name=new_aggregate_name,
availability_zone=new_az_name)['aggregate']
self.assertEqual(new_aggregate_name, resp_aggregate['name'])
self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
aggregates = self.client.list_aggregates()['aggregates']
self.assertIn((aggregate_id, new_aggregate_name, new_az_name),
map(lambda x:
(x['id'], x['name'], x['availability_zone']),
aggregates))
@test.idempotent_id('c8e85064-e79b-4906-9931-c11c24294d02')
def test_aggregate_add_remove_host(self):
# Add a host to the given aggregate and remove.
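        # The lock serializes tests that modify availability zones so two
        # aggregate tests do not move the same host at once.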
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
body = (self.client.add_host(aggregate['id'], host=self.host)
['aggregate'])
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertIn(self.host, body['hosts'])
body = (self.client.remove_host(aggregate['id'], host=self.host)
['aggregate'])
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertNotIn(self.host, body['hosts'])
@test.idempotent_id('7f6a1cc5-2446-4cdb-9baa-b6ae0a919b72')
def test_aggregate_add_host_list(self):
# Add a host to the given aggregate and list.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], host=self.host)
self.addCleanup(self.client.remove_host, aggregate['id'],
host=self.host)
aggregates = self.client.list_aggregates()['aggregates']
aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates)
self.assertEqual(1, len(aggs))
agg = aggs[0]
self.assertEqual(aggregate_name, agg['name'])
self.assertIsNone(agg['availability_zone'])
self.assertIn(self.host, agg['hosts'])
@test.idempotent_id('eeef473c-7c52-494d-9f09-2ed7fc8fc036')
def test_aggregate_add_host_get_details(self):
# Add a host to the given aggregate and get details.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], host=self.host)
self.addCleanup(self.client.remove_host, aggregate['id'],
host=self.host)
body = self.client.show_aggregate(aggregate['id'])['aggregate']
self.assertEqual(aggregate_name, body['name'])
self.assertIsNone(body['availability_zone'])
self.assertIn(self.host, body['hosts'])
@test.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
def test_aggregate_add_host_create_server_with_az(self):
# Add a host to the given aggregate and create a server.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)['aggregate']
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], host=self.host)
self.addCleanup(self.client.remove_host, aggregate['id'],
host=self.host)
server_name = data_utils.rand_name('test_server')
admin_servers_client = self.os_adm.servers_client
server = self.create_test_server(name=server_name,
availability_zone=az_name,
wait_until='ACTIVE')
body = admin_servers_client.show_server(server['id'])['server']
self.assertEqual(self.host, body[self._host_key])
| apache-2.0 | -7,166,196,822,807,692,000 | 46.748879 | 78 | 0.646694 | false |
cjqian/incubator-airflow | airflow/contrib/operators/bigquery_check_operator.py | 18 | 4637 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator, IntervalCheckOperator
from airflow.utils.decorators import apply_defaults
class BigQueryCheckOperator(CheckOperator):
"""
Performs checks against BigQuery. The ``BigQueryCheckOperator`` expects
a sql query that will return a single row. Each value on that
first row is evaluated using python ``bool`` casting. If any of the
values return ``False`` the check is failed and errors out.
Note that Python bool casting evals the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
the count ``== 0``. You can craft much more complex query that could,
for instance, check that the table has the same number of rows as
the source table upstream, or that the count of today's partition is
greater than yesterday's partition, or that a set of metrics are less
than 3 standard deviation for the 7 day average.
This operator can be used as a data quality check in your pipeline, and
depending on where you put it in your DAG, you have the choice to
    stop the critical path and prevent the publication of dubious data, or to
    run it on the side and receive email alerts without stopping the progress
    of the DAG.
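    For example (illustrative; the task id and table name are placeholders),
    ``BigQueryCheckOperator(task_id='check_foo', sql='SELECT COUNT(*) FROM foo',
    bigquery_conn_id='bigquery_default')`` fails whenever table ``foo`` is empty,
    since ``bool(0)`` evaluates to ``False``.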
:param sql: the sql to be executed
:type sql: string
:param bigquery_conn_id: reference to the BigQuery database
:type bigquery_conn_id: string
"""
@apply_defaults
def __init__(
self,
sql,
bigquery_conn_id='bigquery_default',
*args,
**kwargs):
super(BigQueryCheckOperator, self).__init__(sql=sql, *args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
self.sql = sql
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id)
class BigQueryValueCheckOperator(ValueCheckOperator):
"""
Performs a simple value check using sql code.
:param sql: the sql to be executed
:type sql: string
"""
@apply_defaults
def __init__(
self, sql, pass_value, tolerance=None,
bigquery_conn_id='bigquery_default',
*args, **kwargs):
super(BigQueryValueCheckOperator, self).__init__(
sql=sql, pass_value=pass_value, tolerance=tolerance,
*args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id)
class BigQueryIntervalCheckOperator(IntervalCheckOperator):
"""
Checks that the values of metrics given as SQL expressions are within
a certain tolerance of the ones from days_back before.
This method constructs a query like so:
SELECT {metrics_threshold_dict_key} FROM {table}
WHERE {date_filter_column}=<date>
:param table: the table name
:type table: str
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:type days_back: int
:param metrics_threshold: a dictionary of ratios indexed by metrics, for
example 'COUNT(*)': 1.5 would require a 50 percent or less difference
between the current day, and the prior days_back.
:type metrics_threshold: dict
"""
@apply_defaults
def __init__(
self, table, metrics_thresholds,
date_filter_column='ds', days_back=-7,
bigquery_conn_id='bigquery_default',
*args, **kwargs):
super(BigQueryIntervalCheckOperator, self).__init__(
table=table, metrics_thresholds=metrics_thresholds,
date_filter_column=date_filter_column, days_back=days_back,
*args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id)
| apache-2.0 | 8,244,806,103,211,460,000 | 36.096 | 101 | 0.674143 | false |
MeteorAdminz/viper | viper/common/out.py | 4 | 3847 | # -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
try:
from terminaltables import AsciiTable
HAVE_TERMTAB = True
except:
HAVE_TERMTAB = False
import textwrap
from viper.common.colors import cyan, yellow, red, green, bold
def print_info(message):
print(bold(cyan("[*]")) + u" {0}".format(message))
def print_item(message, tabs=0):
print(" {0}".format(" " * tabs) + cyan("-") + u" {0}".format(message))
def print_warning(message):
print(bold(yellow("[!]")) + u" {0}".format(message))
def print_error(message):
print(bold(red("[!]")) + u" {0}".format(message))
def print_success(message):
print(bold(green("[+]")) + u" {0}".format(message))
def table(header, rows):
if not HAVE_TERMTAB:
print_error("Missing dependency, install terminaltables (`pip install terminaltables`)")
return
# TODO: Refactor this function, it is some serious ugly code.
content = [header] + rows
# Make sure everything is string
try:
content = [[a.replace('\t', ' ') for a in list(map(unicode, l))] for l in content]
except:
# Python3 way of doing it:
content = [[a.replace('\t', ' ') for a in list(map(str, l))] for l in content]
t = AsciiTable(content)
if not t.ok:
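        # The rendered table is wider than the terminal: wrap the contents of
        # the longest column down to the width terminaltables can still fit.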
longest_col = t.column_widths.index(max(t.column_widths))
max_length_col = t.column_max_width(longest_col)
if max_length_col > 0:
for i, content in enumerate(t.table_data):
if len(content[longest_col]) > max_length_col:
temp = ''
for l in content[longest_col].splitlines():
if len(l) > max_length_col:
temp += '\n'.join(textwrap.wrap(l, max_length_col)) + '\n'
else:
temp += l + '\n'
content[longest_col] = temp.strip()
t.table_data[i] = content
return t.table
def print_output(output, filename=None):
if not output:
return
if filename:
with open(filename.strip(), 'a') as out:
for entry in output:
if entry['type'] == 'info':
out.write('[*] {0}\n'.format(entry['data']))
elif entry['type'] == 'item':
out.write(' [-] {0}\n'.format(entry['data']))
elif entry['type'] == 'warning':
out.write('[!] {0}\n'.format(entry['data']))
elif entry['type'] == 'error':
out.write('[!] {0}\n'.format(entry['data']))
elif entry['type'] == 'success':
out.write('[+] {0}\n'.format(entry['data']))
elif entry['type'] == 'table':
out.write(str(table(
header=entry['data']['header'],
rows=entry['data']['rows']
)))
out.write('\n')
else:
out.write('{0}\n'.format(entry['data']))
else:
for entry in output:
if entry['type'] == 'info':
print_info(entry['data'])
elif entry['type'] == 'item':
print_item(entry['data'])
elif entry['type'] == 'warning':
print_warning(entry['data'])
elif entry['type'] == 'error':
print_error(entry['data'])
elif entry['type'] == 'success':
print_success(entry['data'])
elif entry['type'] == 'table':
print(table(
header=entry['data']['header'],
rows=entry['data']['rows']
))
else:
print(entry['data'])
| bsd-3-clause | -7,368,233,565,094,316,000 | 35.638095 | 96 | 0.489212 | false |
fallen/artiq | artiq/py2llvm/values.py | 2 | 2852 | from types import SimpleNamespace
from copy import copy
import llvmlite_artiq.ir as ll
class VGeneric:
def __init__(self):
self.llvm_value = None
def new(self):
r = copy(self)
r.llvm_value = None
return r
def __repr__(self):
return "<" + self.__class__.__name__ + ">"
def same_type(self, other):
return isinstance(other, self.__class__)
def merge(self, other):
if not self.same_type(other):
raise TypeError("Incompatible types: {} and {}"
.format(repr(self), repr(other)))
def auto_load(self, builder):
if isinstance(self.llvm_value.type, ll.PointerType):
return builder.load(self.llvm_value)
else:
return self.llvm_value
def auto_store(self, builder, llvm_value):
if self.llvm_value is None:
self.llvm_value = llvm_value
elif isinstance(self.llvm_value.type, ll.PointerType):
builder.store(llvm_value, self.llvm_value)
else:
raise RuntimeError(
"Attempted to set LLVM SSA value multiple times")
def alloca(self, builder, name=""):
if self.llvm_value is not None:
raise RuntimeError("Attempted to alloca existing LLVM value "+name)
self.llvm_value = builder.alloca(self.get_llvm_type(), name=name)
def o_int(self, builder):
return self.o_intx(32, builder)
def o_int64(self, builder):
return self.o_intx(64, builder)
def o_round(self, builder):
return self.o_roundx(32, builder)
def o_round64(self, builder):
return self.o_roundx(64, builder)
def _make_binary_operator(op_name):
def op(l, r, builder):
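        # Mirror Python's binary operator dispatch: try the left operand's
        # o_<op> method first and fall back to the right operand's reflected
        # or_<op> method when the former returns NotImplemented.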
try:
opf = getattr(l, "o_" + op_name)
except AttributeError:
result = NotImplemented
else:
result = opf(r, builder)
if result is NotImplemented:
try:
ropf = getattr(r, "or_" + op_name)
except AttributeError:
result = NotImplemented
else:
result = ropf(l, builder)
if result is NotImplemented:
raise TypeError(
"Unsupported operand types for {}: {} and {}"
.format(op_name, type(l).__name__, type(r).__name__))
return result
return op
def _make_operators():
d = dict()
for op_name in ("add", "sub", "mul",
"truediv", "floordiv", "mod",
"pow", "lshift", "rshift", "xor",
"eq", "ne", "lt", "le", "gt", "ge"):
d[op_name] = _make_binary_operator(op_name)
d["and_"] = _make_binary_operator("and")
d["or_"] = _make_binary_operator("or")
return SimpleNamespace(**d)
operators = _make_operators()
| gpl-3.0 | -6,064,673,247,013,729,000 | 29.340426 | 79 | 0.54453 | false |
mvo5/snappy | tests/main/interfaces-network-status-classic/fake-portal-network-monitor.py | 7 | 2225 | #!/usr/bin/python3
import os
import sys
import dbus.service
import dbus.mainloop.glib
from gi.repository import GLib
BUS_NAME = "org.freedesktop.portal.Desktop"
OBJECT_PATH = "/org/freedesktop/portal/desktop"
NETWORK_MONITOR_IFACE = "org.freedesktop.portal.NetworkMonitor"
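# Minimal fake of the xdg-desktop-portal NetworkMonitor interface: it always
# reports an available, unmetered connection with full connectivity so the
# network-status tests can run without a real portal service.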
class Portal(dbus.service.Object):
def __init__(self, connection, object_path, config):
super(Portal, self).__init__(connection, object_path)
self._config = config
@dbus.service.method(
dbus_interface=NETWORK_MONITOR_IFACE,
in_signature="",
out_signature="b",
)
def GetAvailable(self):
return True
@dbus.service.method(
dbus_interface=NETWORK_MONITOR_IFACE,
in_signature="",
out_signature="b",
)
def GetMetered(self):
return False
@dbus.service.method(
dbus_interface=NETWORK_MONITOR_IFACE,
in_signature="",
out_signature="u",
)
def GetConnectivity(self):
return dbus.UInt32(4) # Full Network
@dbus.service.method(
dbus_interface=NETWORK_MONITOR_IFACE,
in_signature="",
out_signature="a{sv}",
)
def GetStatus(self):
return dict(
available=self.GetAvailable(),
metered=self.GetMetered(),
connectivity=self.GetConnectivity(),
)
@dbus.service.method(
dbus_interface=NETWORK_MONITOR_IFACE,
in_signature="su",
out_signature="b",
)
def CanReach(self, hostname, port):
return True
def main(argv):
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
main_loop = GLib.MainLoop()
bus = dbus.SessionBus()
# Make sure we quit when the bus shuts down
bus.add_signal_receiver(
main_loop.quit,
signal_name="Disconnected",
path="/org/freedesktop/DBus/Local",
dbus_interface="org.freedesktop.DBus.Local",
)
portal = Portal(bus, OBJECT_PATH, None)
# Allow other services to assume our bus name
bus_name = dbus.service.BusName(
BUS_NAME, bus, allow_replacement=True, replace_existing=True, do_not_queue=True
)
main_loop.run()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-3.0 | -6,726,202,213,183,486,000 | 23.722222 | 87 | 0.624719 | false |
Azure/azure-sdk-for-python | sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/operations/_anomaly_detector_client_operations.py | 1 | 30070 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, IO, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AnomalyDetectorClientOperationsMixin(object):
def detect_entire_series(
self,
body, # type: "_models.DetectRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.EntireDetectResponse"
"""Detect anomalies for the entire series in batch.
        This operation generates a model with an entire series; each point is detected with the same
        model. With this method, points before and after a certain point are used to determine whether
        it is an anomaly. The entire detection can give the user an overall status of the time series.
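        As an illustrative sketch (the field names are assumptions drawn from the models module),
        ``detect_entire_series(DetectRequest(series=points, granularity="daily"))`` returns an
        ``EntireDetectResponse`` whose ``is_anomaly`` list flags each input point.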
:param body: Time series points and period if needed. Advanced model parameters can also be set
in the request.
:type body: ~azure.ai.anomalydetector.models.DetectRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EntireDetectResponse, or the result of cls(response)
:rtype: ~azure.ai.anomalydetector.models.EntireDetectResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EntireDetectResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.detect_entire_series.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'DetectRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AnomalyDetectorError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('EntireDetectResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
detect_entire_series.metadata = {'url': '/timeseries/entire/detect'} # type: ignore
def detect_last_point(
self,
body, # type: "_models.DetectRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.LastDetectResponse"
"""Detect anomaly status of the latest point in time series.
This operation generates a model using points before the latest one. With this method, only
        historical points are used to determine whether the target point is an anomaly. The
        latest-point detection operation matches the scenario of real-time monitoring of business metrics.
:param body: Time series points and period if needed. Advanced model parameters can also be set
in the request.
:type body: ~azure.ai.anomalydetector.models.DetectRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LastDetectResponse, or the result of cls(response)
:rtype: ~azure.ai.anomalydetector.models.LastDetectResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LastDetectResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.detect_last_point.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'DetectRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AnomalyDetectorError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('LastDetectResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
detect_last_point.metadata = {'url': '/timeseries/last/detect'} # type: ignore
def detect_change_point(
self,
body, # type: "_models.ChangePointDetectRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ChangePointDetectResponse"
"""Detect change point for the entire series.
Evaluate change point score of every series point.
:param body: Time series points and granularity is needed. Advanced model parameters can also
be set in the request if needed.
:type body: ~azure.ai.anomalydetector.models.ChangePointDetectRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ChangePointDetectResponse, or the result of cls(response)
:rtype: ~azure.ai.anomalydetector.models.ChangePointDetectResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ChangePointDetectResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.detect_change_point.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'ChangePointDetectRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AnomalyDetectorError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ChangePointDetectResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
detect_change_point.metadata = {'url': '/timeseries/changepoint/detect'} # type: ignore
def train_multivariate_model(
self,
model_request, # type: "_models.ModelInfo"
**kwargs # type: Any
):
# type: (...) -> None
"""Train a Multivariate Anomaly Detection Model.
Create and train a multivariate anomaly detection model. The request must include a source
parameter to indicate an externally accessible Azure storage Uri (preferably a Shared Access
        Signature Uri). All time-series used to generate the model must be zipped into one single file.
Each time-series will be in a single CSV file in which the first column is timestamp and the
second column is value.
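        As an illustrative sketch (the extra field names are assumptions), a caller would build a
        ``ModelInfo(source="https://<storage>/series.zip?<sas>", start_time=start, end_time=end)``
        and pass it here; the new model's location is returned in the ``Location`` response header.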
:param model_request: Training request.
:type model_request: ~azure.ai.anomalydetector.models.ModelInfo
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.train_multivariate_model.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(model_request, 'ModelInfo')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
if cls:
return cls(pipeline_response, None, response_headers)
train_multivariate_model.metadata = {'url': '/multivariate/models'} # type: ignore
def get_multivariate_model(
self,
model_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Model"
"""Get Multivariate Model.
        Get detailed information of a multivariate model, including the training status and variables
used in the model.
:param model_id: Model identifier.
:type model_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Model, or the result of cls(response)
:rtype: ~azure.ai.anomalydetector.models.Model
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Model"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_multivariate_model.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Model', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_multivariate_model.metadata = {'url': '/multivariate/models/{modelId}'} # type: ignore
def delete_multivariate_model(
self,
model_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete Multivariate Model.
Delete an existing multivariate model according to the modelId.
:param model_id: Model identifier.
:type model_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_multivariate_model.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete_multivariate_model.metadata = {'url': '/multivariate/models/{modelId}'} # type: ignore
def detect_anomaly(
self,
model_id, # type: str
detection_request, # type: "_models.DetectionRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Detect Multivariate Anomaly.
        Submit a multivariate anomaly detection task with the trained model of modelId; the input schema
        should be the same as the training request. The request will complete asynchronously and
        will return a resultId for querying the detection result. The request should be a source link to
        indicate an externally accessible Azure storage Uri (preferably a Shared Access Signature Uri).
        All time-series used to generate the model must be zipped into one single file. Each
time-series will be as follows: the first column is timestamp and the second column is value.
:param model_id: Model identifier.
:type model_id: str
:param detection_request: Detect anomaly request.
:type detection_request: ~azure.ai.anomalydetector.models.DetectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.detect_anomaly.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(detection_request, 'DetectionRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
if cls:
return cls(pipeline_response, None, response_headers)
detect_anomaly.metadata = {'url': '/multivariate/models/{modelId}/detect'} # type: ignore
def get_detection_result(
self,
result_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DetectionResult"
"""Get Multivariate Anomaly Detection Result.
Get multivariate anomaly detection result based on resultId returned by the DetectAnomalyAsync
api.
:param result_id: Result identifier.
:type result_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DetectionResult, or the result of cls(response)
:rtype: ~azure.ai.anomalydetector.models.DetectionResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DetectionResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_detection_result.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'resultId': self._serialize.url("result_id", result_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DetectionResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_detection_result.metadata = {'url': '/multivariate/results/{resultId}'} # type: ignore
def export_model(
self,
model_id, # type: str
**kwargs # type: Any
):
# type: (...) -> IO
"""Export Multivariate Anomaly Detection Model as Zip file.
Export multivariate anomaly detection model based on modelId.
:param model_id: Model identifier.
:type model_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IO, or the result of cls(response)
:rtype: IO
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[IO]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/zip"
# Construct URL
url = self.export_model.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
response_headers = {}
response_headers['content-type']=self._deserialize('str', response.headers.get('content-type'))
deserialized = response.stream_download(self._client._pipeline)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
export_model.metadata = {'url': '/multivariate/models/{modelId}/export'} # type: ignore
def list_multivariate_model(
self,
skip=0, # type: Optional[int]
top=5, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ModelList"]
"""List Multivariate Models.
List models of a subscription.
:param skip: $skip indicates how many models will be skipped.
:type skip: int
:param top: $top indicates how many models will be fetched.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.ai.anomalydetector.models.ModelList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ModelList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
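        # ItemPaged drives the three helpers below: prepare_request builds the
        # first (or next) page request, extract_data unpacks the models and the
        # next-page link, and get_next performs the HTTP call for each page.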
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_multivariate_model.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ModelList', pipeline_response)
list_of_elem = deserialized.models
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_multivariate_model.metadata = {'url': '/multivariate/models'} # type: ignore
| mit | 7,191,668,309,587,838,000 | 45.190476 | 133 | 0.637712 | false |
sudkannan/xen-hv | tools/xm-test/tests/cpupool/03_cpupool_domain.py | 36 | 2838 | #!/usr/bin/python
import sys
import re
import time
from XmTestLib import *
from pools import *
checkRequirements()
#
# create Pool-1 with 1 CPU and start a VM
#
createStdPool()
name = "TestDomPool-1"
domain = XmTestDomain(extraConfig={'pool' : 'Pool-1'}, name=name)
try:
domain.start(noConsole=True)
except DomainError, ex:
    FAIL(str(ex))
cmd = "xm list --pool=Pool-1"
status, output = traceCommand(cmd)
if status != 0:
FAIL("%s failed, rc %s" % (cmd,status))
if not re.search(name, output):
FAIL("%s; missing '%s' in Pool-1" % (cmd,name))
domain.stop()
waitForDomain(name)
destroyPool("Pool-1", True)
#
# create Pool-1 with 1 CPU, add a second CPU
# start a VM (with vcpu=3) add a third CPU
# remove 2 CPUs from pool
# create Pool-2 with 1 CPU and start a second VM
#
pool_names = ['Pool-1', 'Pool-2']
createStdPool({'name' : pool_names[0], 'cpus' : '"1"'})
name = "TestDomPool-1"
cmd = "xm pool-cpu-add Pool-1 2"
status, output = traceCommand(cmd)
if status != 0:
FAIL("%s failed, rc %s" % (cmd,status))
domain = XmTestDomain(extraConfig={ 'pool' : 'Pool-1'}, name=name)
try:
domain.start(noConsole=True)
except DomainError, ex:
    FAIL(str(ex))
cmd = "xm pool-cpu-add Pool-1 3"
status, output = traceCommand(cmd)
if status != 0:
FAIL("%s failed, rc %s" % (cmd,status))
cmd = "xm pool-cpu-remove Pool-1 2"
status, output = traceCommand(cmd)
if status != 0:
FAIL("%s failed, rc %s" % (cmd,status))
cmd = "xm pool-cpu-remove Pool-1 3"
status, output = traceCommand(cmd)
if status != 0:
FAIL("%s failed, rc %s" % (cmd,status))
createStdPool({'name' : pool_names[1]})
name2 = "TestDomPool-2"
domain2 = XmTestDomain(extraConfig={ 'pool' : 'Pool-2'}, name=name2)
try:
domain2.start(noConsole=True)
except DomainError, ex:
    FAIL(str(ex))
domain2.stop()
domain.stop()
waitForDomain(name)
waitForDomain(name2)
for pool in pool_names:
destroyPool(pool, True)
#
# Create 2 pools with 1 cpu per pool.
# Create three domains in each pool, with 1,2,3 VCPUs
# Switch a third cpu between the pools.
#
pool_names = ['Pool-1', 'Pool-2']
domains = {}
cpu=3
for pool in pool_names:
createStdPool({'name' : pool})
for dom_nr in range(3):
name = "TestDom%s-%s" % (pool, dom_nr)
domains[name] = XmTestDomain(extraConfig={'pool' : pool},
name=name)
try:
domains[name].start(noConsole=True)
except DomainError, ex:
FAIL(str(ex))
cmd_add_1 = "xm pool-cpu-add Pool-1 %s" % cpu
cmd_rem_1 = "xm pool-cpu-remove Pool-1 %s" % cpu
cmd_add_2 = "xm pool-cpu-add Pool-2 %s" % cpu
cmd_rem_2 = "xm pool-cpu-remove Pool-2 %s" % cpu
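# Bounce CPU 3 in and out of both pools repeatedly while the six domains run.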
for i in range(25):
traceCommand(cmd_add_1)
traceCommand(cmd_rem_1)
traceCommand(cmd_add_2)
traceCommand(cmd_rem_2)
destroyAllDomUs()
for pool in pool_names:
destroyPool(pool, True)
| gpl-2.0 | 923,966,465,198,083,600 | 21.52381 | 68 | 0.647992 | false |
zycdragonball/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_test.py | 8 | 32389 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class AffineBijectorTest(test.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
with self.test_session():
mu = -1.
# scale corresponds to 1.
bijector = Affine(shift=mu, event_ndims=0)
self.assertEqual("affine", bijector.name)
def testNoBatchScalarViaIdentity(self):
with self.test_session() as sess:
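      # Each case is exercised twice: once with a constant input whose shape is
      # statically known, and once through a placeholder fed at session run time.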
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = Affine(
shift=mu, scale_identity_multiplier=2., event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose(-np.log(2.),
run(bijector.inverse_log_det_jacobian, x))
def testNoBatchScalarViaDiag(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = Affine(shift=mu, scale_diag=[2.], event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose(-np.log(2.),
run(bijector.inverse_log_det_jacobian, x))
def testWeirdSampleNoBatchScalarViaIdentity(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2.
bijector = Affine(
shift=mu, scale_identity_multiplier=2., event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [[1., 2, 3], [4, 5, 6]] # Weird sample shape.
self.assertAllClose([[1., 3, 5],
[7, 9, 11]],
run(bijector.forward, x))
self.assertAllClose([[1., 1.5, 2.],
[2.5, 3, 3.5]],
run(bijector.inverse, x))
self.assertAllClose(-np.log(2.),
run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaIdentityIn64BitUserProvidesShiftOnly(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = np.float64([1.])
# One batch, scalar.
# Corresponds to scale = 1.
bijector = Affine(shift=mu, event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = np.float64([1.]) # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.], run(bijector.inverse, x))
self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaIdentityIn64BitUserProvidesMultiplierOnly(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
multiplier = np.float64([2.])
# One batch, scalar.
# Corresponds to scale = 2, shift = 0.
bijector = Affine(scale_identity_multiplier=multiplier, event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = np.float64([1.]) # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.5], run(bijector.inverse, x))
self.assertAllClose([np.log(0.5)],
run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaDiag(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1.]
# One batch, scalar.
# Corresponds to scale = 1.
bijector = Affine(shift=mu, scale_diag=[1.], event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1.] # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.], run(bijector.inverse, x))
self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalarIdentityViaIdentity(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = Affine(shift=mu, event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalarIdentityViaDiag(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = Affine(shift=mu, scale_diag=[1.], event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))
def testNoBatchMultivariateIdentity(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[1., 0], [0, 1.]]
bijector = Affine(shift=mu)
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 1]
# matmul(sigma, x) + shift
# = [-1, -1] + [1, -1]
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
# x is a 2-batch of 2-vectors.
# The first vector is [1, 1], the second is [-1, -1].
# Each undergoes matmul(sigma, x) + shift.
x = [[1., 1], [-1., -1]]
self.assertAllClose([[2., 0], [0., -2]], run(bijector.forward, x))
self.assertAllClose([[0., 2], [-2., 0]], run(bijector.inverse, x))
self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))
def testNoBatchMultivariateDiag(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[2., 0], [0, 1.]]
bijector = Affine(shift=mu, scale_diag=[2., 1])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 1]
# matmul(sigma, x) + shift
# = [-1, -1] + [1, -1]
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(-np.log(2.),
run(bijector.inverse_log_det_jacobian, x))
# x is a 2-batch of 2-vectors.
# The first vector is [1, 1], the second is [-1, -1].
# Each undergoes matmul(sigma, x) + shift.
x = [[1., 1],
[-1., -1]]
self.assertAllClose([[3., 0],
[-1., -2]],
run(bijector.forward, x))
self.assertAllClose([[0., 2],
[-1., 0]],
run(bijector.inverse, x))
self.assertAllClose(-np.log(2.),
run(bijector.inverse_log_det_jacobian, x))
def testNoBatchMultivariateFullDynamic(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32, name="x")
mu = array_ops.placeholder(dtypes.float32, name="mu")
scale_diag = array_ops.placeholder(dtypes.float32, name="scale_diag")
event_ndims = array_ops.placeholder(dtypes.int32, name="event_ndims")
x_value = np.array([[1., 1]], dtype=np.float32)
mu_value = np.array([1., -1], dtype=np.float32)
scale_diag_value = np.array([2., 2], dtype=np.float32)
event_ndims_value = np.array(1, dtype=np.int32)
feed_dict = {
x: x_value,
mu: mu_value,
scale_diag: scale_diag_value,
event_ndims: event_ndims_value
}
bijector = Affine(
shift=mu, scale_diag=scale_diag, event_ndims=event_ndims)
self.assertEqual(1, sess.run(bijector.event_ndims, feed_dict))
self.assertAllClose([[3., 1]], sess.run(bijector.forward(x), feed_dict))
self.assertAllClose([[0., 1]], sess.run(bijector.inverse(x), feed_dict))
self.assertAllClose(
-np.log(4),
sess.run(bijector.inverse_log_det_jacobian(x), feed_dict))
def testBatchMultivariateIdentity(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value, dtype=np.float32)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale = 2.
bijector = Affine(shift=mu, scale_identity_multiplier=scale)
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
self.assertAllClose(-np.log(4),
run(bijector.inverse_log_det_jacobian, x))
def testBatchMultivariateDiag(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value, dtype=np.float32)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale_diag = [[2., 2]]
bijector = Affine(shift=mu, scale_diag=scale_diag)
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
self.assertAllClose([-np.log(4)],
run(bijector.inverse_log_det_jacobian, x))
def testBatchMultivariateFullDynamic(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32, name="x")
mu = array_ops.placeholder(dtypes.float32, name="mu")
scale_diag = array_ops.placeholder(dtypes.float32, name="scale_diag")
event_ndims = array_ops.placeholder(dtypes.int32, name="event_ndims")
x_value = np.array([[[1., 1]]], dtype=np.float32)
mu_value = np.array([[1., -1]], dtype=np.float32)
scale_diag_value = np.array([[2., 2]], dtype=np.float32)
event_ndims_value = 1
feed_dict = {
x: x_value,
mu: mu_value,
scale_diag: scale_diag_value,
event_ndims: event_ndims_value
}
bijector = Affine(
shift=mu, scale_diag=scale_diag, event_ndims=event_ndims)
self.assertEqual(1, sess.run(bijector.event_ndims, feed_dict))
self.assertAllClose([[[3., 1]]], sess.run(bijector.forward(x), feed_dict))
self.assertAllClose([[[0., 1]]], sess.run(bijector.inverse(x), feed_dict))
self.assertAllClose([-np.log(4)],
sess.run(
bijector.inverse_log_det_jacobian(x), feed_dict))
def testIdentityWithDiagUpdate(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_diag=[1.],
event_ndims=0)
        self.assertEqual(0, bijector.event_ndims.eval())  # "is scalar"
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose(-np.log(2.),
run(bijector.inverse_log_det_jacobian, x))
def testIdentityWithTriL(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 2]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_tril=[[1., 0], [2., 1]])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[1., 5]], run(bijector.forward, x))
self.assertAllClose([[1., 0.5]], run(bijector.inverse, x))
self.assertAllClose(-np.log(4.),
run(bijector.inverse_log_det_jacobian, x))
def testDiagWithTriL(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 3]]
bijector = Affine(
shift=mu, scale_diag=[1., 2.], scale_tril=[[1., 0], [2., 1]])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[1., 7]], run(bijector.forward, x))
self.assertAllClose([[1., 1 / 3.]], run(bijector.inverse, x))
self.assertAllClose(-np.log(6.),
run(bijector.inverse_log_det_jacobian, x))
def testIdentityAndDiagWithTriL(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[3., 0], [2, 4]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.0,
scale_diag=[1., 2.],
scale_tril=[[1., 0], [2., 1]])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[2., 9]], run(bijector.forward, x))
self.assertAllClose([[2 / 3., 5 / 12.]], run(bijector.inverse, x))
self.assertAllClose(-np.log(12.),
run(bijector.inverse_log_det_jacobian, x))
def testIdentityWithVDVTUpdate(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 2, 0], [0, 0, 3]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=2.,
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(shift=mu, scale_diag=[10., 2, 3])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 3, 8], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 1.5, 4 / 3.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(-np.log(60.),
run(bijector.inverse_log_det_jacobian, x))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x),
run(bijector_ref.inverse_log_det_jacobian, x))
def testDiagWithVDVTUpdate(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 3, 0], [0, 0, 5]]
bijector = Affine(
shift=mu,
scale_diag=[2., 3, 4],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(shift=mu, scale_diag=[10., 3, 5])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 5, 14], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 1., 0.8], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(-np.log(150.),
run(bijector.inverse_log_det_jacobian, x))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x),
run(bijector_ref.inverse_log_det_jacobian, x))
def testTriLWithVDVTUpdate(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [1, 3, 0], [2, 3, 5]]
bijector = Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(
shift=mu, scale_tril=[[10., 0, 0], [1, 3, 0], [2, 3, 5]])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 6, 22], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 14 / 15., 4 / 25.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(-np.log(150.),
run(bijector.inverse_log_det_jacobian, x))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x),
run(bijector_ref.inverse_log_det_jacobian, x))
def testTriLWithVDVTUpdateNoDiagonal(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[6, 0, 0], [1, 3, 0], [2, 3, 5]]
bijector = Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=None,
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(
shift=mu, scale_tril=[[6., 0, 0], [1, 3, 0], [2, 3, 5]])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 2, 3] # Vector.
self.assertAllClose([5., 6, 22], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([1 / 3., 8 / 9., 4 / 30.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(-np.log(90.),
run(bijector.inverse_log_det_jacobian, x))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x),
run(bijector_ref.inverse_log_det_jacobian, x))
def testNoBatchMultivariateRaisesWhenSingular(self):
with self.test_session():
mu = [1., -1]
bijector = Affine(
shift=mu,
# Has zero on the diagonal.
scale_diag=[0., 1],
validate_args=True)
with self.assertRaisesOpError("Condition x > 0"):
bijector.forward([1., 1.]).eval()
def testEventNdimsLargerThanOneRaises(self):
with self.test_session():
mu = [1., -1]
# Scale corresponds to 2x2 identity matrix.
bijector = Affine(shift=mu, event_ndims=2, validate_args=True)
bijector.forward([1., 1.]).eval()
def testScaleZeroScalarRaises(self):
with self.test_session():
mu = -1.
# Check Identity matrix with zero scaling.
bijector = Affine(
shift=mu,
scale_identity_multiplier=0.0,
event_ndims=0,
validate_args=True)
with self.assertRaisesOpError("Condition x > 0"):
bijector.forward(1.).eval()
# Check Diag matrix with zero scaling.
bijector = Affine(
shift=mu, scale_diag=[0.0], event_ndims=0, validate_args=True)
with self.assertRaisesOpError("Condition x > 0"):
bijector.forward(1.).eval()
def testScalarCongruency(self):
with self.test_session():
bijector = Affine(
shift=3.6, scale_identity_multiplier=0.42, event_ndims=0)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def _makeScale(self,
x,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None):
"""Create a scale matrix. Return None if it can not be created."""
c = scale_identity_multiplier
d1 = scale_diag
tril = scale_tril
v = scale_perturb_factor
d2 = scale_perturb_diag
# Ambiguous low rank update.
if v is None and d2 is not None:
return None
if c is None and d1 is None and tril is None:
# Special case when no scale args are passed in. This means use an
# identity matrix.
if v is None and d2 is None:
c = 1.
# No scale.
else:
return None
matrix = np.float32(0.)
if c is not None:
# Infer the dimension from x.
matrix += c * self._matrix_diag(np.ones_like(x))
if d1 is not None:
matrix += self._matrix_diag(np.array(d1, dtype=np.float32))
if tril is not None:
matrix += np.array(tril, dtype=np.float32)
if v is not None:
v = np.array(v, dtype=np.float32)
if v.ndim < 2:
vt = v.T
else:
vt = np.swapaxes(v, axis1=v.ndim - 2, axis2=v.ndim - 1)
if d2 is not None:
d2 = self._matrix_diag(np.array(d2, dtype=np.float32))
right = np.matmul(d2, vt)
else:
right = vt
matrix += np.matmul(v, right)
return matrix
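  # In matrix form, the reference scale assembled by _makeScale is (sketch):
  #   scale = c * I + diag(d1) + tril + matmul(v, matmul(diag(d2), v^T))
  # where any term whose argument is None is simply dropped; the tests compare
  # Affine's behaviour against this explicit construction.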
def _matrix_diag(self, d):
"""Batch version of np.diag."""
orig_shape = d.shape
d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))
diag_list = []
for i in range(d.shape[0]):
diag_list.append(np.diag(d[i, ...]))
return np.reshape(diag_list, orig_shape + (d.shape[-1],))
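  # Shape sketch for _matrix_diag: an input of shape [2, 3] such as
  # [[1., 2, 3], [4, 5, 6]] becomes a [2, 3, 3] batch of diagonal matrices,
  # with output[0] == np.diag([1., 2, 3]).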
def _testLegalInputs(self, shift=None, scale_params=None, x=None):
def _powerset(x):
s = list(x)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
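    # e.g. _powerset(['a', 'b']) yields (), ('a',), ('b',), ('a', 'b'), so every
    # subset of the scale_params entries is exercised below.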
for args in _powerset(scale_params.items()):
with self.test_session():
args = dict(args)
scale_args = dict({"x": x}, **args)
scale = self._makeScale(**scale_args)
bijector_args = dict({"event_ndims": 1}, **args)
# We haven't specified enough information for the scale.
if scale is None:
with self.assertRaisesRegexp(ValueError, ("must be specified.")):
bijector = Affine(shift=shift, **bijector_args)
else:
bijector = Affine(shift=shift, **bijector_args)
np_x = x
# For the case a vector is passed in, we need to make the shape
# match the matrix for matmul to work.
if x.ndim == scale.ndim - 1:
np_x = np.expand_dims(x, axis=-1)
forward = np.matmul(scale, np_x) + shift
if x.ndim == scale.ndim - 1:
forward = np.squeeze(forward, axis=-1)
self.assertAllClose(forward, bijector.forward(x).eval())
backward = np.linalg.solve(scale, np_x - shift)
if x.ndim == scale.ndim - 1:
backward = np.squeeze(backward, axis=-1)
self.assertAllClose(backward, bijector.inverse(x).eval())
ildj = -np.log(np.abs(np.linalg.det(scale)))
# TODO(jvdillon): We need to make it so the scale_identity_multiplier
# case does not deviate in expected shape. Fixing this will get rid of
# these special cases.
if (ildj.ndim > 0 and (len(scale_args) == 1 or (
len(scale_args) == 2 and
scale_args.get("scale_identity_multiplier", None) is not None))):
ildj = np.squeeze(ildj[0])
elif ildj.ndim < scale.ndim - 2:
ildj = np.reshape(ildj, scale.shape[0:-2])
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(x).eval())
def testLegalInputs(self):
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.],
"scale_tril": [[1., 0.],
[-3., 3.]],
"scale_perturb_factor": [[1., 0],
[1.5, 3.]],
"scale_perturb_diag": [3., 1.]
},
x=np.array(
[1., 2], dtype=np.float32))
def testLegalInputsWithBatch(self):
# Shape of scale is [2, 1, 2, 2]
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3.]], [[1., 2]]],
"scale_tril": [[[[1., 0.], [-3., 3.]]], [[[0.5, 0.], [1., 1.]]]],
"scale_perturb_factor": [[[[1., 0], [1.5, 3.]]],
[[[1., 0], [1., 1.]]]],
"scale_perturb_diag": [[[3., 1.]], [[0.5, 1.]]]
},
x=np.array(
[[[1., 2]], [[3., 4]]], dtype=np.float32))
def testNegativeDetTrilPlusVDVT(self):
# scale = [[3.7, 2.7],
# [-0.3, -1.3]]
# inv(scale) = [[0.325, 0.675],
# [-0.075, -0.925]]
# eig(scale) = [3.5324, -1.1324]
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_tril": [[1., 0], [-3, -4]],
"scale_perturb_factor": [[0.1, 0], [0.5, 0.3]],
"scale_perturb_diag": [3., 1]
},
x=np.array(
[1., 2], dtype=np.float32))
def testScalePropertyAssertsCorrectly(self):
with self.test_session():
with self.assertRaises(NotImplementedError):
scale = Affine( # pylint:disable=unused-variable
scale_tril=[[1., 0], [2, 1]],
scale_perturb_factor=[2., 1.]).scale
if __name__ == "__main__":
test.main()
| apache-2.0 | 7,728,507,977,068,099,000 | 37.330178 | 91 | 0.563586 | false |
yosshy/nova | nova/api/openstack/compute/keypairs.py | 13 | 11606 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keypair management extension."""
import webob
import webob.exc
from nova.api.openstack.compute.schemas import keypairs
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.i18n import _
from nova.objects import keypair as keypair_obj
ALIAS = 'os-keypairs'
authorize = extensions.os_compute_authorizer(ALIAS)
soft_authorize = extensions.os_compute_soft_authorizer(ALIAS)
class KeypairController(wsgi.Controller):
"""Keypair API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.KeypairAPI()
def _filter_keypair(self, keypair, **attrs):
# TODO(claudiub): After v2 and v2.1 is no longer supported,
# keypair.type can be added to the clean dict below
clean = {
'name': keypair.name,
'public_key': keypair.public_key,
'fingerprint': keypair.fingerprint,
}
for attr in attrs:
clean[attr] = keypair[attr]
return clean
@wsgi.Controller.api_version("2.10")
@wsgi.response(201)
@extensions.expected_errors((400, 403, 409))
@validation.schema(keypairs.create_v210)
def create(self, req, body):
"""Create or import keypair.
A policy check restricts users from creating keys for other users
params: keypair object with:
name (required) - string
public_key (optional) - string
type (optional) - string
user_id (optional) - string
"""
# handle optional user-id for admin only
user_id = body['keypair'].get('user_id')
return self._create(req, body, type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@wsgi.response(201)
@extensions.expected_errors((400, 403, 409))
@validation.schema(keypairs.create_v22)
def create(self, req, body):
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
Keypair will have the type ssh or x509, specified by type.
You can send a public_key to add an existing ssh/x509 key.
params: keypair object with:
name (required) - string
public_key (optional) - string
type (optional) - string
"""
return self._create(req, body, type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@extensions.expected_errors((400, 403, 409))
@validation.schema(keypairs.create)
def create(self, req, body):
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
You can send a public_key to add an existing ssh key.
params: keypair object with:
name (required) - string
public_key (optional) - string
"""
return self._create(req, body)
def _create(self, req, body, user_id=None, **keypair_filters):
context = req.environ['nova.context']
params = body['keypair']
name = params['name']
key_type = params.get('type', keypair_obj.KEYPAIR_TYPE_SSH)
user_id = user_id or context.user_id
authorize(context, action='create',
target={'user_id': user_id,
'project_id': context.project_id})
try:
if 'public_key' in params:
keypair = self.api.import_key_pair(context,
user_id, name,
params['public_key'], key_type)
keypair = self._filter_keypair(keypair, user_id=True,
**keypair_filters)
else:
keypair, private_key = self.api.create_key_pair(
context, user_id, name, key_type)
keypair = self._filter_keypair(keypair, user_id=True,
**keypair_filters)
keypair['private_key'] = private_key
return {'keypair': keypair}
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
@wsgi.Controller.api_version("2.1", "2.1")
@wsgi.response(202)
@extensions.expected_errors(404)
def delete(self, req, id):
self._delete(req, id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, id):
self._delete(req, id)
@wsgi.Controller.api_version("2.10") # noqa
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, id):
# handle optional user-id for admin only
user_id = self._get_user_id(req)
self._delete(req, id, user_id=user_id)
def _delete(self, req, id, user_id=None):
"""Delete a keypair with a given name."""
context = req.environ['nova.context']
# handle optional user-id for admin only
user_id = user_id or context.user_id
authorize(context, action='delete',
target={'user_id': user_id,
'project_id': context.project_id})
try:
self.api.delete_key_pair(context, user_id, id)
except exception.KeypairNotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
def _get_user_id(self, req):
if 'user_id' in req.GET.keys():
user_id = req.GET.getall('user_id')[0]
return user_id
@wsgi.Controller.api_version("2.10")
@extensions.expected_errors(404)
def show(self, req, id):
# handle optional user-id for admin only
user_id = self._get_user_id(req)
return self._show(req, id, type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@extensions.expected_errors(404)
def show(self, req, id):
return self._show(req, id, type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@extensions.expected_errors(404)
def show(self, req, id):
return self._show(req, id)
def _show(self, req, id, user_id=None, **keypair_filters):
"""Return data for the given key name."""
context = req.environ['nova.context']
user_id = user_id or context.user_id
authorize(context, action='show',
target={'user_id': user_id,
'project_id': context.project_id})
try:
# The return object needs to be a dict in order to pop the 'type'
# field, if the api_version < 2.2.
keypair = self.api.get_key_pair(context, user_id, id)
keypair = self._filter_keypair(keypair, created_at=True,
deleted=True, deleted_at=True,
id=True, user_id=True,
updated_at=True, **keypair_filters)
except exception.KeypairNotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
# TODO(oomichi): It is necessary to filter a response of keypair with
# _filter_keypair() when v2.1+microversions for implementing consistent
# behaviors in this keypair resource.
return {'keypair': keypair}
@wsgi.Controller.api_version("2.10")
@extensions.expected_errors(())
def index(self, req):
# handle optional user-id for admin only
user_id = self._get_user_id(req)
return self._index(req, type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@extensions.expected_errors(())
def index(self, req):
return self._index(req, type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@extensions.expected_errors(())
def index(self, req):
return self._index(req)
def _index(self, req, user_id=None, **keypair_filters):
"""List of keypairs for a user."""
context = req.environ['nova.context']
user_id = user_id or context.user_id
authorize(context, action='index',
target={'user_id': user_id,
'project_id': context.project_id})
key_pairs = self.api.get_key_pairs(context, user_id)
rval = []
for key_pair in key_pairs:
rval.append({'keypair': self._filter_keypair(key_pair,
**keypair_filters)})
return {'keypairs': rval}
class Controller(wsgi.Controller):
def _add_key_name(self, req, servers):
for server in servers:
db_server = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show'/'detail' methods.
server['key_name'] = db_server['key_name']
def _show(self, req, resp_obj):
if 'server' in resp_obj.obj:
server = resp_obj.obj['server']
self._add_key_name(req, [server])
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if 'servers' in resp_obj.obj and soft_authorize(context):
servers = resp_obj.obj['servers']
self._add_key_name(req, servers)
class Keypairs(extensions.V21APIExtensionBase):
"""Keypair Support."""
name = "Keypairs"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(ALIAS,
KeypairController())]
return resources
def get_controller_extensions(self):
controller = Controller()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
create_kwargs['key_name'] = server_dict.get('key_name')
def get_server_create_schema(self):
return keypairs.server_create
| apache-2.0 | 6,601,792,388,540,520,000 | 36.43871 | 79 | 0.594434 | false |
SirAnthony/marvin-xmpp | modulemanager.py | 1 | 8878 |
import sys
import os
import imp
from traceback import format_exception
class Module:
def __init__(self, name, module, obj, functions):
self.name = name
self.module = module
self.object = obj
self.functions = functions
self.aliases = obj.aliases if hasattr(obj, 'aliases') else None
def __str__(self):
' :3 '
return "{'name': %s, 'module': %s, 'object': %s, 'functions': %s}" % \
(self.name, self.module, self.object, self.functions)
class Dependences(dict):
"If you use modules with same names, you need to use full names through getitem."
def __setattr__(self, item, value):
if self.__dict__.has_key(item):
dict.__setattr__(self, item, value)
if item.find('.') >= 0:
setattr(self, item.rsplit('.', 1)[-1], value)
else:
self.__setitem__(item, value)
if item.find('.') >= 0:
self.__setitem__(item.rsplit('.', 1)[-1], value)
def __getattr__(self, item):
try:
return self.__getitem__(item)
except KeyError:
return None
class Manager:
def __init__(self):
self.modules = {}
self.functions = {}
self.aliases = {}
self.__loading = []
self.directory = 'plugins'
self.load_dir()
def load_dir(self, directory=None):
directory = directory or self.directory
print "Loading dir " + directory
for fname in os.listdir(directory):
if fname.endswith('.py'):
plugin_name = fname.rsplit('.', 1)[0]
if plugin_name != '__init__':
modulename = '.'.join((directory, plugin_name))
self.load_module(modulename)
self.update_functions()
def load_module(self, modulename):
if not modulename:
return
try:
self.__load(modulename)
except:
print "Could not load %s:" % modulename
print ''.join(format_exception(*sys.exc_info()))
return
finally:
self.__loading.remove(modulename)
return True
def __load(self, modulename):
self.__loading.append(modulename)
if self.__loading.count(modulename) > 1:
return
print "Loading " + modulename
module = None
try:
module = self._reload_hook(self.modules[modulename].module)
except KeyError:
module = self._import_hook(modulename, fromlist='*')
finally:
if self.modules.has_key(modulename):
del self.modules[modulename]
if module:
obj = self.__get_objects(module)
if obj:
depends = None
#TODO: Dependencies needs full path
if hasattr(obj, 'depends'):
depends = Dependences()
for depend in obj.depends:
if depend not in self.modules:
self.load_module(depend)
mdep = self.modules.get(depend)
if mdep:
mdep = mdep.object
setattr(depends, depend, mdep)
setattr(obj, 'depends', depends)
obj = obj()
for m in self.modules.values(): #update module in deps
if hasattr(m.object, 'depends') and m.object.depends.has_key(modulename):
setattr(m.object.depends, modulename, obj)
functions = self.__get_functions(obj)
self.modules[modulename] = Module(modulename, module, obj, functions)
def get(self, name):
if name in self.modules:
return self.modules[name]
def __get_objects(self, module):
#FIXME: Author too lazy
#TODO: many modules in one file
objs = None
for membername in dir(module):
member = getattr(module, membername)
if type(member).__name__ == 'classobj' and hasattr(member, '_marvinModule'):
#setattr(member, 'manager', self)
objs = member
return objs
def __get_functions(self, obj):
        ''' Check for the presence of public functions on the object
            and return a dict of the ones that are available.
'''
real = {}
public = []
try:
public = getattr(obj, 'public')
except AttributeError:
pass
for function in public:
try:
func = getattr(obj, function)
except Exception, e:
print 'Bad function %s: %s' % (function, e)
continue
real[function] = func
return real
def update_functions(self):
self.functions = {}
self.aliases = {}
for m in self.modules.values():
for func in m.functions.keys():
if func in self.functions.keys():
print 'Function %s already loaded in module %s. Skipped in %s.' % \
(func, self.functions[func], m.name)
continue
if m.aliases and func in m.aliases.keys():
for alias in m.aliases[func]:
if alias in self.aliases:
print 'Alias %s already loaded for function %s in module %s. Skipped for %s.' % \
(alias, self.aliases[alias], m.name, func)
continue
self.aliases[alias] = func
self.functions[func] = m.name
def _import_hook(self, name, globals=None, locals=None, fromlist=None):
parent = self.__determine_parent(globals)
q, tail = self.__find_head_package(parent, name)
m = self.__load_tail(q, tail)
if not fromlist:
return q
if hasattr(m, "__path__"):
self.__ensure_fromlist(m, fromlist)
return m
def __determine_parent(self, globals):
if not globals or not globals.has_key("__name__"):
return None
pname = globals['__name__']
if globals.has_key("__path__"):
parent = sys.modules[pname]
assert globals is parent.__dict__
return parent
if '.' in pname:
i = pname.rfind('.')
pname = pname[:i]
parent = sys.modules[pname]
assert parent.__name__ == pname
return parent
return None
def __find_head_package(self, parent, name):
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.__import_module(head, qname, parent)
if q: return q, tail
if parent:
qname = head
parent = None
q = self.__import_module(head, qname, parent)
if q: return q, tail
raise ImportError, "No module named " + qname
def __load_tail(self, q, tail):
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.__import_module(head, mname, m)
if not m:
raise ImportError, "No module named " + mname
return m
def __ensure_fromlist(self, m, fromlist, recursive=0):
for sub in fromlist:
if sub == "*":
if not recursive:
try:
all = m.__all__
except AttributeError:
pass
else:
self.__ensure_fromlist(m, all, 1)
continue
if sub != "*" and not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.__import_module(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
def __import_module(self, partname, fqname, parent):
try:
return self._reload_hook(sys.modules[fqname])
except KeyError:
pass
try:
fp, pathname, stuff = imp.find_module(partname, parent and parent.__path__)
except ImportError:
return None
try:
m = imp.load_module(fqname, fp, pathname, stuff)
finally:
if fp: fp.close()
if parent:
setattr(parent, partname, m)
return m
    # Replacement for reload(). This module is written for Python 2
    # (print statements, `except Exception, e`), where reload() is a builtin;
    # imp.reload() is only available on Python 3, so use the builtin here.
    def _reload_hook(self, module):
        return reload(module)
| mit | 2,929,908,910,668,652,500 | 33.544747 | 109 | 0.49189 | false |
trac-ja/trac-ja | doc/utils/checkapidoc.py | 2 | 6270 | # -*- coding: utf-8 -*-
"""Trac API doc checker
Verify that all symbols belonging to modules already documented in the doc/api
Sphinx sources are referenced.
See http://trac.edgewall.org/wiki/TracDev/ApiDocs
"""
import fnmatch
import os
import re
import sys
excluded_docs = ['index.rst']
api_doc = 'doc/api'
def usage(cmd):
print "Usage: %s [FILE...]" % (cmd,)
print
print "FILE is a %s file and can be a glob pattern." % (api_doc,)
print "If no files are given, check all."
exit(0)
def main(argv):
api_files = rst_files = [rst for rst in os.listdir('doc/api')
if fnmatch.fnmatch(rst, '*.rst')
and rst not in excluded_docs]
cmd = argv.pop(0)
def has(*options):
for opt in options:
if opt in argv:
return argv.pop(argv.index(opt))
if has('-h', '--help'):
usage(cmd)
verbose = has('-v', '--verbose')
only_documented = not has('-a', '--all')
if argv:
given_files = []
for arg in argv:
arg = arg.replace('\\', '/').replace(api_doc + '/', '')
arg = arg.replace('.rst', '') + '.rst'
if '*' in arg: # glob pattern
given_files += [rst for rst in api_files
if fnmatch.fnmatch(rst, arg)]
elif arg in api_files:
given_files.append(arg)
api_files = given_files
rst_basenames = sorted(f[:-4] for f in rst_files)
for rst in api_files:
basename = rst.replace('.rst', '')
if verbose or len(api_files) > 1:
print "== Checking %s ... " % (rst,)
check_api_doc(basename, verbose, only_documented,
any(f.startswith(basename) and f != basename
for f in rst_basenames))
def check_api_doc(basename, verbose, only_documented, has_submodules):
module_name = basename.replace('_', '.')
try:
module = __import__(module_name, globals(), {}, ['__all__'])
except ImportError, e:
print "Skipping %s (%s)" % (basename, e)
return
all = getattr(module, '__all__', None)
if not all:
print "Warning: %s doesn't define __all__, using exported symbols." % (
module_name,)
all = get_default_symbols(module, only_documented, has_submodules)
no_apidoc = getattr(module, '__no_apidoc__', None)
if no_apidoc:
if isinstance(no_apidoc, basestring):
no_apidoc = [s.strip() for s in no_apidoc.split()]
all = list(set(all) - set(no_apidoc))
symbols, keywords = get_sphinx_documented_symbols(basename + '.rst')
for symbol in sorted(all):
if symbol in symbols:
if verbose:
print " - OK %14s :: %s" % (
keywords[symbols.index(symbol)], symbol)
else:
value = getattr(module, symbol)
cls = getattr(value, '__class__', None)
keyword = 'data'
if not cls or cls.__name__ == 'type':
keyword = 'class'
elif cls.__name__ in ('function', 'instancemethod'):
keyword = 'function'
elif cls.__name__ == 'module':
keyword = 'module'
print " * .. %14s :: %s" % ('auto' + keyword, symbol)
sphinx_doc_re = re.compile(r'''
^.. \s+ ((?:py:|auto)(?:module|class|function|attribute)|data) # keyword
\s* :: \s* ([\w\.]+) # symbol
''', re.MULTILINE | re.VERBOSE)
def get_sphinx_documented_symbols(rst):
doc = file(os.path.join(api_doc, rst)).read()
symbols, keywords = [], []
for k, s in sphinx_doc_re.findall(doc):
symbols.append(s.split('.')[-1])
keywords.append(k)
return symbols, keywords
def get_default_symbols(module, only_documented, has_submodules):
public = get_public_symbols(module) - get_imported_symbols(module,
has_submodules)
# eliminate modules
all = []
for symbol in public:
try:
__import__(symbol)
except ImportError:
all.append(symbol)
# only keep symbols having a docstring
if only_documented:
documented = []
for symbol in all:
value = getattr(module, symbol)
if value.__doc__ and (not getattr(value, '__class__', None) or
value.__doc__ != value.__class__.__doc__):
documented.append(symbol)
all = documented
return all
def get_public_symbols(m):
return set(symbol for symbol in dir(m) if not symbol.startswith('_'))
import_from_re = re.compile(r'''
^ \s* from \s+ ([\w\.]+) \s+ import \s+ # module
( \* # all symbols
| %s (?: [\s\\]* , [\s\\]* %s)* # list of symbols
| \( \s* %s (?: \s* , \s* %s)* \s* \) # list of symbols in parenthesis
)
''' % ((r'(?:\w+|\w+\s+as\s+\w+)',) * 4), re.MULTILINE | re.VERBOSE)
remove_original_re = re.compile(r'\w+\s+as', re.MULTILINE)
def get_imported_symbols(module, has_submodules):
src_filename = module.__file__.replace('\\', '/').replace('.pyc', '.py')
if src_filename.endswith('/__init__.py') and not has_submodules:
return set()
src = file(src_filename).read()
imported = set()
for mod, symbol_list in import_from_re.findall(src):
symbol_list = symbol_list.strip()
if symbol_list == '*':
try:
imported_module = __import__(mod, globals(), {}, ['__all__'])
symbols = set(getattr(imported_module, '__all__', None) or
get_public_symbols(imported_module))
except ImportError:
print "Warning: 'from %s import *' couldn't be resolved" % (
mod,)
continue
else:
if symbol_list and symbol_list[0] == '(' and symbol_list[-1] == ')':
symbol_list = symbol_list[1:-1]
symbols = set(remove_original_re.sub('', symbol_list)
.replace('\\', '').replace(',', ' ').split())
imported |= symbols
return imported
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause | 5,669,423,724,046,707,000 | 35.666667 | 80 | 0.517544 | false |
rkq/cxxexp | third-party/src/boost_1_56_0/libs/python/test/properties.py | 46 | 1989 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
This is test module for properties.
>>> r = properties.ret_type()
>>> r.i = 22.5
>>> r.i
22.5
>>> c = properties.crash_me()
>>> c.i.i
42.5
>>> X = properties.X
>>> x1 = X(1)
value read only
>>> x1.value_r
1
value read - write
>>> x1.value_rw
1
value direct access
>>> x1.value_direct
1
class instance count read - only
>>> X.instance_count
1
class instance count direct
>>> X.instance_count_direct
1
class instance count injected
>>> X.instance_count_injected
1
class instance count from object
>>> x1.instance_count
1
class instance count from object
>>> x1.instance_count_direct
1
class instance count from object:
>>> x1.instance_count_injected
1
as expected you can't assign new value to read only property
>>> x1.value_r = 2
Traceback (most recent call last):
File "properties.py", line 49, in ?
x1.value_r = 2
AttributeError: can't set attribute
setting value_rw to 2. value_direct:
>>> x1.value_rw = 2
>>> x1.value_rw
2
setting value_direct to 3. value_direct:
>>> x1.value_direct = 3
>>> x1.value_direct
3
>>> assert x1.value_r == 3
>>> x2 = X(2)
after creating second intstance of X instances count is 2
>>> x2.instance_count
2
>>> del x2
>>> assert x1.instance_count == 1
>>> assert properties.X.value_r_ds.__doc__ == "value_r_ds is read-only"
>>> assert properties.X.value_rw_ds.__doc__ == "value_rw_ds is read-write"
"""
#import sys; sys.path.append(r'P:\Actimize4.0\smart_const\py_smart_const___Win32_Debug')
import properties_ext as properties
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| mit | -3,873,994,857,562,851,000 | 17.764151 | 88 | 0.665661 | false |
AndrewPeelMV/Blender2.78c | 2.78/scripts/templates_py/operator_mesh_add.py | 3 | 3261 | import bpy
import bmesh
def add_box(width, height, depth):
"""
    This function takes the box dimensions and returns vertex and face arrays.
    No actual mesh data creation is done here.
"""
verts = [(+1.0, +1.0, -1.0),
(+1.0, -1.0, -1.0),
(-1.0, -1.0, -1.0),
(-1.0, +1.0, -1.0),
(+1.0, +1.0, +1.0),
(+1.0, -1.0, +1.0),
(-1.0, -1.0, +1.0),
(-1.0, +1.0, +1.0),
]
faces = [(0, 1, 2, 3),
(4, 7, 6, 5),
(0, 4, 5, 1),
(1, 5, 6, 2),
(2, 6, 7, 3),
(4, 0, 3, 7),
]
# apply size
for i, v in enumerate(verts):
verts[i] = v[0] * width, v[1] * depth, v[2] * height
return verts, faces
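    # For example, add_box(1.0, 1.0, 1.0) returns the eight corners of a
    # 2x2x2 cube centred on the origin (the unit template above scaled by
    # width/depth/height) together with its six quad faces.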
from bpy.props import (
BoolProperty,
BoolVectorProperty,
FloatProperty,
FloatVectorProperty,
)
class AddBox(bpy.types.Operator):
"""Add a simple box mesh"""
bl_idname = "mesh.primitive_box_add"
bl_label = "Add Box"
bl_options = {'REGISTER', 'UNDO'}
width = FloatProperty(
name="Width",
description="Box Width",
min=0.01, max=100.0,
default=1.0,
)
height = FloatProperty(
name="Height",
description="Box Height",
min=0.01, max=100.0,
default=1.0,
)
depth = FloatProperty(
name="Depth",
description="Box Depth",
min=0.01, max=100.0,
default=1.0,
)
layers = BoolVectorProperty(
name="Layers",
description="Object Layers",
size=20,
options={'HIDDEN', 'SKIP_SAVE'},
)
# generic transform props
view_align = BoolProperty(
name="Align to View",
default=False,
)
location = FloatVectorProperty(
name="Location",
subtype='TRANSLATION',
)
rotation = FloatVectorProperty(
name="Rotation",
subtype='EULER',
)
def execute(self, context):
verts_loc, faces = add_box(self.width,
self.height,
self.depth,
)
mesh = bpy.data.meshes.new("Box")
bm = bmesh.new()
for v_co in verts_loc:
bm.verts.new(v_co)
bm.verts.ensure_lookup_table()
for f_idx in faces:
bm.faces.new([bm.verts[i] for i in f_idx])
bm.to_mesh(mesh)
mesh.update()
# add the mesh as an object into the scene with this utility module
from bpy_extras import object_utils
object_utils.object_data_add(context, mesh, operator=self)
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(AddBox.bl_idname, icon='MESH_CUBE')
def register():
bpy.utils.register_class(AddBox)
bpy.types.INFO_MT_mesh_add.append(menu_func)
def unregister():
bpy.utils.unregister_class(AddBox)
bpy.types.INFO_MT_mesh_add.remove(menu_func)
if __name__ == "__main__":
register()
# test call
bpy.ops.mesh.primitive_box_add()
| gpl-2.0 | -7,848,024,896,450,159,000 | 23.335821 | 75 | 0.481141 | false |
IDEALLab/domain_expansion_jmd_2017 | gpc.py | 1 | 2902 | """
Gaussian processes classification that computes the variance
Author(s): Wei Chen ([email protected])
References:
-----------
http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier
"""
import numpy as np
from scipy.linalg import solve
from scipy.special import erf
from sklearn.utils.validation import check_is_fitted
from sklearn.gaussian_process.gpc import _BinaryGaussianProcessClassifierLaplace
from sklearn.base import ClassifierMixin
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
class GPClassifier(_BinaryGaussianProcessClassifierLaplace, ClassifierMixin):
def predict_proba(self, X, get_var=False):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
if get_var:
return f_star, var_f_star
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
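# Sketch of intended use (illustrative names; fit() is inherited from the
# scikit-learn Laplace-approximation base class):
#   clf = GPClassifier().fit(X_train, y_train)
#   proba = clf.predict_proba(X_test)                        # class probabilities
#   f_mean, f_var = clf.predict_proba(X_test, get_var=True)  # latent mean/variance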
| mit | -6,060,286,231,814,617,000 | 37.693333 | 99 | 0.604755 | false |
geotagx/pybossa | pybossa/sentinel/__init__.py | 1 | 1378 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SciFabric LTD.
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from redis import sentinel, StrictRedis
class Sentinel(object):
def __init__(self, app=None):
self.app = app
self.master = StrictRedis()
self.slave = self.master
if app is not None: # pragma: no cover
self.init_app(app)
def init_app(self, app):
self.connection = sentinel.Sentinel(app.config['REDIS_SENTINEL'],
socket_timeout=0.1)
redis_db = app.config.get('REDIS_DB') or 0
self.master = self.connection.master_for('mymaster', db=redis_db)
self.slave = self.connection.slave_for('mymaster', db=redis_db)
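        # Typical wiring (illustrative values): the app config provides
        #   REDIS_SENTINEL = [('localhost', 26379)] and optionally REDIS_DB,
        # after which Sentinel(app).master handles writes and .slave handles reads.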
| agpl-3.0 | 2,157,146,855,135,474,000 | 37.277778 | 77 | 0.670537 | false |
clinicalml/gumbel-max-scm | pymdptoolbox/src/mdptoolbox/__init__.py | 6 | 3136 | # -*- coding: utf-8 -*-
"""Markov Decision Process (MDP) Toolbox
=====================================
The MDP toolbox provides classes and functions for the resolution of
discrete-time Markov Decision Processes.
Available modules
-----------------
:mod:`~mdptoolbox.example`
Examples of transition and reward matrices that form valid MDPs
:mod:`~mdptoolbox.mdp`
    Markov decision process algorithms
:mod:`~mdptoolbox.util`
Functions for validating and working with an MDP
How to use the documentation
----------------------------
Documentation is available both as docstrings provided with the code and
in html or pdf format from
`The MDP toolbox homepage <http://www.somewhere.com>`_. The docstring
examples assume that the ``mdptoolbox`` package is imported like so::
>>> import mdptoolbox
To use the built-in examples, then the ``example`` module must be imported::
>>> import mdptoolbox.example
Once the ``example`` module has been imported, it is no longer necessary
to issue ``import mdptoolbox``.
Code snippets are indicated by three greater-than signs::
>>> x = 17
>>> x = x + 1
>>> x
18
The documentation can be displayed with
`IPython <http://ipython.scipy.org>`_. For example, to view the docstring of
the ValueIteration class use ``mdp.ValueIteration?<ENTER>``, and to view its
source code use ``mdp.ValueIteration??<ENTER>``.
Acknowledgments
---------------
This module is modified from the MDPtoolbox (c) 2009 INRA available at
http://www.inra.fr/mia/T/MDPtoolbox/.
"""
# Copyright (c) 2011-2013 Steven A. W. Cordwell
# Copyright (c) 2009 INRA
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . import mdp
| mit | -3,355,570,655,434,036,000 | 37.243902 | 79 | 0.735013 | false |
cycomachead/info290 | project/code/analysis/reviews_rf.py | 1 | 3635 | from pandas import *
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import random
STYLE = "American_Brown_Ale"
""" Performs cross validation on data using random forest
Returns the average score.
Percent is the percentage of data to use as validation,
this should be an integer, not a decimal.
Rounds is the number of rounds of cv to run.
"""
def cross_val(data, labels, percent, rounds, rf):
row_count = len(data.index)
scores = []
# Test round times and take average score
for _ in range(rounds):
# randomly select row indices for test/train sets
test_rows = []
for i in range(row_count//percent):
test_rows.append(random.randint(0, row_count-1))
test_rows.sort()
train_rows = [i for i in range(len(data.index))]
train_rows = [i for i in train_rows if i not in test_rows]
train_rows.sort()
# select test/train sets
test_data = data.drop(train_rows)
train_data = data.drop(test_rows)
test_labels = labels.drop(train_rows)
train_labels = labels.drop(test_rows)
# train random forest
fit_cv = rf.fit(train_data, train_labels)
# calculate score
score_cv = rf.score(test_data, test_labels)
scores.append(score_cv)
return sum(scores)/len(scores)
def topX(l, n, c):
tops = {}
for i in range(n):
ind = l.index(max(l))
tops[c[ind]] = l[ind]
l[ind] = 0
return tops
def cross_val_topX(data, labels, percent, rounds, rf, x):
row_count = len(data.index)
scores = []
# Test round times and take average score
for _ in range(rounds):
# randomly select row indices for test/train sets
test_rows = []
for i in range(row_count//percent):
test_rows.append(random.randint(0, row_count-1))
test_rows.sort()
train_rows = [i for i in range(len(data.index))]
train_rows = [i for i in train_rows if i not in test_rows]
train_rows.sort()
# select test/train sets
test_data = data.drop(train_rows)
train_data = data.drop(test_rows)
test_labels = labels.drop(train_rows)
train_labels = labels.drop(test_rows)
# train random forest
fit_cv = rf.fit(train_data, train_labels)
# calculate score
probs = rf.predict_proba(test_data)
classes = rf.classes_
tally = 0
j = 0
for k in test_labels.iteritems():
tops = topX(list(probs[j]), x, classes)
if k[1] in tops.keys():
tally += 1
j += 1
scores.append(float(tally)/float(len(test_labels)))
return sum(scores)/len(scores)
data = read_pickle("./%s.pkl"%(STYLE))
labels = data['beer_id']
del data['beer_id']
data = data.fillna(0)
########################
### Cross Validation ###
########################
"""
criterion = ["gini", "entropy"]
trees = [10,20,50]
samples = [20,50,100,500]
rounds = 10
for c in criterion:
for t in trees:
for s in samples:
print("===== Criterion: %s, Trees: %d, Samples/Leaf: %d ====="%(c, t, s))
rf = RandomForestClassifier(criterion=c, n_estimators=t, min_samples_split=s)
fit = rf.fit(data, labels)
score = rf.score(data, labels)
print("Training Score: %f"%(score))
print("Cross Validation Score: %f"%(cross_val(data, labels, 10, rounds, rf)))
"""
rf = RandomForestClassifier(criterion="gini", n_estimators=50, min_samples_split=50)
score = cross_val_topX(data, labels, 10, 5, rf, 10)
| bsd-2-clause | -3,980,155,234,674,164,000 | 28.795082 | 89 | 0.58762 | false |
jds2001/ocp-checkbox | plainbox/plainbox/impl/commands/crash.py | 2 | 2139 | # This file is part of Checkbox.
#
# Copyright 2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.commands.crash` -- crash sub-command
========================================================
.. warning::
THIS MODULE DOES NOT HAVE STABLE PUBLIC API
"""
import logging
from plainbox.impl.commands import PlainBoxCommand
logger = logging.getLogger("plainbox.commands.crash")
class CrashInvocation:
def __init__(self, ns):
self.ns = ns
def run(self):
if self.ns.action == 'crash':
raise Exception("crashing as requested")
elif self.ns.action == 'hang':
while True:
pass
class CrashCommand(PlainBoxCommand):
"""
Implementation of ``$ plainbox dev crash``
"""
def invoked(self, ns):
return CrashInvocation(ns).run()
def register_parser(self, subparsers):
parser = subparsers.add_parser(
"crash", help="crash the application")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'-c', '--crash',
dest='action',
action='store_const',
const='crash',
help='Raise an exception')
group.add_argument(
'-H', '--hang',
dest='action',
action='store_const',
const='hang',
help='Hang the application with a busy loop')
parser.set_defaults(command=self)
| gpl-3.0 | 8,905,315,146,401,546,000 | 27.905405 | 70 | 0.624123 | false |
googlei18n/nototools | nototools/unicode_data.py | 3 | 57675 | #!/usr/bin/env python
# -*- coding: utf-8 -*-#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bleeding-edge version of Unicode Character Database.
Provides an interface similar to Python's own unicodedata package, but with
the bleeding-edge data. The implementation is not efficient at all, it's
just done this way for the ease of use. The data is coming from bleeding
edge version of the Unicode Standard not yet published, so it is expected to
be unstable and sometimes inconsistent.
"""
__author__ = (
"[email protected] (Roozbeh Pournader) and " "[email protected] (Cibu Johny)"
)
import codecs
import collections
import os
from os import path
import re
from nototools.py23 import unichr, unicode, basestring
try:
import unicodedata2 as unicodedata # Unicode 8 compliant native lib
except ImportError:
import unicodedata # Python's internal library
from nototools import tool_utils # parse_int_ranges
# Update this when we update the base version data we use
UNICODE_VERSION = 12.0
_data_is_loaded = False
_property_value_aliases_data = {}
_character_names_data = {}
_general_category_data = {}
_combining_class_data = {}
_decomposition_data = {}
_bidi_mirroring_characters = set()
_script_data = {}
_script_extensions_data = {}
_block_data = {}
_block_range = {}
_block_names = []
_age_data = {}
_bidi_mirroring_glyph_data = {}
_core_properties_data = {}
_indic_positional_data = {}
_indic_syllabic_data = {}
_defined_characters = set()
_script_code_to_long_name = {}
_folded_script_name_to_code = {}
_lower_to_upper_case = {}
# emoji data
_presentation_default_emoji = None
_presentation_default_text = None
_emoji_modifier_base = None
_emoji = None
_emoji_variants = None
_emoji_variants_proposed = None
# non-emoji variant data
_variant_data = None
_variant_data_cps = None
# proposed emoji
_proposed_emoji_data = None
_proposed_emoji_data_cps = None
# emoji sequences
_emoji_sequence_data = None
_emoji_non_vs_to_canonical = None
_emoji_group_data = None
# nameslist/namealiases
_nameslist_see_also = None
_namealiases_alt_names = None
def load_data():
"""Loads the data files needed for the module.
Could be used by processes that care about controlling when the data is
loaded. Otherwise, data will be loaded the first time it's needed.
"""
global _data_is_loaded
if not _data_is_loaded:
_load_property_value_aliases_txt()
_load_unicode_data_txt()
_load_scripts_txt()
_load_script_extensions_txt()
_load_blocks_txt()
_load_derived_age_txt()
_load_derived_core_properties_txt()
_load_bidi_mirroring_txt()
_load_indic_data()
_load_emoji_data()
_load_emoji_sequence_data()
_load_unicode_emoji_variants()
_load_variant_data()
_load_proposed_emoji_data()
_load_nameslist_data()
_load_namealiases_data()
_data_is_loaded = True
def name(char, *args):
"""Returns the name of a character.
Raises a ValueError exception if the character is undefined, unless an
extra argument is given, in which case it will return that argument.
"""
if isinstance(char, int):
char = unichr(char)
# First try and get the name from unidata, which is faster and supports
# CJK and Hangul automatic names
try:
return unicodedata.name(char)
except ValueError as val_error:
cp = ord(char)
load_data()
if cp in _character_names_data:
return _character_names_data[cp]
elif (cp,) in _emoji_sequence_data:
return _emoji_sequence_data[(cp,)][0]
elif args:
return args[0]
else:
raise Exception('no name for "%0x"' % ord(char))
def _char_to_int(char):
"""Converts a potential character to its scalar value."""
if type(char) in [str, type(u"")]:
return ord(char)
else:
return char
def derived_props():
load_data()
return frozenset(_core_properties_data.keys())
def chars_with_property(propname):
load_data()
return frozenset(_core_properties_data[propname])
def category(char):
"""Returns the general category of a character."""
load_data()
char = _char_to_int(char)
try:
return _general_category_data[char]
except KeyError:
return "Cn" # Unassigned
def combining(char):
"""Returns the canonical combining class of a character."""
load_data()
char = _char_to_int(char)
try:
return _combining_class_data[char]
except KeyError:
return 0
def to_upper(char):
"""Returns the upper case for a lower case character.
This is not full upper casing, but simply reflects the 1-1
mapping in UnicodeData.txt."""
load_data()
cp = _char_to_int(char)
try:
if _general_category_data[cp] == "Ll":
return unichr(_lower_to_upper_case[cp])
except KeyError:
pass
return char
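# Illustrative usage (not part of the original module): only the simple 1-1
# uppercase mapping from UnicodeData.txt is applied, e.g.
#   to_upper('a')  # -> 'A'
#   to_upper('A')  # -> 'A' (unchanged, not a lowercase letter)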
def canonical_decomposition(char):
"""Returns the canonical decomposition of a character as a Unicode string.
"""
load_data()
char = _char_to_int(char)
try:
return _decomposition_data[char]
except KeyError:
return u""
def script(char):
"""Returns the script property of a character as a four-letter code."""
load_data()
char = _char_to_int(char)
try:
return _script_data[char]
except KeyError:
return "Zzzz" # Unknown
def script_extensions(char):
"""Returns the script extensions property of a character.
The return value is a frozenset of four-letter script codes.
"""
load_data()
char = _char_to_int(char)
try:
return _script_extensions_data[char]
except KeyError:
return frozenset([script(char)])
def block(char):
"""Returns the block property of a character."""
load_data()
char = _char_to_int(char)
try:
return _block_data[char]
except KeyError:
return "No_Block"
def block_range(block):
"""Returns a range (first, last) of the named block."""
load_data()
return _block_range[block]
def block_chars(block):
"""Returns a frozenset of the cps in the named block."""
load_data()
first, last = _block_range[block]
return frozenset(range(first, last + 1))
def block_names():
"""Returns the names of the blocks in block order."""
load_data()
return _block_names[:]
def age(char):
"""Returns the age property of a character as a string.
Returns None if the character is unassigned."""
load_data()
char = _char_to_int(char)
try:
return _age_data[char]
except KeyError:
return None
# Uniscribe treats these ignorables (Hangul fillers) as spacing.
UNISCRIBE_USED_IGNORABLES = frozenset([0x115F, 0x1160, 0x3164, 0xFFA0])
def is_default_ignorable(char):
"""Returns true if the character has the Default_Ignorable property."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return char in _core_properties_data["Default_Ignorable_Code_Point"]
def default_ignorables():
load_data()
return frozenset(_core_properties_data["Default_Ignorable_Code_Point"])
def is_defined(char):
"""Returns true if the character is defined in the Unicode Standard."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return char in _defined_characters
def is_private_use(char):
"""Returns true if the characters is a private use character."""
return category(char) == "Co"
def mirrored(char):
"""Returns 1 if the characters is bidi mirroring, 0 otherwise."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return int(char in _bidi_mirroring_characters)
def bidi_mirroring_glyph(char):
"""Returns the bidi mirroring glyph property of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
return _bidi_mirroring_glyph_data[char]
except KeyError:
return None
def mirrored_chars():
return frozenset(_bidi_mirroring_glyph_data.keys())
def indic_positional_category(char):
"""Returns the Indic positional category of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
return _indic_positional_data[char]
except KeyError:
return "NA"
def indic_syllabic_category(char):
"""Returns the Indic syllabic category of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
        return _indic_syllabic_data[char]
except KeyError:
return "Other"
def create_script_to_chars():
"""Returns a mapping from script to defined characters, based on script and
extensions, for all scripts."""
load_data()
result = collections.defaultdict(set)
for cp in _defined_characters:
if cp in _script_data:
result[_script_data[cp]].add(cp)
if cp in _script_extensions_data:
for script in _script_extensions_data[cp]:
result[script].add(cp)
return result
_DEFINED_CHARACTERS_CACHE = {}
def defined_characters(version=None, scr=None):
"""Returns the set of all defined characters in the Unicode Standard."""
load_data()
# handle common error where version is passed as string, the age test
# will always pass
if version is not None:
version = float(version)
try:
return _DEFINED_CHARACTERS_CACHE[(version, scr)]
except KeyError:
pass
characters = _defined_characters
if version is not None:
characters = {
char
for char in characters
if age(char) is not None and float(age(char)) <= version
}
if scr is not None:
characters = {
char
for char in characters
if script(char) == scr or scr in script_extensions(char)
}
characters = frozenset(characters)
_DEFINED_CHARACTERS_CACHE[(version, scr)] = characters
return characters
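# Illustrative usage (not part of the original module): the optional arguments
# restrict the result by Unicode version and by script/script extension, e.g.
#   defined_characters(6.0, 'Latn')  # cps with script or extension Latn, age <= 6.0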
_strip_re = re.compile(r"[-'_ ]+")
def _folded_script_name(script_name):
"""Folds a script name to its bare bones for comparison."""
# string.translate is changed by codecs, the method no longer takes two
# parameters and so script_name.translate(None, "'-_ ") fails to compile
return _strip_re.sub("", script_name).lower()
def script_code(script_name):
"""Returns the four-letter ISO 15924 code of a script from its long name.
"""
load_data()
folded_script_name = _folded_script_name(script_name)
try:
return _HARD_CODED_FOLDED_SCRIPT_NAME_TO_CODE[folded_script_name]
    except KeyError:
return _folded_script_name_to_code.get(folded_script_name, "Zzzz")
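# Illustrative usage (not part of the original module): long script names are
# folded before lookup, so case, spaces and punctuation do not matter, e.g.
#   script_code("Devanagari")    # -> "Deva"
#   script_code("N'Ko")          # -> "Nkoo"
#   script_code("no such name")  # -> "Zzzz"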
# We use some standard script codes that are not assigned to a codepoint
# by unicode, e.g. Zsym. The data based off Scripts.txt doesn't contain
# these so we add them here. There are also a few names with punctuation
# that we special-case
_HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES = {
"Aran": "Nastaliq", # not assigned
"Nkoo": "N'Ko",
"Phag": "Phags-pa",
"Piqd": "Klingon", # not assigned
"Zmth": "Math", # not assigned
"Zsye": "Emoji", # not assigned
"Zsym": "Symbols", # not assigned
}
_HARD_CODED_FOLDED_SCRIPT_NAME_TO_CODE = {
_folded_script_name(name): code
for code, name in _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES.items()
}
def human_readable_script_name(code):
"""Returns a human-readable name for the script code."""
try:
return _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES[code]
except KeyError:
load_data()
return _script_code_to_long_name[code]
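# Illustrative usage (not part of the original module): codes in the hard-coded
# table above take precedence over the UCD long names, e.g.
#   human_readable_script_name("Zsye")  # -> "Emoji"
#   human_readable_script_name("Latn")  # -> "Latin"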
def all_scripts():
"""Return a frozenset of all four-letter script codes."""
load_data()
return frozenset(_script_code_to_long_name.keys())
_DATA_DIR_PATH = path.join(
path.abspath(path.dirname(__file__)), os.pardir, "third_party", "ucd"
)
def open_unicode_data_file(data_file_name):
"""Opens a Unicode data file.
Args:
data_file_name: A string containing the filename of the data file.
Returns:
A file handle to the data file.
"""
filename = path.join(_DATA_DIR_PATH, data_file_name)
return codecs.open(filename, "r", "utf-8")
def _parse_code_ranges(input_data):
"""Reads Unicode code ranges with properties from an input string.
Reads a Unicode data file already imported into a string. The format is
the typical Unicode data file format with either one character or a
range of characters separated by a semicolon with a property value (and
potentially comments after a number sign, that will be ignored).
Example source data file:
http://www.unicode.org/Public/UNIDATA/Scripts.txt
Example data:
0000..001F ; Common # Cc [32] <control-0000>..<control-001F>
0020 ; Common # Zs SPACE
Args:
input_data: An input string, containing the data.
Returns:
A list of tuples corresponding to the input data, with each tuple
containing the beginning of the range, the end of the range, and the
property value for the range. For example:
[(0, 31, 'Common'), (32, 32, 'Common')]
"""
ranges = []
line_regex = re.compile(
r"^"
r"([0-9A-F]{4,6})" # first character code
r"(?:\.\.([0-9A-F]{4,6}))?" # optional second character code
r"\s*;\s*"
r"([^#]+)"
) # the data, up until the potential comment
for line in input_data.split("\n"):
match = line_regex.match(line)
if not match:
continue
first, last, data = match.groups()
if last is None:
last = first
first = int(first, 16)
last = int(last, 16)
data = data.rstrip()
ranges.append((first, last, data))
return ranges
def _parse_semicolon_separated_data(input_data):
"""Reads semicolon-separated Unicode data from an input string.
Reads a Unicode data file already imported into a string. The format is
the Unicode data file format with a list of values separated by
semicolons. The number of the values on different lines may be different
from another.
Example source data file:
http://www.unicode.org/Public/UNIDATA/PropertyValueAliases.txt
Example data:
sc; Cher ; Cherokee
sc; Copt ; Coptic ; Qaac
Args:
input_data: An input string, containing the data.
Returns:
A list of lists corresponding to the input data, with each individual
list containing the values as strings. For example:
[['sc', 'Cher', 'Cherokee'], ['sc', 'Copt', 'Coptic', 'Qaac']]
"""
all_data = []
for line in input_data.split("\n"):
line = line.split("#", 1)[0].strip() # remove the comment
if not line:
continue
fields = line.split(";")
fields = [field.strip() for field in fields]
all_data.append(fields)
return all_data
def _load_unicode_data_txt():
"""Load character data from UnicodeData.txt."""
global _defined_characters
global _bidi_mirroring_characters
if _defined_characters:
return
with open_unicode_data_file("UnicodeData.txt") as unicode_data_txt:
unicode_data = _parse_semicolon_separated_data(unicode_data_txt.read())
for line in unicode_data:
code = int(line[0], 16)
char_name = line[1]
general_category = line[2]
combining_class = int(line[3])
decomposition = line[5]
if decomposition.startswith("<"):
# We only care about canonical decompositions
decomposition = ""
decomposition = decomposition.split()
decomposition = [unichr(int(char, 16)) for char in decomposition]
decomposition = "".join(decomposition)
bidi_mirroring = line[9] == "Y"
if general_category == "Ll":
upcode = line[12]
if upcode:
upper_case = int(upcode, 16)
_lower_to_upper_case[code] = upper_case
if char_name.endswith("First>"):
last_range_opener = code
elif char_name.endswith("Last>"):
# Ignore surrogates
if "Surrogate" not in char_name:
for char in range(last_range_opener, code + 1):
_general_category_data[char] = general_category
_combining_class_data[char] = combining_class
if bidi_mirroring:
_bidi_mirroring_characters.add(char)
_defined_characters.add(char)
else:
_character_names_data[code] = char_name
_general_category_data[code] = general_category
_combining_class_data[code] = combining_class
if bidi_mirroring:
_bidi_mirroring_characters.add(code)
_decomposition_data[code] = decomposition
_defined_characters.add(code)
_defined_characters = frozenset(_defined_characters)
_bidi_mirroring_characters = frozenset(_bidi_mirroring_characters)
def _load_scripts_txt():
"""Load script property from Scripts.txt."""
with open_unicode_data_file("Scripts.txt") as scripts_txt:
script_ranges = _parse_code_ranges(scripts_txt.read())
for first, last, script_name in script_ranges:
folded_script_name = _folded_script_name(script_name)
script = _folded_script_name_to_code[folded_script_name]
for char_code in range(first, last + 1):
_script_data[char_code] = script
def _load_script_extensions_txt():
"""Load script property from ScriptExtensions.txt."""
with open_unicode_data_file("ScriptExtensions.txt") as se_txt:
script_extensions_ranges = _parse_code_ranges(se_txt.read())
for first, last, script_names in script_extensions_ranges:
script_set = frozenset(script_names.split(" "))
for character_code in range(first, last + 1):
_script_extensions_data[character_code] = script_set
def _load_blocks_txt():
"""Load block name from Blocks.txt."""
with open_unicode_data_file("Blocks.txt") as blocks_txt:
block_ranges = _parse_code_ranges(blocks_txt.read())
for first, last, block_name in block_ranges:
_block_names.append(block_name)
_block_range[block_name] = (first, last)
for character_code in range(first, last + 1):
_block_data[character_code] = block_name
def _load_derived_age_txt():
"""Load age property from DerivedAge.txt."""
with open_unicode_data_file("DerivedAge.txt") as derived_age_txt:
age_ranges = _parse_code_ranges(derived_age_txt.read())
for first, last, char_age in age_ranges:
for char_code in range(first, last + 1):
_age_data[char_code] = char_age
def _load_derived_core_properties_txt():
"""Load derived core properties from Blocks.txt."""
with open_unicode_data_file("DerivedCoreProperties.txt") as dcp_txt:
dcp_ranges = _parse_code_ranges(dcp_txt.read())
for first, last, property_name in dcp_ranges:
for character_code in range(first, last + 1):
try:
_core_properties_data[property_name].add(character_code)
except KeyError:
_core_properties_data[property_name] = {character_code}
def _load_property_value_aliases_txt():
"""Load property value aliases from PropertyValueAliases.txt."""
with open_unicode_data_file("PropertyValueAliases.txt") as pva_txt:
aliases = _parse_semicolon_separated_data(pva_txt.read())
for data_item in aliases:
if data_item[0] == "sc": # Script
code = data_item[1]
long_name = data_item[2]
_script_code_to_long_name[code] = long_name.replace("_", " ")
folded_name = _folded_script_name(long_name)
_folded_script_name_to_code[folded_name] = code
def _load_bidi_mirroring_txt():
"""Load bidi mirroring glyphs from BidiMirroring.txt."""
with open_unicode_data_file("BidiMirroring.txt") as bidi_mirroring_txt:
bmg_pairs = _parse_semicolon_separated_data(bidi_mirroring_txt.read())
for char, bmg in bmg_pairs:
char = int(char, 16)
bmg = int(bmg, 16)
_bidi_mirroring_glyph_data[char] = bmg
def _load_indic_data():
"""Load Indic properties from Indic(Positional|Syllabic)Category.txt."""
with open_unicode_data_file("IndicPositionalCategory.txt") as inpc_txt:
positional_ranges = _parse_code_ranges(inpc_txt.read())
for first, last, char_position in positional_ranges:
for char_code in range(first, last + 1):
_indic_positional_data[char_code] = char_position
with open_unicode_data_file("IndicSyllabicCategory.txt") as insc_txt:
syllabic_ranges = _parse_code_ranges(insc_txt.read())
for first, last, char_syllabic_category in syllabic_ranges:
for char_code in range(first, last + 1):
_indic_syllabic_data[char_code] = char_syllabic_category
def _load_emoji_data():
"""Parse the new draft format of emoji-data.txt"""
global _presentation_default_emoji, _presentation_default_text
global _emoji, _emoji_modifier_base
if _presentation_default_emoji:
return
emoji_sets = {
"Emoji": set(),
"Emoji_Presentation": set(),
"Emoji_Modifier": set(),
"Emoji_Modifier_Base": set(),
"Extended_Pictographic": set(),
"Emoji_Component": set(),
}
set_names = "|".join(sorted(emoji_sets.keys()))
line_re = re.compile(
r"([0-9A-F]{4,6})(?:\.\.([0-9A-F]{4,6}))?\s*;\s*" r"(%s)\s*#.*$" % set_names
)
with open_unicode_data_file("emoji-data.txt") as f:
for line in f:
line = line.strip()
if not line or line[0] == "#":
continue
m = line_re.match(line)
if not m:
raise ValueError('Did not match "%s"' % line)
start = int(m.group(1), 16)
end = start if not m.group(2) else int(m.group(2), 16)
emoji_set = emoji_sets.get(m.group(3))
emoji_set.update(range(start, end + 1))
# allow our legacy use of handshake and wrestlers with skin tone modifiers
emoji_sets["Emoji_Modifier_Base"] |= {0x1F91D, 0x1F93C}
_presentation_default_emoji = frozenset(emoji_sets["Emoji_Presentation"])
_presentation_default_text = frozenset(
emoji_sets["Emoji"] - emoji_sets["Emoji_Presentation"]
)
_emoji_modifier_base = frozenset(emoji_sets["Emoji_Modifier_Base"])
_emoji = frozenset(emoji_sets["Emoji"])
# we have no real use for the 'Emoji_Regional_Indicator' and
# 'Emoji_Component' sets, and they're not documented, so ignore them.
# The regional indicator set is just the 26 regional indicator
# symbols, and the component set is number sign, asterisk, ASCII digits,
# the regional indicators, and the skin tone modifiers.
PROPOSED_EMOJI_AGE = 1000.0
ZWJ = 0x200D
EMOJI_VS = 0xFE0F
EMOJI_SEQUENCE_TYPES = frozenset(
[
"Basic_Emoji",
"Emoji_Keycap_Sequence",
"Emoji_Combining_Sequence",
"Emoji_Flag_Sequence",
"RGI_Emoji_Flag_Sequence",
"RGI_Emoji_Tag_Sequence",
"Emoji_Modifier_Sequence",
"RGI_Emoji_Modifier_Sequence",
"RGI_Emoji_ZWJ_Sequence",
"Emoji_ZWJ_Sequence",
"Emoji_Single_Sequence",
]
)
# Unicode 12 decided to be 'helpful' and included single emoji in the sequence
# data, but unlike all the other data represents these in batches as XXXX..XXXX
# rather than one per line. We can't get name data for these so we can't
# use that data, but still have to parse the line.
def _read_emoji_data(lines):
"""Parse lines of emoji data and return a map from sequence to tuples of
name, age, type."""
line_re = re.compile(
r"(?:([0-9A-F ]+)|([0-9A-F]+\.\.[0-9A-F]+)\s*);\s*(%s)\s*;\s*([^#]*)\s*#\s*E?(\d+\.\d+).*"
% "|".join(EMOJI_SEQUENCE_TYPES)
)
result = {}
for line in lines:
line = line.strip()
if not line or line[0] == "#":
continue
m = line_re.match(line)
if not m:
raise ValueError('"%s" Did not match "%s"' % (line_re.pattern, line))
# group 1 is a sequence, group 2 is a range of single character sequences.
# we can't process the range because we don't have a name for each character
# in the range, so skip it and get these emoji and their names from
# UnicodeData
if m.group(2):
continue
seq_type = m.group(3).strip().encode("ascii")
seq = tuple(int(s, 16) for s in m.group(1).split())
name = m.group(4).strip()
age = float(m.group(5))
result[seq] = (name, age, seq_type)
return result
def _read_emoji_data_file(filename):
with open_unicode_data_file(filename) as f:
return _read_emoji_data(f.readlines())
_EMOJI_QUAL_TYPES = [
"component",
"fully-qualified",
"minimally-qualified",
"unqualified",
]
def _read_emoji_test_data(data_string):
"""Parse the emoji-test.txt data. This has names of proposed emoji that are
not yet in the full Unicode data file. Returns a list of tuples of
sequence, group, subgroup, name.
The data is a string."""
line_re = re.compile(
r"([0-9a-fA-F ]+)\s*;\s*(%s)\s*#\s*(?:[^\s]+)\s+(.*)\s*"
% "|".join(_EMOJI_QUAL_TYPES)
)
result = []
GROUP_PREFIX = "# group: "
SUBGROUP_PREFIX = "# subgroup: "
group = None
subgroup = None
for line in data_string.splitlines():
line = line.strip()
if not line:
continue
if line[0] == "#":
if line.startswith(GROUP_PREFIX):
group = line[len(GROUP_PREFIX) :].strip().encode("ascii")
subgroup = None
elif line.startswith(SUBGROUP_PREFIX):
subgroup = line[len(SUBGROUP_PREFIX) :].strip().encode("ascii")
continue
m = line_re.match(line)
if not m:
raise ValueError('Did not match "%s" in emoji-test.txt' % line)
if m.group(2) not in ["component", "fully-qualified"]:
# We only want component and fully-qualified sequences, as those are
# 'canonical'. 'minimally-qualified' apparently just leave off the
# FEOF emoji presentation tag, we already assume these.
# Information for the unqualified sequences should be
# redundant. At the moment we don't verify this so if the file
# changes we won't catch that.
continue
seq = tuple(int(s, 16) for s in m.group(1).split())
name = m.group(3).strip()
if not (group and subgroup):
raise Exception(
"sequence %s missing group or subgroup" % seq_to_string(seq)
)
result.append((seq, group, subgroup, name))
return result
_SUPPLEMENTAL_EMOJI_GROUP_DATA = """
# group: Misc
# subgroup: used with keycaps
0023 fe0f ; fully-qualified # ? number sign
002a fe0f ; fully-qualified # ? asterisk
0030 fe0f ; fully-qualified # ? digit zero
0031 fe0f ; fully-qualified # ? digit one
0032 fe0f ; fully-qualified # ? digit two
0033 fe0f ; fully-qualified # ? digit three
0034 fe0f ; fully-qualified # ? digit four
0035 fe0f ; fully-qualified # ? digit five
0036 fe0f ; fully-qualified # ? digit six
0037 fe0f ; fully-qualified # ? digit seven
0038 fe0f ; fully-qualified # ? digit eight
0039 fe0f ; fully-qualified # ? digit nine
20e3 ; fully-qualified # ? combining enclosing keycap
# As of Unicode 11 these have group data defined.
# subgroup: skin-tone modifiers
#1f3fb ; fully-qualified # ? emoji modifier fitzpatrick type-1-2
#1f3fc ; fully-qualified # ? emoji modifier fitzpatrick type-3
#1f3fd ; fully-qualified # ? emoji modifier fitzpatrick type-4
#1f3fe ; fully-qualified # ? emoji modifier fitzpatrick type-5
#1f3ff ; fully-qualified # ? emoji modifier fitzpatrick type-6
# subgroup: regional indicator symbols
1f1e6 ; fully-qualified # ? regional indicator symbol letter A
1f1e7 ; fully-qualified # ? regional indicator symbol letter B
1f1e8 ; fully-qualified # ? regional indicator symbol letter C
1f1e9 ; fully-qualified # ? regional indicator symbol letter D
1f1ea ; fully-qualified # ? regional indicator symbol letter E
1f1eb ; fully-qualified # ? regional indicator symbol letter F
1f1ec ; fully-qualified # ? regional indicator symbol letter G
1f1ed ; fully-qualified # ? regional indicator symbol letter H
1f1ee ; fully-qualified # ? regional indicator symbol letter I
1f1ef ; fully-qualified # ? regional indicator symbol letter J
1f1f0 ; fully-qualified # ? regional indicator symbol letter K
1f1f1 ; fully-qualified # ? regional indicator symbol letter L
1f1f2 ; fully-qualified # ? regional indicator symbol letter M
1f1f3 ; fully-qualified # ? regional indicator symbol letter N
1f1f4 ; fully-qualified # ? regional indicator symbol letter O
1f1f5 ; fully-qualified # ? regional indicator symbol letter P
1f1f6 ; fully-qualified # ? regional indicator symbol letter Q
1f1f7 ; fully-qualified # ? regional indicator symbol letter R
1f1f8 ; fully-qualified # ? regional indicator symbol letter S
1f1f9 ; fully-qualified # ? regional indicator symbol letter T
1f1fa ; fully-qualified # ? regional indicator symbol letter U
1f1fb ; fully-qualified # ? regional indicator symbol letter V
1f1fc ; fully-qualified # ? regional indicator symbol letter W
1f1fd ; fully-qualified # ? regional indicator symbol letter X
1f1fe ; fully-qualified # ? regional indicator symbol letter Y
1f1ff ; fully-qualified # ? regional indicator symbol letter Z
#subgroup: unknown flag
fe82b ; fully-qualified # ? unknown flag PUA codepoint
"""
# These are skin tone sequences that Unicode decided not to define. Android
# shipped with them, so we're stuck with them forever regardless of what
# Unicode says.
#
# This data is in the format of emoji-sequences.txt and emoji-zwj-sequences.txt
_LEGACY_ANDROID_SEQUENCES = """
1F91D 1F3FB ; Emoji_Modifier_Sequence; handshake: light skin tone # 9.0
1F91D 1F3FC ; Emoji_Modifier_Sequence; handshake: medium-light skin tone # 9.0
1F91D 1F3FD ; Emoji_Modifier_Sequence; handshake: medium skin tone # 9.0
1F91D 1F3FE ; Emoji_Modifier_Sequence; handshake: medium-dark skin tone # 9.0
1F91D 1F3FF ; Emoji_Modifier_Sequence; handshake: dark skin tone # 9.0
1F93C 1F3FB ; Emoji_Modifier_Sequence ; people wrestling: light skin tone # 9.0
1F93C 1F3FC ; Emoji_Modifier_Sequence ; people wrestling: medium-light skin tone # 9.0
1F93C 1F3FD ; Emoji_Modifier_Sequence ; people wrestling: medium skin tone # 9.0
1F93C 1F3FE ; Emoji_Modifier_Sequence ; people wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF ; Emoji_Modifier_Sequence ; people wrestling: dark skin tone # 9.0
1F93C 1F3FB 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: light skin tone # 9.0
1F93C 1F3FC 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium-light skin tone # 9.0
1F93C 1F3FD 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium skin tone # 9.0
1F93C 1F3FE 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: dark skin tone # 9.0
1F93C 1F3FB 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: light skin tone # 9.0
1F93C 1F3FC 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium-light skin tone # 9.0
1F93C 1F3FD 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium skin tone # 9.0
1F93C 1F3FE 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: dark skin tone # 9.0
"""
# Defines how to insert the new sequences into the standard order data. Would
# have been nice to merge it into the above legacy data but that would have
# required a format change.
_LEGACY_ANDROID_ORDER = """
-1F91D # handshake
1F91D 1F3FB
1F91D 1F3FC
1F91D 1F3FD
1F91D 1F3FE
1F91D 1F3FF
-1F93C # people wrestling
1F93C 1F3FB
1F93C 1F3FC
1F93C 1F3FD
1F93C 1F3FE
1F93C 1F3FF
-1F93C 200D 2642 FE0F # men wrestling
1F93C 1F3FB 200D 2642 FE0F
1F93C 1F3FC 200D 2642 FE0F
1F93C 1F3FD 200D 2642 FE0F
1F93C 1F3FE 200D 2642 FE0F
1F93C 1F3FF 200D 2642 FE0F
-1F93C 200D 2640 FE0F # women wrestling
1F93C 1F3FB 200D 2640 FE0F
1F93C 1F3FC 200D 2640 FE0F
1F93C 1F3FD 200D 2640 FE0F
1F93C 1F3FE 200D 2640 FE0F
1F93C 1F3FF 200D 2640 FE0F
"""
def _get_order_patch(order_text, seq_to_name):
"""Create a mapping from a key sequence to a list of sequence, name tuples.
This will be used to insert additional sequences after the key sequence
in the order data. seq_to_name is a mapping from new sequence to name,
so the names don't have to be duplicated in the order data."""
patch_map = {}
patch_key = None
patch_list = None
def get_sequence(seqtext):
return tuple([int(s, 16) for s in seqtext.split()])
for line in order_text.splitlines():
ix = line.find("#")
if ix >= 0:
line = line[:ix]
line = line.strip()
if not line:
continue
if line.startswith("-"):
if patch_list and patch_key:
patch_map[patch_key] = patch_list
patch_key = get_sequence(line[1:])
patch_list = []
else:
seq = get_sequence(line)
name = seq_to_name[seq] # exception if seq is not in sequence_text
patch_list.append((seq, name))
if patch_list and patch_key:
patch_map[patch_key] = patch_list
return patch_map
def _get_android_order_patch():
"""Get an order patch using the legacy android data."""
# maps from sequence to (name, age, type), we only need the name
seq_data = _read_emoji_data(_LEGACY_ANDROID_SEQUENCES.splitlines())
seq_to_name = {k: v[0] for k, v in seq_data.items()}
return _get_order_patch(_LEGACY_ANDROID_ORDER, seq_to_name)
def _apply_order_patch(patch, group_list):
"""patch is a map from a key sequence to list of sequence, name pairs, and
group_list is an ordered list of sequence, group, subgroup, name tuples.
Iterate through the group list appending each item to a new list, and
after appending an item matching a key sequence, also append all of its
associated sequences in order using the same group and subgroup.
Return the new list. If there are any unused patches, raise an exception."""
result = []
patched = set()
for t in group_list:
result.append(t)
if t[0] in patch:
patched.add(t[0])
_, group, subgroup, _ = t
for seq, name in patch[t[0]]:
result.append((seq, group, subgroup, name))
unused = set(patch.keys()) - patched
if unused:
raise Exception(
"%d unused patch%s\n %s: "
% (
len(unused),
"" if len(unused) == 1 else "es",
"\n ".join(seq_to_string(seq) for seq in sorted(unused)),
)
)
return result
def _load_emoji_group_data():
global _emoji_group_data
if _emoji_group_data:
return
_emoji_group_data = {}
with open_unicode_data_file("emoji-test.txt") as f:
text = f.read()
group_list = _read_emoji_test_data(text)
# patch with android items
patch = _get_android_order_patch()
group_list = _apply_order_patch(patch, group_list)
group_list.extend(_read_emoji_test_data(_SUPPLEMENTAL_EMOJI_GROUP_DATA))
for i, (seq, group, subgroup, name) in enumerate(group_list):
if seq in _emoji_group_data:
print(
"seq %s already in group data as %s"
% (seq_to_string(seq), _emoji_group_data[seq])
)
print(" new value would be %s" % str((i, group, subgroup, name)))
_emoji_group_data[seq] = (i, group, subgroup, name)
assert len(group_list) == len(_emoji_group_data)
def get_emoji_group_data(seq):
"""Return group data for the canonical sequence seq, or None.
Group data is a tuple of index, group, subgroup, and name. The
index is a unique global sort index for the sequence among all
sequences in the group data."""
_load_emoji_group_data()
return _emoji_group_data.get(seq, None)
def get_emoji_groups():
"""Return the main emoji groups, in order."""
_load_emoji_group_data()
groups = []
group = None
for _, g, _, _ in sorted(_emoji_group_data.values()):
if g != group:
group = g
groups.append(group)
return groups
def get_emoji_subgroups(group):
"""Return the subgroups of this group, in order, or None
if the group is not recognized."""
_load_emoji_group_data()
subgroups = []
subgroup = None
for _, g, sg, _ in sorted(_emoji_group_data.values()):
if g == group:
if sg != subgroup:
subgroup = sg
subgroups.append(subgroup)
return subgroups if subgroups else None
def get_emoji_in_group(group, subgroup=None):
"""Return the sorted list of the emoji sequences in the group (limiting to
subgroup if subgroup is not None). Returns None if group does not
exist, and an empty list if subgroup does not exist in group."""
_load_emoji_group_data()
result = None
for seq, (index, g, sg, _) in _emoji_group_data.items():
if g == group:
if result is None:
result = []
if subgroup and sg != subgroup:
continue
result.append(seq)
result.sort(key=lambda s: _emoji_group_data[s][0])
return result
def get_sorted_emoji_sequences(seqs):
"""Seqs is a collection of canonical emoji sequences. Returns a list of
these sequences in the canonical emoji group order. Sequences that are not
canonical are placed at the end, in unicode code point order.
"""
_load_emoji_group_data()
return sorted(seqs, key=lambda s: (_emoji_group_data.get(s, 100000), s))
def _load_emoji_sequence_data():
"""Ensure the emoji sequence data is initialized."""
global _emoji_sequence_data, _emoji_non_vs_to_canonical
if _emoji_sequence_data is not None:
return
_emoji_sequence_data = {}
_emoji_non_vs_to_canonical = {}
def add_data(data):
for k, t in data.items():
if k in _emoji_sequence_data:
print("already have data for sequence:", seq_to_string(k), t)
_emoji_sequence_data[k] = t
if EMOJI_VS in k:
_emoji_non_vs_to_canonical[strip_emoji_vs(k)] = k
for datafile in ["emoji-zwj-sequences.txt", "emoji-sequences.txt"]:
add_data(_read_emoji_data_file(datafile))
add_data(_read_emoji_data(_LEGACY_ANDROID_SEQUENCES.splitlines()))
_load_unicode_data_txt() # ensure character_names_data is populated
_load_emoji_data() # ensure presentation_default_text is populated
_load_emoji_group_data() # ensure group data is populated
# Get names for single emoji from the test data. We will prefer these over
# those in UnicodeData (e.g. prefer "one o'clock" to "clock face one oclock"),
# and if they're not in UnicodeData these are proposed new emoji.
for seq, (_, _, _, emoji_name) in _emoji_group_data.items():
non_vs_seq = strip_emoji_vs(seq)
if len(non_vs_seq) > 1:
continue
cp = non_vs_seq[0]
# If it's not in character names data, it's a proposed emoji.
if cp not in _character_names_data:
# use 'ignore' to strip curly quotes etc if they exist, unicode
# character names are ASCII, and it's probably best to keep it that way.
cp_name = emoji_name.encode("ascii", "ignore").upper()
_character_names_data[cp] = cp_name
is_default_text_presentation = cp in _presentation_default_text
if is_default_text_presentation:
seq = (cp, EMOJI_VS)
        cp_age = age(cp)
        emoji_age = float(cp_age) if cp_age is not None else PROPOSED_EMOJI_AGE
current_data = _emoji_sequence_data.get(seq) or (
emoji_name,
emoji_age,
"Emoji_Single_Sequence",
)
if is_default_text_presentation:
emoji_name = "(emoji) " + emoji_name
_emoji_sequence_data[seq] = (emoji_name, current_data[1], current_data[2])
# Fill in sequences of single emoji, handling non-canonical to canonical also.
for k in _emoji:
non_vs_seq = (k,)
is_default_text_presentation = k in _presentation_default_text
if is_default_text_presentation:
canonical_seq = (k, EMOJI_VS)
_emoji_non_vs_to_canonical[non_vs_seq] = canonical_seq
else:
canonical_seq = non_vs_seq
if canonical_seq in _emoji_sequence_data:
# Prefer names we have where they exist
emoji_name, emoji_age, seq_type = _emoji_sequence_data[canonical_seq]
else:
emoji_name = name(k, "unnamed").lower()
if name == "unnamed":
continue
emoji_age = age(k)
seq_type = "Emoji_Single_Sequence"
if is_default_text_presentation and not emoji_name.startswith("(emoji) "):
emoji_name = "(emoji) " + emoji_name
_emoji_sequence_data[canonical_seq] = (emoji_name, emoji_age, seq_type)
def get_emoji_sequences(age=None, types=None):
"""Return the set of canonical emoji sequences, filtering to those <= age
if age is not None, and those with type in types (if not a string) or
type == types (if type is a string) if types is not None. By default
all sequences are returned, including those for single emoji."""
_load_emoji_sequence_data()
result = _emoji_sequence_data.keys()
if types is not None:
if isinstance(types, basestring):
types = frozenset([types])
        result = [k for k in result if _emoji_sequence_data[k][2] in types]
if age is not None:
age = float(age)
        result = [k for k in result if _emoji_sequence_data[k][1] <= age]
return result
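# Illustrative usage (not part of the original module): filter the canonical
# sequences by type (strings as listed in EMOJI_SEQUENCE_TYPES) and/or age, e.g.
#   get_emoji_sequences(types="RGI_Emoji_ZWJ_Sequence")  # only ZWJ sequences
#   get_emoji_sequences(age=9.0)                         # sequences from 9.0 or earlier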
def get_emoji_sequence_data(seq):
"""Return a tuple of the name, age, and type for the (possibly non-canonical)
sequence, or None if not recognized as a sequence."""
_load_emoji_sequence_data()
seq = get_canonical_emoji_sequence(seq)
if not seq or seq not in _emoji_sequence_data:
return None
return _emoji_sequence_data[seq]
def get_emoji_sequence_name(seq):
"""Return the name of the (possibly non-canonical) sequence, or None if
not recognized as a sequence."""
data = get_emoji_sequence_data(seq)
return None if not data else data[0]
def get_emoji_sequence_age(seq):
"""Return the age of the (possibly non-canonical) sequence, or None if
not recognized as a sequence. Proposed sequences have PROPOSED_EMOJI_AGE
as the age."""
# floats are a pain since the actual values are decimal. maybe use
# strings to represent age.
data = get_emoji_sequence_data(seq)
return None if not data else data[1]
def get_emoji_sequence_type(seq):
"""Return the type of the (possibly non-canonical) sequence, or None if
not recognized as a sequence. Types are in EMOJI_SEQUENCE_TYPES."""
data = get_emoji_sequence_data(seq)
return None if not data else data[2]
def is_canonical_emoji_sequence(seq):
"""Return true if this is a canonical emoji sequence (has 'vs' where Unicode
says it should), and is known."""
_load_emoji_sequence_data()
return seq in _emoji_sequence_data
def get_canonical_emoji_sequence(seq):
"""Return the canonical version of this emoji sequence if the sequence is
known, or None."""
if is_canonical_emoji_sequence(seq):
return seq
seq = strip_emoji_vs(seq)
return _emoji_non_vs_to_canonical.get(seq, None)
def strip_emoji_vs(seq):
"""Return a version of this emoji sequence with emoji variation selectors
stripped. This is the 'non-canonical' version used by the color emoji font,
which doesn't care how the sequence is represented in text."""
if EMOJI_VS in seq:
return tuple([cp for cp in seq if cp != EMOJI_VS])
return seq
def seq_to_string(seq):
"""Return a string representation of the codepoint sequence."""
return "_".join("%04x" % cp for cp in seq)
def string_to_seq(seq_str):
"""Return a codepoint sequence (tuple) given its string representation."""
return tuple([int(s, 16) for s in seq_str.split("_")])
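# Illustrative usage (not part of the original module): the two helpers are
# inverses of each other, e.g.
#   seq_to_string((0x1F1E6, 0x1F1E7))  # -> "1f1e6_1f1e7"
#   string_to_seq("1f1e6_1f1e7")       # -> (0x1F1E6, 0x1F1E7)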
def is_cp_seq(seq):
return all(0 <= n <= 0x10FFFF for n in seq)
_REGIONAL_INDICATOR_START = 0x1F1E6
_REGIONAL_INDICATOR_END = 0x1F1FF
def is_regional_indicator(cp):
return _REGIONAL_INDICATOR_START <= cp <= _REGIONAL_INDICATOR_END
def is_regional_indicator_seq(cps):
return len(cps) == 2 and all(is_regional_indicator(cp) for cp in cps)
def regional_indicator_to_ascii(cp):
assert is_regional_indicator(cp)
return chr(cp - _REGIONAL_INDICATOR_START + ord("A"))
def ascii_to_regional_indicator(ch):
assert "A" <= ch <= "Z"
return ord(ch) - ord("A") + _REGIONAL_INDICATOR_START
def string_to_regional_indicator_seq(s):
assert len(s) == 2
return ascii_to_regional_indicator(s[0]), ascii_to_regional_indicator(s[1])
def regional_indicator_seq_to_string(cps):
assert len(cps) == 2
return "".join(regional_indicator_to_ascii(cp) for cp in cps)
def is_tag(cp):
return 0xE0020 < cp < 0xE0080 or cp == 0xE0001
def tag_character_to_ascii(cp):
assert is_tag(cp)
if cp == 0xE0001:
return "[begin]"
if cp == 0xE007F:
return "[end]"
return chr(cp - 0xE0000)
def is_regional_tag_seq(seq):
return (
seq[0] == 0x1F3F4
and seq[-1] == 0xE007F
and all(0xE0020 < cp < 0xE007E for cp in seq[1:-1])
)
_FITZ_START = 0x1F3FB
_FITZ_END = 0x1F3FF
def is_skintone_modifier(cp):
return _FITZ_START <= cp <= _FITZ_END
def get_presentation_default_emoji():
_load_emoji_data()
return _presentation_default_emoji
def get_presentation_default_text():
_load_emoji_data()
return _presentation_default_text
def get_emoji():
_load_emoji_data()
return _emoji
def is_emoji(cp):
_load_emoji_data()
return cp in _emoji
def is_emoji_modifier_base(cp):
_load_emoji_data()
return cp in _emoji_modifier_base
def _load_unicode_emoji_variants():
"""Parse StandardizedVariants.txt and initialize a set of characters
that have a defined emoji variant presentation. All such characters
also have a text variant presentation so a single set works for both."""
global _emoji_variants, _emoji_variants_proposed
if _emoji_variants:
return
emoji_variants = set()
# prior to Unicode 11 emoji variants were part of the standard data.
# as of Unicode 11 however they're only in a separate emoji data file.
line_re = re.compile(r"([0-9A-F]{4,6})\s+FE0F\s*;\s*emoji style\s*;")
with open_unicode_data_file("emoji-variation-sequences.txt") as f:
for line in f:
m = line_re.match(line)
if m:
emoji_variants.add(int(m.group(1), 16))
_emoji_variants = frozenset(emoji_variants)
try:
read = 0
skipped = 0
with open_unicode_data_file("proposed-variants.txt") as f:
for line in f:
m = line_re.match(line)
if m:
read += 1
cp = int(m.group(1), 16)
if cp in emoji_variants:
skipped += 1
else:
emoji_variants.add(cp)
print(
"skipped %s %d proposed variants"
% ("all of" if skipped == read else skipped, read)
)
except IOError as e:
if e.errno != 2:
raise
_emoji_variants_proposed = frozenset(emoji_variants)
def get_unicode_emoji_variants(include_proposed="proposed"):
"""Returns the emoji characters that have both emoji and text presentations.
If include_proposed is 'proposed', include the ones proposed in 2016/08. If
include_proposed is 'proposed_extra', also include the emoji Noto proposes
for text presentation treatment to align related characters. Else
include_proposed should resolve to boolean False."""
_load_unicode_emoji_variants()
if not include_proposed:
return _emoji_variants
elif include_proposed == "proposed":
return _emoji_variants_proposed
elif include_proposed == "proposed_extra":
extra = tool_utils.parse_int_ranges("1f4b9 1f4c8-1f4ca 1f507 1f509-1f50a 1f44c")
return _emoji_variants_proposed | extra
else:
raise Exception(
"include_proposed is %s which is not in ['proposed', 'proposed_extra']"
% include_proposed
)
def _load_variant_data():
"""Parse StandardizedVariants.txt and initialize all non-emoji variant
data. The data is a mapping from codepoint to a list of tuples of:
- variant selector
- compatibility character (-1 if there is none)
- shaping context (bitmask, 1 2 4 8 for isolate initial medial final)
The compatibility character is for cjk mappings that map to 'the same'
glyph as another CJK character."""
global _variant_data, _variant_data_cps
if _variant_data:
return
compatibility_re = re.compile(r"\s*CJK COMPATIBILITY IDEOGRAPH-([0-9A-Fa-f]+)")
variants = collections.defaultdict(list)
with open_unicode_data_file("StandardizedVariants.txt") as f:
for line in f:
x = line.find("#")
if x >= 0:
line = line[:x]
line = line.strip()
if not line:
continue
tokens = line.split(";")
cp, var = tokens[0].split(" ")
cp = int(cp, 16)
varval = int(var, 16)
if varval in [0xFE0E, 0xFE0F]:
continue # ignore emoji variants
m = compatibility_re.match(tokens[1].strip())
compat = int(m.group(1), 16) if m else -1
context = 0
if tokens[2]:
ctx = tokens[2]
if ctx.find("isolate") != -1:
context += 1
if ctx.find("initial") != -1:
context += 2
if ctx.find("medial") != -1:
context += 4
if ctx.find("final") != -1:
context += 8
variants[cp].append((varval, compat, context))
_variant_data_cps = frozenset(variants.keys())
_variant_data = variants
def has_variant_data(cp):
_load_variant_data()
return cp in _variant_data
def get_variant_data(cp):
_load_variant_data()
return _variant_data[cp][:] if cp in _variant_data else None
def variant_data_cps():
_load_variant_data()
return _variant_data_cps
# proposed emoji
def _load_proposed_emoji_data():
"""Parse proposed-emoji.txt if it exists to get cps/names of proposed emoji
(but not approved) for this version of Unicode."""
global _proposed_emoji_data, _proposed_emoji_data_cps
if _proposed_emoji_data:
return
_proposed_emoji_data = {}
line_re = re.compile(r"^U\+([a-zA-z0-9]{4,5})\s.*\s\d{4}Q\d\s+(.*)$")
try:
with open_unicode_data_file("proposed-emoji.txt") as f:
for line in f:
line = line.strip()
if not line or line[0] == "#" or line.startswith(u"\u2022"):
continue
m = line_re.match(line)
if not m:
raise ValueError('did not match "%s"' % line)
cp = int(m.group(1), 16)
name = m.group(2)
if cp in _proposed_emoji_data:
raise ValueError(
"duplicate emoji %x, old name: %s, new name: %s"
% (cp, _proposed_emoji_data[cp], name)
)
_proposed_emoji_data[cp] = name
except IOError as e:
if e.errno != 2:
# not file not found, rethrow
raise
_proposed_emoji_data_cps = frozenset(_proposed_emoji_data.keys())
def proposed_emoji_name(cp):
_load_proposed_emoji_data()
return _proposed_emoji_data.get(cp, "")
def proposed_emoji_cps():
_load_proposed_emoji_data()
return _proposed_emoji_data_cps
def is_proposed_emoji(cp):
_load_proposed_emoji_data()
return cp in _proposed_emoji_data_cps
def read_codeset(text):
line_re = re.compile(r"^0x([0-9a-fA-F]{2,6})\s+0x([0-9a-fA-F]{4,6})\s+.*")
codeset = set()
for line in text.splitlines():
m = line_re.match(line)
if m:
cp = int(m.group(2), 16)
codeset.add(cp)
return codeset
def codeset(cpname):
"""Return a set of the unicode codepoints in the code page named cpname, or
None."""
filename = ("%s.txt" % cpname).upper()
filepath = path.join(
path.dirname(__file__), os.pardir, "third_party", "unicode", filename
)
if not path.isfile(filepath):
return None
with open(filepath, "r") as f:
return read_codeset(f.read())
def _dump_emoji_presentation():
"""Dump presentation info, for testing."""
text_p = 0
emoji_p = 0
for cp in sorted(get_emoji()):
cp_name = name(cp, "<error>")
if cp in get_presentation_default_emoji():
presentation = "emoji"
emoji_p += 1
elif cp in get_presentation_default_text():
presentation = "text"
text_p += 1
else:
presentation = "<error>"
print(
"%s%04x %5s %s" % (" " if cp < 0x10000 else "", cp, presentation, cp_name)
)
print(
"%d total emoji, %d text presentation, %d emoji presentation"
% (len(get_emoji()), text_p, emoji_p)
)
def _load_nameslist_data():
global _nameslist_see_also
if _nameslist_see_also is not None:
return
_nameslist_see_also = collections.defaultdict(set)
cp = None
line_re = re.compile(r"^(?:(?:([0-9A-F]{4,6})\t.*)|(?:^\s+([x=])\s+(.*)))$")
see_also_re = re.compile(r"\s*(?:\(.*\s-\s+([0-9A-F]{4,6})\))|([0-9A-F]{4,6})")
with open_unicode_data_file("NamesList.txt") as f:
for line in f:
m = line_re.match(line)
if not m:
continue
if m.group(1):
cp = int(m.group(1), 16)
else:
rel = m.group(2).strip()
val = m.group(3).strip()
if rel != "x":
continue
m = see_also_re.match(val)
if not m:
raise Exception(
'could not match see also val "%s" in line "%s"' % (val, line)
)
ref_cp = int(m.group(1) or m.group(2), 16)
_nameslist_see_also[cp].add(ref_cp)
def see_also(cp):
_load_nameslist_data()
    return frozenset(_nameslist_see_also.get(cp, set()))
def _load_namealiases_data():
global _namealiases_alt_names
if _namealiases_alt_names is not None:
return
_namealiases_alt_names = collections.defaultdict(list)
line_re = re.compile(r"([0-9A-F]{4,6});([^;]+);(.*)$")
with open_unicode_data_file("NameAliases.txt") as f:
for line in f:
m = line_re.match(line)
if not m:
continue
cp = int(m.group(1), 16)
name = m.group(2).strip()
name_type = m.group(3).strip()
if not name_type in [
"correction",
"control",
"alternate",
"figment",
"abbreviation",
]:
raise Exception('unknown name type in "%s"' % line)
if name_type == "figment":
continue
_namealiases_alt_names[cp].append((name, name_type))
def alt_names(cp):
"""Return list of name, nametype tuples for cp, or None."""
_load_namealiases_data()
    names = _namealiases_alt_names.get(cp)
    return tuple(names) if names else None
if __name__ == "__main__":
all_sequences = sorted(get_emoji_sequences())
for k in all_sequences:
if not get_emoji_group_data(k):
print("no data:", seq_to_string(k))
for group in get_emoji_groups():
print("group:", group)
for subgroup in get_emoji_subgroups(group):
print(" subgroup:", subgroup)
print(" %d items" % len(get_emoji_in_group(group, subgroup)))
# dump some information for annotations
for k in get_sorted_emoji_sequences(all_sequences):
age = get_emoji_sequence_age(k)
if age == 12:
print(seq_to_string(k).replace("_", " "), "#", get_emoji_sequence_name(k))
| apache-2.0 | -2,052,905,957,902,834,400 | 32.051576 | 101 | 0.625488 | false |
amitdhiman000/dais | politics/urls.py | 1 | 1894 | """MyOffers URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^latest-laws/$', views.indian_latest_laws_view, name='indian_trending_view'),
url(r'^latest-debates/$', views.indian_latest_debates_view, name='indian_trending_view'),
url(r'^latest-petitions/$', views.indian_latest_petitions_view, name='indian_trending_view'),
url(r'^parliament-const/$', views.parliament_const_view, name='parliament_const_view'),
url(r'^legislative-const/$', views.legislative_const_view, name='legislative_const_view'),
url(r'^indian-parliament-members/$', views.indian_parliament_members_view, name='indian_parliament_members_view'),
url(r'^indian-rajyasabha-members/$', views.indian_rajyasabha_members_view, name='indian_rajyasabha_members_view'),
url(r'^state-legislative-members/$', views.state_legislative_members_view, name='state_legislative_members_view'),
url(r'^center-govt-projects/$', views.center_govt_projects_view, name='center_govt_projects_view'),
url(r'^state-govt-projects/$', views.state_govt_projects_view, name='state_govt_projects_view'),
url(r'^parties/$', views.parties_view, name='parties_view'),
url(r'^indian-gdp/$', views.indian_gdp_view, name='indian_gdp_view'),
url(r'^indian-fdi/$', views.indian_fdi_view, name='indian_fdi_view'),
] | apache-2.0 | 4,509,953,720,459,622,000 | 56.424242 | 115 | 0.733369 | false |
zero-os/0-orchestrator | templates/zerotier/actions.py | 2 | 3468 | def _get_client(job, token):
from zeroos.orchestrator.sal.Node import Node
return Node.from_ays(job.service.parent, token).client
def _get_network(job, token):
client = _get_client(job, token)
for net in client.zerotier.list():
if net['id'] == job.service.model.data.nwid:
return net
def _update_model(job, network):
job.service.model.data.allowDefault = network['allowDefault']
job.service.model.data.allowGlobal = network['allowGlobal']
job.service.model.data.allowManaged = network['allowManaged']
job.service.model.data.assignedAddresses = network['assignedAddresses']
job.service.model.data.bridge = network['bridge']
job.service.model.data.broadcastEnabled = network['broadcastEnabled']
job.service.model.data.dhcp = network['dhcp']
job.service.model.data.mac = network['mac']
job.service.model.data.mtu = network['mtu']
job.service.model.data.name = network['name']
job.service.model.data.netconfRevision = network['netconfRevision']
job.service.model.data.portDeviceName = network['portDeviceName']
job.service.model.data.portError = network['portError']
for route in network['routes']:
if route['via'] is None:
route['via'] = ''
job.service.model.data.routes = network['routes']
job.service.model.data.status = network['status']
job.service.model.data.type = network['type'].lower()
job.service.saveAll()
def install(job):
import time
from zerotier import client as ztclient
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
client = _get_client(job, job.context['token'])
client.zerotier.join(job.service.model.data.nwid)
def get_member():
start = time.time()
while start + 60 > time.time():
resp = zerotier.network.getMember(address, job.service.model.data.nwid)
if resp.content:
return resp.json()
time.sleep(0.5)
raise j.exceptions.RuntimeError('Could not find member on zerotier network')
token = job.service.model.data.token
if token:
address = client.zerotier.info()['address']
zerotier = ztclient.Client()
zerotier.set_auth_header('bearer {}'.format(token))
member = get_member()
if not member['config']['authorized']:
# authorized new member
job.logger.info("authorize new member {} to network {}".format(
member['nodeId'], job.service.model.data.nwid))
member['config']['authorized'] = True
zerotier.network.updateMember(member, member['nodeId'], job.service.model.data.nwid)
while True:
net = _get_network(job, job.context['token'])
if (token and net['status'] == 'OK') or (not token and net['status'] in ['OK', 'ACCESS_DENIED']):
break
time.sleep(1)
_update_model(job, net)
def delete(job):
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
client = _get_client(job, job.context['token'])
client.zerotier.leave(service.model.data.nwid)
def monitor(job):
from zeroos.orchestrator.configuration import get_jwt_token
if job.service.model.actionsState['install'] == 'ok':
_update_model(job, _get_network(job, get_jwt_token(job.service.aysrepo)))
| apache-2.0 | -3,469,933,869,409,491,000 | 35.893617 | 105 | 0.658593 | false |
SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/plugin.audio.jazzradio.com/default.py | 1 | 18506 | ########################################
# JazzRadio.com XBMC plugin
# by Tim C. 'Bitcrusher' Steinmetz
# http://qualisoft.dk
# Github: https://github.com/Bitcrusher/Jazzradio-com-XBMC-plugin.git
# Git Read-only: git://github.com/Bitcrusher/Jazzradio-com-XBMC-plugin.git
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import os
import sys
import re
import urllib
import urllib2
import string
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import pickle
import time
from xml.dom import minidom
from httpcomm import HTTPComm
from ConfigParser import SafeConfigParser
# Import JSON - compatible with Python<v2.6
try:
import json
except ImportError:
import simplejson as json
# Config parser
parser = SafeConfigParser()
parser.read( os.path.dirname(__file__) + "/config.ini" )
# Various constants used throughout the script
HANDLE = int(sys.argv[1])
ADDON = xbmcaddon.Addon(id=parser.get('plugin', 'id'))
# Plugin constants
__plugin__ = ADDON.getAddonInfo('name')
__author__ = "Tim C. Steinmetz"
__url__ = "http://qualisoft.dk/"
__platform__ = "xbmc media center, [LINUX, OS X, WIN32]"
__date__ = parser.get('plugin', 'date')
__version__ = ADDON.getAddonInfo('version')
class musicAddonXbmc:
_addonProfilePath = xbmc.translatePath( ADDON.getAddonInfo('profile') ).decode('utf-8') # Dir where plugin settings and cache will be stored
_cacheStreams = _addonProfilePath + "cache_streamlist.dat"
_cacheListenkey = _addonProfilePath + "cache_listenkey.dat"
_checkinFile = _addonProfilePath + "cache_lastcheckin.dat"
_baseUrl = parser.get('urls', 'baseUrl')
_loginUrl = parser.get('urls', 'loginUrl')
_listenkeyUrl = parser.get('urls', 'listenkeyUrl')
_publicStreamsJson40k = parser.get('urls', 'publicStreamsJson40k') # Public AAC 40k/sec AAC+ JSON url
_premiumStreamsJson40k = parser.get('urls', 'premiumStreamsJson40k') # AAC 40k/sec AAC+ JSON url
_premiumStreamsJson64k = parser.get('urls', 'premiumStreamsJson64k') # AAC 64k/sec AAC+ JSON url
_premiumStreamsJson128k = parser.get('urls', 'premiumStreamsJson128k') # AAC 128k/sec AAC+ JSON url
_premiumStreamsJson256k = parser.get('urls', 'premiumStreamsJson256k') # MP3 256k/sec AAC+ JSON url
_favoritesStreamJson40k = parser.get('urls', 'favoritesStreamJson40k') # Favorites AAC 40k/sec AAC+ playlist url
_favoritesStreamJson64k = parser.get('urls', 'favoritesStreamJson64k') # Favorites AAC 64k/sec AAC+ playlist url
_favoritesStreamJson128k= parser.get('urls', 'favoritesStreamJson128k') # Favorites AAC 128k/sec AAC+ playlist url
_favoritesStreamJson256k= parser.get('urls', 'favoritesStreamJson256k') # Favorites MP3 256k/sec AAC+ playlist url
_httpComm = HTTPComm() # Init CURL thingy
_frontpageHtml = ""
_newChannels = 0
_bitrate = 40
_streamMimeType = 'audio/aac'
def __init__( self ) :
# If stats is allowed and its been at least 24 hours since last checkin
if (ADDON.getSetting('allowstats') == "true") and (self.checkFileTime(self._checkinFile, self._addonProfilePath, 86400) == True) :
open(self._checkinFile, "w")
account = 'public'
if ADDON.getSetting('username') != "" :
account = 'premium'
xbmc.log( 'Submitting stats', xbmc.LOGNOTICE )
self._httpComm.get('http://stats.qualisoft.dk/?plugin='+ ADDON.getAddonInfo('id') + '&version=' + __version__ + '&account=' + account + '&key=' + parser.get('plugin', 'checkinkey'))
xbmc.log( "[PLUGIN] %s v%s (%s)" % ( __plugin__, __version__, __date__ ), xbmc.LOGNOTICE )
# Let's get some tunes!
def start( self ) :
jsonList = [] # list that data from the JSON will be put in
streamList = [] # the final list of channels, with small custom additions
# Check if cachefile has expired
if ADDON.getSetting("forceupdate") == "true" or ((int( ADDON.getSetting("cacheexpire") ) * 60) != 0 and self.checkFileTime(self._cacheStreams, self._addonProfilePath, (int( ADDON.getSetting("cacheexpire") ) * 60)) == True) :
listenkey = "" # will contain the premium listenkey
if ADDON.getSetting('username') != "" and ADDON.getSetting("usefavorites") == 'false' : # if username is set and not using favorites
xbmc.log( "Going for Premium streams", xbmc.LOGNOTICE )
# Checks if forceupdate is set and if the listenkey cachefile exists
if ADDON.getSetting("forceupdate") == "true" or not os.path.exists(self._cacheListenkey) :
listenkey = self.getListenkey()
pickle.dump( listenkey, open(self._cacheListenkey, "w"), protocol=0 ) # saves listenkey for further use
else :
listenkey = pickle.load( open(self._cacheListenkey, "r") )
if ADDON.getSetting('bitrate') == '0' :
self._bitrate = 40
jsonList = self.getJSONChannelList( self._premiumStreamsJson40k )
streamList = self.customizeStreamListAddMenuitem( jsonList, listenkey )
elif ADDON.getSetting('bitrate') == '1' :
self._bitrate = 64
jsonList = self.getJSONChannelList( self._premiumStreamsJson64k )
streamList = self.customizeStreamListAddMenuitem( jsonList, listenkey )
elif ADDON.getSetting('bitrate') == '2' :
self._bitrate = 128
jsonList = self.getJSONChannelList( self._premiumStreamsJson128k )
streamList = self.customizeStreamListAddMenuitem( jsonList, listenkey )
else :
self._bitrate = 256
self._streamMimeType = 'audio/mpeg'
jsonList = self.getJSONChannelList( self._premiumStreamsJson256k )
streamList = self.customizeStreamListAddMenuitem( jsonList, listenkey )
xbmc.log( "Bitrate set to " + str( self._bitrate ), xbmc.LOGNOTICE )
elif ADDON.getSetting('username') != "" and ADDON.getSetting("usefavorites") == 'true' : # if username is set and wants to use favorites
xbmc.log( "Going for Premium favorite streams", xbmc.LOGNOTICE )
listenkey = self.getListenkey()
if ADDON.getSetting('bitrate') == '0' :
self._bitrate = 40
streamList = self.getFavoriteStreamsList( self._favoritesStreamJson40k + "?" + listenkey )
elif ADDON.getSetting('bitrate') == '1' :
self._bitrate = 64
streamList = self.getFavoriteStreamsList( self._favoritesStreamJson64k + "?" + listenkey )
elif ADDON.getSetting('bitrate') == '2' :
self._bitrate = 128
streamList = self.getFavoriteStreamsList( self._favoritesStreamJson128k + "?" + listenkey )
else :
self._bitrate = 256
self._streamMimeType = 'audio/mpeg'
streamList = self.getFavoriteStreamsList( self._favoritesStreamJson256k + "?" + listenkey )
xbmc.log( "Bitrate set to " + str(self._bitrate), xbmc.LOGNOTICE )
for channel in streamList :
self.addItem( channel['name'], channel['playlist'], channel["description"], channel['bitrate'], self._addonProfilePath + "art_" + channel['key'] + ".png", channel['isNew'] )
else :
xbmc.log( "Going for Public streams", xbmc.LOGNOTICE )
jsonList = self.getJSONChannelList( self._publicStreamsJson40k )
streamList = self.customizeStreamListAddMenuitem(jsonList, "") # sending a blank string as listenkey
# save streams to cachefile
pickle.dump( streamList, open(self._cacheStreams, "w"), protocol=0 )
if (self._newChannels > 0) : # Yay! New channels found
xbmc.log( ADDON.getLocalizedString(30130) + " " + ADDON.getLocalizedString(30131) + str(self._newChannels) + ADDON.getLocalizedString(30132) + " " + ADDON.getLocalizedString(30133) + " " + ADDON.getLocalizedString(30134), xbmc.LOGNOTICE )
xbmcgui.Dialog().ok( ADDON.getLocalizedString(30130), ADDON.getLocalizedString(30131) + str(self._newChannels) + ADDON.getLocalizedString(30132), ADDON.getLocalizedString(30133),ADDON.getLocalizedString(30134) )
else :
xbmc.log( "Using cached streams", xbmc.LOGNOTICE )
streamList = pickle.load( open(self._cacheStreams, "r") )
# Add streams to GUI
for channel in streamList :
self.addItem( channel['name'].encode('utf-8'), channel['playlist'], channel["description"], channel['bitrate'], self._addonProfilePath + "art_" + channel['key'] + ".png", channel['isNew'] )
# If streams should be sorted A-Z
if ADDON.getSetting('sortaz') == "true" :
xbmcplugin.addSortMethod( HANDLE, sortMethod=xbmcplugin.SORT_METHOD_LABEL )
# End of channel list
xbmcplugin.endOfDirectory( HANDLE, succeeded=True )
# Resets the 'Force refresh' setting
ADDON.setSetting( id="forceupdate", value="false" )
return True
"""return list - False if it fails
    Customizes each channel entry (channelart key, isNew flag, bitrate, resolved stream URL) and returns the list
Also every channel is added to the GUI from here, as the progress indication
in the GUI would not reflect that something is actually happening till the very end
"""
def customizeStreamListAddMenuitem( self, list, listenkey ) :
# Precompiling regexes
streamurl_re = re.compile('File\d+=([^\n]*)', re.I) # streams in .pls file
streamList = []
# Will add list elements to a new list, with a few additions
for channel in list :
channel['key'] = self.makeChannelIconname( channel['name'] ) # customize the key that is used to find channelart
channel['isNew'] = False # is used to highlight when it's a new channel
channelArt = "art_" + channel['key'] + ".png"
channel['bitrate'] = self._bitrate
channel["description"] = channel["description"].encode('utf-8')
if ADDON.getSetting('username') != "" : # append listenkey to playlist url if username is set
channel['playlist'] = self.getFirstStream( channel['playlist'] + "?" + listenkey, streamurl_re )
else :
channel['playlist'] = self.getFirstStream( channel['playlist'], streamurl_re )
if (not os.path.isfile(self._addonProfilePath + channelArt)) : # if channelart is not in cache
xbmc.log( "Channelart for " + channel['name'].encode("ascii","ignore") + " not found in cache at " + self._addonProfilePath + channelArt, xbmc.LOGNOTICE )
self.getChannelArt( channel['id'], "art_" + channel['key'] )
channel['isNew'] = True
self._newChannels = self._newChannels + 1
streamList.append( channel )
# I'd have prefeered it if I didn't have to add menuitem from within this method
# but I have to, too give the user some visual feedback that stuff is happening
self.addItem( channel['name'].encode('utf-8'), channel['playlist'], channel["description"], self._bitrate, self._addonProfilePath + "art_" + channel['key'] + ".png", channel['isNew'] )
return streamList # returns the channellist so it can be saved to cache
"""return bool
Will check if channelart/icon is present in cache - if not, try to download
"""
def getChannelArt( self, channelId, channelKey ) :
channelArt_re = re.compile('data-id="' + str(channelId) +'">(?:[\n\s]*)<a(?:[^>]*)><img(?:[^>]*)src="([^"]*)"', re.I)
try :
if (self._frontpageHtml == "") : # If frontpage html has not already been downloaded, do it
self._frontpageHtml = self._httpComm.get( self._baseUrl )
channelartDict = channelArt_re.findall( self._frontpageHtml )
# Will download and save the channelart to the cache
self._httpComm.getImage( channelartDict[0], self._addonProfilePath + channelKey + ".png" )
return True
except Exception :
sys.exc_clear() # Clears all exceptions so the script will continue to run
xbmcgui.Dialog().ok( ADDON.getLocalizedString(30160), ADDON.getLocalizedString(30161), ADDON.getLocalizedString(30162) + channelartDict[0] )
xbmc.log( ADDON.getLocalizedString(30160) + " " + ADDON.getLocalizedString(30161) + channelKey + " " + ADDON.getLocalizedString(30162)+ channelartDict[0], xbmc.LOGERROR )
return False
return True
"""return String
Extracts the premium listenkey from the listenkey page html
"""
def getListenkey( self ) :
listenkey_re = re.compile('Key is:<br />[^<]*<strong>([\w\d]*)<', re.DOTALL)
try :
logindata = urllib.urlencode({ 'member_session[username]': ADDON.getSetting('username'),
'member_session[password]': ADDON.getSetting('password') })
self._httpComm.post( self._loginUrl, logindata ) # logs in so the listenkey page is accessible
listenkeyHtml = self._httpComm.get( self._listenkeyUrl)
listenkeyDict = listenkey_re.findall( listenkeyHtml )
xbmc.log( "Found listenkey", xbmc.LOGNOTICE )
return listenkeyDict[0]
except Exception :
sys.exc_clear() # Clears all exceptions so the script will continue to run
xbmcgui.Dialog().ok( ADDON.getLocalizedString(30100), ADDON.getLocalizedString(30101), ADDON.getLocalizedString(30102) )
xbmc.log( ADDON.getLocalizedString(30100) + " " + ADDON.getLocalizedString(30101) + " " + ADDON.getLocalizedString(30102), xbmc.LOGERROR )
return False
return False
"""return list - False if it fails
Will get a HTML page containing JSON data, decode it and return
"""
def getJSONChannelList( self, url ) :
try :
jsonData = self._httpComm.get( url )
jsonData = json.loads(jsonData)
except Exception : # Show error message in XBMC GUI if failing to parse JSON
sys.exc_clear() # Clears all exceptions so the script will continue to run
xbmcgui.Dialog().ok( ADDON.getLocalizedString(30100), ADDON.getLocalizedString(30101), ADDON.getLocalizedString(30102) )
xbmc.log( ADDON.getLocalizedString(30100) + " " + ADDON.getLocalizedString(30101) + " " + ADDON.getLocalizedString(30102), xbmc.LOGERROR )
return False
return jsonData
"""return list - False if it fails
Gets the favorites playlist and returns the streams as a list
"""
def getFavoriteStreamsList( self, url ) :
try :
favoritesPlaylist = self._httpComm.get( url ) # favorites .pls playlist in plaintext
favoritesList = [] # list that will contain streamlist
streamurl_re = re.compile( 'File\d+=([^\n]*)', re.I ) # first stream in .pls file
channeltitle_re = re.compile( 'Title\d+=([^\n]*)', re.I )
streamTitles = channeltitle_re.findall( favoritesPlaylist )
streamUrls = streamurl_re.findall( favoritesPlaylist )
if len(streamUrls) == len( streamTitles ) : # only continue if the count of urls and titles are equal
for i in range(len(streamUrls)) :
listitem = {}
listitem['playlist'] = streamUrls[i]
listitem['name'] = streamTitles[i].replace( parser.get('plugin', 'playlistStripName') + " ", "" ) # favorite stream titles has some "fluff" text it that is removed
listitem['key'] = self.makeChannelIconname( listitem['name'] )
listitem['isNew'] = False
listitem['bitrate'] = self._bitrate
listitem['description'] = ""
favoritesList.append( listitem )
else :
return False
return favoritesList
except Exception : # Show error message in XBMC GUI if failing to parse JSON
#sys.exc_clear() # Clears all exceptions so the script will continue to run
xbmcgui.Dialog().ok( ADDON.getLocalizedString(30120), ADDON.getLocalizedString(30111), url )
xbmc.log( ADDON.getLocalizedString(30120) + " " + ADDON.getLocalizedString(30111) + " " + url, xbmc.LOGERROR )
return False
return favoritesList
"""return string
Will take a channelname, lowercase it and remove spaces, dashes and other special characters
The string returned is normally used as part of the filename for the channelart
"""
def makeChannelIconname( self, channelname ) :
iconreplacement_re = re.compile('[^a-z0-9]', re.I) # regex that hits everything but a-z and 0-9
iconname = string.lower(iconreplacement_re.sub( '', channelname) )
return iconname
"""return bool
Simply adds a music item to the XBMC GUI
"""
# Adds item to XBMC itemlist
def addItem( self, channelTitle, streamUrl, streamDescription, streamBitrate, icon, isNewChannel ) :
if isNewChannel == True : # tart it up a bit if it's a new channel
li = xbmcgui.ListItem(label="[COLOR FF007EFF]" + channelTitle + "[/COLOR]",thumbnailImage=icon)
xbmc.log( "New channel found: " + channelTitle, xbmc.LOGERROR )
else :
li = xbmcgui.ListItem(label=channelTitle, thumbnailImage=icon)
li.setProperty("mimetype", self._streamMimeType)
li.setInfo( type="Music", infoLabels={ "label": channelTitle, "Genre": channelTitle, "Comment": streamDescription, "Size": (streamBitrate * 1024) })
li.setProperty("IsPlayable", "true")
li.setProperty("IsLive", "true")
xbmcplugin.addDirectoryItem(handle=HANDLE, url=streamUrl, listitem=li, isFolder=False)
return True
"""return string
Gets the first stream from a playlist
"""
def getFirstStream( self, playlistUrl, regex ) :
plsData = self._httpComm.get( playlistUrl )
streamurls = regex.findall(plsData)
return streamurls[0]
"""return bool
Checks if a file is older than x seconds
"""
def checkFileTime( self, tmpfile, cachedir, timesince ) :
if not os.path.exists( cachedir ) :
os.makedirs( cachedir )
return False
# If file exists, check timestamp
if os.path.exists( tmpfile ) :
if os.path.getmtime( tmpfile ) > ( time.time() - timesince ) :
xbmc.log( 'It has not been ' + str( timesince/60 ) + ' minutes since ' + tmpfile + ' was last updated', xbmc.LOGNOTICE )
return False
else :
xbmc.log( 'The cachefile ' + tmpfile + ' + has expired', xbmc.LOGNOTICE )
return True
# If file does not exist, return true so the file will be created by scraping the page
else :
xbmc.log( 'The cachefile ' + tmpfile + ' does not exist', xbmc.LOGNOTICE )
return True
MusicAddonInstance = musicAddonXbmc()
MusicAddonInstance.start()
| gpl-2.0 | 3,916,617,286,877,704,000 | 42.700483 | 242 | 0.681941 | false |
nickubels/qlinkplanner | qlinkplanner/settings.py | 1 | 4429 | """
Django settings for qlinkplanner project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Allowed hosts that can reach the planner
ALLOWED_HOSTS = [
'localhost',
os.environ['URL']
]
# Application definition
INSTALLED_APPS = [
'planner',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'qlinkplanner.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'qlinkplanner.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, '../planner/static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
## Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': ('%(asctime)s [%(process)d] [%(levelname)s] '
'pathname=%(pathname)s lineno=%(lineno)s '
'funcname=%(funcName)s message=%(message)s'),
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'qlinkplanner': {
'handlers': ['console', ],
'level': 'INFO',
}
}
}
| mit | -2,883,870,867,445,334,000 | 24.900585 | 91 | 0.641002 | false |
testmana2/test | Helpviewer/WebPlugins/WebPluginInterface.py | 2 | 1760 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing the web plug-in interface.
"""
from __future__ import unicode_literals
class WebPluginInterface(object):
"""
Class implementing the web plug-in interface.
"""
def metaPlugin(self):
"""
Public method to create a meta plug-in object containing plug-in info.
@ireturn meta plug-in object (QWebPluginFactory.Plugin)
@exception NotImplementedError raised to indicate that this method
must be implemented by subclasses
"""
raise NotImplementedError
def create(self, mimeType, url, argumentNames, argumentValues):
"""
Public method to create a plug-in instance for the given data.
@param mimeType MIME type for the plug-in (string)
@param url URL for the plug-in (QUrl)
@param argumentNames list of argument names (list of strings)
@param argumentValues list of argument values (list of strings)
@ireturn reference to the created object (QWidget)
@exception NotImplementedError raised to indicate that this method
must be implemented by subclasses
"""
raise NotImplementedError
def configure(self):
"""
Public method to configure the plug-in.
@exception NotImplementedError raised to indicate that this method
must be implemented by subclasses
"""
raise NotImplementedError
def isAnonymous(self):
"""
Public method to indicate an anonymous plug-in.
@return flag indicating anonymous state (boolean)
"""
return False
| gpl-3.0 | 712,451,422,128,389,100 | 29.877193 | 78 | 0.636932 | false |
GNOME/orca | test/keystrokes/firefox/object_nav_links_in_text.py | 1 | 7815 | #!/usr/bin/python
"""Test of object navigation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Top of file",
["BRAILLE LINE: 'Home'",
" VISIBLE: 'Home', cursor=1",
"SPEECH OUTPUT: 'Home link.' voice=hyperlink"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("H"))
sequence.append(utils.AssertPresentationAction(
"2. H for heading",
["BRAILLE LINE: 'Enter Bug: orca \u2013 This page lets you enter a new bug into Bugzilla. h1'",
" VISIBLE: 'Enter Bug: orca \u2013 This page lets', cursor=1",
"SPEECH OUTPUT: 'Enter Bug: orca \u2013 This page lets you enter a new bug into Bugzilla. heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. line Down",
["BRAILLE LINE: 'into Bugzilla. h1'",
" VISIBLE: 'into Bugzilla. h1', cursor=1",
"SPEECH OUTPUT: 'into Bugzilla. heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. line Down",
["BRAILLE LINE: 'Before reporting a bug, please read the'",
" VISIBLE: 'Before reporting a bug, please r', cursor=1",
"SPEECH OUTPUT: 'Before reporting a bug, please read the.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. line Down",
["BRAILLE LINE: 'bug writing guidelines'",
" VISIBLE: 'bug writing guidelines', cursor=1",
"SPEECH OUTPUT: 'bug writing guidelines link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. line Down",
["BRAILLE LINE: ', please look at the list of'",
" VISIBLE: ', please look at the list of', cursor=1",
"SPEECH OUTPUT: ', please look at the list of.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"7. line Down",
["BRAILLE LINE: 'most'",
" VISIBLE: 'most', cursor=1",
"SPEECH OUTPUT: 'most link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"8. line Down",
["BRAILLE LINE: 'frequently reported bugs'",
" VISIBLE: 'frequently reported bugs', cursor=1",
"SPEECH OUTPUT: 'frequently reported bugs link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. line Down",
["BRAILLE LINE: ', and please'",
" VISIBLE: ', and please', cursor=1",
"SPEECH OUTPUT: ', and please.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"10. line Down",
["BRAILLE LINE: 'search'",
" VISIBLE: 'search', cursor=1",
"SPEECH OUTPUT: 'search link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"11. line Down",
["BRAILLE LINE: ' or'",
" VISIBLE: ' or', cursor=1",
"SPEECH OUTPUT: 'or.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"12. line Down",
["BRAILLE LINE: 'browse'",
" VISIBLE: 'browse', cursor=1",
"SPEECH OUTPUT: 'browse link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"13. line Down",
["BRAILLE LINE: ' for the bug.'",
" VISIBLE: ' for the bug.', cursor=1",
"SPEECH OUTPUT: 'for the bug.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"14. line Up",
["BRAILLE LINE: 'browse'",
" VISIBLE: 'browse', cursor=1",
"SPEECH OUTPUT: 'browse link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"15. line Up",
["BRAILLE LINE: ' or'",
" VISIBLE: ' or', cursor=1",
"SPEECH OUTPUT: 'or.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"16. line Up",
["BRAILLE LINE: 'search'",
" VISIBLE: 'search', cursor=1",
"SPEECH OUTPUT: 'search link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"17. line Up",
["BRAILLE LINE: ', and please'",
" VISIBLE: ', and please', cursor=1",
"SPEECH OUTPUT: ', and please.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"18. line Up",
["BRAILLE LINE: 'frequently reported bugs'",
" VISIBLE: 'frequently reported bugs', cursor=1",
"SPEECH OUTPUT: 'frequently reported bugs link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"19. line Up",
["BRAILLE LINE: 'most'",
" VISIBLE: 'most', cursor=1",
"SPEECH OUTPUT: 'most link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"20. line Up",
["BRAILLE LINE: ', please look at the list of'",
" VISIBLE: ', please look at the list of', cursor=1",
"SPEECH OUTPUT: ', please look at the list of.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"21. line Up",
["BRAILLE LINE: 'bug writing guidelines'",
" VISIBLE: 'bug writing guidelines', cursor=1",
"SPEECH OUTPUT: 'bug writing guidelines link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"22. line Up",
["BRAILLE LINE: 'Before reporting a bug, please read the'",
" VISIBLE: 'Before reporting a bug, please r', cursor=1",
"SPEECH OUTPUT: 'Before reporting a bug, please read the.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"23. line Up",
["BRAILLE LINE: 'into Bugzilla. h1'",
" VISIBLE: 'into Bugzilla. h1', cursor=1",
"SPEECH OUTPUT: 'into Bugzilla. heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"24. line Up",
["BRAILLE LINE: 'Enter Bug: orca \u2013 This page lets you enter a new bug h1'",
" VISIBLE: 'Enter Bug: orca \u2013 This page lets', cursor=1",
"SPEECH OUTPUT: 'Enter Bug: orca \u2013 This page lets you enter a new bug heading level 1'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 | -7,822,783,101,107,612,000 | 36.037915 | 115 | 0.689827 | false |
tensorflow/models | official/nlp/bert/squad_evaluate_v1_1.py | 1 | 3724 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation of SQuAD predictions (version 1.1).
The functions are copied from
https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/.
The SQuAD dataset is described in this paper:
SQuAD: 100,000+ Questions for Machine Comprehension of Text
Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, Percy Liang
https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf
"""
import collections
import re
import string
# pylint: disable=g-bad-import-order
from absl import logging
# pylint: enable=g-bad-import-order
def _normalize_answer(s):
"""Lowers text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _f1_score(prediction, ground_truth):
"""Computes F1 score by comparing prediction to ground truth."""
prediction_tokens = _normalize_answer(prediction).split()
ground_truth_tokens = _normalize_answer(ground_truth).split()
prediction_counter = collections.Counter(prediction_tokens)
ground_truth_counter = collections.Counter(ground_truth_tokens)
common = prediction_counter & ground_truth_counter
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def _exact_match_score(prediction, ground_truth):
"""Checks if predicted answer exactly matches ground truth answer."""
return _normalize_answer(prediction) == _normalize_answer(ground_truth)
def _metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
"""Computes the max over all metric scores."""
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
"""Evaluates predictions for a dataset."""
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
logging.error(message)
continue
ground_truths = [entry["text"] for entry in qa["answers"]]
prediction = predictions[qa["id"]]
exact_match += _metric_max_over_ground_truths(_exact_match_score,
prediction, ground_truths)
f1 += _metric_max_over_ground_truths(_f1_score, prediction,
ground_truths)
exact_match = exact_match / total
f1 = f1 / total
return {"exact_match": exact_match, "final_f1": f1}
| apache-2.0 | -89,582,168,707,521,360 | 34.132075 | 94 | 0.69522 | false |
sebastic/QGIS | python/plugins/db_manager/dlg_export_vector.py | 3 | 8136 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : Oct 13, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import Qt, QObject, SIGNAL, QSettings, QFileInfo
from PyQt4.QtGui import QDialog, QFileDialog, QMessageBox, QApplication, QCursor
import qgis.core
from .ui.ui_DlgExportVector import Ui_DbManagerDlgExportVector as Ui_Dialog
class DlgExportVector(QDialog, Ui_Dialog):
def __init__(self, inLayer, inDb, parent=None):
QDialog.__init__(self, parent)
self.inLayer = inLayer
self.db = inDb
self.setupUi(self)
vectorFilterName = "lastVectorFileFilter" # "lastRasterFileFilter"
self.lastUsedVectorFilterSettingsKey = u"/UI/{0}".format(vectorFilterName)
self.lastUsedVectorDirSettingsKey = u"/UI/{0}Dir".format(vectorFilterName)
# update UI
self.setupWorkingMode()
self.populateFileFilters()
self.populateEncodings()
def setupWorkingMode(self):
# set default values
inCrs = self.inLayer.crs()
        srid = inCrs.postgisSrid() if inCrs.isValid() else 4326  # default to WGS 84
self.editSourceSrid.setText("%s" % srid)
self.editTargetSrid.setText("%s" % srid)
QObject.connect(self.btnChooseOutputFile, SIGNAL("clicked()"), self.chooseOutputFile)
self.checkSupports()
def checkSupports(self):
""" update options available for the current input layer """
allowSpatial = self.db.connector.hasSpatialSupport()
hasGeomType = self.inLayer and self.inLayer.hasGeometryType()
self.chkSourceSrid.setEnabled(allowSpatial and hasGeomType)
self.chkTargetSrid.setEnabled(allowSpatial and hasGeomType)
# self.chkSpatialIndex.setEnabled(allowSpatial and hasGeomType)
def chooseOutputFile(self):
# get last used dir
settings = QSettings()
lastUsedDir = settings.value(self.lastUsedVectorDirSettingsKey, ".")
# get selected filter
selectedFilter = self.cboFileFormat.itemData(self.cboFileFormat.currentIndex())
# ask for a filename
filename = QFileDialog.getSaveFileName(self, self.tr("Choose where to save the file"), lastUsedDir,
selectedFilter)
if filename == "":
return
filterString = qgis.core.QgsVectorFileWriter.filterForDriver(selectedFilter)
ext = filterString[filterString.find('.'):]
ext = ext[:ext.find(' ')]
if not filename.lower().endswith(ext):
filename += ext
# store the last used dir
settings.setValue(self.lastUsedVectorDirSettingsKey, QFileInfo(filename).filePath())
self.editOutputFile.setText(filename)
def populateEncodings(self):
# populate the combo with supported encodings
self.cboEncoding.addItems(qgis.core.QgsVectorDataProvider.availableEncodings())
# set the last used encoding
enc = self.inLayer.dataProvider().encoding()
idx = self.cboEncoding.findText(enc)
if idx < 0:
self.cboEncoding.insertItem(0, enc)
idx = 0
self.cboEncoding.setCurrentIndex(idx)
def populateFileFilters(self):
# populate the combo with supported vector file formats
for name, filt in qgis.core.QgsVectorFileWriter.ogrDriverList().iteritems():
self.cboFileFormat.addItem(name, filt)
# set the last used filter
settings = QSettings()
filt = settings.value(self.lastUsedVectorFilterSettingsKey, "ESRI Shapefile")
idx = self.cboFileFormat.findText(filt)
if idx < 0:
idx = 0
self.cboFileFormat.setCurrentIndex(idx)
def accept(self):
# sanity checks
if self.editOutputFile.text() == "":
QMessageBox.information(self, self.tr("Export to file"), self.tr("Output file name is required"))
return
if self.chkSourceSrid.isEnabled() and self.chkSourceSrid.isChecked():
try:
sourceSrid = int(self.editSourceSrid.text())
except ValueError:
QMessageBox.information(self, self.tr("Export to file"),
self.tr("Invalid source srid: must be an integer"))
return
if self.chkTargetSrid.isEnabled() and self.chkTargetSrid.isChecked():
try:
targetSrid = int(self.editTargetSrid.text())
except ValueError:
QMessageBox.information(self, self.tr("Export to file"),
self.tr("Invalid target srid: must be an integer"))
return
# override cursor
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
# store current input layer crs, so I can restore it later
prevInCrs = self.inLayer.crs()
try:
uri = self.editOutputFile.text()
providerName = "ogr"
options = {}
# set the OGR driver will be used
driverName = self.cboFileFormat.itemData(self.cboFileFormat.currentIndex())
options['driverName'] = driverName
# set the output file encoding
if self.chkEncoding.isEnabled() and self.chkEncoding.isChecked():
enc = self.cboEncoding.currentText()
options['fileEncoding'] = enc
if self.chkDropTable.isChecked():
options['overwrite'] = True
outCrs = None
if self.chkTargetSrid.isEnabled() and self.chkTargetSrid.isChecked():
targetSrid = int(self.editTargetSrid.text())
outCrs = qgis.core.QgsCoordinateReferenceSystem(targetSrid)
# update input layer crs
if self.chkSourceSrid.isEnabled() and self.chkSourceSrid.isChecked():
sourceSrid = int(self.editSourceSrid.text())
inCrs = qgis.core.QgsCoordinateReferenceSystem(sourceSrid)
self.inLayer.setCrs(inCrs)
# do the export!
ret, errMsg = qgis.core.QgsVectorLayerImport.importLayer(self.inLayer, uri, providerName, outCrs, False,
False, options)
except Exception as e:
ret = -1
errMsg = unicode(e)
finally:
# restore input layer crs and encoding
self.inLayer.setCrs(prevInCrs)
# restore cursor
QApplication.restoreOverrideCursor()
if ret != 0:
QMessageBox.warning(self, self.tr("Export to file"), self.tr("Error %d\n%s") % (ret, errMsg))
return
# create spatial index
# if self.chkSpatialIndex.isEnabled() and self.chkSpatialIndex.isChecked():
# self.db.connector.createSpatialIndex( (schema, table), geom )
QMessageBox.information(self, self.tr("Export to file"), self.tr("Export finished."))
return QDialog.accept(self)
if __name__ == '__main__':
import sys
a = QApplication(sys.argv)
dlg = DlgExportVector()
dlg.show()
sys.exit(a.exec_())
| gpl-2.0 | 1,475,234,151,142,240,000 | 38.495146 | 116 | 0.580261 | false |
DecipherOne/Troglodyte | Trog Build Dependencies/Python26/Lib/test/test_winreg.py | 2 | 9459 | # Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
from _winreg import *
import os, sys
import unittest
from test import test_support
threading = test_support.import_module("threading")
from platform import machine
test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"
test_data = [
("Int Value", 45, REG_DWORD),
("String Val", "A string value", REG_SZ),
("StringExpand", "The path is %path%", REG_EXPAND_SZ),
("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY),
("Big String", "x"*(2**14-1), REG_SZ),
("Big Binary", "x"*(2**14), REG_BINARY),
]
if test_support.have_unicode:
test_data += [
(unicode("Unicode Val"), unicode("A Unicode value"), REG_SZ,),
("UnicodeExpand", unicode("The path is %path%"), REG_EXPAND_SZ),
("Multi-unicode", [unicode("Lots"), unicode("of"), unicode("unicode"),
unicode("values")], REG_MULTI_SZ),
("Multi-mixed", [unicode("Unicode"), unicode("and"), "string",
"values"], REG_MULTI_SZ),
]
class WinregTests(unittest.TestCase):
remote_name = None
def WriteTestData(self, root_key):
# Set the default value for this key.
SetValue(root_key, test_key_name, REG_SZ, "Default value")
key = CreateKey(root_key, test_key_name)
# Create a sub-key
sub_key = CreateKey(key, "sub_key")
# Give the sub-key some named values
for value_name, value_data, value_type in test_data:
SetValueEx(sub_key, value_name, 0, value_type, value_data)
# Check we wrote as many items as we thought.
nkeys, nvalues, since_mod = QueryInfoKey(key)
self.assertEquals(nkeys, 1, "Not the correct number of sub keys")
self.assertEquals(nvalues, 1, "Not the correct number of values")
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEquals(nkeys, 0, "Not the correct number of sub keys")
self.assertEquals(nvalues, len(test_data),
"Not the correct number of values")
# Close this key this way...
# (but before we do, copy the key as an integer - this allows
# us to test that the key really gets closed).
int_sub_key = int(sub_key)
CloseKey(sub_key)
try:
QueryInfoKey(int_sub_key)
self.fail("It appears the CloseKey() function does "
"not close the actual key!")
except EnvironmentError:
pass
# ... and close that key that way :-)
int_key = int(key)
key.Close()
try:
QueryInfoKey(int_key)
self.fail("It appears the key.Close() function "
"does not close the actual key!")
except EnvironmentError:
pass
def ReadTestData(self, root_key):
# Check we can get default value for this key.
val = QueryValue(root_key, test_key_name)
self.assertEquals(val, "Default value",
"Registry didn't give back the correct value")
key = OpenKey(root_key, test_key_name)
# Read the sub-keys
with OpenKey(key, "sub_key") as sub_key:
# Check I can enumerate over the values.
index = 0
while 1:
try:
data = EnumValue(sub_key, index)
except EnvironmentError:
break
self.assertEquals(data in test_data, True,
"Didn't read back the correct test data")
index = index + 1
self.assertEquals(index, len(test_data),
"Didn't read the correct number of items")
# Check I can directly access each item
for value_name, value_data, value_type in test_data:
read_val, read_typ = QueryValueEx(sub_key, value_name)
self.assertEquals(read_val, value_data,
"Could not directly read the value")
self.assertEquals(read_typ, value_type,
"Could not directly read the value")
sub_key.Close()
# Enumerate our main key.
read_val = EnumKey(key, 0)
self.assertEquals(read_val, "sub_key", "Read subkey value wrong")
try:
EnumKey(key, 1)
self.fail("Was able to get a second key when I only have one!")
except EnvironmentError:
pass
key.Close()
def DeleteTestData(self, root_key):
key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
# It is not necessary to delete the values before deleting
# the key (although subkeys must not exist). We delete them
# manually just to prove we can :-)
for value_name, value_data, value_type in test_data:
DeleteValue(sub_key, value_name)
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEquals(nkeys, 0, "subkey not empty before delete")
self.assertEquals(nvalues, 0, "subkey not empty before delete")
sub_key.Close()
DeleteKey(key, "sub_key")
try:
            # Shouldn't be able to delete it twice!
DeleteKey(key, "sub_key")
self.fail("Deleting the key twice succeeded")
except EnvironmentError:
pass
key.Close()
DeleteKey(root_key, test_key_name)
# Opening should now fail!
try:
key = OpenKey(root_key, test_key_name)
self.fail("Could open the non-existent key")
except WindowsError: # Use this error name this time
pass
def TestAll(self, root_key):
self.WriteTestData(root_key)
self.ReadTestData(root_key)
self.DeleteTestData(root_key)
def testLocalMachineRegistryWorks(self):
self.TestAll(HKEY_CURRENT_USER)
def testConnectRegistryToLocalMachineWorks(self):
# perform minimal ConnectRegistry test which just invokes it
h = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
h.Close()
def testRemoteMachineRegistryWorks(self):
if not self.remote_name:
return # remote machine name not specified
remote_key = ConnectRegistry(self.remote_name, HKEY_CURRENT_USER)
self.TestAll(remote_key)
def testExpandEnvironmentStrings(self):
r = ExpandEnvironmentStrings(u"%windir%\\test")
self.assertEqual(type(r), unicode)
self.assertEqual(r, os.environ["windir"] + "\\test")
def test_changing_value(self):
# Issue2810: A race condition in 2.6 and 3.1 may cause
# EnumValue or QueryValue to throw "WindowsError: More data is
# available"
done = False
class VeryActiveThread(threading.Thread):
def run(self):
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
use_short = True
long_string = 'x'*2000
while not done:
s = 'x' if use_short else long_string
use_short = not use_short
SetValue(key, 'changing_value', REG_SZ, s)
thread = VeryActiveThread()
thread.start()
try:
with CreateKey(HKEY_CURRENT_USER,
test_key_name+'\\changing_value') as key:
for _ in range(1000):
num_subkeys, num_values, t = QueryInfoKey(key)
for i in range(num_values):
name = EnumValue(key, i)
QueryValue(key, name[0])
finally:
done = True
thread.join()
DeleteKey(HKEY_CURRENT_USER, test_key_name+'\\changing_value')
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_long_key(self):
# Issue2810, in 2.6 and 3.1 when the key name was exactly 256
# characters, EnumKey threw "WindowsError: More data is
# available"
name = 'x'*256
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
SetValue(key, name, REG_SZ, 'x')
num_subkeys, num_values, t = QueryInfoKey(key)
EnumKey(key, 0)
finally:
DeleteKey(HKEY_CURRENT_USER, '\\'.join((test_key_name, name)))
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_dynamic_key(self):
# Issue2810, when the value is dynamically generated, these
# throw "WindowsError: More data is available" in 2.6 and 3.1
EnumValue(HKEY_PERFORMANCE_DATA, 0)
QueryValueEx(HKEY_PERFORMANCE_DATA, None)
def test_main():
test_support.run_unittest(WinregTests)
if __name__ == "__main__":
try:
WinregTests.remote_name = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
print "Remote registry calls can be tested using",
print "'test_winreg.py --remote \\\\machine_name'"
WinregTests.remote_name = None
test_main()
| mit | 5,052,397,931,946,083,000 | 39.596567 | 78 | 0.563485 | false |
Catnipz/Catnip-Cogs | avatarchange/avatarchange.py | 1 | 1594 | import discord
import json
import os
import asyncio
import aiohttp  # assumed session backend for downloading the avatar images
from random import randint
from discord.ext import commands
from .utils.dataIO import dataIO
class avatarchange:
"""Automatically change bot profile picture"""
    def __init__(self, bot):
        self.bot = bot
        self.session = aiohttp.ClientSession()
        self.settings = dataIO.load_json('data/luna/settings.json')
    @commands.group(pass_context=True)
    async def luna(self, ctx):
        """Luna pic's settings"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)
    async def change(self):
        # Pick a stored picture (random when more than one is saved), download
        # it, set it as the bot avatar, then wait five minutes and repeat.
        while True:
            if self.settings['total'] < 2:
                pic = self.settings['1']
            else:
                pic_num = randint(1, self.settings['total'])
                pic = self.settings[str(pic_num)]
            async with self.session.get(pic) as r:
                data = await r.read()
            await self.bot.edit_profile(self.bot.settings.password, avatar=data)
            await asyncio.sleep(300)
@luna.command()
    async def add(self, url):
        """Add a pic"""
        if 'total' not in self.settings:
            self.settings = {
                'total': 0
            }
        if not url.endswith('.png'):
            await self.bot.say("Invalid format. Please use `.png`")
            return
        t = self.settings['total'] + 1
        self.settings[str(t)] = url
        self.settings['total'] = t
        dataIO.save_json('data/luna/settings.json', self.settings)
        await self.bot.say("The picture has been added")
@luna.command()
    async def start(self):
        """Start cycling the avatar through the stored pictures"""
        if '1' not in self.settings:
            return
        self.bot.loop.create_task(self.change())
def setup(bot):
bot.add_cog(avatarchange(bot))
| gpl-3.0 | -520,928,650,877,356,700 | 23.523077 | 76 | 0.55458 | false |
lfaraone/ausctools | setup.py | 1 | 1100 | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
setup(
name='ausctools',
version='0.1',
description='Tools for generating reports for English Wikipedia functionary activity',
url='https://github.com/lfaraone/ausctools',
author='Luke Faraone',
author_email='[email protected]',
license='GPL-2+',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=[
"mwclient>=0.7.2",
"Babel>=2.0",
"PyYAML",
"tabulate>=0.6",
],
package_data={
'examples': ['excuses.yaml.ex'],
},
entry_points={
'console_scripts': [
'functionary-inactivity-report=inactivity_report:main',
],
},
)
| gpl-2.0 | 4,548,408,622,656,852,000 | 25.829268 | 90 | 0.590909 | false |
rg3/youtube-dl | youtube_dl/extractor/udemy.py | 2 | 19323 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_kwargs,
compat_str,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
determine_ext,
extract_attributes,
ExtractorError,
float_or_none,
int_or_none,
js_to_json,
sanitized_Request,
try_get,
unescapeHTML,
url_or_none,
urlencode_postdata,
)
class UdemyIE(InfoExtractor):
IE_NAME = 'udemy'
_VALID_URL = r'''(?x)
https?://
(?:[^/]+\.)?udemy\.com/
(?:
[^#]+\#/lecture/|
lecture/view/?\?lectureId=|
[^/]+/learn/v4/t/lecture/
)
(?P<id>\d+)
'''
_LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1'
_ORIGIN_URL = 'https://www.udemy.com'
_NETRC_MACHINE = 'udemy'
_TESTS = [{
'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757',
'md5': '98eda5b657e752cf945d8445e261b5c5',
'info_dict': {
'id': '160614',
'ext': 'mp4',
'title': 'Introduction and Installation',
'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876',
'duration': 579.29,
},
'skip': 'Requires udemy account credentials',
}, {
# new URL schema
'url': 'https://www.udemy.com/electric-bass-right-from-the-start/learn/v4/t/lecture/4580906',
'only_matching': True,
}, {
# no url in outputs format entry
'url': 'https://www.udemy.com/learn-web-development-complete-step-by-step-guide-to-success/learn/v4/t/lecture/4125812',
'only_matching': True,
}, {
# only outputs rendition
'url': 'https://www.udemy.com/how-you-can-help-your-local-community-5-amazing-examples/learn/v4/t/lecture/3225750?start=0',
'only_matching': True,
}, {
'url': 'https://wipro.udemy.com/java-tutorial/#/lecture/172757',
'only_matching': True,
}]
def _extract_course_info(self, webpage, video_id):
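        # The course id (and title) are embedded in the lecture page, either in
        # the ng-init course object or in a data-course-id attribute.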
course = self._parse_json(
unescapeHTML(self._search_regex(
r'ng-init=["\'].*\bcourse=({.+?})[;"\']',
webpage, 'course', default='{}')),
video_id, fatal=False) or {}
course_id = course.get('id') or self._search_regex(
r'data-course-id=["\'](\d+)', webpage, 'course id')
return course_id, course.get('title')
def _enroll_course(self, base_url, webpage, course_id):
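        # Auto-enroll in free courses; paid courses are reported together with
        # their checkout URL since they cannot be downloaded without purchase.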
def combine_url(base_url, url):
return compat_urlparse.urljoin(base_url, url) if not url.startswith('http') else url
checkout_url = unescapeHTML(self._search_regex(
r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/(?:payment|cart)/checkout/.+?)\1',
webpage, 'checkout url', group='url', default=None))
if checkout_url:
raise ExtractorError(
'Course %s is not free. You have to pay for it before you can download. '
'Use this URL to confirm purchase: %s'
% (course_id, combine_url(base_url, checkout_url)),
expected=True)
enroll_url = unescapeHTML(self._search_regex(
r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/course/subscribe/.+?)\1',
webpage, 'enroll url', group='url', default=None))
if enroll_url:
webpage = self._download_webpage(
combine_url(base_url, enroll_url),
course_id, 'Enrolling in the course',
headers={'Referer': base_url})
if '>You have enrolled in' in webpage:
self.to_screen('%s: Successfully enrolled in the course' % course_id)
def _download_lecture(self, course_id, lecture_id):
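        # Lecture metadata (title, description and asset with stream/download
        # URLs and captions) comes from the subscribed-courses API.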
return self._download_json(
'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?'
% (course_id, lecture_id),
lecture_id, 'Downloading lecture JSON', query={
'fields[lecture]': 'title,description,view_html,asset',
'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,stream_urls,captions,data',
})
def _handle_error(self, response):
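        # Surface API error objects as descriptive ExtractorErrors.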
if not isinstance(response, dict):
return
error = response.get('error')
if error:
error_str = 'Udemy returned error #%s: %s' % (error.get('code'), error.get('message'))
error_data = error.get('data')
if error_data:
error_str += ' - %s' % error_data.get('formErrors')
raise ExtractorError(error_str, expected=True)
def _download_webpage_handle(self, *args, **kwargs):
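        # Send a desktop Chrome User-Agent and detect anti-bot/CAPTCHA pages so
        # the user gets an actionable error message instead of garbled HTML.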
headers = kwargs.get('headers', {}).copy()
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'
kwargs['headers'] = headers
ret = super(UdemyIE, self)._download_webpage_handle(
*args, **compat_kwargs(kwargs))
if not ret:
return ret
webpage, _ = ret
if any(p in webpage for p in (
'>Please verify you are a human',
'Access to this page has been denied because we believe you are using automation tools to browse the website',
'"_pxCaptcha"')):
raise ExtractorError(
'Udemy asks you to solve a CAPTCHA. Login with browser, '
'solve CAPTCHA, then export cookies and pass cookie file to '
'youtube-dl with --cookies.', expected=True)
return ret
def _download_json(self, url_or_request, *args, **kwargs):
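        # Every API request needs the Udemy client id and bearer token, both of
        # which are taken from the cookies set during login.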
headers = {
'X-Udemy-Snail-Case': 'true',
'X-Requested-With': 'XMLHttpRequest',
}
for cookie in self._downloader.cookiejar:
if cookie.name == 'client_id':
headers['X-Udemy-Client-Id'] = cookie.value
elif cookie.name == 'access_token':
headers['X-Udemy-Bearer-Token'] = cookie.value
headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value
if isinstance(url_or_request, compat_urllib_request.Request):
for header, value in headers.items():
url_or_request.add_header(header, value)
else:
url_or_request = sanitized_Request(url_or_request, headers=headers)
response = super(UdemyIE, self)._download_json(url_or_request, *args, **kwargs)
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
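        # Log in via the login popup form; skipped when no credentials are
        # provided or when the cookies already belong to a logged-in session.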
username, password = self._get_login_info()
if username is None:
return
login_popup = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login popup')
def is_logged(webpage):
return any(re.search(p, webpage) for p in (
r'href=["\'](?:https://www\.udemy\.com)?/user/logout/',
r'>Logout<'))
# already logged in
if is_logged(login_popup):
return
login_form = self._form_hidden_inputs('login-form', login_popup)
login_form.update({
'email': username,
'password': password,
})
response = self._download_webpage(
self._LOGIN_URL, None, 'Logging in',
data=urlencode_postdata(login_form),
headers={
'Referer': self._ORIGIN_URL,
'Origin': self._ORIGIN_URL,
})
if not is_logged(response):
error = self._html_search_regex(
r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
response, 'error message', default=None)
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
def _real_extract(self, url):
lecture_id = self._match_id(url)
webpage = self._download_webpage(url, lecture_id)
course_id, _ = self._extract_course_info(webpage, lecture_id)
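        # A 403 from the lecture API usually means we are not enrolled yet, so
        # try to enroll (free courses only) and fetch the lecture again.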
try:
lecture = self._download_lecture(course_id, lecture_id)
except ExtractorError as e:
# Error could possibly mean we are not enrolled in the course
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
self._enroll_course(url, webpage, course_id)
lecture = self._download_lecture(course_id, lecture_id)
else:
raise
title = lecture['title']
description = lecture.get('description')
asset = lecture['asset']
asset_type = asset.get('asset_type') or asset.get('assetType')
if asset_type != 'Video':
raise ExtractorError(
'Lecture %s is not a video' % lecture_id, expected=True)
stream_url = asset.get('stream_url') or asset.get('streamUrl')
if stream_url:
youtube_url = self._search_regex(
r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None)
if youtube_url:
return self.url_result(youtube_url, 'Youtube')
video_id = compat_str(asset['id'])
thumbnail = asset.get('thumbnail_url') or asset.get('thumbnailUrl')
duration = float_or_none(asset.get('data', {}).get('duration'))
subtitles = {}
automatic_captions = {}
formats = []
def extract_output_format(src, f_id):
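            # Map an entry of the asset's "outputs" dict to format fields
            # (resolution, bitrates, codecs, filesize).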
return {
'url': src.get('url'),
'format_id': '%sp' % (src.get('height') or f_id),
'width': int_or_none(src.get('width')),
'height': int_or_none(src.get('height')),
'vbr': int_or_none(src.get('video_bitrate_in_kbps')),
'vcodec': src.get('video_codec'),
'fps': int_or_none(src.get('frame_rate')),
'abr': int_or_none(src.get('audio_bitrate_in_kbps')),
'acodec': src.get('audio_codec'),
'asr': int_or_none(src.get('audio_sample_rate')),
'tbr': int_or_none(src.get('total_bitrate_in_kbps')),
'filesize': int_or_none(src.get('file_size_in_bytes')),
}
outputs = asset.get('data', {}).get('outputs')
if not isinstance(outputs, dict):
outputs = {}
def add_output_format_meta(f, key):
output = outputs.get(key)
if isinstance(output, dict):
output_format = extract_output_format(output, key)
output_format.update(f)
return output_format
return f
def extract_formats(source_list):
if not isinstance(source_list, list):
return
for source in source_list:
video_url = url_or_none(source.get('file') or source.get('src'))
if not video_url:
continue
if source.get('type') == 'application/x-mpegURL' or determine_ext(video_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
continue
format_id = source.get('label')
f = {
'url': video_url,
'format_id': '%sp' % format_id,
'height': int_or_none(format_id),
}
if format_id:
# Some videos contain additional metadata (e.g.
# https://www.udemy.com/ios9-swift/learn/#/lecture/3383208)
f = add_output_format_meta(f, format_id)
formats.append(f)
def extract_subtitles(track_list):
if not isinstance(track_list, list):
return
for track in track_list:
if not isinstance(track, dict):
continue
if track.get('kind') != 'captions':
continue
src = url_or_none(track.get('src'))
if not src:
continue
lang = track.get('language') or track.get(
'srclang') or track.get('label')
sub_dict = automatic_captions if track.get(
'autogenerated') is True else subtitles
sub_dict.setdefault(lang, []).append({
'url': src,
})
for url_kind in ('download', 'stream'):
urls = asset.get('%s_urls' % url_kind)
if isinstance(urls, dict):
extract_formats(urls.get('Video'))
captions = asset.get('captions')
if isinstance(captions, list):
for cc in captions:
if not isinstance(cc, dict):
continue
cc_url = url_or_none(cc.get('url'))
if not cc_url:
continue
lang = try_get(cc, lambda x: x['locale']['locale'], compat_str)
sub_dict = (automatic_captions if cc.get('source') == 'auto'
else subtitles)
sub_dict.setdefault(lang or 'en', []).append({
'url': cc_url,
})
view_html = lecture.get('view_html')
if view_html:
view_html_urls = set()
for source in re.findall(r'<source[^>]+>', view_html):
attributes = extract_attributes(source)
src = attributes.get('src')
if not src:
continue
res = attributes.get('data-res')
height = int_or_none(res)
if src in view_html_urls:
continue
view_html_urls.add(src)
if attributes.get('type') == 'application/x-mpegURL' or determine_ext(src) == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
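                    # Some HLS variants carry no height/tbr metadata; recover both from the
                    # '/hls_<height>_<tbr>/' segment of the fragment URL when it is present.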
for f in m3u8_formats:
m = re.search(r'/hls_(?P<height>\d{3,4})_(?P<tbr>\d{2,})/', f['url'])
if m:
if not f.get('height'):
f['height'] = int(m.group('height'))
if not f.get('tbr'):
f['tbr'] = int(m.group('tbr'))
formats.extend(m3u8_formats)
else:
formats.append(add_output_format_meta({
'url': src,
'format_id': '%dp' % height if height else None,
'height': height,
}, res))
# react rendition since 2017.04.15 (see
# https://github.com/rg3/youtube-dl/issues/12744)
data = self._parse_json(
self._search_regex(
r'videojs-setup-data=(["\'])(?P<data>{.+?})\1', view_html,
'setup data', default='{}', group='data'), video_id,
transform_source=unescapeHTML, fatal=False)
if data and isinstance(data, dict):
extract_formats(data.get('sources'))
if not duration:
duration = int_or_none(data.get('duration'))
extract_subtitles(data.get('tracks'))
if not subtitles and not automatic_captions:
text_tracks = self._parse_json(
self._search_regex(
r'text-tracks=(["\'])(?P<data>\[.+?\])\1', view_html,
'text tracks', default='{}', group='data'), video_id,
transform_source=lambda s: js_to_json(unescapeHTML(s)),
fatal=False)
extract_subtitles(text_tracks)
if not formats and outputs:
for format_id, output in outputs.items():
f = extract_output_format(output, format_id)
if f.get('url'):
formats.append(f)
self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id'))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
'automatic_captions': automatic_captions,
}
class UdemyCourseIE(UdemyIE):
IE_NAME = 'udemy:course'
_VALID_URL = r'https?://(?:[^/]+\.)?udemy\.com/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.udemy.com/java-tutorial/',
'only_matching': True,
}, {
'url': 'https://wipro.udemy.com/java-tutorial/',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url)
def _real_extract(self, url):
course_path = self._match_id(url)
webpage = self._download_webpage(url, course_path)
course_id, title = self._extract_course_info(webpage, course_path)
self._enroll_course(url, webpage, course_id)
response = self._download_json(
'https://www.udemy.com/api-2.0/courses/%s/cached-subscriber-curriculum-items' % course_id,
course_id, 'Downloading course curriculum', query={
'fields[chapter]': 'title,object_index',
'fields[lecture]': 'title,asset',
'page_size': '1000',
})
entries = []
chapter, chapter_number = [None] * 2
for entry in response['results']:
clazz = entry.get('_class')
if clazz == 'lecture':
asset = entry.get('asset')
if isinstance(asset, dict):
asset_type = asset.get('asset_type') or asset.get('assetType')
if asset_type != 'Video':
continue
lecture_id = entry.get('id')
if lecture_id:
entry = {
'_type': 'url_transparent',
'url': 'https://www.udemy.com/%s/learn/v4/t/lecture/%s' % (course_path, entry['id']),
'title': entry.get('title'),
'ie_key': UdemyIE.ie_key(),
}
if chapter_number:
entry['chapter_number'] = chapter_number
if chapter:
entry['chapter'] = chapter
entries.append(entry)
elif clazz == 'chapter':
chapter_number = entry.get('object_index')
chapter = entry.get('title')
return self.playlist_result(entries, course_id, title)
| unlicense | 7,266,053,584,633,176,000 | 39.424686 | 149 | 0.507478 | false |
odashi/chainer_examples | chainer-1.4/mt_s2s_attention.py | 1 | 13718 | #!/usr/bin/python3
import my_settings
import sys
import math
import numpy as np
from argparse import ArgumentParser
from chainer import functions, optimizers
import chainer.computational_graph as cg
import util.generators as gens
from util.functions import trace, fill_batch2
from util.model_file import ModelFile
from util.vocabulary import Vocabulary
#from util.chainer_cpu_wrapper import wrapper
from util.chainer_gpu_wrapper import wrapper
class AttentionalTranslationModel:
def __init__(self):
pass
def __make_model(self):
self.__model = wrapper.make_model(
# input embedding
w_xi = functions.EmbedID(len(self.__src_vocab), self.__n_embed),
# forward encoder
w_ia = functions.Linear(self.__n_embed, 4 * self.__n_hidden),
w_aa = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
# backward encoder
w_ib = functions.Linear(self.__n_embed, 4 * self.__n_hidden),
w_bb = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
# attentional weight estimator
w_aw = functions.Linear(self.__n_hidden, self.__n_hidden),
w_bw = functions.Linear(self.__n_hidden, self.__n_hidden),
w_pw = functions.Linear(self.__n_hidden, self.__n_hidden),
w_we = functions.Linear(self.__n_hidden, 1),
# decoder
w_ap = functions.Linear(self.__n_hidden, self.__n_hidden),
w_bp = functions.Linear(self.__n_hidden, self.__n_hidden),
w_yp = functions.EmbedID(len(self.__trg_vocab), 4 * self.__n_hidden),
w_pp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
w_cp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
w_dp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
w_py = functions.Linear(self.__n_hidden, len(self.__trg_vocab)),
)
@staticmethod
def new(src_vocab, trg_vocab, n_embed, n_hidden):
self = AttentionalTranslationModel()
self.__src_vocab = src_vocab
self.__trg_vocab = trg_vocab
self.__n_embed = n_embed
self.__n_hidden = n_hidden
self.__make_model()
return self
def save(self, filename):
with ModelFile(filename, 'w') as fp:
self.__src_vocab.save(fp.get_file_pointer())
self.__trg_vocab.save(fp.get_file_pointer())
fp.write(self.__n_embed)
fp.write(self.__n_hidden)
wrapper.begin_model_access(self.__model)
fp.write_embed(self.__model.w_xi)
fp.write_linear(self.__model.w_ia)
fp.write_linear(self.__model.w_aa)
fp.write_linear(self.__model.w_ib)
fp.write_linear(self.__model.w_bb)
fp.write_linear(self.__model.w_aw)
fp.write_linear(self.__model.w_bw)
fp.write_linear(self.__model.w_pw)
fp.write_linear(self.__model.w_we)
fp.write_linear(self.__model.w_ap)
fp.write_linear(self.__model.w_bp)
fp.write_embed(self.__model.w_yp)
fp.write_linear(self.__model.w_pp)
fp.write_linear(self.__model.w_cp)
fp.write_linear(self.__model.w_dp)
fp.write_linear(self.__model.w_py)
wrapper.end_model_access(self.__model)
@staticmethod
def load(filename):
self = AttentionalTranslationModel()
with ModelFile(filename) as fp:
self.__src_vocab = Vocabulary.load(fp.get_file_pointer())
self.__trg_vocab = Vocabulary.load(fp.get_file_pointer())
self.__n_embed = int(fp.read())
self.__n_hidden = int(fp.read())
self.__make_model()
wrapper.begin_model_access(self.__model)
fp.read_embed(self.__model.w_xi)
fp.read_linear(self.__model.w_ia)
fp.read_linear(self.__model.w_aa)
fp.read_linear(self.__model.w_ib)
fp.read_linear(self.__model.w_bb)
fp.read_linear(self.__model.w_aw)
fp.read_linear(self.__model.w_bw)
fp.read_linear(self.__model.w_pw)
fp.read_linear(self.__model.w_we)
fp.read_linear(self.__model.w_ap)
fp.read_linear(self.__model.w_bp)
fp.read_embed(self.__model.w_yp)
fp.read_linear(self.__model.w_pp)
fp.read_linear(self.__model.w_cp)
fp.read_linear(self.__model.w_dp)
fp.read_linear(self.__model.w_py)
wrapper.end_model_access(self.__model)
return self
def init_optimizer(self):
self.__opt = optimizers.AdaGrad(lr=0.01)
self.__opt.setup(self.__model)
def __forward(self, is_training, src_batch, trg_batch = None, generation_limit = None):
m = self.__model
tanh = functions.tanh
lstm = functions.lstm
batch_size = len(src_batch)
hidden_size = self.__n_hidden
src_len = len(src_batch[0])
trg_len = len(trg_batch[0]) - 1 if is_training else generation_limit
src_stoi = self.__src_vocab.stoi
trg_stoi = self.__trg_vocab.stoi
trg_itos = self.__trg_vocab.itos
hidden_zeros = wrapper.zeros((batch_size, hidden_size))
sum_e_zeros = wrapper.zeros((batch_size, 1))
# make embedding
list_x = []
for l in range(src_len):
s_x = wrapper.make_var([src_stoi(src_batch[k][l]) for k in range(batch_size)], dtype=np.int32)
list_x.append(s_x)
# forward encoding
c = hidden_zeros
s_a = hidden_zeros
list_a = []
for l in range(src_len):
s_x = list_x[l]
s_i = tanh(m.w_xi(s_x))
c, s_a = lstm(c, m.w_ia(s_i) + m.w_aa(s_a))
list_a.append(s_a)
# backward encoding
c = hidden_zeros
s_b = hidden_zeros
list_b = []
for l in reversed(range(src_len)):
s_x = list_x[l]
s_i = tanh(m.w_xi(s_x))
c, s_b = lstm(c, m.w_ib(s_i) + m.w_bb(s_b))
list_b.insert(0, s_b)
# decoding
c = hidden_zeros
s_p = tanh(m.w_ap(list_a[-1]) + m.w_bp(list_b[0]))
s_y = wrapper.make_var([trg_stoi('<s>') for k in range(batch_size)], dtype=np.int32)
hyp_batch = [[] for _ in range(batch_size)]
accum_loss = wrapper.zeros(()) if is_training else None
#for n in range(src_len):
# print(src_batch[0][n], end=' ')
#print()
for l in range(trg_len):
# calculate attention weights
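            # each raw score is exp(w_we . tanh(w_aw*a_n + w_bw*b_n + w_pw*p)); dividing by
            # sum_e below normalizes the scores into softmax attention weights over source positions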
list_e = []
sum_e = sum_e_zeros
for n in range(src_len):
s_w = tanh(m.w_aw(list_a[n]) + m.w_bw(list_b[n]) + m.w_pw(s_p))
r_e = functions.exp(m.w_we(s_w))
#list_e.append(functions.concat(r_e for _ in range(self.__n_hidden)))
list_e.append(r_e)
sum_e += r_e
#sum_e = functions.concat(sum_e for _ in range(self.__n_hidden))
# make attention vector
s_c = hidden_zeros
s_d = hidden_zeros
for n in range(src_len):
s_e = list_e[n] / sum_e
#s_c += s_e * list_a[n]
#s_d += s_e * list_b[n]
s_c += functions.reshape(functions.batch_matmul(list_a[n], s_e), (batch_size, hidden_size))
s_d += functions.reshape(functions.batch_matmul(list_b[n], s_e), (batch_size, hidden_size))
#zxcv = wrapper.get_data(s_e)[0][0]
#if zxcv > 0.9: asdf='#'
#elif zxcv > 0.7: asdf='*'
#elif zxcv > 0.3: asdf='+'
#elif zxcv > 0.1: asdf='.'
#else: asdf=' '
#print(asdf * len(src_batch[0][n]), end=' ')
# generate next word
c, s_p = lstm(c, m.w_yp(s_y) + m.w_pp(s_p) + m.w_cp(s_c) + m.w_dp(s_d))
r_y = m.w_py(s_p)
output = wrapper.get_data(r_y).argmax(1)
for k in range(batch_size):
hyp_batch[k].append(trg_itos(output[k]))
#print(hyp_batch[0][-1])
if is_training:
s_t = wrapper.make_var([trg_stoi(trg_batch[k][l + 1]) for k in range(batch_size)], dtype=np.int32)
accum_loss += functions.softmax_cross_entropy(r_y, s_t)
s_y = s_t
else:
if all(hyp_batch[k][-1] == '</s>' for k in range(batch_size)): break
s_y = wrapper.make_var(output, dtype=np.int32)
return hyp_batch, accum_loss
def train(self, src_batch, trg_batch):
self.__opt.zero_grads()
hyp_batch, accum_loss = self.__forward(True, src_batch, trg_batch=trg_batch)
#g = cg.build_computational_graph([accum_loss])
#with open('asdf', 'w') as fp: fp.write(g.dump())
#sys.exit()
accum_loss.backward()
self.__opt.clip_grads(10)
self.__opt.update()
return hyp_batch
def predict(self, src_batch, generation_limit):
return self.__forward(False, src_batch, generation_limit=generation_limit)[0]
def parse_args():
def_vocab = 32768
def_embed = 256
def_hidden = 512
def_epoch = 100
def_minibatch = 64
def_generation_limit = 256
p = ArgumentParser(description='Attentional neural machine translation')
p.add_argument('mode', help='\'train\' or \'test\'')
p.add_argument('source', help='[in] source corpus')
p.add_argument('target', help='[in/out] target corpus')
p.add_argument('model', help='[in/out] model file')
p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
help='vocabulary size (default: %d)' % def_vocab)
p.add_argument('--embed', default=def_embed, metavar='INT', type=int,
help='embedding layer size (default: %d)' % def_embed)
p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
help='hidden layer size (default: %d)' % def_hidden)
p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
help='number of training epoch (default: %d)' % def_epoch)
p.add_argument('--minibatch', default=def_minibatch, metavar='INT', type=int,
help='minibatch size (default: %d)' % def_minibatch)
p.add_argument('--generation-limit', default=def_generation_limit, metavar='INT', type=int,
help='maximum number of words to be generated for test input')
args = p.parse_args()
# check args
try:
if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
if args.embed < 1: raise ValueError('you must set --embed >= 1')
if args.hidden < 1: raise ValueError('you must set --hidden >= 1')
if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
if args.minibatch < 1: raise ValueError('you must set --minibatch >= 1')
if args.generation_limit < 1: raise ValueError('you must set --generation-limit >= 1')
except Exception as ex:
p.print_usage(file=sys.stderr)
print(ex, file=sys.stderr)
sys.exit()
return args
def train_model(args):
trace('making vocabularies ...')
src_vocab = Vocabulary.new(gens.word_list(args.source), args.vocab)
trg_vocab = Vocabulary.new(gens.word_list(args.target), args.vocab)
trace('making model ...')
model = AttentionalTranslationModel.new(src_vocab, trg_vocab, args.embed, args.hidden)
for epoch in range(args.epoch):
trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
trained = 0
gen1 = gens.word_list(args.source)
gen2 = gens.word_list(args.target)
gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * args.minibatch, order=0), args.minibatch)
model.init_optimizer()
for src_batch, trg_batch in gen3:
src_batch = fill_batch2(src_batch)
trg_batch = fill_batch2(trg_batch)
K = len(src_batch)
hyp_batch = model.train(src_batch, trg_batch)
for k in range(K):
trace('epoch %3d/%3d, sample %8d' % (epoch + 1, args.epoch, trained + k + 1))
trace(' src = ' + ' '.join([x if x != '</s>' else '*' for x in src_batch[k]]))
trace(' trg = ' + ' '.join([x if x != '</s>' else '*' for x in trg_batch[k]]))
trace(' hyp = ' + ' '.join([x if x != '</s>' else '*' for x in hyp_batch[k]]))
trained += K
trace('saving model ...')
model.save(args.model + '.%03d' % (epoch + 1))
trace('finished.')
def test_model(args):
trace('loading model ...')
model = AttentionalTranslationModel.load(args.model)
trace('generating translation ...')
generated = 0
with open(args.target, 'w') as fp:
for src_batch in gens.batch(gens.word_list(args.source), args.minibatch):
src_batch = fill_batch2(src_batch)
K = len(src_batch)
trace('sample %8d - %8d ...' % (generated + 1, generated + K))
hyp_batch = model.predict(src_batch, args.generation_limit)
for hyp in hyp_batch:
hyp.append('</s>')
hyp = hyp[:hyp.index('</s>')]
print(' '.join(hyp), file=fp)
generated += K
trace('finished.')
def main():
args = parse_args()
trace('initializing ...')
wrapper.init()
if args.mode == 'train': train_model(args)
elif args.mode == 'test': test_model(args)
if __name__ == '__main__':
main()
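# Example invocation (file names below are placeholders, not part of the repository):
#   ./mt_s2s_attention.py train train.src train.trg model --minibatch 64 --epoch 20
#   ./mt_s2s_attention.py test test.src output.hyp model.020
# (train mode saves one model per epoch as <model>.NNN; test mode writes hypotheses to the target path)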
| mit | -8,451,619,857,469,180,000 | 37.642254 | 114 | 0.548331 | false |
skggm/skggm | examples/estimator_suite.py | 1 | 17686 | """
Suite of estimator comparisons
==============================
Compare inverse covariance estimators and model selection methods.
Derived from example in:
http://scikit-learn.org/stable/auto_examples/covariance/plot_sparse_cov.html
"""
import sys
import numpy as np
import tabulate
import time
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
sys.path.append("..")
sys.path.append("../inverse_covariance")
from inverse_covariance import (
QuicGraphicalLasso,
QuicGraphicalLassoCV,
QuicGraphicalLassoEBIC,
AdaptiveGraphicalLasso,
ModelAverage,
)
plt.ion()
def r_input(val):
if sys.version_info[0] >= 3:
        return input(val)  # read a raw string; the eval() left by 2to3 breaks on empty input
return raw_input(val)
def make_data(n_samples, n_features):
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(
n_features, alpha=.98, smallest_coef=.4, largest_coef=.7, random_state=prng
)
cov = np.linalg.inv(prec)
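    # rescale so that cov has a unit diagonal (a correlation matrix) and prec is
    # scaled consistently with it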
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
return X, cov, prec
def multiplot(named_mats, suptitle):
num_rows = len(named_mats) / 3
num_plots = int(np.ceil(num_rows / 4.))
for nn in range(num_plots):
plt.figure(figsize=(10, 8))
plt.subplots_adjust(left=0.02, right=0.98, hspace=0.4)
for i, item in enumerate(named_mats[nn * 4 * 3 : (nn + 1) * 4 * 3]):
lam = None
if len(item) == 3:
name, this_mat, lam = item
elif len(item) == 2:
name, this_mat = item
vmax = np.abs(this_mat).max()
ax = plt.subplot(4, 3, i + 1)
plt.imshow(
np.ma.masked_values(this_mat, 0),
interpolation="nearest",
vmin=-vmax,
vmax=vmax,
cmap=plt.cm.RdBu_r,
)
plt.xticks(())
plt.yticks(())
if lam is None or lam == "":
plt.title("{}".format(name))
else:
plt.title("{}\n(lam={:.2f})".format(name, lam))
plt.suptitle(suptitle + " (page {})".format(nn), fontsize=14)
plt.show()
def show_results(covs, precs):
multiplot(covs, "Covariance Estimates")
multiplot(precs, "Precision Estimates")
def quic_graph_lasso(X, num_folds, metric):
"""Run QuicGraphicalLasso with mode='default' and use standard scikit
GridSearchCV to find the best lambda.
Primarily demonstrates compatibility with existing scikit tooling.
"""
print("QuicGraphicalLasso + GridSearchCV with:")
print(" metric: {}".format(metric))
search_grid = {
"lam": np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
"init_method": ["cov"],
"score_metric": [metric],
}
model = GridSearchCV(QuicGraphicalLasso(), search_grid, cv=num_folds, refit=True)
model.fit(X)
bmodel = model.best_estimator_
print(" len(cv_lams): {}".format(len(search_grid["lam"])))
print(" cv-lam: {}".format(model.best_params_["lam"]))
print(" lam_scale_: {}".format(bmodel.lam_scale_))
print(" lam_: {}".format(bmodel.lam_))
return bmodel.covariance_, bmodel.precision_, bmodel.lam_
def quic_graph_lasso_cv(X, metric):
"""Run QuicGraphicalLassoCV on data with metric of choice.
Compare results with GridSearchCV + quic_graph_lasso. The number of
lambdas tested should be much lower with similar final lam_ selected.
"""
print("QuicGraphicalLassoCV with:")
print(" metric: {}".format(metric))
model = QuicGraphicalLassoCV(
cv=2, # cant deal w more folds at small size
n_refinements=6,
n_jobs=1,
init_method="cov",
score_metric=metric,
)
model.fit(X)
print(" len(cv_lams): {}".format(len(model.cv_lams_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(model.lam_))
return model.covariance_, model.precision_, model.lam_
def adaptive_graph_lasso(X, model_selector, method):
"""Run QuicGraphicalLassoCV or QuicGraphicalLassoEBIC as a two step adaptive fit
with method of choice (currently: 'binary', 'inverse', 'inverse_squared').
Compare the support and values to the model-selection estimator.
"""
metric = "log_likelihood"
print("Adaptive {} with:".format(model_selector))
print(" adaptive-method: {}".format(method))
if model_selector == "QuicGraphicalLassoCV":
print(" metric: {}".format(metric))
model = AdaptiveGraphicalLasso(
estimator=QuicGraphicalLassoCV(
cv=2, # cant deal w more folds at small size
n_refinements=6,
init_method="cov",
score_metric=metric,
),
method=method,
)
elif model_selector == "QuicGraphicalLassoEBIC":
model = AdaptiveGraphicalLasso(
estimator=QuicGraphicalLassoEBIC(), method=method
)
model.fit(X)
lam_norm_ = np.linalg.norm(model.estimator_.lam_)
print(" ||lam_||_2: {}".format(lam_norm_))
return model.estimator_.covariance_, model.estimator_.precision_, lam_norm_
def quic_graph_lasso_ebic_manual(X, gamma=0):
"""Run QuicGraphicalLasso with mode='path' and gamma; use EBIC criteria for model
selection.
The EBIC criteria is built into InverseCovarianceEstimator base class
so we demonstrate those utilities here.
"""
print("QuicGraphicalLasso (manual EBIC) with:")
print(" mode: path")
print(" gamma: {}".format(gamma))
model = QuicGraphicalLasso(
lam=1.0,
mode="path",
init_method="cov",
path=np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
)
model.fit(X)
ebic_index = model.ebic_select(gamma=gamma)
covariance_ = model.covariance_[ebic_index]
precision_ = model.precision_[ebic_index]
lam_ = model.lam_at_index(ebic_index)
print(" len(path lams): {}".format(len(model.path_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(lam_))
print(" ebic_index: {}".format(ebic_index))
return covariance_, precision_, lam_
def quic_graph_lasso_ebic(X, gamma=0):
"""Run QuicGraphicalLassoEBIC with gamma.
QuicGraphicalLassoEBIC is a convenience class. Results should be identical to
those obtained via quic_graph_lasso_ebic_manual.
"""
print("QuicGraphicalLassoEBIC with:")
print(" mode: path")
print(" gamma: {}".format(gamma))
model = QuicGraphicalLassoEBIC(lam=1.0, init_method="cov", gamma=gamma)
model.fit(X)
print(" len(path lams): {}".format(len(model.path_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(model.lam_))
return model.covariance_, model.precision_, model.lam_
def model_average(X, penalization):
"""Run ModelAverage in default mode (QuicGraphicalLassoCV) to obtain proportion
matrix.
NOTE: This returns precision_ proportions, not cov, prec estimates, so we
return the raw proportions for "cov" and the threshold support
estimate for prec.
"""
n_trials = 100
print("ModelAverage with:")
print(" estimator: QuicGraphicalLasso (default)")
print(" n_trials: {}".format(n_trials))
print(" penalization: {}".format(penalization))
# if penalization is random, first find a decent scalar lam_ to build
# random perturbation matrix around. lam doesn't matter for fully-random.
lam = 0.5
if penalization == "random":
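        # NOTE: `metric` is not defined in this function; the call below picks up the
        # module-level `metric` left over from the parameter loops under __main__.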
cv_model = QuicGraphicalLassoCV(
cv=2, n_refinements=6, n_jobs=1, init_method="cov", score_metric=metric
)
cv_model.fit(X)
lam = cv_model.lam_
print(" lam: {}".format(lam))
model = ModelAverage(
n_trials=n_trials, penalization=penalization, lam=lam, n_jobs=1
)
model.fit(X)
print(" lam_: {}".format(model.lam_))
return model.proportion_, model.support_, model.lam_
def adaptive_model_average(X, penalization, method):
"""Run ModelAverage in default mode (QuicGraphicalLassoCV) to obtain proportion
matrix.
NOTE: Only method = 'binary' really makes sense in this case.
"""
n_trials = 100
print("Adaptive ModelAverage with:")
print(" estimator: QuicGraphicalLasso (default)")
print(" n_trials: {}".format(n_trials))
print(" penalization: {}".format(penalization))
print(" adaptive-method: {}".format(method))
# if penalization is random, first find a decent scalar lam_ to build
# random perturbation matrix around. lam doesn't matter for fully-random.
lam = 0.5
if penalization == "random":
cv_model = QuicGraphicalLassoCV(
cv=2, n_refinements=6, n_jobs=1, init_method="cov", score_metric=metric
)
cv_model.fit(X)
lam = cv_model.lam_
print(" lam: {}".format(lam))
model = AdaptiveGraphicalLasso(
estimator=ModelAverage(
n_trials=n_trials, penalization=penalization, lam=lam, n_jobs=1
),
method=method,
)
model.fit(X)
lam_norm_ = np.linalg.norm(model.estimator_.lam_)
print(" ||lam_||_2: {}".format(lam_norm_))
return model.estimator_.covariance_, model.estimator_.precision_, lam_norm_
def empirical(X):
"""Compute empirical covariance as baseline estimator.
"""
print("Empirical")
cov = np.dot(X.T, X) / n_samples
return cov, np.linalg.inv(cov)
def graph_lasso(X, num_folds):
"""Estimate inverse covariance via scikit-learn GraphLassoCV class.
"""
print("GraphLasso (sklearn)")
model = GraphLassoCV(cv=num_folds)
model.fit(X)
print(" lam_: {}".format(model.alpha_))
return model.covariance_, model.precision_, model.alpha_
def sk_ledoit_wolf(X):
"""Estimate inverse covariance via scikit-learn ledoit_wolf function.
"""
print("Ledoit-Wolf (sklearn)")
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = np.linalg.inv(lw_cov_)
return lw_cov_, lw_prec_
def _count_support_diff(m, m_hat):
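    # Size of the symmetric difference between the off-diagonal supports (nonzero
    # patterns) of m and m_hat; 0 means both estimates agree on the sparsity structure.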
n_features, _ = m.shape
m_no_diag = m.copy()
m_no_diag[np.diag_indices(n_features)] = 0
m_hat_no_diag = m_hat.copy()
m_hat_no_diag[np.diag_indices(n_features)] = 0
m_nnz = len(np.nonzero(m_no_diag.flat)[0])
m_hat_nnz = len(np.nonzero(m_hat_no_diag.flat)[0])
nnz_intersect = len(
np.intersect1d(np.nonzero(m_no_diag.flat)[0], np.nonzero(m_hat_no_diag.flat)[0])
)
return m_nnz + m_hat_nnz - (2 * nnz_intersect)
if __name__ == "__main__":
n_samples = 100
n_features = 20
cv_folds = 3
# make data
X, true_cov, true_prec = make_data(n_samples, n_features)
plot_covs = [("True", true_cov), ("True", true_cov), ("True", true_cov)]
plot_precs = [
("True", true_prec, ""),
("True", true_prec, ""),
("True", true_prec, ""),
]
results = []
# Empirical
start_time = time.time()
cov, prec = empirical(X)
end_time = time.time()
ctime = end_time - start_time
name = "Empirical"
plot_covs.append((name, cov))
plot_precs.append((name, prec, ""))
error = np.linalg.norm(true_cov - cov, ord="fro")
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, error, supp_diff, ctime, ""])
print(" frobenius error: {}".format(error))
print("")
# sklearn LedoitWolf
start_time = time.time()
cov, prec = sk_ledoit_wolf(X)
end_time = time.time()
ctime = end_time - start_time
name = "Ledoit-Wolf (sklearn)"
plot_covs.append((name, cov))
plot_precs.append((name, prec, ""))
error = np.linalg.norm(true_cov - cov, ord="fro")
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, error, supp_diff, ctime, ""])
print(" frobenius error: {}".format(error))
print("")
# sklearn GraphLassoCV
start_time = time.time()
cov, prec, lam = graph_lasso(X, cv_folds)
end_time = time.time()
ctime = end_time - start_time
name = "GraphLassoCV (sklearn)"
plot_covs.append((name, cov))
plot_precs.append((name, prec, lam))
error = np.linalg.norm(true_cov - cov, ord="fro")
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, error, supp_diff, ctime, lam])
print(" frobenius error: {}".format(error))
print("")
# QuicGraphicalLasso + GridSearchCV
params = [
("QuicGraphicalLasso GSCV : ll", "log_likelihood"),
("QuicGraphicalLasso GSCV : kl", "kl"),
("QuicGraphicalLasso GSCV : fro", "frobenius"),
]
for name, metric in params:
start_time = time.time()
cov, prec, lam = quic_graph_lasso(X, cv_folds, metric=metric)
end_time = time.time()
ctime = end_time - start_time
plot_covs.append((name, cov))
plot_precs.append((name, prec, lam))
error = np.linalg.norm(true_cov - cov, ord="fro")
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, error, supp_diff, ctime, lam])
print(" frobenius error: {}".format(error))
print("")
# QuicGraphicalLassoCV
params = [
("QuicGraphicalLassoCV : ll", "log_likelihood"),
("QuicGraphicalLassoCV : kl", "kl"),
("QuicGraphicalLassoCV : fro", "frobenius"),
]
for name, metric in params:
start_time = time.time()
cov, prec, lam = quic_graph_lasso_cv(X, metric=metric)
end_time = time.time()
ctime = end_time - start_time
plot_covs.append((name, cov))
plot_precs.append((name, prec, lam))
error = np.linalg.norm(true_cov - cov, ord="fro")
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, error, supp_diff, ctime, lam])
print(" frobenius error: {}".format(error))
print("")
# QuicGraphicalLassoEBIC
params = [
("QuicGraphicalLassoEBIC : BIC", 0),
("QuicGraphicalLassoEBIC : g=0.01", 0.01),
("QuicGraphicalLassoEBIC : g=0.1", 0.1),
]
for name, gamma in params:
start_time = time.time()
# cov, prec, lam = quic_graph_lasso_ebic_manual(X, gamma=gamma)
cov, prec, lam = quic_graph_lasso_ebic(X, gamma=gamma)
end_time = time.time()
ctime = end_time - start_time
plot_covs.append((name, cov))
plot_precs.append((name, prec, lam))
error = np.linalg.norm(true_cov - cov, ord="fro")
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, error, supp_diff, ctime, lam])
print(" error: {}".format(error))
print("")
# Default ModelAverage
params = [
("ModelAverage : random", "random"),
("ModelAverage : fully-random", "fully-random"),
]
for name, model_selector in params:
start_time = time.time()
cov, prec, lam = model_average(X, model_selector)
end_time = time.time()
ctime = end_time - start_time
plot_covs.append((name, cov))
plot_precs.append((name, prec, ""))
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, "", supp_diff, ctime, lam])
print("")
# Adaptive QuicGraphicalLassoCV and QuicGraphicalLassoEBIC
params = [
("Adaptive CV : binary", "QuicGraphicalLassoCV", "binary"),
("Adaptive CV : inv", "QuicGraphicalLassoCV", "inverse"),
("Adaptive CV : inv**2", "QuicGraphicalLassoCV", "inverse_squared"),
("Adaptive BIC : binary", "QuicGraphicalLassoEBIC", "binary"),
("Adaptive BIC : inv", "QuicGraphicalLassoEBIC", "inverse"),
("Adaptive BIC : inv**2", "QuicGraphicalLassoEBIC", "inverse_squared"),
]
for name, model_selector, method in params:
start_time = time.time()
cov, prec, lam = adaptive_graph_lasso(X, model_selector, method)
end_time = time.time()
ctime = end_time - start_time
plot_covs.append((name, cov))
plot_precs.append((name, prec, ""))
error = np.linalg.norm(true_cov - cov, ord="fro")
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, error, supp_diff, ctime, ""])
print(" frobenius error: {}".format(error))
print("")
# Adaptive ModelAverage
params = [("Adaptive MA : random, binary", "random", "binary")]
for name, model_selector, method in params:
start_time = time.time()
cov, prec, lam = adaptive_model_average(X, model_selector, method)
end_time = time.time()
ctime = end_time - start_time
plot_covs.append((name, cov))
plot_precs.append((name, prec, ""))
error = np.linalg.norm(true_cov - cov, ord="fro")
supp_diff = _count_support_diff(true_prec, prec)
results.append([name, error, supp_diff, ctime, ""])
print(" frobenius error: {}".format(error))
print("")
# tabulate errors
print(
tabulate.tabulate(
results,
headers=[
"Estimator",
"Error (Frobenius)",
"Support Diff",
"Time",
"Lambda",
],
tablefmt="pipe",
)
)
print("")
# display results
show_results(plot_covs, plot_precs)
r_input("Press any key to exit...")
| mit | -4,417,101,670,352,049,000 | 33.142857 | 88 | 0.600249 | false |
pointhi/kicad-footprint-generator | KicadModTree/tests/datatypes/test_Vector2D.py | 1 | 5520 | # KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2018 by Thomas Pointhuber, <[email protected]>
import unittest
import math
from KicadModTree.Vector import *
class Vector2DTests(unittest.TestCase):
def test_init(self):
p1 = Vector2D([1, 2])
self.assertEqual(p1.x, 1)
self.assertEqual(p1.y, 2)
p2 = Vector2D((4, 5))
self.assertEqual(p2.x, 4)
self.assertEqual(p2.y, 5)
p3 = Vector2D({'x': 7, 'y': 8})
self.assertEqual(p3.x, 7)
self.assertEqual(p3.y, 8)
p3_empty = Vector2D({})
self.assertEqual(p3_empty.x, 0)
self.assertEqual(p3_empty.y, 0)
p4 = Vector2D(p1)
self.assertEqual(p4.x, 1)
self.assertEqual(p4.y, 2)
p5 = Vector2D(1, 2)
self.assertEqual(p5.x, 1)
self.assertEqual(p5.y, 2)
# TODO: test float datatype
# TODO: invalid type tests
# TODO: tests if int is always converted to float
def test_round_to(self):
p1 = Vector2D([1.234, 5.678]).round_to(0)
self.assertAlmostEqual(p1.x, 1.234)
self.assertAlmostEqual(p1.y, 5.678)
p2 = Vector2D([1.234, 5.678]).round_to(0.1)
self.assertAlmostEqual(p2.x, 1.2)
self.assertAlmostEqual(p2.y, 5.7)
p3 = Vector2D([1.234, 5.678]).round_to(0.01)
self.assertAlmostEqual(p3.x, 1.23)
self.assertAlmostEqual(p3.y, 5.68)
p4 = Vector2D([1.234, 5.678]).round_to(0.001)
self.assertAlmostEqual(p4.x, 1.234)
self.assertAlmostEqual(p4.y, 5.678)
p5 = Vector2D([1.234, 5.678]).round_to(0.0001)
self.assertAlmostEqual(p5.x, 1.234)
self.assertAlmostEqual(p5.y, 5.678)
def test_add(self):
p1 = Vector2D([1, 2])
self.assertEqual(p1.x, 1)
self.assertEqual(p1.y, 2)
p2 = p1 + 5
self.assertEqual(p2.x, 6)
self.assertEqual(p2.y, 7)
p3 = p1 + (-5)
self.assertEqual(p3.x, -4)
self.assertEqual(p3.y, -3)
p4 = p1 + [4, 2]
self.assertEqual(p4.x, 5)
self.assertEqual(p4.y, 4)
p5 = p1 + [-5, -3]
self.assertEqual(p5.x, -4)
self.assertEqual(p5.y, -1)
# TODO: invalid type tests
def test_sub(self):
p1 = Vector2D([1, 2])
self.assertEqual(p1.x, 1)
self.assertEqual(p1.y, 2)
p2 = p1 - 5
self.assertEqual(p2.x, -4)
self.assertEqual(p2.y, -3)
p3 = p1 - (-5)
self.assertEqual(p3.x, 6)
self.assertEqual(p3.y, 7)
p4 = p1 - [4, 2]
self.assertEqual(p4.x, -3)
self.assertEqual(p4.y, 0)
p5 = p1 - [-5, -3]
self.assertEqual(p5.x, 6)
self.assertEqual(p5.y, 5)
# TODO: invalid type tests
def test_mul(self):
p1 = Vector2D([1, 2])
self.assertEqual(p1.x, 1)
self.assertEqual(p1.y, 2)
p2 = p1 * 5
self.assertEqual(p2.x, 5)
self.assertEqual(p2.y, 10)
p3 = p1 * (-5)
self.assertEqual(p3.x, -5)
self.assertEqual(p3.y, -10)
p4 = p1 * [4, 5]
self.assertEqual(p4.x, 4)
self.assertEqual(p4.y, 10)
p5 = p1 * [-5, -3]
self.assertEqual(p5.x, -5)
self.assertEqual(p5.y, -6)
# TODO: invalid type tests
def test_div(self):
p1 = Vector2D([1, 2])
self.assertEqual(p1.x, 1)
self.assertEqual(p1.y, 2)
p2 = p1 / 5
self.assertEqual(p2.x, 0.2)
self.assertEqual(p2.y, 0.4)
p3 = p1 / (-5)
self.assertEqual(p3.x, -0.2)
self.assertEqual(p3.y, -0.4)
p4 = p1 / [4, 5]
self.assertEqual(p4.x, 0.25)
self.assertEqual(p4.y, 0.4)
p5 = p1 / [-5, -2]
self.assertEqual(p5.x, -0.2)
self.assertEqual(p5.y, -1)
# TODO: division by zero tests
# TODO: invalid type tests
def test_polar(self):
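        # from_polar(radius, angle) with use_degrees=True measures the angle in degrees,
        # e.g. radius sqrt(2) at 45 degrees yields the point (1, 1) as asserted below.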
p1 = Vector2D.from_polar(math.sqrt(2), 45, use_degrees=True)
self.assertAlmostEqual(p1.x, 1)
self.assertAlmostEqual(p1.y, 1)
p1 = Vector2D.from_polar(2, -90, use_degrees=True, origin=(6, 1))
self.assertAlmostEqual(p1.x, 6)
self.assertAlmostEqual(p1.y, -1)
r, a = p1.to_polar(use_degrees=True, origin=(6, 1))
self.assertAlmostEqual(r, 2)
self.assertAlmostEqual(a, -90)
p1.rotate(90, use_degrees=True, origin=(6, 1))
self.assertAlmostEqual(p1.x, 8)
self.assertAlmostEqual(p1.y, 1)
p1 = Vector2D.from_polar(math.sqrt(2), 135, use_degrees=True)
self.assertAlmostEqual(p1.x, -1)
self.assertAlmostEqual(p1.y, 1)
p1.rotate(90, use_degrees=True)
self.assertAlmostEqual(p1.x, -1)
self.assertAlmostEqual(p1.y, -1)
r, a = p1.to_polar(use_degrees=True)
self.assertAlmostEqual(r, math.sqrt(2))
self.assertAlmostEqual(a, -135)
| gpl-3.0 | 6,064,318,419,664,601,000 | 27.601036 | 85 | 0.571196 | false |
ypraveen/avi-heat | avi/heat/resources/action.py | 1 | 16612 | # GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from common import *
from options import *
class URIParamToken(object):
# all schemas
type_schema = properties.Schema(
properties.Schema.STRING,
_("Token type for constructing the URI"),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['URI_TOKEN_TYPE_PATH', 'URI_TOKEN_TYPE_REGEX', 'URI_TOKEN_TYPE_STRING', 'URI_TOKEN_TYPE_STRING_GROUP', 'URI_TOKEN_TYPE_HOST']),
],
)
start_index_schema = properties.Schema(
properties.Schema.NUMBER,
_("Index of the starting token in the incoming URI"),
required=False,
update_allowed=True,
)
end_index_schema = properties.Schema(
properties.Schema.NUMBER,
_("Index of the ending token in the incoming URI"),
required=False,
update_allowed=True,
)
str_value_schema = properties.Schema(
properties.Schema.STRING,
_("Constant string to use as a token"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'type',
'start_index',
'end_index',
'str_value',
)
# mapping of properties to their schemas
properties_schema = {
'type': type_schema,
'start_index': start_index_schema,
'end_index': end_index_schema,
'str_value': str_value_schema,
}
class PoolServer(object):
# all schemas
ip_schema = properties.Schema(
properties.Schema.MAP,
_("IP address of the server in the poool"),
schema=IpAddr.properties_schema,
required=True,
update_allowed=True,
)
hostname_schema = properties.Schema(
properties.Schema.STRING,
_("DNS resolvable name of the server. May be used in place of the IP address."),
required=False,
update_allowed=True,
)
port_schema = properties.Schema(
properties.Schema.NUMBER,
_("Port of the pool server listening for HTTP/HTTPS. Default value is the default port in the pool."),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'ip',
'hostname',
'port',
)
# mapping of properties to their schemas
properties_schema = {
'ip': ip_schema,
'hostname': hostname_schema,
'port': port_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'ip': getattr(IpAddr, 'field_references', {}),
}
class HTTPCookieData(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_("Cookie name"),
required=False,
update_allowed=True,
)
value_schema = properties.Schema(
properties.Schema.STRING,
_("Cookie value"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
'value',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'value': value_schema,
}
class URIParamQuery(object):
# all schemas
keep_query_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Use or drop the query of the incoming request URI in the request URI to the backend server"),
required=False,
update_allowed=True,
)
add_string_schema = properties.Schema(
properties.Schema.STRING,
_("Concatenate a string to the query of the incoming request URI and then use it in the request URI going to the backend server"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'keep_query',
'add_string',
)
# mapping of properties to their schemas
properties_schema = {
'keep_query': keep_query_schema,
'add_string': add_string_schema,
}
class HTTPHdrValue(object):
# all schemas
var_schema = properties.Schema(
properties.Schema.STRING,
_("Variable"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HTTP_POLICY_VAR_SSL_CLIENT_SERIAL', 'HTTP_POLICY_VAR_SSL_CIPHER', 'HTTP_POLICY_VAR_SSL_CLIENT_FINGERPRINT', 'HTTP_POLICY_VAR_USER_NAME', 'HTTP_POLICY_VAR_HTTP_HDR', 'HTTP_POLICY_VAR_VS_PORT', 'HTTP_POLICY_VAR_SSL_CLIENT_SUBJECT', 'HTTP_POLICY_VAR_SSL_SERVER_NAME', 'HTTP_POLICY_VAR_CLIENT_IP', 'HTTP_POLICY_VAR_VS_IP', 'HTTP_POLICY_VAR_SSL_CLIENT_RAW', 'HTTP_POLICY_VAR_SSL_CLIENT_ISSUER', 'HTTP_POLICY_VAR_SSL_PROTOCOL']),
],
)
val_schema = properties.Schema(
properties.Schema.STRING,
_("HTTP header value or variable representing an HTTP header"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'var',
'val',
)
# mapping of properties to their schemas
properties_schema = {
'var': var_schema,
'val': val_schema,
}
class HTTPSwitchingAction(object):
# all schemas
action_schema = properties.Schema(
properties.Schema.STRING,
_("Content switching action type"),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HTTP_SWITCHING_SELECT_LOCAL', 'HTTP_SWITCHING_SELECT_POOLGROUP', 'HTTP_SWITCHING_SELECT_POOL']),
],
)
pool_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("UUID of the pool of servers to serve the request You can either provide UUID or provide a name with the prefix 'get_avi_uuid_for_name:', e.g., 'get_avi_uuid_for_name:my_obj_name'."),
required=False,
update_allowed=True,
)
status_code_schema = properties.Schema(
properties.Schema.STRING,
_("HTTP status code to use when serving local response"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HTTP_LOCAL_RESPONSE_STATUS_CODE_403', 'HTTP_LOCAL_RESPONSE_STATUS_CODE_429', 'HTTP_LOCAL_RESPONSE_STATUS_CODE_200', 'HTTP_LOCAL_RESPONSE_STATUS_CODE_404']),
],
)
file_schema = properties.Schema(
properties.Schema.MAP,
_("File from which to serve local response to the request"),
schema=HTTPLocalFile.properties_schema,
required=False,
update_allowed=True,
)
server_schema = properties.Schema(
properties.Schema.MAP,
_("Specific pool server to select"),
schema=PoolServer.properties_schema,
required=False,
update_allowed=True,
)
pool_group_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("UUID of the pool group to serve the request You can either provide UUID or provide a name with the prefix 'get_avi_uuid_for_name:', e.g., 'get_avi_uuid_for_name:my_obj_name'."),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'action',
'pool_uuid',
'status_code',
'file',
'server',
'pool_group_uuid',
)
# mapping of properties to their schemas
properties_schema = {
'action': action_schema,
'pool_uuid': pool_uuid_schema,
'status_code': status_code_schema,
'file': file_schema,
'server': server_schema,
'pool_group_uuid': pool_group_uuid_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'pool_uuid': 'pool',
'pool_group_uuid': 'poolgroup',
'file': getattr(HTTPLocalFile, 'field_references', {}),
'server': getattr(PoolServer, 'field_references', {}),
}
class URIParam(object):
# all schemas
type_schema = properties.Schema(
properties.Schema.STRING,
_("URI param type"),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['URI_PARAM_TYPE_TOKENIZED']),
],
)
tokens_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=URIParamToken.properties_schema,
required=True,
update_allowed=False,
)
tokens_schema = properties.Schema(
properties.Schema.LIST,
_("Token config either for the URI components or a constant string"),
schema=tokens_item_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'type',
'tokens',
)
# mapping of properties to their schemas
properties_schema = {
'type': type_schema,
'tokens': tokens_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'tokens': getattr(URIParamToken, 'field_references', {}),
}
class HTTPHdrData(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_("HTTP header name"),
required=False,
update_allowed=True,
)
value_schema = properties.Schema(
properties.Schema.MAP,
_("HTTP header value"),
schema=HTTPHdrValue.properties_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
'value',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'value': value_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'value': getattr(HTTPHdrValue, 'field_references', {}),
}
class HTTPRedirectAction(object):
# all schemas
protocol_schema = properties.Schema(
properties.Schema.STRING,
_("Protocol type"),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HTTP', 'HTTPS']),
],
)
host_schema = properties.Schema(
properties.Schema.MAP,
_("Host config"),
schema=URIParam.properties_schema,
required=False,
update_allowed=True,
)
port_schema = properties.Schema(
properties.Schema.NUMBER,
_("Port to which redirect the request"),
required=False,
update_allowed=True,
)
path_schema = properties.Schema(
properties.Schema.MAP,
_("Path config"),
schema=URIParam.properties_schema,
required=False,
update_allowed=True,
)
keep_query_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Keep or drop the query of the incoming request URI in the redirected URI"),
required=False,
update_allowed=True,
)
status_code_schema = properties.Schema(
properties.Schema.STRING,
_("HTTP redirect status code"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HTTP_REDIRECT_STATUS_CODE_302', 'HTTP_REDIRECT_STATUS_CODE_301', 'HTTP_REDIRECT_STATUS_CODE_307']),
],
)
# properties list
PROPERTIES = (
'protocol',
'host',
'port',
'path',
'keep_query',
'status_code',
)
# mapping of properties to their schemas
properties_schema = {
'protocol': protocol_schema,
'host': host_schema,
'port': port_schema,
'path': path_schema,
'keep_query': keep_query_schema,
'status_code': status_code_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'path': getattr(URIParam, 'field_references', {}),
'host': getattr(URIParam, 'field_references', {}),
}
class HTTPRewriteURLAction(object):
# all schemas
host_hdr_schema = properties.Schema(
properties.Schema.MAP,
_("Host config"),
schema=URIParam.properties_schema,
required=False,
update_allowed=True,
)
path_schema = properties.Schema(
properties.Schema.MAP,
_("Path config"),
schema=URIParam.properties_schema,
required=False,
update_allowed=True,
)
query_schema = properties.Schema(
properties.Schema.MAP,
_("Query config"),
schema=URIParamQuery.properties_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'host_hdr',
'path',
'query',
)
# mapping of properties to their schemas
properties_schema = {
'host_hdr': host_hdr_schema,
'path': path_schema,
'query': query_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'host_hdr': getattr(URIParam, 'field_references', {}),
'query': getattr(URIParamQuery, 'field_references', {}),
'path': getattr(URIParam, 'field_references', {}),
}
class HTTPRewriteLocHdrAction(object):
# all schemas
protocol_schema = properties.Schema(
properties.Schema.STRING,
_("HTTP protocol type"),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HTTP', 'HTTPS']),
],
)
port_schema = properties.Schema(
properties.Schema.NUMBER,
_("Port to use in the redirected URI"),
required=False,
update_allowed=True,
)
host_schema = properties.Schema(
properties.Schema.MAP,
_("Host config"),
schema=URIParam.properties_schema,
required=False,
update_allowed=True,
)
path_schema = properties.Schema(
properties.Schema.MAP,
_("Path config"),
schema=URIParam.properties_schema,
required=False,
update_allowed=True,
)
keep_query_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Keep or drop the query from the server side redirect URI"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'protocol',
'port',
'host',
'path',
'keep_query',
)
# mapping of properties to their schemas
properties_schema = {
'protocol': protocol_schema,
'port': port_schema,
'host': host_schema,
'path': path_schema,
'keep_query': keep_query_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'path': getattr(URIParam, 'field_references', {}),
'host': getattr(URIParam, 'field_references', {}),
}
class HTTPHdrAction(object):
# all schemas
action_schema = properties.Schema(
properties.Schema.STRING,
_("ADD: A new header with the new value is added irrespective of the existence of an HTTP header of the given name. REPLACE: A new header with the new value is added if no header of the given name exists, else existing headers with the given name are removed and a new header with the new value is added. REMOVE: All the headers of the given name are removed."),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HTTP_REPLACE_HDR', 'HTTP_ADD_HDR', 'HTTP_REMOVE_HDR']),
],
)
hdr_schema = properties.Schema(
properties.Schema.MAP,
_("HTTP header information"),
schema=HTTPHdrData.properties_schema,
required=False,
update_allowed=True,
)
cookie_schema = properties.Schema(
properties.Schema.MAP,
_("Cookie information"),
schema=HTTPCookieData.properties_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'action',
'hdr',
'cookie',
)
# mapping of properties to their schemas
properties_schema = {
'action': action_schema,
'hdr': hdr_schema,
'cookie': cookie_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'cookie': getattr(HTTPCookieData, 'field_references', {}),
'hdr': getattr(HTTPHdrData, 'field_references', {}),
}
| apache-2.0 | -3,698,605,126,283,690,500 | 27.251701 | 463 | 0.602215 | false |
Morgan-Stanley/treadmill | lib/python/treadmill/sproc/appmonitor.py | 2 | 10599 | """Syncronizes cell Zookeeper with LDAP data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import itertools
import logging
import math
import time
import click
import six
from treadmill import alert
from treadmill import appenv
from treadmill import context
from treadmill import restclient
from treadmill import utils
from treadmill import yamlwrapper as yaml
from treadmill import zknamespace as z
from treadmill import zkutils
from treadmill import zkwatchers
from treadmill.scheduler import masterapi
_LOGGER = logging.getLogger(__name__)
# Allow 2 * count tokens to accumulate during 1 hour.
_INTERVAL = float(60 * 60)
# Delay monitoring for non-existent apps.
_DELAY_INTERVAL = float(5 * 60)
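# Illustration of the token-bucket limits set up in _watch_monitor below: a monitor
# with count=4 holds at most 2 * 4 = 8 tokens and refills at 2 * count / _INTERVAL
# (8 tokens per hour), so scale-ups are throttled to roughly twice the configured
# count per hour.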
def make_alerter(alerts_dir, cell):
"""Create alert function."""
def send_alert(instance, summary, **kwargs):
"""Send alert."""
_LOGGER.debug('Sending alert for %s', instance)
alert.create(
alerts_dir, type_='monitor.suspended',
instanceid='{}/{}'.format(cell, instance),
summary=summary,
**kwargs
)
return send_alert
def reevaluate(api_url, alert_f, state, zkclient, last_waited):
"""Evaluate state and adjust app count based on monitor"""
# Disable too many branches/statements warning.
#
# pylint: disable=R0912
# pylint: disable=R0915
grouped = dict(state['scheduled'])
monitors = dict(state['monitors'])
# Do not create a copy, suspended is accessed by ref.
suspended = state['suspended']
waited = {}
modified = False
now = time.time()
# remove outdated information in suspended dict
extra = six.viewkeys(suspended) - six.viewkeys(monitors)
for name in extra:
suspended.pop(name, None)
modified = True
# Increase available tokens.
for name, conf in six.iteritems(monitors):
if suspended.get(name, 0) > now:
_LOGGER.debug('Ignoring app %s - suspended.', name)
continue
# Either app is not suspended or it is past-due - remove it from
# suspended dict.
if suspended.pop(name, None) is not None:
alert_f(name, 'Monitor active again', status='clear')
modified = True
# Max value reached, nothing to do.
max_value = conf['count'] * 2
available = conf['available']
if available < max_value:
delta = conf['rate'] * (now - conf['last_update'])
conf['available'] = min(available + delta, max_value)
conf['last_update'] = now
for name, conf in six.iteritems(monitors):
if suspended.get(name, 0) > now:
_LOGGER.debug('Monitor is suspended for: %s.', name)
continue
count = conf['count']
available = conf['available']
current_count = len(grouped.get(name, []))
_LOGGER.debug('App: %r current: %d, target %d',
name, current_count, count)
if count == current_count:
continue
elif count > current_count:
needed = count - current_count
allowed = int(min(needed, math.floor(available)))
_LOGGER.debug('%s => need %d, allow %d', name, needed, allowed)
if allowed <= 0:
# in this case available <= 0 as needed >= 1
# we got estimated wait time, now + wait seconds
waited[name] = now + int((1 - available) / conf['rate'])
# new wait item, need modify
if name not in last_waited:
alert_f(name, 'Monitor suspended: Rate limited')
modified = True
continue
try:
# scheduled, remove app from waited list
_scheduled = restclient.post(
[api_url],
'/instance/{}?count={}'.format(name, allowed),
payload={},
headers={'X-Treadmill-Trusted-Agent': 'monitor'}
)
if name in last_waited:
# this means app jump out of wait, need to clear it from zk
alert_f(name, 'Monitor active again', status='clear')
modified = True
conf['available'] -= allowed
except restclient.NotFoundError:
_LOGGER.info('App not configured: %s', name)
suspended[name] = now + _DELAY_INTERVAL
alert_f(name, 'Monitor suspended: App not configured')
modified = True
except restclient.BadRequestError:
_LOGGER.exception('Unable to start: %s', name)
suspended[name] = now + _DELAY_INTERVAL
alert_f(name, 'Monitor suspended: Unable to start')
modified = True
except restclient.ValidationError:
_LOGGER.exception('Invalid manifest: %s', name)
suspended[name] = now + _DELAY_INTERVAL
alert_f(name, 'Monitor suspended: Invalid manifest')
modified = True
except Exception: # pylint: disable=W0703
_LOGGER.exception('Unable to create instances: %s: %s',
name, needed)
elif count < current_count:
extra = []
policy = conf.get('policy')
if policy is None:
policy = 'fifo'
if policy == 'fifo':
extra = grouped[name][:current_count - count]
elif policy == 'lifo':
extra = grouped[name][count - current_count:]
else:
_LOGGER.warning('Invalid scale policy: %s', policy)
continue
try:
response = restclient.post(
[api_url], '/instance/_bulk/delete',
payload=dict(instances=list(extra)),
headers={'X-Treadmill-Trusted-Agent': 'monitor'}
)
_LOGGER.info('deleted: %r - %s', extra, response)
# this means we reduce the count number, no need to wait
modified = True
except Exception: # pylint: disable=W0703
_LOGGER.exception('Unable to delete instances: %r', extra)
    # the full inactive set reported to Zookeeper includes both rate-limited (waited)
    # and suspended monitors
waited.update(suspended)
if modified:
_LOGGER.info('Updating suspended app monitors')
zkutils.update(zkclient, z.path.appmonitor(), waited)
return waited
def _run_sync(api_url, alerts_dir, once):
"""Sync app monitor count with instance count."""
zkclient = context.GLOBAL.zk.conn
alerter = make_alerter(alerts_dir, context.GLOBAL.cell)
state = {
'scheduled': {},
'monitors': {},
'suspended': {},
}
@zkclient.ChildrenWatch(z.path.scheduled())
@utils.exit_on_unhandled
def _scheduled_watch(children):
"""Watch scheduled instances."""
scheduled = sorted(children)
def _appname_fn(instancename):
return instancename.rpartition('#')[0]
grouped = collections.defaultdict(
list,
{
k: list(v)
for k, v in itertools.groupby(scheduled, _appname_fn)
}
)
state['scheduled'] = grouped
return True
def _watch_monitor(name):
"""Watch monitor."""
# Establish data watch on each monitor.
@zkwatchers.ExistingDataWatch(zkclient, z.path.appmonitor(name))
@utils.exit_on_unhandled
def _monitor_data_watch(data, stat, event):
"""Monitor individual monitor."""
if (event is not None and event.type == 'DELETED') or stat is None:
_LOGGER.info('Removing watch on deleted monitor: %s', name)
return
try:
loaded = yaml.load(data)
count = loaded['count']
policy = loaded.get('policy')
except Exception: # pylint: disable=W0703
_LOGGER.exception('Invalid monitor: %s', name)
return
_LOGGER.info('Reconfigure monitor: %s, count: %s', name, count)
state['monitors'][name] = {
'count': count,
'available': 2.0 * count,
'last_update': time.time(),
'policy': policy,
'rate': (2.0 * count / _INTERVAL)
}
@zkclient.ChildrenWatch(z.path.appmonitor())
@utils.exit_on_unhandled
def _appmonitors_watch(children):
"""Watch app monitors."""
monitors = set(children)
extra = six.viewkeys(state['monitors']) - monitors
for name in extra:
_LOGGER.info('Removing extra monitor: %r', name)
if state['monitors'].pop(name, None) is None:
_LOGGER.warning(
'Failed to remove non-existent monitor: %r', name
)
missing = monitors - six.viewkeys(state['monitors'])
for name in missing:
_LOGGER.info('Adding missing monitor: %s', name)
_watch_monitor(name)
_LOGGER.info('Ready, loading waited app list')
last_waited = masterapi.get_suspended_appmonitors(zkclient)
while True:
time.sleep(1)
last_waited = reevaluate(
api_url, alerter, state, zkclient, last_waited
)
if once:
break
def init():
"""Return top level command handler."""
@click.command()
@click.option('--no-lock', is_flag=True, default=False,
help='Run without lock.')
@click.option('--api', required=True, help='Cell API url.')
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
@click.option('--once', is_flag=True, default=False,
help='Run once.')
def top(no_lock, api, approot, once):
"""Sync LDAP data with Zookeeper data."""
tm_env = appenv.AppEnvironment(root=approot)
if not no_lock:
lock = zkutils.make_lock(context.GLOBAL.zk.conn,
z.path.election(__name__))
_LOGGER.info('Waiting for leader lock.')
with lock:
_run_sync(api, tm_env.alerts_dir, once)
else:
_LOGGER.info('Running without lock.')
_run_sync(api, tm_env.alerts_dir, once)
return top
| apache-2.0 | -4,280,549,583,378,512,000 | 32.330189 | 79 | 0.55543 | false |
pantsbuild/pants | tests/python/pants_test/pantsd/service/test_pants_service.py | 4 | 1756 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import threading
import pytest
from pants.pantsd.service.pants_service import PantsService
class RunnableTestService(PantsService):
def run(self):
pass
@pytest.fixture
def service() -> RunnableTestService:
return RunnableTestService()
def test_init(service: RunnableTestService) -> None:
assert bool(service.name) is True
def test_run_abstract() -> None:
with pytest.raises(TypeError):
PantsService() # type: ignore[abstract]
def test_terminate(service: RunnableTestService) -> None:
service.terminate()
assert service._state.is_terminating
def test_maybe_pause(service: RunnableTestService) -> None:
# Confirm that maybe_pause with/without a timeout does not deadlock when we are not
# marked Pausing/Paused.
service._state.maybe_pause(timeout=None)
service._state.maybe_pause(timeout=0.5)
def test_pause_and_resume(service: RunnableTestService) -> None:
service.mark_pausing()
# Confirm that we don't transition to Paused without a service thread to maybe_pause.
assert service._state.await_paused(timeout=0.5) is False
# Spawn a thread to call maybe_pause.
t = threading.Thread(target=service._state.maybe_pause)
t.daemon = True
t.start()
# Confirm that we observe the pause from the main thread, and that the child thread pauses
# there without exiting.
assert service._state.await_paused(timeout=5) is True
t.join(timeout=0.5)
assert t.is_alive() is True
# Resume the service, and confirm that the child thread exits.
service.resume()
t.join(timeout=5)
assert t.is_alive() is False
| apache-2.0 | -1,526,787,788,519,844,900 | 29.275862 | 94 | 0.722096 | false |
vineethtw/fusion | fusion/common/config.py | 1 | 5617 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Fusion
"""
import logging as sys_logging
import os
from oslo.config import cfg
from fusion.common import wsgi
from fusion.openstack.common import log as logging
import traceback
DEFAULT_PORT = 8000
logger = logging.getLogger(__name__)
paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [
cfg.StrOpt('flavor',
help=_("The flavor to use")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use"))]
service_opts = [
cfg.IntOpt('max_template_size',
default=524288,
help='Maximum raw byte size of any template.')
]
github_group = cfg.OptGroup('github')
github_opts = [
cfg.StrOpt('api_base',
default="https://api.github.com",
help="Github API base path"),
cfg.StrOpt('organization',
default="",
help="organization owning all the templates"),
cfg.StrOpt('template_file',
default="",
help="name of the template file"),
cfg.StrOpt('metadata_file',
default="",
help="name of the metadata file"),
cfg.StrOpt('username',
default="",
help="github username"),
cfg.StrOpt('password',
default="",
help="github password"),
cfg.StrOpt('default_version',
default="stable",
help="default template version")
]
cache_group = cfg.OptGroup('cache')
cache_opts = [
cfg.StrOpt('cache_root',
default="cache_root was not defined!",
help="Location for cache folder"),
cfg.IntOpt('default_timeout',
default=3600,
help="default timeout for filesystem cache"),
cfg.StrOpt('redis_connection_string',
default="redis_connection_string was not defined!",
help="redis connection string"),
cfg.ListOpt('memcache_servers',
default="memcache_servers was not defined!",
help="memcache servers"),
]
proxy_group = cfg.OptGroup('proxy')
proxy_opts = [
cfg.StrOpt('heat_host',
default=None,
help="Heat host"),
cfg.StrOpt('heat_protocol',
default="https",
help="Heat Protocol")
]
cfg.CONF.register_opts(service_opts)
cfg.CONF.register_group(paste_deploy_group)
cfg.CONF.register_opts(paste_deploy_opts, group=paste_deploy_group)
cfg.CONF.register_opts(github_opts, group=github_group)
cfg.CONF.register_opts(cache_opts, group=cache_group)
cfg.CONF.register_opts(proxy_opts, group=proxy_group)
def _get_deployment_flavor():
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately
for appending to the application name.
"""
flavor = cfg.CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
def _get_deployment_config_file():
"""
Retrieve the deployment_config_file config item, formatted as an
absolute pathname.
"""
config_path = cfg.CONF.find_file(
cfg.CONF.paste_deploy['api_paste_config'])
if config_path is None:
return None
return os.path.abspath(config_path)
def load_paste_app(app_name=None):
"""
Builds and returns a WSGI app from a paste config file.
We assume the last config file specified in the supplied ConfigOpts
object is the paste config file.
:param app_name: name of the application to load
:raises RuntimeError when config file cannot be located or application
cannot be loaded from config file
"""
if app_name is None:
app_name = cfg.CONF.prog
# append the deployment flavor to the application name,
# in order to identify the appropriate paste pipeline
app_name += _get_deployment_flavor()
conf_file = _get_deployment_config_file()
if conf_file is None:
raise RuntimeError(_("Unable to locate config file"))
try:
app = wsgi.paste_deploy_app(conf_file, app_name, cfg.CONF)
# Log the options used when starting if we're in debug mode...
if cfg.CONF.debug:
cfg.CONF.log_opt_values(logging.getLogger(app_name),
sys_logging.DEBUG)
return app
except (LookupError, ImportError) as e:
traceback.format_exc()
raise RuntimeError(_("Unable to load %(app_name)s from "
"configuration file %(conf_file)s."
"\nGot: %(e)r") % {'app_name': app_name,
'conf_file': conf_file,
'e': e})
def safe_get_config(group, name):
if group not in cfg.CONF:
logger.warn("Could not find %s group in the configuration file! This"
"might be cause due to bad configuration.")
return None
return getattr(getattr(cfg.CONF, group),name)
| apache-2.0 | -5,267,703,576,715,912,000 | 31.097143 | 78 | 0.612961 | false |
esmoyon1/GeonodeV1 | geonode/base/__init__.py | 7 | 1280 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils.translation import ugettext_noop as _
from geonode.notifications_helper import NotificationsAppConfigBase
class BaseAppConfig(NotificationsAppConfigBase):
name = 'geonode.base'
NOTIFICATIONS = (("request_download_resourcebase", _("Request to download a resource"),
_("A request for downloading a resource was sent")),
)
default_app_config = 'geonode.base.BaseAppConfig'
| gpl-3.0 | 9,109,169,835,199,277,000 | 39 | 91 | 0.645313 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/zope.interface-3.6.1-py2.7-macosx-10.10-x86_64.egg/zope/interface/tests/odd.py | 50 | 3130 | ##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Odd meta class that doesn't subclass type.
This is used for testing support for ExtensionClass in new interfaces.
>>> class A(object):
... __metaclass__ = MetaClass
... a = 1
...
>>> A.__name__
'A'
>>> A.__bases__ == (object,)
True
>>> class B(object):
... __metaclass__ = MetaClass
... b = 1
...
>>> class C(A, B): pass
...
>>> C.__name__
'C'
>>> int(C.__bases__ == (A, B))
1
>>> a = A()
>>> aa = A()
>>> a.a
1
>>> aa.a
1
>>> aa.a = 2
>>> a.a
1
>>> aa.a
2
>>> c = C()
>>> c.a
1
>>> c.b
1
>>> c.b = 2
>>> c.b
2
>>> C.c = 1
>>> c.c
1
>>> import sys
>>> if sys.version[0] == '2': # This test only makes sense under Python 2.x
... from types import ClassType
... assert not isinstance(C, (type, ClassType))
>>> int(C.__class__.__class__ is C.__class__)
1
$Id: odd.py 110699 2010-04-09 08:16:17Z regebro $
"""
# class OddClass is an odd meta class
class MetaMetaClass(type):
def __getattribute__(self, name):
if name == '__class__':
return self
return type.__getattribute__(self, name)
class MetaClass(object):
"""Odd classes
"""
__metaclass__ = MetaMetaClass
def __init__(self, name, bases, dict):
self.__name__ = name
self.__bases__ = bases
self.__dict__.update(dict)
def __call__(self):
return OddInstance(self)
def __getattr__(self, name):
for b in self.__bases__:
v = getattr(b, name, self)
if v is not self:
return v
raise AttributeError(name)
def __repr__(self):
return "<odd class %s at %s>" % (self.__name__, hex(id(self)))
class OddInstance(object):
def __init__(self, cls):
self.__dict__['__class__'] = cls
def __getattribute__(self, name):
dict = object.__getattribute__(self, '__dict__')
if name == '__dict__':
return dict
v = dict.get(name, self)
if v is not self:
return v
return getattr(dict['__class__'], name)
def __setattr__(self, name, v):
self.__dict__[name] = v
def __delattr__(self, name):
del self.__dict__[name]
def __repr__(self):
return "<odd %s instance at %s>" % (
self.__class__.__name__, hex(id(self)))
# DocTest:
if __name__ == "__main__":
import doctest, __main__
doctest.testmod(__main__, isprivate=lambda *a: False)
| gpl-2.0 | 9,006,141,493,095,993,000 | 22.89313 | 78 | 0.50639 | false |
rahuldhote/odoo | openerp/http.py | 66 | 61060 | # -*- coding: utf-8 -*-
#----------------------------------------------------------
# OpenERP HTTP layer
#----------------------------------------------------------
import ast
import collections
import contextlib
import datetime
import errno
import functools
import getpass
import inspect
import logging
import mimetypes
import os
import pprint
import random
import re
import sys
import tempfile
import threading
import time
import traceback
import urlparse
import warnings
from zlib import adler32
import babel.core
import psycopg2
import simplejson
import werkzeug.contrib.sessions
import werkzeug.datastructures
import werkzeug.exceptions
import werkzeug.local
import werkzeug.routing
import werkzeug.wrappers
import werkzeug.wsgi
from werkzeug.wsgi import wrap_file
try:
import psutil
except ImportError:
psutil = None
import openerp
from openerp import SUPERUSER_ID
from openerp.service.server import memory_info
from openerp.service import security, model as service_model
from openerp.tools.func import lazy_property
from openerp.tools import ustr
_logger = logging.getLogger(__name__)
rpc_request = logging.getLogger(__name__ + '.rpc.request')
rpc_response = logging.getLogger(__name__ + '.rpc.response')
# 1 week cache for statics as advised by Google Page Speed
STATIC_CACHE = 60 * 60 * 24 * 7
#----------------------------------------------------------
# RequestHandler
#----------------------------------------------------------
# Thread local global request object
_request_stack = werkzeug.local.LocalStack()
request = _request_stack()
"""
A global proxy that always redirect to the current request object.
"""
def replace_request_password(args):
# password is always 3rd argument in a request, we replace it in RPC logs
# so it's easier to forward logs for diagnostics/debugging purposes...
if len(args) > 2:
args = list(args)
args[2] = '*'
return tuple(args)
# don't trigger debugger for those exceptions, they carry user-facing warnings
# and indications, they're not necessarily indicative of anything being
# *broken*
NO_POSTMORTEM = (openerp.osv.orm.except_orm,
openerp.exceptions.AccessError,
openerp.exceptions.AccessDenied,
openerp.exceptions.Warning,
openerp.exceptions.RedirectWarning)
def dispatch_rpc(service_name, method, params):
""" Handle a RPC call.
This is pure Python code, the actual marshalling (from/to XML-RPC) is done
    in an upper layer.
"""
try:
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
if rpc_request_flag or rpc_response_flag:
start_time = time.time()
start_rss, start_vms = 0, 0
if psutil:
start_rss, start_vms = memory_info(psutil.Process(os.getpid()))
if rpc_request and rpc_response_flag:
openerp.netsvc.log(rpc_request, logging.DEBUG, '%s.%s' % (service_name, method), replace_request_password(params))
threading.current_thread().uid = None
threading.current_thread().dbname = None
if service_name == 'common':
dispatch = openerp.service.common.dispatch
elif service_name == 'db':
dispatch = openerp.service.db.dispatch
elif service_name == 'object':
dispatch = openerp.service.model.dispatch
elif service_name == 'report':
dispatch = openerp.service.report.dispatch
else:
dispatch = openerp.service.wsgi_server.rpc_handlers.get(service_name)
result = dispatch(method, params)
if rpc_request_flag or rpc_response_flag:
end_time = time.time()
end_rss, end_vms = 0, 0
if psutil:
end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
logline = '%s.%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % (service_name, method, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
if rpc_response_flag:
openerp.netsvc.log(rpc_response, logging.DEBUG, logline, result)
else:
openerp.netsvc.log(rpc_request, logging.DEBUG, logline, replace_request_password(params), depth=1)
return result
except NO_POSTMORTEM:
raise
except openerp.exceptions.DeferredException, e:
_logger.exception(openerp.tools.exception_to_unicode(e))
openerp.tools.debugger.post_mortem(openerp.tools.config, e.traceback)
raise
except Exception, e:
_logger.exception(openerp.tools.exception_to_unicode(e))
openerp.tools.debugger.post_mortem(openerp.tools.config, sys.exc_info())
raise
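# Illustrative sketch (not part of the framework): dispatch_rpc() routes a
# call to one of the service dispatchers wired up above, e.g. the "common"
# service. The method name below is an assumption used for illustration only.
#
#   server_version = dispatch_rpc('common', 'version', [])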
def local_redirect(path, query=None, keep_hash=False, forward_debug=True, code=303):
url = path
if not query:
query = {}
if forward_debug and request and request.debug:
query['debug'] = None
if query:
url += '?' + werkzeug.url_encode(query)
if keep_hash:
return redirect_with_hash(url, code)
else:
return werkzeug.utils.redirect(url, code)
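# Illustrative sketch (the path and query values are made up): redirect within
# the same host, preserving the location hash and the debug flag when present.
#
#   return local_redirect('/web', query={'db': 'mydb'}, keep_hash=True)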
def redirect_with_hash(url, code=303):
# Most IE and Safari versions decided not to preserve location.hash upon
# redirect. And even if IE10 pretends to support it, it still fails
# inexplicably in case of multiple redirects (and we do have some).
# See extensive test page at http://greenbytes.de/tech/tc/httpredirects/
if request.httprequest.user_agent.browser in ('firefox',):
return werkzeug.utils.redirect(url, code)
return "<html><head><script>window.location = '%s' + location.hash;</script></head></html>" % url
class WebRequest(object):
""" Parent class for all Odoo Web request types, mostly deals with
initialization and setup of the request object (the dispatching itself has
to be handled by the subclasses)
:param httprequest: a wrapped werkzeug Request object
:type httprequest: :class:`werkzeug.wrappers.BaseRequest`
.. attribute:: httprequest
the original :class:`werkzeug.wrappers.Request` object provided to the
request
.. attribute:: params
:class:`~collections.Mapping` of request parameters, not generally
useful as they're provided directly to the handler method as keyword
arguments
"""
def __init__(self, httprequest):
self.httprequest = httprequest
self.httpresponse = None
self.httpsession = httprequest.session
self.disable_db = False
self.uid = None
self.endpoint = None
self.auth_method = None
self._cr = None
# prevents transaction commit, use when you catch an exception during handling
self._failed = None
# set db/uid trackers - they're cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
if self.db:
threading.current_thread().dbname = self.db
if self.session.uid:
threading.current_thread().uid = self.session.uid
@lazy_property
def env(self):
"""
The :class:`~openerp.api.Environment` bound to current request.
Raises a :class:`RuntimeError` if the current requests is not bound
to a database.
"""
        if not self.db:
            raise RuntimeError('request not bound to a database')
return openerp.api.Environment(self.cr, self.uid, self.context)
@lazy_property
def context(self):
"""
:class:`~collections.Mapping` of context values for the current
request
"""
return dict(self.session.context)
@lazy_property
def lang(self):
self.session._fix_lang(self.context)
return self.context["lang"]
@lazy_property
def session(self):
"""
a :class:`OpenERPSession` holding the HTTP session data for the
current http session
"""
return self.httprequest.session
@property
def cr(self):
"""
:class:`~openerp.sql_db.Cursor` initialized for the current method
call.
Accessing the cursor when the current request uses the ``none``
authentication will raise an exception.
"""
# can not be a lazy_property because manual rollback in _call_function
# if already set (?)
        if not self.db:
            raise RuntimeError('request not bound to a database')
if not self._cr:
self._cr = self.registry.cursor()
return self._cr
def __enter__(self):
_request_stack.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
_request_stack.pop()
if self._cr:
if exc_type is None and not self._failed:
self._cr.commit()
self._cr.close()
# just to be sure no one tries to re-use the request
self.disable_db = True
self.uid = None
def set_handler(self, endpoint, arguments, auth):
# is this needed ?
arguments = dict((k, v) for k, v in arguments.iteritems()
if not k.startswith("_ignored_"))
endpoint.arguments = arguments
self.endpoint = endpoint
self.auth_method = auth
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
        to arbitrary responses. Anything returned (except None) will
be used as response."""
self._failed = exception # prevent tx commit
if not isinstance(exception, NO_POSTMORTEM) \
and not isinstance(exception, werkzeug.exceptions.HTTPException):
openerp.tools.debugger.post_mortem(
openerp.tools.config, sys.exc_info())
raise
def _call_function(self, *args, **kwargs):
request = self
if self.endpoint.routing['type'] != self._request_type:
msg = "%s, %s: Function declared as capable of handling request of type '%s' but called with a request of type '%s'"
params = (self.endpoint.original, self.httprequest.path, self.endpoint.routing['type'], self._request_type)
_logger.error(msg, *params)
raise werkzeug.exceptions.BadRequest(msg % params)
kwargs.update(self.endpoint.arguments)
# Backward for 7.0
if self.endpoint.first_arg_is_req:
args = (request,) + args
        # Correct exception handling and concurrency retry
@service_model.check
def checked_call(___dbname, *a, **kw):
            # The decorator can call us more than once if there is a database error. In this
            # case, the request cursor is unusable. Rollback the transaction to create a new one.
if self._cr:
self._cr.rollback()
return self.endpoint(*a, **kw)
if self.db:
return checked_call(self.db, *args, **kwargs)
return self.endpoint(*args, **kwargs)
@property
def debug(self):
""" Indicates whether the current request is in "debug" mode
"""
return 'debug' in self.httprequest.args
@contextlib.contextmanager
def registry_cr(self):
warnings.warn('please use request.registry and request.cr directly', DeprecationWarning)
yield (self.registry, self.cr)
@lazy_property
def session_id(self):
"""
opaque identifier for the :class:`OpenERPSession` instance of
the current request
.. deprecated:: 8.0
Use the ``sid`` attribute on :attr:`.session`
"""
return self.session.sid
@property
def registry(self):
"""
The registry to the database linked to this request. Can be ``None``
if the current request uses the ``none`` authentication.
.. deprecated:: 8.0
use :attr:`.env`
"""
return openerp.modules.registry.RegistryManager.get(self.db) if self.db else None
@property
def db(self):
"""
The database linked to this request. Can be ``None``
if the current request uses the ``none`` authentication.
"""
return self.session.db if not self.disable_db else None
@lazy_property
def httpsession(self):
""" HTTP session data
.. deprecated:: 8.0
Use :attr:`.session` instead.
"""
return self.session
def route(route=None, **kw):
"""
Decorator marking the decorated method as being a handler for
requests. The method must be part of a subclass of ``Controller``.
:param route: string or array. The route part that will determine which
http requests will match the decorated method. Can be a
single string or an array of strings. See werkzeug's routing
documentation for the format of route expression (
http://werkzeug.pocoo.org/docs/routing/ ).
:param type: The type of request, can be ``'http'`` or ``'json'``.
    :param auth: The type of authentication method, can be one of the following:
* ``user``: The user must be authenticated and the current request
will perform using the rights of the user.
* ``admin``: The user may not be authenticated and the current request
will perform using the admin user.
* ``none``: The method is always active, even if there is no
database. Mainly used by the framework and authentication
                   modules. The request code will not have any facilities to access
the database nor have any configuration indicating the current
database nor the current user.
:param methods: A sequence of http methods this route applies to. If not
specified, all methods are allowed.
:param cors: The Access-Control-Allow-Origin cors directive value.
"""
routing = kw.copy()
assert not 'type' in routing or routing['type'] in ("http", "json")
def decorator(f):
if route:
if isinstance(route, list):
routes = route
else:
routes = [route]
routing['routes'] = routes
@functools.wraps(f)
def response_wrap(*args, **kw):
response = f(*args, **kw)
if isinstance(response, Response) or f.routing_type == 'json':
return response
if isinstance(response, basestring):
return Response(response)
if isinstance(response, werkzeug.exceptions.HTTPException):
response = response.get_response(request.httprequest.environ)
if isinstance(response, werkzeug.wrappers.BaseResponse):
response = Response.force_type(response)
response.set_default()
return response
_logger.warn("<function %s.%s> returns an invalid response type for an http request" % (f.__module__, f.__name__))
return response
response_wrap.routing = routing
response_wrap.original_func = f
return response_wrap
return decorator
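# Illustrative usage of the decorator above (a sketch, not shipped code; the
# routes and the controller name are made up):
#
#   class Main(Controller):
#
#       @route('/hello', type='http', auth='none')
#       def hello(self, **kw):
#           return "<h1>Hello</h1>"
#
#       @route('/hello/json', type='json', auth='user')
#       def hello_json(self, name="world", **kw):
#           return {'greeting': 'hello %s' % name}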
class JsonRequest(WebRequest):
""" Request handler for `JSON-RPC 2
<http://www.jsonrpc.org/specification>`_ over HTTP
* ``method`` is ignored
* ``params`` must be a JSON object (not an array) and is passed as keyword
arguments to the handler method
* the handler method's result is returned as JSON-RPC ``result`` and
wrapped in the `JSON-RPC Response
<http://www.jsonrpc.org/specification#response_object>`_
    Successful request::
--> {"jsonrpc": "2.0",
"method": "call",
"params": {"context": {},
"arg1": "val1" },
"id": null}
<-- {"jsonrpc": "2.0",
"result": { "res1": "val1" },
"id": null}
    Request producing an error::
--> {"jsonrpc": "2.0",
"method": "call",
"params": {"context": {},
"arg1": "val1" },
"id": null}
<-- {"jsonrpc": "2.0",
"error": {"code": 1,
"message": "End user error message.",
"data": {"code": "codestring",
"debug": "traceback" } },
"id": null}
"""
_request_type = "json"
def __init__(self, *args):
super(JsonRequest, self).__init__(*args)
self.jsonp_handler = None
args = self.httprequest.args
jsonp = args.get('jsonp')
self.jsonp = jsonp
request = None
request_id = args.get('id')
if jsonp and self.httprequest.method == 'POST':
# jsonp 2 steps step1 POST: save call
def handler():
self.session['jsonp_request_%s' % (request_id,)] = self.httprequest.form['r']
self.session.modified = True
headers=[('Content-Type', 'text/plain; charset=utf-8')]
r = werkzeug.wrappers.Response(request_id, headers=headers)
return r
self.jsonp_handler = handler
return
elif jsonp and args.get('r'):
# jsonp method GET
request = args.get('r')
elif jsonp and request_id:
# jsonp 2 steps step2 GET: run and return result
request = self.session.pop('jsonp_request_%s' % (request_id,), '{}')
else:
# regular jsonrpc2
request = self.httprequest.stream.read()
# Read POST content or POST Form Data named "request"
try:
self.jsonrequest = simplejson.loads(request)
except simplejson.JSONDecodeError:
msg = 'Invalid JSON data: %r' % (request,)
_logger.error('%s: %s', self.httprequest.path, msg)
raise werkzeug.exceptions.BadRequest(msg)
self.params = dict(self.jsonrequest.get("params", {}))
self.context = self.params.pop('context', dict(self.session.context))
def _json_response(self, result=None, error=None):
response = {
'jsonrpc': '2.0',
'id': self.jsonrequest.get('id')
}
if error is not None:
response['error'] = error
if result is not None:
response['result'] = result
if self.jsonp:
            # If we use jsonp, that means we are called from another host.
            # Some browsers (IE and Safari) do not allow third-party cookies,
            # so we then need to manage http sessions manually.
response['session_id'] = self.session_id
mime = 'application/javascript'
body = "%s(%s);" % (self.jsonp, simplejson.dumps(response),)
else:
mime = 'application/json'
body = simplejson.dumps(response)
return Response(
body, headers=[('Content-Type', mime),
('Content-Length', len(body))])
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
to arbitrary responses. Anything returned (except None) will
be used as response."""
try:
return super(JsonRequest, self)._handle_exception(exception)
except Exception:
if not isinstance(exception, (openerp.exceptions.Warning, SessionExpiredException)):
_logger.exception("Exception during JSON request handling.")
error = {
'code': 200,
'message': "Odoo Server Error",
'data': serialize_exception(exception)
}
if isinstance(exception, AuthenticationError):
error['code'] = 100
error['message'] = "Odoo Session Invalid"
if isinstance(exception, SessionExpiredException):
error['code'] = 100
error['message'] = "Odoo Session Expired"
return self._json_response(error=error)
def dispatch(self):
if self.jsonp_handler:
return self.jsonp_handler()
try:
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
if rpc_request_flag or rpc_response_flag:
endpoint = self.endpoint.method.__name__
model = self.params.get('model')
method = self.params.get('method')
args = self.params.get('args', [])
start_time = time.time()
_, start_vms = 0, 0
if psutil:
_, start_vms = memory_info(psutil.Process(os.getpid()))
if rpc_request and rpc_response_flag:
rpc_request.debug('%s: %s %s, %s',
endpoint, model, method, pprint.pformat(args))
result = self._call_function(**self.params)
if rpc_request_flag or rpc_response_flag:
end_time = time.time()
_, end_vms = 0, 0
if psutil:
_, end_vms = memory_info(psutil.Process(os.getpid()))
logline = '%s: %s %s: time:%.3fs mem: %sk -> %sk (diff: %sk)' % (
endpoint, model, method, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
if rpc_response_flag:
rpc_response.debug('%s, %s', logline, pprint.pformat(result))
else:
rpc_request.debug(logline)
return self._json_response(result)
except Exception, e:
return self._handle_exception(e)
def serialize_exception(e):
tmp = {
"name": type(e).__module__ + "." + type(e).__name__ if type(e).__module__ else type(e).__name__,
"debug": traceback.format_exc(),
"message": ustr(e),
"arguments": to_jsonable(e.args),
}
if isinstance(e, openerp.osv.osv.except_osv):
tmp["exception_type"] = "except_osv"
elif isinstance(e, openerp.exceptions.Warning):
tmp["exception_type"] = "warning"
elif isinstance(e, openerp.exceptions.AccessError):
tmp["exception_type"] = "access_error"
elif isinstance(e, openerp.exceptions.AccessDenied):
tmp["exception_type"] = "access_denied"
return tmp
def to_jsonable(o):
if isinstance(o, str) or isinstance(o,unicode) or isinstance(o, int) or isinstance(o, long) \
or isinstance(o, bool) or o is None or isinstance(o, float):
return o
if isinstance(o, list) or isinstance(o, tuple):
return [to_jsonable(x) for x in o]
if isinstance(o, dict):
tmp = {}
for k, v in o.items():
tmp[u"%s" % k] = to_jsonable(v)
return tmp
return ustr(o)
def jsonrequest(f):
"""
.. deprecated:: 8.0
Use the :func:`~openerp.http.route` decorator instead.
"""
base = f.__name__.lstrip('/')
if f.__name__ == "index":
base = ""
return route([base, base + "/<path:_ignored_path>"], type="json", auth="user", combine=True)(f)
class HttpRequest(WebRequest):
""" Handler for the ``http`` request type.
matched routing parameters, query string parameters, form_ parameters
and files are passed to the handler method as keyword arguments.
In case of name conflict, routing parameters have priority.
The handler method's result can be:
* a falsy value, in which case the HTTP response will be an
`HTTP 204`_ (No Content)
* a werkzeug Response object, which is returned as-is
* a ``str`` or ``unicode``, will be wrapped in a Response object and
interpreted as HTML
.. _form: http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.2
.. _HTTP 204: http://tools.ietf.org/html/rfc7231#section-6.3.5
"""
_request_type = "http"
def __init__(self, *args):
super(HttpRequest, self).__init__(*args)
params = self.httprequest.args.to_dict()
params.update(self.httprequest.form.to_dict())
params.update(self.httprequest.files.to_dict())
params.pop('session_id', None)
self.params = params
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
        to arbitrary responses. Anything returned (except None) will
be used as response."""
try:
return super(HttpRequest, self)._handle_exception(exception)
except SessionExpiredException:
if not request.params.get('noredirect'):
query = werkzeug.urls.url_encode({
'redirect': request.httprequest.url,
})
return werkzeug.utils.redirect('/web/login?%s' % query)
except werkzeug.exceptions.HTTPException, e:
return e
def dispatch(self):
if request.httprequest.method == 'OPTIONS' and request.endpoint and request.endpoint.routing.get('cors'):
headers = {
'Access-Control-Max-Age': 60 * 60 * 24,
'Access-Control-Allow-Headers': 'Origin, X-Requested-With, Content-Type, Accept'
}
return Response(status=200, headers=headers)
r = self._call_function(**self.params)
if not r:
r = Response(status=204) # no content
return r
def make_response(self, data, headers=None, cookies=None):
""" Helper for non-HTML responses, or HTML responses with custom
response headers or cookies.
        While handlers can just return the HTML markup of a page they want to
        send as a string, if non-HTML data is returned they need to create a
complete response object, or the returned data will not be correctly
interpreted by the clients.
:param basestring data: response body
:param headers: HTTP headers to set on the response
:type headers: ``[(name, value)]``
:param collections.Mapping cookies: cookies to set on the client
"""
response = Response(data, headers=headers)
if cookies:
for k, v in cookies.iteritems():
response.set_cookie(k, v)
return response
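    # Illustrative sketch (csv_data and the filename are made up): returning
    # non-HTML data with explicit headers from a handler:
    #
    #   return request.make_response(csv_data, headers=[
    #       ('Content-Type', 'text/csv'),
    #       ('Content-Disposition', 'attachment; filename="export.csv"')])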
def render(self, template, qcontext=None, lazy=True, **kw):
""" Lazy render of a QWeb template.
        The actual rendering of the given template will occur at the end of
the dispatching. Meanwhile, the template and/or qcontext can be
altered or even replaced by a static response.
:param basestring template: template to render
:param dict qcontext: Rendering context to use
:param bool lazy: whether the template rendering should be deferred
until the last possible moment
:param kw: forwarded to werkzeug's Response object
"""
response = Response(template=template, qcontext=qcontext, **kw)
if not lazy:
return response.render()
return response
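    # Illustrative sketch (the template name and rendering context are made up):
    #
    #   return request.render('my_module.my_template', {'records': records})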
def not_found(self, description=None):
""" Shortcut for a `HTTP 404
<http://tools.ietf.org/html/rfc7231#section-6.5.4>`_ (Not Found)
response
"""
return werkzeug.exceptions.NotFound(description)
def httprequest(f):
"""
.. deprecated:: 8.0
Use the :func:`~openerp.http.route` decorator instead.
"""
base = f.__name__.lstrip('/')
if f.__name__ == "index":
base = ""
return route([base, base + "/<path:_ignored_path>"], type="http", auth="user", combine=True)(f)
#----------------------------------------------------------
# Controller and route registration
#----------------------------------------------------------
addons_module = {}
addons_manifest = {}
controllers_per_module = collections.defaultdict(list)
class ControllerType(type):
def __init__(cls, name, bases, attrs):
super(ControllerType, cls).__init__(name, bases, attrs)
# flag old-style methods with req as first argument
for k, v in attrs.items():
if inspect.isfunction(v) and hasattr(v, 'original_func'):
# Set routing type on original functions
routing_type = v.routing.get('type')
parent = [claz for claz in bases if isinstance(claz, ControllerType) and hasattr(claz, k)]
parent_routing_type = getattr(parent[0], k).original_func.routing_type if parent else routing_type or 'http'
if routing_type is not None and routing_type is not parent_routing_type:
routing_type = parent_routing_type
_logger.warn("Subclass re-defines <function %s.%s.%s> with different type than original."
" Will use original type: %r" % (cls.__module__, cls.__name__, k, parent_routing_type))
v.original_func.routing_type = routing_type or parent_routing_type
spec = inspect.getargspec(v.original_func)
first_arg = spec.args[1] if len(spec.args) >= 2 else None
if first_arg in ["req", "request"]:
v._first_arg_is_req = True
# store the controller in the controllers list
name_class = ("%s.%s" % (cls.__module__, cls.__name__), cls)
class_path = name_class[0].split(".")
if not class_path[:2] == ["openerp", "addons"]:
module = ""
else:
# we want to know all modules that have controllers
module = class_path[2]
# but we only store controllers directly inheriting from Controller
if not "Controller" in globals() or not Controller in bases:
return
controllers_per_module[module].append(name_class)
class Controller(object):
__metaclass__ = ControllerType
class EndPoint(object):
def __init__(self, method, routing):
self.method = method
self.original = getattr(method, 'original_func', method)
self.routing = routing
self.arguments = {}
@property
def first_arg_is_req(self):
# Backward for 7.0
return getattr(self.method, '_first_arg_is_req', False)
def __call__(self, *args, **kw):
return self.method(*args, **kw)
def routing_map(modules, nodb_only, converters=None):
routing_map = werkzeug.routing.Map(strict_slashes=False, converters=converters)
def get_subclasses(klass):
def valid(c):
return c.__module__.startswith('openerp.addons.') and c.__module__.split(".")[2] in modules
subclasses = klass.__subclasses__()
result = []
for subclass in subclasses:
if valid(subclass):
result.extend(get_subclasses(subclass))
if not result and valid(klass):
result = [klass]
return result
uniq = lambda it: collections.OrderedDict((id(x), x) for x in it).values()
for module in modules:
if module not in controllers_per_module:
continue
for _, cls in controllers_per_module[module]:
subclasses = uniq(c for c in get_subclasses(cls) if c is not cls)
if subclasses:
name = "%s (extended by %s)" % (cls.__name__, ', '.join(sub.__name__ for sub in subclasses))
cls = type(name, tuple(reversed(subclasses)), {})
o = cls()
members = inspect.getmembers(o, inspect.ismethod)
for _, mv in members:
if hasattr(mv, 'routing'):
routing = dict(type='http', auth='user', methods=None, routes=None)
methods_done = list()
# update routing attributes from subclasses(auth, methods...)
for claz in reversed(mv.im_class.mro()):
fn = getattr(claz, mv.func_name, None)
if fn and hasattr(fn, 'routing') and fn not in methods_done:
methods_done.append(fn)
routing.update(fn.routing)
if not nodb_only or routing['auth'] == "none":
                        assert routing['routes'], "Method %r has no route defined" % mv
endpoint = EndPoint(mv, routing)
for url in routing['routes']:
if routing.get("combine", False):
# deprecated v7 declaration
url = o._cp_path.rstrip('/') + '/' + url.lstrip('/')
if url.endswith("/") and len(url) > 1:
url = url[: -1]
xtra_keys = 'defaults subdomain build_only strict_slashes redirect_to alias host'.split()
kw = {k: routing[k] for k in xtra_keys if k in routing}
routing_map.add(werkzeug.routing.Rule(url, endpoint=endpoint, methods=routing['methods'], **kw))
return routing_map
#----------------------------------------------------------
# HTTP Sessions
#----------------------------------------------------------
class AuthenticationError(Exception):
pass
class SessionExpiredException(Exception):
pass
class Service(object):
"""
.. deprecated:: 8.0
Use :func:`dispatch_rpc` instead.
"""
def __init__(self, session, service_name):
self.session = session
self.service_name = service_name
def __getattr__(self, method):
def proxy_method(*args):
result = dispatch_rpc(self.service_name, method, args)
return result
return proxy_method
class Model(object):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
"""
def __init__(self, session, model):
self.session = session
self.model = model
self.proxy = self.session.proxy('object')
def __getattr__(self, method):
self.session.assert_valid()
def proxy(*args, **kw):
# Can't provide any retro-compatibility for this case, so we check it and raise an Exception
# to tell the programmer to adapt his code
if not request.db or not request.uid or self.session.db != request.db \
or self.session.uid != request.uid:
raise Exception("Trying to use Model with badly configured database or user.")
if method.startswith('_'):
raise Exception("Access denied")
mod = request.registry[self.model]
meth = getattr(mod, method)
# make sure to instantiate an environment
cr = request.env.cr
result = meth(cr, request.uid, *args, **kw)
# reorder read
if method == "read":
if isinstance(result, list) and len(result) > 0 and "id" in result[0]:
index = {}
for r in result:
index[r['id']] = r
result = [index[x] for x in args[0] if x in index]
return result
return proxy
class OpenERPSession(werkzeug.contrib.sessions.Session):
def __init__(self, *args, **kwargs):
self.inited = False
self.modified = False
self.rotate = False
super(OpenERPSession, self).__init__(*args, **kwargs)
self.inited = True
self._default_values()
self.modified = False
def __getattr__(self, attr):
return self.get(attr, None)
def __setattr__(self, k, v):
if getattr(self, "inited", False):
try:
object.__getattribute__(self, k)
except:
return self.__setitem__(k, v)
object.__setattr__(self, k, v)
def authenticate(self, db, login=None, password=None, uid=None):
"""
Authenticate the current user with the given db, login and
password. If successful, store the authentication parameters in the
current session and request.
        :param uid: If not None, that user id will be used instead of the login
to authenticate the user.
"""
if uid is None:
wsgienv = request.httprequest.environ
env = dict(
base_location=request.httprequest.url_root.rstrip('/'),
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
uid = dispatch_rpc('common', 'authenticate', [db, login, password, env])
else:
security.check(db, uid, password)
self.db = db
self.uid = uid
self.login = login
self.password = password
request.uid = uid
request.disable_db = False
if uid: self.get_context()
return uid
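    # Illustrative sketch of a login flow (db/login/password would come from a
    # login form in an actual controller):
    #
    #   uid = request.session.authenticate(db, login, password)
    #   if not uid:
    #       ...  # authentication failed, uid is falsy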
def check_security(self):
"""
Check the current authentication parameters to know if those are still
valid. This method should be called at each request. If the
authentication fails, a :exc:`SessionExpiredException` is raised.
"""
if not self.db or not self.uid:
raise SessionExpiredException("Session expired")
security.check(self.db, self.uid, self.password)
def logout(self, keep_db=False):
for k in self.keys():
if not (keep_db and k == 'db'):
del self[k]
self._default_values()
self.rotate = True
def _default_values(self):
self.setdefault("db", None)
self.setdefault("uid", None)
self.setdefault("login", None)
self.setdefault("password", None)
self.setdefault("context", {})
def get_context(self):
"""
Re-initializes the current user's session context (based on his
preferences) by calling res.users.get_context() with the old context.
:returns: the new context
"""
assert self.uid, "The user needs to be logged-in to initialize his context"
self.context = request.registry.get('res.users').context_get(request.cr, request.uid) or {}
self.context['uid'] = self.uid
self._fix_lang(self.context)
return self.context
def _fix_lang(self, context):
""" OpenERP provides languages which may not make sense and/or may not
be understood by the web client's libraries.
Fix those here.
:param dict context: context to fix
"""
lang = context.get('lang')
# inane OpenERP locale
if lang == 'ar_AR':
lang = 'ar'
# lang to lang_REGION (datejs only handles lang_REGION, no bare langs)
if lang in babel.core.LOCALE_ALIASES:
lang = babel.core.LOCALE_ALIASES[lang]
context['lang'] = lang or 'en_US'
# Deprecated to be removed in 9
"""
Damn properties for retro-compatibility. All of that is deprecated,
all of that.
"""
@property
def _db(self):
return self.db
@_db.setter
def _db(self, value):
self.db = value
@property
def _uid(self):
return self.uid
@_uid.setter
def _uid(self, value):
self.uid = value
@property
def _login(self):
return self.login
@_login.setter
def _login(self, value):
self.login = value
@property
def _password(self):
return self.password
@_password.setter
def _password(self, value):
self.password = value
def send(self, service_name, method, *args):
"""
.. deprecated:: 8.0
Use :func:`dispatch_rpc` instead.
"""
return dispatch_rpc(service_name, method, args)
def proxy(self, service):
"""
.. deprecated:: 8.0
Use :func:`dispatch_rpc` instead.
"""
return Service(self, service)
def assert_valid(self, force=False):
"""
.. deprecated:: 8.0
Use :meth:`check_security` instead.
Ensures this session is valid (logged into the openerp server)
"""
if self.uid and not force:
return
# TODO use authenticate instead of login
self.uid = self.proxy("common").login(self.db, self.login, self.password)
if not self.uid:
raise AuthenticationError("Authentication failure")
def ensure_valid(self):
"""
.. deprecated:: 8.0
Use :meth:`check_security` instead.
"""
if self.uid:
try:
self.assert_valid(True)
except Exception:
self.uid = None
def execute(self, model, func, *l, **d):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
"""
model = self.model(model)
r = getattr(model, func)(*l, **d)
return r
def exec_workflow(self, model, id, signal):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
"""
self.assert_valid()
r = self.proxy('object').exec_workflow(self.db, self.uid, self.password, model, signal, id)
return r
def model(self, model):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
Get an RPC proxy for the object ``model``, bound to this session.
:param model: an OpenERP model name
:type model: str
:rtype: a model object
"""
if not self.db:
raise SessionExpiredException("Session expired")
return Model(self, model)
def save_action(self, action):
"""
        This method stores an action object in the session and returns an integer
identifying that action. The method get_action() can be used to get
back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
saved_actions = self.setdefault('saved_actions', {"next": 1, "actions": {}})
# we don't allow more than 10 stored actions
if len(saved_actions["actions"]) >= 10:
del saved_actions["actions"][min(saved_actions["actions"])]
key = saved_actions["next"]
saved_actions["actions"][key] = action
saved_actions["next"] = key + 1
self.modified = True
return key
def get_action(self, key):
"""
        Gets back a previously saved action. This method can return None if the action
        was saved too long ago (this case should be handled in a smart way).
:param key: The key given by save_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
saved_actions = self.get('saved_actions', {})
return saved_actions.get("actions", {}).get(key)
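    # Illustrative sketch of the save/get round trip (the action dict is made
    # up):
    #
    #   key = request.session.save_action({'type': 'ir.actions.act_window',
    #                                      'res_model': 'res.partner'})
    #   ...
    #   action = request.session.get_action(key)  # None if evicted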
def session_gc(session_store):
if random.random() < 0.001:
# we keep session one week
last_week = time.time() - 60*60*24*7
for fname in os.listdir(session_store.path):
path = os.path.join(session_store.path, fname)
try:
if os.path.getmtime(path) < last_week:
os.unlink(path)
except OSError:
pass
#----------------------------------------------------------
# WSGI Layer
#----------------------------------------------------------
# Add potentially missing (older ubuntu) font mime types
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-ttf', '.ttf')
class Response(werkzeug.wrappers.Response):
""" Response object passed through controller route chain.
In addition to the :class:`werkzeug.wrappers.Response` parameters, this
class's constructor can take the following additional parameters
for QWeb Lazy Rendering.
:param basestring template: template to render
:param dict qcontext: Rendering context to use
:param int uid: User id to use for the ir.ui.view render call,
``None`` to use the request's user (the default)
these attributes are available as parameters on the Response object and
can be altered at any time before rendering
Also exposes all the attributes and methods of
:class:`werkzeug.wrappers.Response`.
"""
default_mimetype = 'text/html'
def __init__(self, *args, **kw):
template = kw.pop('template', None)
qcontext = kw.pop('qcontext', None)
uid = kw.pop('uid', None)
super(Response, self).__init__(*args, **kw)
self.set_default(template, qcontext, uid)
def set_default(self, template=None, qcontext=None, uid=None):
self.template = template
self.qcontext = qcontext or dict()
self.uid = uid
# Support for Cross-Origin Resource Sharing
if request.endpoint and 'cors' in request.endpoint.routing:
self.headers.set('Access-Control-Allow-Origin', request.endpoint.routing['cors'])
methods = 'GET, POST'
if request.endpoint.routing['type'] == 'json':
methods = 'POST'
elif request.endpoint.routing.get('methods'):
methods = ', '.join(request.endpoint.routing['methods'])
self.headers.set('Access-Control-Allow-Methods', methods)
@property
def is_qweb(self):
return self.template is not None
def render(self):
""" Renders the Response's template, returns the result
"""
view_obj = request.registry["ir.ui.view"]
uid = self.uid or request.uid or openerp.SUPERUSER_ID
return view_obj.render(
request.cr, uid, self.template, self.qcontext,
context=request.context)
def flatten(self):
""" Forces the rendering of the response's template, sets the result
as response body and unsets :attr:`.template`
"""
self.response.append(self.render())
self.template = None
class DisableCacheMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
def start_wrapped(status, headers):
referer = environ.get('HTTP_REFERER', '')
parsed = urlparse.urlparse(referer)
debug = parsed.query.count('debug') >= 1
new_headers = []
unwanted_keys = ['Last-Modified']
if debug:
new_headers = [('Cache-Control', 'no-cache')]
unwanted_keys += ['Expires', 'Etag', 'Cache-Control']
for k, v in headers:
if k not in unwanted_keys:
new_headers.append((k, v))
start_response(status, new_headers)
return self.app(environ, start_wrapped)
class Root(object):
"""Root WSGI application for the OpenERP Web Client.
"""
def __init__(self):
self._loaded = False
@lazy_property
def session_store(self):
# Setup http sessions
path = openerp.tools.config.session_dir
_logger.debug('HTTP sessions stored in: %s', path)
return werkzeug.contrib.sessions.FilesystemSessionStore(path, session_class=OpenERPSession)
@lazy_property
def nodb_routing_map(self):
_logger.info("Generating nondb routing")
return routing_map([''] + openerp.conf.server_wide_modules, True)
def __call__(self, environ, start_response):
""" Handle a WSGI request
"""
if not self._loaded:
self._loaded = True
self.load_addons()
return self.dispatch(environ, start_response)
def load_addons(self):
""" Load all addons from addons path containing static files and
controllers and configure them. """
# TODO should we move this to ir.http so that only configured modules are served ?
statics = {}
for addons_path in openerp.modules.module.ad_paths:
for module in sorted(os.listdir(str(addons_path))):
if module not in addons_module:
manifest_path = os.path.join(addons_path, module, '__openerp__.py')
path_static = os.path.join(addons_path, module, 'static')
if os.path.isfile(manifest_path) and os.path.isdir(path_static):
manifest = ast.literal_eval(open(manifest_path).read())
if not manifest.get('installable', True):
continue
manifest['addons_path'] = addons_path
_logger.debug("Loading %s", module)
if 'openerp.addons' in sys.modules:
m = __import__('openerp.addons.' + module)
else:
m = None
addons_module[module] = m
addons_manifest[module] = manifest
statics['/%s/static' % module] = path_static
if statics:
_logger.info("HTTP Configuring static files")
app = werkzeug.wsgi.SharedDataMiddleware(self.dispatch, statics, cache_timeout=STATIC_CACHE)
self.dispatch = DisableCacheMiddleware(app)
def setup_session(self, httprequest):
# recover or create session
session_gc(self.session_store)
sid = httprequest.args.get('session_id')
explicit_session = True
if not sid:
sid = httprequest.headers.get("X-Openerp-Session-Id")
if not sid:
sid = httprequest.cookies.get('session_id')
explicit_session = False
if sid is None:
httprequest.session = self.session_store.new()
else:
httprequest.session = self.session_store.get(sid)
return explicit_session
def setup_db(self, httprequest):
db = httprequest.session.db
# Check if session.db is legit
if db:
if db not in db_filter([db], httprequest=httprequest):
_logger.warn("Logged into database '%s', but dbfilter "
"rejects it; logging session out.", db)
httprequest.session.logout()
db = None
if not db:
httprequest.session.db = db_monodb(httprequest)
def setup_lang(self, httprequest):
if not "lang" in httprequest.session.context:
lang = httprequest.accept_languages.best or "en_US"
lang = babel.core.LOCALE_ALIASES.get(lang, lang).replace('-', '_')
httprequest.session.context["lang"] = lang
def get_request(self, httprequest):
# deduce type of request
if httprequest.args.get('jsonp'):
return JsonRequest(httprequest)
if httprequest.mimetype in ("application/json", "application/json-rpc"):
return JsonRequest(httprequest)
else:
return HttpRequest(httprequest)
def get_response(self, httprequest, result, explicit_session):
if isinstance(result, Response) and result.is_qweb:
try:
result.flatten()
except(Exception), e:
if request.db:
result = request.registry['ir.http']._handle_exception(e)
else:
raise
if isinstance(result, basestring):
response = Response(result, mimetype='text/html')
else:
response = result
if httprequest.session.should_save:
if httprequest.session.rotate:
self.session_store.delete(httprequest.session)
httprequest.session.sid = self.session_store.generate_key()
httprequest.session.modified = True
self.session_store.save(httprequest.session)
# We must not set the cookie if the session id was specified using a http header or a GET parameter.
        # There are two reasons for this:
# - When using one of those two means we consider that we are overriding the cookie, which means creating a new
# session on top of an already existing session and we don't want to create a mess with the 'normal' session
# (the one using the cookie). That is a special feature of the Session Javascript class.
# - It could allow session fixation attacks.
if not explicit_session and hasattr(response, 'set_cookie'):
response.set_cookie('session_id', httprequest.session.sid, max_age=90 * 24 * 60 * 60)
return response
def dispatch(self, environ, start_response):
"""
Performs the actual WSGI dispatching for the application.
"""
try:
httprequest = werkzeug.wrappers.Request(environ)
httprequest.app = self
explicit_session = self.setup_session(httprequest)
self.setup_db(httprequest)
self.setup_lang(httprequest)
request = self.get_request(httprequest)
def _dispatch_nodb():
try:
func, arguments = self.nodb_routing_map.bind_to_environ(request.httprequest.environ).match()
except werkzeug.exceptions.HTTPException, e:
return request._handle_exception(e)
request.set_handler(func, arguments, "none")
result = request.dispatch()
return result
with request:
db = request.session.db
if db:
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
try:
with openerp.tools.mute_logger('openerp.sql_db'):
ir_http = request.registry['ir.http']
except (AttributeError, psycopg2.OperationalError):
# psycopg2 error or attribute error while constructing
# the registry. That means the database probably does
                        # not exist anymore or the code doesn't match the db.
# Log the user out and fall back to nodb
request.session.logout()
result = _dispatch_nodb()
else:
result = ir_http._dispatch()
openerp.modules.registry.RegistryManager.signal_caches_change(db)
else:
result = _dispatch_nodb()
response = self.get_response(httprequest, result, explicit_session)
return response(environ, start_response)
except werkzeug.exceptions.HTTPException, e:
return e(environ, start_response)
def get_db_router(self, db):
if not db:
return self.nodb_routing_map
return request.registry['ir.http'].routing_map()
def db_list(force=False, httprequest=None):
dbs = dispatch_rpc("db", "list", [force])
return db_filter(dbs, httprequest=httprequest)
def db_filter(dbs, httprequest=None):
httprequest = httprequest or request.httprequest
h = httprequest.environ.get('HTTP_HOST', '').split(':')[0]
d, _, r = h.partition('.')
if d == "www" and r:
d = r.partition('.')[0]
r = openerp.tools.config['dbfilter'].replace('%h', h).replace('%d', d)
dbs = [i for i in dbs if re.match(r, i)]
return dbs
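# Illustrative examples of the dbfilter option consumed above (the values are
# assumptions, not defaults): with dbfilter = '^%d$' and a request on
# odoo.example.com only the database named "odoo" is kept, while
# dbfilter = '.*' keeps every database returned by the server.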
def db_monodb(httprequest=None):
"""
Magic function to find the current database.
Implementation details:
* Magic
* More magic
Returns ``None`` if the magic is not magic enough.
"""
httprequest = httprequest or request.httprequest
dbs = db_list(True, httprequest)
# try the db already in the session
db_session = httprequest.session.db
if db_session in dbs:
return db_session
# if there is only one possible db, we take that one
if len(dbs) == 1:
return dbs[0]
return None
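# Illustrative behaviour of db_monodb() (hypothetical database names): with a
# single filtered database ['prod'] it returns 'prod'; with ['prod', 'test']
# and no matching db in the session it returns None, and the caller must ask
# the user to choose a database explicitly.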
def send_file(filepath_or_fp, mimetype=None, as_attachment=False, filename=None, mtime=None,
add_etags=True, cache_timeout=STATIC_CACHE, conditional=True):
"""This is a modified version of Flask's send_file()
Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
guessing requires a `filename` or an `attachment_filename` to be
provided.
Please never pass filenames to this function from user sources without
checking them first.
:param filepath_or_fp: the filename of the file to send.
Alternatively a file object might be provided
in which case `X-Sendfile` might not work and
fall back to the traditional method. Make sure
that the file pointer is positioned at the start
of data to send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided, otherwise
auto detection happens.
:param as_attachment: set to `True` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param filename: the filename for the attachment if it differs from the file's filename or
if using file object without 'name' attribute (eg: E-tags with StringIO).
:param mtime: last modification time to use for conditional response.
:param add_etags: set to `False` to disable attaching of etags.
:param conditional: set to `False` to disable conditional responses.
:param cache_timeout: the timeout in seconds for the headers.
"""
if isinstance(filepath_or_fp, (str, unicode)):
if not filename:
filename = os.path.basename(filepath_or_fp)
file = open(filepath_or_fp, 'rb')
if not mtime:
mtime = os.path.getmtime(filepath_or_fp)
else:
file = filepath_or_fp
if not filename:
filename = getattr(file, 'name', None)
file.seek(0, 2)
size = file.tell()
file.seek(0)
if mimetype is None and filename:
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
headers = werkzeug.datastructures.Headers()
if as_attachment:
if filename is None:
raise TypeError('filename unavailable, required for sending as attachment')
headers.add('Content-Disposition', 'attachment', filename=filename)
headers['Content-Length'] = size
data = wrap_file(request.httprequest.environ, file)
rv = Response(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
if isinstance(mtime, str):
try:
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
mtime = datetime.datetime.strptime(mtime.split('.')[0], server_format)
except Exception:
mtime = None
if mtime is not None:
rv.last_modified = mtime
rv.cache_control.public = True
if cache_timeout:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time.time() + cache_timeout)
if add_etags and filename and mtime:
rv.set_etag('odoo-%s-%s-%s' % (
mtime,
size,
adler32(
filename.encode('utf-8') if isinstance(filename, unicode)
else filename
) & 0xffffffff
))
if conditional:
rv = rv.make_conditional(request.httprequest)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop('x-sendfile', None)
return rv
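# Illustrative usage sketch (added for documentation only; the controller,
# route and file path are hypothetical). It is kept commented out because a
# Controller subclass defined here would be picked up by the Controller
# metaclass at import time:
#
#   class DownloadController(Controller):
#       @route('/my_module/download', type='http', auth='user')
#       def download(self):
#           return send_file('/tmp/report.pdf', mimetype='application/pdf',
#                            as_attachment=True, filename='report.pdf')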
#----------------------------------------------------------
# RPC controller
#----------------------------------------------------------
class CommonController(Controller):
@route('/jsonrpc', type='json', auth="none")
def jsonrpc(self, service, method, args):
""" Method used by client APIs to contact OpenERP. """
return dispatch_rpc(service, method, args)
@route('/gen_session_id', type='json', auth="none")
def gen_session_id(self):
nsession = root.session_store.new()
return nsession.sid
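# Illustrative JSON-RPC 2.0 request for the /jsonrpc route above (shown as a
# comment so nothing is executed at import time). It mirrors the
# dispatch_rpc("db", "list", [force]) call used elsewhere in this file:
#
#   POST /jsonrpc
#   {"jsonrpc": "2.0", "method": "call", "id": 1,
#    "params": {"service": "db", "method": "list", "args": [false]}}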
# register main wsgi handler
root = Root()
openerp.service.wsgi_server.register_wsgi_handler(root)
# vim:et:ts=4:sw=4:
| agpl-3.0 | 8,174,387,106,085,391,000 | 36.972637 | 178 | 0.581232 | false |
Densvin/RSSVK | vkfeed/utils.py | 1 | 3711 | '''Various utility functions.'''
from __future__ import unicode_literals
import calendar
import cgi
import datetime
import httplib
import logging
import os
import re
if not os.getenv('RSSvk_TESTS'):
import google.appengine.api.urlfetch as urlfetch
from google.appengine.ext.webapp import template
from vkfeed.core import Error
LOG = logging.getLogger(__name__)
class HTTPNotFoundError(Error):
'''Raised on HTTP Page Not Found error.'''
def __init__(self, *args, **kwargs):
Error.__init__(self, *args, **kwargs)
def fetch_url(url, content_type = 'text/html'):
'''Fetches the specified URL.'''
LOG.info('Fetching "%s"...', url)
try:
page = _fetch_url(url, headers = { 'Accept-Language': 'ru,en' })
except urlfetch.Error as e:
raise Error('Failed to fetch the page: {0}.', e)
else:
if page.status_code == httplib.OK:
LOG.info('"%s" has been successfully fetched.', url)
else:
error_class = HTTPNotFoundError if page.status_code == httplib.NOT_FOUND else Error
raise error_class('The server returned error: {0} ({1}).',
httplib.responses.get(page.status_code, 'Unknown error'), page.status_code)
content = page.content
for key in page.headers:
if key.lower() == 'content-type':
value, params = cgi.parse_header(page.headers[key])
if value != content_type:
raise Error('The server returned a page with invalid content type: {0}.', value)
if content_type.startswith('text/'):
for param in params:
if param.lower() == 'charset':
content_encoding = params[param]
break
else:
content_encoding = 'UTF-8'
try:
content = content.decode(content_encoding)
except UnicodeDecodeError:
raise Error('The server returned a page in invalid encoding.')
break
else:
raise Error('The server returned a page with missing content type.')
return content
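# Hedged usage sketch (added for illustration; the URL is hypothetical and the
# helper is never called at import time, so no network request is made):
def _fetch_url_example():
    try:
        html = fetch_url('http://vk.com/durov', content_type = 'text/html')
    except HTTPNotFoundError:
        html = None                   # the page does not exist
    except Error as error:
        LOG.error('Failed to fetch the page: %s', error)
        html = None
    return html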
def http_timestamp(date):
"""Returns a timestamp corresponding to the specified HTTP date.
FIXME: there is no timezone parsing support in the standard Python
library, so only GMT dates are supported.
"""
for fmt in (
"%a, %d %b %Y %H:%M:%S GMT", # RFC 1123
"%a, %d %b %Y %H:%M:%S GMT+00:00", # RFC 1123
"%a, %d %b %Y %H:%M:%S +0000", # ???
"%A, %d-%b-%y %H:%M:%S GMT", # RFC 850
"%A, %d-%b-%y %H:%M:%S GMT+00:00", # RFC 850
"%a %b %d %H:%M:%S %Y" # asctime(3)
):
try:
timeo = datetime.datetime.strptime(date, fmt)
except ValueError:
continue
return calendar.timegm(datetime.datetime.utctimetuple(timeo))
raise Exception("Invalid HTTP date format")
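# Illustrative conversion (not part of the original module):
#   http_timestamp('Thu, 01 Jan 2015 00:00:00 GMT')  ->  1420070400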
def render_template(name, params = {}):
'''Renders the specified template.'''
return template.render(os.path.join('templates', name), params)
def zero_subscribers(user_agent):
'''Returns True if the feed has zero subscribers.'''
return re.search(r'[^0-9]0\s+(?:reader|subscriber)', user_agent, re.IGNORECASE) is not None
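# Illustrative behaviour with hypothetical user agent strings:
#   zero_subscribers('SomeReader/1.0 (0 subscribers)')   -> True
#   zero_subscribers('SomeReader/1.0 (15 subscribers)')  -> False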
def _fetch_url(*args, **kwargs):
'''
Sometimes urlfetch.fetch() raises weird error 'ApplicationError: 5' when it
shouldn't. So this wrapper ignores errors and tries to fetch the URL again.
'''
tries = 3
while True:
try:
return urlfetch.fetch(*args, **kwargs)
except urlfetch.Error as e:
if tries <= 1:
raise e
tries -= 1
| bsd-2-clause | -4,000,278,410,973,525,000 | 28.452381 | 96 | 0.582592 | false |