| repo_name (stringlengths 5-92) | path (stringlengths 4-221) | copies (stringclasses, 19 values) | size (stringlengths 4-6) | content (stringlengths 766-896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
alfa-addon/addon | plugin.video.alfa/lib/python_libtorrent/python_libtorrent/functions.py | 1 | 10908 | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from __future__ import absolute_import
from builtins import object
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import time
import xbmc, xbmcgui, xbmcaddon
from .net import HTTP
from core import filetools ### Alfa
from core import ziptools
from platformcode import config ### Alfa
#__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__= filetools.join(filetools.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '2.0.2' ### Alfa
__plugin__ = "python-libtorrent v.2.0.2" ### Alfa
__icon__= filetools.join(filetools.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
if PY3:
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGINFO )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGINFO )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGINFO )
else:
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager(object):
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=filetools.dirname(filetools.dirname(__file__))
ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: rest of the method
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
ver1 = 2
ver2 = 0
if ver1 > 1 or (ver1 == 1 and ver2 >= 2):
global __libbaseurl__
__libbaseurl__ = ['https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent', \
'https://gitlab.com/addon-alfa/alfa-repo/-/raw/master/downloads/libtorrent']
else:
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
def check_exist(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
for libname in get_libname(self.platform):
if not filetools.exists(filetools.join(self.dest_path, libname)):
return False
return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = filetools.join(self.dest_path, libname)
self.sizepath=filetools.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(filetools.getsize(self.libpath))
size_old=open( self.sizepath, "r" ).read()
if size_old!=size:
need_update=True
return need_update
def update(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
if self.check_update():
for libname in get_libname(self.platform):
self.libpath = filetools.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
def download(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
ver1, ver2, ver3 = self.platform['version'].split('.') ### Alfa: rest of the method
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
ver1 = 2
ver2 = 0
if ver1 > 1 or (ver1 == 1 and ver2 >= 2):
global __libbaseurl__
__libbaseurl__ = ['https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent', \
'https://gitlab.com/addon-alfa/alfa-repo/-/raw/master/downloads/libtorrent']
else:
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
p_version = self.platform['version']
if PY3: p_version += '_PY3'
dest = filetools.join(self.dest_path, libname)
log("try to fetch %s/%s/%s" % (self.platform['system'], p_version, libname))
for url_lib in __libbaseurl__: ### Alfa
url = "%s/%s/%s/%s.zip" % (url_lib, self.platform['system'], p_version, libname)
url_size = "%s/%s/%s/%s.size.txt" % (url_lib, self.platform['system'], p_version, libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
response = self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
if response.code != 200: continue ### Alfa
response = self.http.fetch(url_size, download=dest + '.size.txt', progress=False) ### Alfa
log("%s -> %s" % (url_size, dest + '.size.txt'))
if response.code != 200: continue ### Alfa
try:
unzipper = ziptools.ziptools()
unzipper.extract("%s.zip" % dest, self.dest_path)
except:
xbmc.executebuiltin('Extract("%s.zip","%s")' % (dest, self.dest_path))
time.sleep(1)
if filetools.exists(dest):
filetools.remove(dest + ".zip")
except:
import traceback
text = 'Failed download %s!' % libname
log(text)
log(traceback.format_exc(1))
#xbmc.executebuiltin("Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
continue
else:
filetools.copy(filetools.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
#dest_alfa = filetools.join(filetools.translatePath(__settings__.getAddonInfo('Path')), \
# 'lib', libname) ### Alfa
#filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = filetools.join(filetools.translatePath(__settings__.getAddonInfo('Profile')), \
'bin', libname) ### Alfa
filetools.remove(dest_alfa, silent=True)
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
break
else:
return False
return True
def android_workaround(self, new_dest_path): ### Alfa (entera)
for libname in get_libname(self.platform):
libpath = filetools.join(self.dest_path, libname)
size = str(filetools.getsize(libpath))
new_libpath = filetools.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size = str(filetools.getsize(new_libpath))
if size != new_size:
res = filetools.remove(new_libpath, su=True)
if res:
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
res = filetools.copy(libpath, new_libpath, ch_mod='777', su=True) ### ALFA
else:
log('Module exists. Not copied... %s' % new_libpath) ### ALFA
return new_dest_path
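# Illustrative usage sketch (not part of the original add-on file). It only shows
# the call sequence the surrounding Alfa code is expected to use: build the
# platform dict with get_platform(), point a LibraryManager at the directory that
# should hold the libtorrent binaries, then check/download/update. The destination
# path below is a placeholder, and running this for real requires Kodi
# (xbmc/xbmcaddon) plus the Alfa core modules imported at the top of this file.
#
#   platform = get_platform()
#   manager = LibraryManager('/storage/.kodi/userdata/addon_data/plugin.video.alfa/bin', platform)
#   if not manager.check_exist():
#       manager.download()   # fetches <libname>.zip and <libname>.size.txt, then unzips
#   else:
#       manager.update()     # re-downloads only when the recorded .size.txt no longer matches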
| gpl-3.0 | 4,003,463,386,131,299,000 | 50.201878 | 127 | 0.530992 | false | 4.033284 | false | false | false |
h4ck3rm1k3/ansible | v2/ansible/parsing/mod_args.py | 1 | 10144 | # (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems, string_types
from types import NoneType
from ansible.errors import AnsibleParserError
from ansible.plugins import module_loader
from ansible.parsing.splitter import parse_kv
class ModuleArgsParser:
"""
There are several ways a module and argument set can be expressed:
# legacy form (for a shell command)
- action: shell echo hi
# common shorthand for local actions vs delegate_to
- local_action: shell echo hi
# most commonly:
- copy: src=a dest=b
# legacy form
- action: copy src=a dest=b
# complex args form, for passing structured data
- copy:
src: a
dest: b
# gross, but technically legal
- action:
module: copy
args:
src: a
dest: b
# extra gross, but also legal. in this case, the args specified
# will act as 'defaults' and will be overridden by any args specified
# in one of the other formats (complex args under the action, or
# parsed from the k=v string
- command: 'pwd'
args:
chdir: '/tmp'
This class has some of the logic to canonicalize these into the form
- module: <module_name>
delegate_to: <optional>
args: <args>
Args may also be munged for certain shell command parameters.
"""
def __init__(self, task_ds=dict()):
assert isinstance(task_ds, dict)
self._task_ds = task_ds
def _split_module_string(self, str):
'''
when module names are expressed like:
action: copy src=a dest=b
the first part of the string is the name of the module
and the rest are strings pertaining to the arguments.
'''
tokens = str.split()
if len(tokens) > 1:
return (tokens[0], " ".join(tokens[1:]))
else:
return (tokens[0], "")
def _handle_shell_weirdness(self, action, args):
'''
given an action name and an args dictionary, return the
proper action name and args dictionary. This mostly is due
to shell/command being treated special and nothing else
'''
# don't handle non shell/command modules in this function
# TODO: in terms of the whole app, should 'raw' also fit here?
if action not in ['shell', 'command']:
return (action, args)
# the shell module really is the command module with an additional
# parameter
if action == 'shell':
action = 'command'
args['_uses_shell'] = True
return (action, args)
def _normalize_parameters(self, thing, action=None, additional_args=dict()):
'''
arguments can be fuzzy. Deal with all the forms.
'''
# final args are the ones we'll eventually return, so first update
# them with any additional args specified, which have lower priority
# than those which may be parsed/normalized next
final_args = dict()
if additional_args:
final_args.update(additional_args)
# how we normalize depends if we figured out what the module name is
# yet. If we have already figured it out, it's an 'old style' invocation.
# otherwise, it's not
if action is not None:
args = self._normalize_old_style_args(thing, action)
else:
(action, args) = self._normalize_new_style_args(thing)
# this can occasionally happen, simplify
if args and 'args' in args:
args = args['args']
# finally, update the args we're going to return with the ones
# which were normalized above
if args:
final_args.update(args)
return (action, final_args)
def _normalize_old_style_args(self, thing, action):
'''
deals with fuzziness in old-style (action/local_action) module invocations
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'local_action' : 'shell echo hi' }
{ 'action' : 'shell echo hi' }
{ 'local_action' : { 'module' : 'ec2', 'x' : 1, 'y': 2 }}
standardized outputs like:
( 'command', { _raw_params: 'echo hi', _uses_shell: True }
'''
if isinstance(thing, dict):
# form is like: local_action: { module: 'xyz', x: 2, y: 3 } ... uncommon!
args = thing
elif isinstance(thing, string_types):
# form is like: local_action: copy src=a dest=b ... pretty common
check_raw = action in ('command', 'shell', 'script')
args = parse_kv(thing, check_raw=check_raw)
elif isinstance(thing, NoneType):
# this can happen with modules which take no params, like ping:
args = None
else:
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return args
def _normalize_new_style_args(self, thing):
'''
deals with fuzziness in new style module invocations
accepting key=value pairs and dictionaries, and always returning dictionaries
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'shell' : 'echo hi' }
{ 'ec2' : { 'region' : 'xyz' }
{ 'ec2' : 'region=xyz' }
standardized outputs like:
('ec2', { region: 'xyz'} )
'''
action = None
args = None
if isinstance(thing, dict):
# form is like: copy: { src: 'a', dest: 'b' } ... common for structured (aka "complex") args
thing = thing.copy()
if 'module' in thing:
action = thing['module']
args = thing.copy()
del args['module']
elif isinstance(thing, string_types):
# form is like: copy: src=a dest=b ... common shorthand throughout ansible
(action, args) = self._split_module_string(thing)
check_raw = action in ('command', 'shell', 'script')
args = parse_kv(args, check_raw=check_raw)
else:
# need a dict or a string, so giving up
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return (action, args)
def parse(self):
'''
Given a task in one of the supported forms, parses and returns
returns the action, arguments, and delegate_to values for the
task, dealing with all sorts of levels of fuzziness.
'''
thing = None
action = None
delegate_to = None
args = dict()
#
# We can have one of action, local_action, or module specified
#
# this is the 'extra gross' scenario detailed above, so we grab
# the args and pass them in as additional arguments, which can/will
# be overwritten via dict updates from the other arg sources below
# FIXME: add test cases for this
additional_args = self._task_ds.get('args', dict())
# action
if 'action' in self._task_ds:
# an old school 'action' statement
thing = self._task_ds['action']
delegate_to = None
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# local_action
if 'local_action' in self._task_ds:
# local_action is similar but also implies a delegate_to
if action is not None:
raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
thing = self._task_ds.get('local_action', '')
delegate_to = 'localhost'
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# module: <stuff> is the more new-style invocation
# walk the input dictionary to see if we recognize a module name
for (item, value) in iteritems(self._task_ds):
if item in module_loader or item == 'meta':
# finding more than one module name is a problem
if action is not None:
raise AnsibleParserError("conflicting action statements", obj=self._task_ds)
action = item
thing = value
action, args = self._normalize_parameters(value, action=action, additional_args=additional_args)
# if we didn't see any module in the task at all, it's not a task really
if action is None:
raise AnsibleParserError("no action detected in task", obj=self._task_ds)
# FIXME: disabled for now, as there are other places besides the shell/script modules where
# having variables as the sole param for the module is valid (include_vars, add_host, and group_by?)
#elif args.get('_raw_params', '') != '' and action not in ('command', 'shell', 'script', 'include_vars'):
# raise AnsibleParserError("this task has extra params, which is only allowed in the command, shell or script module.", obj=self._task_ds)
# shell modules require special handling
(action, args) = self._handle_shell_weirdness(action, args)
return (action, args, delegate_to)
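# Illustrative sketch (not part of the Ansible source). The class docstring above
# lists several equivalent ways to spell a task; this fragment shows two of them
# being normalized to the same (action, args, delegate_to) triple. It assumes it
# is run inside an Ansible checkout so that module_loader can resolve 'copy'.
if __name__ == '__main__':
    short_form = ModuleArgsParser({'copy': 'src=a dest=b'})
    long_form = ModuleArgsParser({'action': {'module': 'copy', 'src': 'a', 'dest': 'b'}})
    print(short_form.parse())  # expected: ('copy', {'src': 'a', 'dest': 'b'}, None)
    print(long_form.parse())   # expected: ('copy', {'src': 'a', 'dest': 'b'}, None)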
| gpl-3.0 | -3,236,003,832,605,205,000 | 35.489209 | 149 | 0.605185 | false | 4.283784 | false | false | false |
sgraham/nope | tools/telemetry/telemetry/value/__init__.py | 1 | 12490 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The Value hierarchy provides a way of representing the values measurements
produce such that they can be merged across runs, grouped by page, and output
to different targets.
The core Value concept provides the basic functionality:
- association with a page, may be none
- naming and units
- importance tracking [whether a value will show up on a waterfall or output
file by default]
- other metadata, such as a description of what was measured
- default conversion to scalar and string
- merging properties
A page may actually run a few times during a single telemetry session.
Downstream consumers of test results typically want to group these runs
together, then compute summary statistics across runs. Value provides the
Merge* family of methods for this kind of aggregation.
"""
import os
from telemetry.core import discover
from telemetry.core import util
# When combining a pair of Values together, it is sometimes ambiguous whether
# the values should be concatenated, or one should be picked as representative.
# The possible merging policies are listed here.
CONCATENATE = 'concatenate'
PICK_FIRST = 'pick-first'
# When converting a Value to its buildbot equivalent, the context in which the
# value is being interpreted actually affects the conversion. This is insane,
# but there you have it. There are three contexts in which Values are converted
# for use by buildbot, represented by these output-intent values.
PER_PAGE_RESULT_OUTPUT_CONTEXT = 'per-page-result-output-context'
COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT = 'merged-pages-result-output-context'
SUMMARY_RESULT_OUTPUT_CONTEXT = 'summary-result-output-context'
class Value(object):
"""An abstract value produced by a telemetry page test.
"""
def __init__(self, page, name, units, important, description,
interaction_record):
"""A generic Value object.
Args:
page: A Page object, may be given as None to indicate that the value
represents results for multiple pages.
name: A value name string, may contain a dot. Values from the same test
with the same prefix before the dot may be considered to belong to
the same chart.
units: A units string.
important: Whether the value is "important". Causes the value to appear
by default in downstream UIs.
description: A string explaining in human-understandable terms what this
value represents.
interaction_record: The string label of the TimelineInteractionRecord with
which this value is associated.
"""
# TODO(eakuefner): Check user story here after migration (crbug.com/442036)
if not isinstance(name, basestring):
raise ValueError('name field of Value must be string.')
if not isinstance(units, basestring):
raise ValueError('units field of Value must be string.')
if not isinstance(important, bool):
raise ValueError('important field of Value must be bool.')
if not ((description is None) or isinstance(description, basestring)):
raise ValueError('description field of Value must absent or string.')
if not ((interaction_record is None) or
isinstance(interaction_record, basestring)):
raise ValueError('interaction_record field of Value must absent or '
'string.')
self.page = page
self.name = name
self.units = units
self.important = important
self.description = description
self.interaction_record = interaction_record
def IsMergableWith(self, that):
return (self.units == that.units and
type(self) == type(that) and
self.important == that.important and
self.interaction_record == that.interaction_record)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
"""Combines the provided list of values into a single compound value.
When a page runs multiple times, it may produce multiple values. This
function is given the same-named values across the multiple runs, and has
the responsibility of producing a single result.
It must return a single Value. If merging does not make sense, the
implementation must pick a representative value from one of the runs.
For instance, it may be given
[ScalarValue(page, 'a', 1), ScalarValue(page, 'a', 2)]
and it might produce
ListOfScalarValues(page, 'a', [1, 2])
"""
raise NotImplementedError()
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values,
group_by_name_suffix=False):
"""Combines the provided values into a single compound value.
When a full pageset runs, a single value_name will usually end up getting
collected for multiple pages. For instance, we may end up with
[ScalarValue(page1, 'a', 1),
ScalarValue(page2, 'a', 2)]
This function takes in the values of the same name, but across multiple
pages, and produces a single summary result value. In this instance, it
could produce a ScalarValue(None, 'a', 1.5) to indicate averaging, or even
ListOfScalarValues(None, 'a', [1, 2]) if concatenated output was desired.
Some results are so specific to a page that they make no sense when
aggregated across pages. If merging values of this type across pages is
non-sensical, this method may return None.
If group_by_name_suffix is True, then x.z and y.z are considered to be the
same value and are grouped together. If false, then x.z and y.z are
considered different.
"""
raise NotImplementedError()
def _IsImportantGivenOutputIntent(self, output_context):
if output_context == PER_PAGE_RESULT_OUTPUT_CONTEXT:
return False
elif output_context == COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT:
return self.important
elif output_context == SUMMARY_RESULT_OUTPUT_CONTEXT:
return self.important
def GetBuildbotDataType(self, output_context):
"""Returns the buildbot's equivalent data_type.
This should be one of the values accepted by perf_tests_results_helper.py.
"""
raise NotImplementedError()
def GetBuildbotValue(self):
"""Returns the buildbot's equivalent value."""
raise NotImplementedError()
def GetChartAndTraceNameForPerPageResult(self):
chart_name, _ = _ConvertValueNameToChartAndTraceName(self.name)
trace_name = self.page.display_name
return chart_name, trace_name
@property
def name_suffix(self):
"""Returns the string after a . in the name, or the full name otherwise."""
if '.' in self.name:
return self.name.split('.', 1)[1]
else:
return self.name
def GetChartAndTraceNameForComputedSummaryResult(
self, trace_tag):
chart_name, trace_name = (
_ConvertValueNameToChartAndTraceName(self.name))
if trace_tag:
return chart_name, trace_name + trace_tag
else:
return chart_name, trace_name
def GetRepresentativeNumber(self):
"""Gets a single scalar value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
def GetRepresentativeString(self):
"""Gets a string value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
@staticmethod
def GetJSONTypeName():
"""Gets the typename for serialization to JSON using AsDict."""
raise NotImplementedError()
def AsDict(self):
"""Pre-serializes a value to a dict for output as JSON."""
return self._AsDictImpl()
def _AsDictImpl(self):
d = {
'name': self.name,
'type': self.GetJSONTypeName(),
'units': self.units,
'important': self.important
}
if self.description:
d['description'] = self.description
if self.interaction_record:
d['interaction_record'] = self.interaction_record
if self.page:
d['page_id'] = self.page.id
return d
def AsDictWithoutBaseClassEntries(self):
full_dict = self.AsDict()
base_dict_keys = set(self._AsDictImpl().keys())
# Extracts only entries added by the subclass.
return dict([(k, v) for (k, v) in full_dict.iteritems()
if k not in base_dict_keys])
@staticmethod
def FromDict(value_dict, page_dict):
"""Produces a value from a value dict and a page dict.
Value dicts are produced by serialization to JSON, and must be accompanied
by a dict mapping page IDs to pages, also produced by serialization, in
order to be completely deserialized. If deserializing multiple values, use
ListOfValuesFromListOfDicts instead.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
return Value.ListOfValuesFromListOfDicts([value_dict], page_dict)[0]
@staticmethod
def ListOfValuesFromListOfDicts(value_dicts, page_dict):
"""Takes a list of value dicts to values.
Given a list of value dicts produced by AsDict, this method
deserializes the dicts given a dict mapping page IDs to pages.
This method performs memoization for deserializing a list of values
efficiently, where FromDict is meant to handle one-offs.
values: a list of value dicts produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
value_dir = os.path.dirname(__file__)
value_classes = discover.DiscoverClasses(
value_dir, util.GetTelemetryDir(),
Value, index_by_class_name=True)
value_json_types = dict((value_classes[x].GetJSONTypeName(), x) for x in
value_classes)
values = []
for value_dict in value_dicts:
value_class = value_classes[value_json_types[value_dict['type']]]
assert 'FromDict' in value_class.__dict__, \
'Subclass doesn\'t override FromDict'
values.append(value_class.FromDict(value_dict, page_dict))
return values
@staticmethod
def GetConstructorKwArgs(value_dict, page_dict):
"""Produces constructor arguments from a value dict and a page dict.
Takes a dict parsed from JSON and an index of pages and recovers the
keyword arguments to be passed to the constructor for deserializing the
dict.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
d = {
'name': value_dict['name'],
'units': value_dict['units']
}
description = value_dict.get('description', None)
if description:
d['description'] = description
else:
d['description'] = None
page_id = value_dict.get('page_id', None)
if page_id:
d['page'] = page_dict[int(page_id)]
else:
d['page'] = None
d['important'] = False
interaction_record = value_dict.get('interaction_record', None)
if interaction_record:
d['interaction_record'] = interaction_record
else:
d['interaction_record'] = None
return d
def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
"""Mangles a trace name plus optional chart name into a standard string.
A value might just be a bareword name, e.g. numPixels. In that case, its
chart may be None.
But, a value might also be intended for display with other values, in which
case the chart name indicates that grouping. So, you might have
screen.numPixels, screen.resolution, where chartName='screen'.
"""
assert trace_name != 'url', 'The name url cannot be used'
if chart_name:
return '%s.%s' % (chart_name, trace_name)
else:
assert '.' not in trace_name, ('Trace names cannot contain "." with an '
'empty chart_name since this is used to delimit chart_name.trace_name.')
return trace_name
def _ConvertValueNameToChartAndTraceName(value_name):
"""Converts a value_name into the equivalent chart-trace name pair.
Buildbot represents values by the measurement name and an optional trace name,
whereas telemetry represents values with a chart_name.trace_name convention,
where chart_name is optional. This convention is also used by chart_json.
This converts from the telemetry convention to the buildbot convention,
returning a 2-tuple (measurement_name, trace_name).
"""
if '.' in value_name:
return value_name.split('.', 1)
else:
return value_name, value_name
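# Worked examples of the naming convention implemented by the two helpers above
# (verifiable directly from the code; not part of the original module):
#
#   ValueNameFromTraceAndChartName('numPixels', 'screen')    -> 'screen.numPixels'
#   ValueNameFromTraceAndChartName('numPixels')              -> 'numPixels'
#   _ConvertValueNameToChartAndTraceName('screen.numPixels') -> ['screen', 'numPixels']
#   _ConvertValueNameToChartAndTraceName('numPixels')        -> ('numPixels', 'numPixels')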
| bsd-3-clause | 4,884,086,111,514,157,000 | 36.172619 | 80 | 0.703283 | false | 4.188464 | false | false | false |
loadimpact/loadimpact-server-metrics | li_metrics_agent_service.py | 1 | 2556 | #!/usr/bin/env python
# coding=utf-8
"""
Copyright 2012 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import li_metrics_agent
import threading
import win32service
import win32serviceutil
import win32event
import servicemanager
import sys
__author__ = "Load Impact"
__copyright__ = "Copyright 2012, Load Impact"
__license__ = "Apache License v2.0"
__version__ = "1.1.1"
__email__ = "[email protected]"
class AgentThread(threading.Thread):
def __init__(self):
super(AgentThread, self).__init__()
self.agent_loop = li_metrics_agent.AgentLoop()
def run(self):
self.agent_loop.run()
def stop(self):
self.agent_loop.stop()
class AgentService(win32serviceutil.ServiceFramework):
_svc_name_ = "LoadImpactServerMetricsAgent"
_svc_display_name_ = "Load Impact server metrics agent"
_svc_description_ = ("Agent for collecting and reporting server metrics "
"to loadimpact.com")
# init service framework
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
# listen for a stop request
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcDoRun(self):
#import servicemanager
rc = None
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self.agent = AgentThread()
self.agent.start()
# loop until the stop event fires
while rc != win32event.WAIT_OBJECT_0:
# block for 5 seconds and listen for a stop event
rc = win32event.WaitForSingleObject(self.hWaitStop, 1000)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.agent.stop()
self.agent.join()
win32event.SetEvent(self.hWaitStop)
if __name__ == '__main__':
if len(sys.argv) == 1:
servicemanager.Initialize()
servicemanager.PrepareToHostSingle(AgentService)
servicemanager.StartServiceCtrlDispatcher()
else:
win32serviceutil.HandleCommandLine(AgentService)
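# Usage notes (an addition, not part of the original file): HandleCommandLine
# wires up the standard pywin32 service verbs, so on a Windows host with pywin32
# installed the agent is typically managed with:
#
#   python li_metrics_agent_service.py install
#   python li_metrics_agent_service.py start
#   python li_metrics_agent_service.py stop
#   python li_metrics_agent_service.py remove
#
# Running the script with no arguments (the branch above) is what the Windows
# Service Control Manager does when it launches the installed service.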
| apache-2.0 | -4,460,066,151,057,241,600 | 30.170732 | 77 | 0.687402 | false | 3.849398 | false | false | false |
alexliyu/CDMSYSTEM | firewall.py | 1 | 1273 | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Main program entry point
@author:alex
@date:15-2-13
@time:上午11:44
@contact:[email protected]
"""
__author__ = 'alex'
import sys
import os
import ConfigParser
import uuid
from subprocess import Popen, PIPE
from utils.heartbeat import HeartBeatManager
from utils.tools import *
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
def init(ini_file=None):
cf = ConfigParser.ConfigParser()
try:
if ini_file:
cf.read(ini_file)
else:
cf.read(os.path.join(PROJECT_PATH, "config.ini"))
redis_host = cf.get("REDIS", "IP")
redis_port = cf.getint("REDIS", "PORT")
listener_host = cf.get("LISTENING", "IP")
listener_port = cf.getint("LISTENING", "PORT")
except Exception, e:
print e
sys.exit(1)
print_info("REDIS端口 %s:%d" % (redis_host, redis_port))
print_info("REDIS port %s:%d" % (redis_host, redis_port))
print_info("Heartbeat listener port %s:%d" % (listener_host, listener_port))
print_info("Starting whitelist service........")
server.run()
return True
if __name__ == "__main__":
if len(sys.argv) > 1:
init(sys.argv[1])
else:
init()
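# Example configuration (an addition; the section and key names are taken from the
# cf.get()/cf.getint() calls in init() above, the host/port values are placeholders):
#
#   [REDIS]
#   IP = 127.0.0.1
#   PORT = 6379
#
#   [LISTENING]
#   IP = 0.0.0.0
#   PORT = 9999
#
# By default init() reads config.ini next to this file; an alternative path can be
# passed on the command line (python firewall.py /path/to/other.ini).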
| mit | -4,446,706,550,151,536,000 | 21.62963 | 83 | 0.60311 | false | 2.909524 | false | false | false |
jadecastro/LTLMoP | src/lib/handlers/motionControl/RRTController.py | 1 | 37133 | #!/usr/bin/env python
"""
===================================================================
RRTController.py - Rapidly-Exploring Random Trees Motion Controller
===================================================================
Uses Rapidly-exploring Random Tree Algorithm to generate paths given the starting position and the goal point.
"""
from numpy import *
from __is_inside import *
import math
import sys,os
from scipy.linalg import norm
from numpy.matlib import zeros
import __is_inside
import time, sys,os
import scipy as Sci
import scipy.linalg
import Polygon, Polygon.IO
import Polygon.Utils as PolyUtils
import Polygon.Shapes as PolyShapes
from math import sqrt, fabs , pi
import random
import thread
import threading
# importing matplotlib to show the path if possible
try:
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import_matplotlib = True
except:
print "matplotlib is not imported. Plotting is disabled"
import_matplotlib = False
class motionControlHandler:
def __init__(self, proj, shared_data,robot_type,max_angle_goal,max_angle_overlap,plotting):
"""
Rapidly-Exploring Random Trees algorithm motion planning controller
robot_type (int): Which robot is used for execution. BasicSim is 1, ODE is 2, ROS is 3, Nao is 4, Pioneer is 5 (default=1)
max_angle_goal (float): The biggest difference in angle between the new node and the goal point that is acceptable. If it is bigger than the max_angle, the new node will not be connected to the goal point. The value should be within 0 to 6.28 = 2*pi. Default set to 6.28 = 2*pi (default=6.28)
max_angle_overlap (float): difference in angle allowed for two nodes overlapping each other. If you don't want any node overlapping with each other, put in 2*pi = 6.28. Default set to 1.57 = pi/2 (default=1.57)
plotting (bool): Check the box to enable plotting. Uncheck to disable plotting (default=True)
"""
self.system_print = False # for debugging: print extra info to the GUI
self.finish_print = False # set to True to print the original finished E and V before trimming the tree
self.orientation_print = False # show the orientation information of the robot
# Get references to handlers we'll need to communicate with
self.drive_handler = proj.h_instance['drive']
self.pose_handler = proj.h_instance['pose']
# Get information about regions
self.proj = proj
self.coordmap_map2lab = proj.coordmap_map2lab
self.coordmap_lab2map = proj.coordmap_lab2map
self.last_warning = 0
self.previous_next_reg = None
# Store the Rapidly-Exploring Random Tress Built
self.RRT_V = None # array containing all the points on the RRT Tree
self.RRT_E = None # array specifying the connection of points on the Tree
self.E_current_column = None # the current column on the tree (to find the current heading point)
self.Velocity = None
self.currentRegionPoly = None
self.nextRegionPoly = None
self.map = {}
self.all = Polygon.Polygon()
self.trans_matrix = mat([[0,1],[-1,0]]) # transformation matrix for find the normal to the vector
self.stuck_thres = 20 # threshold for changing the range of sampling omega
# Information about the robot (default set to ODE)
if robot_type not in [1,2,3,4,5]:
robot_type = 1
self.system = robot_type
# Information about maximum turning angle allowed from the latest node to the goal point
if max_angle_goal > 2*pi:
max_angle_goal = 2*pi
if max_angle_goal < 0:
max_angle_goal = 0
self.max_angle_allowed = max_angle_goal
# Information about maximum difference in angle allowed between two overlapping nodes
if max_angle_overlap > 2*pi:
max_angle_overlap = 2*pi
if max_angle_overlap < 0:
max_angle_overlap = 0
self.max_angle_overlap = max_angle_overlap
# Information about whether plotting is enabled.
if plotting is True and import_matplotlib == True:
self.plotting = True
else:
self.plotting = False
# Specify the size of the robot
# 1: basicSim; 2: ODE; 3: ROS 4: Nao; 5: Pioneer
# self.radius: radius of the robot
# self.timestep : number of linear segments to break the curve into for calculation of x, y position
# self.step_size : the length of each step for connection to goal point
# self.velocity : Velocity of the robot in m/s in control space (m/s)
if self.system == 1:
self.radius = 5
self.step_size = 25
self.timeStep = 10
self.velocity = 2 # 1.5
if self.system == 2:
self.radius = 5
self.step_size = 15
self.timeStep = 10
self.velocity = 2 # 1.5
elif self.system == 3:
self.ROSInitHandler = shared_data['ROS_INIT_HANDLER']
self.radius = self.ROSInitHandler.robotPhysicalWidth/2
self.step_size = self.radius*3 #0.2
self.timeStep = 8
self.velocity = self.radius/2 #0.08
elif self.system == 4:
self.radius = 0.15*1.2
self.step_size = 0.2 # set the step_size for points to be 1/5 of the norm (ORIGINAL = 0.4)
self.timeStep = 5
self.velocity = 0.05
elif self.system == 5:
self.radius = 0.15
self.step_size = 0.2 # set the step_size for points to be 1/5 of the norm (ORIGINAL = 0.4)
self.timeStep = 5
self.velocity = 0.05
# Operate_system (int): Which operating system is used for execution.
# Ubuntu and Mac is 1, Windows is 2
if sys.platform in ['win32', 'cygwin']:
self.operate_system = 2
else:
self.operate_system = 1
if self.system_print == True:
print "The operate_system is "+ str(self.operate_system)
# Generate polygon for regions in the map
for region in self.proj.rfi.regions:
self.map[region.name] = self.createRegionPolygon(region)
for n in range(len(region.holeList)): # no of holes
self.map[region.name] -= self.createRegionPolygon(region,n)
# Generate the boundary polygon
for regionName,regionPoly in self.map.iteritems():
self.all += regionPoly
# Start plotting if operating in Windows
if self.operate_system == 2 and self.plotting ==True:
# start using animation to plot the robot
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.scope = _Scope(self.ax,self)
thread.start_new_thread(self.jplot,())
def gotoRegion(self, current_reg, next_reg, last=False):
"""
If ``last`` is True, we will move to the center of the destination region.
Returns ``True`` if we've reached the destination region.
"""
if current_reg == next_reg and not last:
# No need to move!
self.drive_handler.setVelocity(0, 0) # So let's stop
return True
# Find our current configuration
pose = self.pose_handler.getPose()
# Check if Vicon has cut out
# TODO: this should probably go in posehandler?
if math.isnan(pose[2]):
print "WARNING: No Vicon data! Pausing."
self.drive_handler.setVelocity(0, 0) # So let's stop
time.sleep(1)
return False
###This part will be run when the robot goes to a new region, otherwise, the original tree will be used.
if not self.previous_next_reg == next_reg:
# Entered a new region. New tree should be formed.
self.nextRegionPoly = self.map[self.proj.rfi.regions[next_reg].name]
self.currentRegionPoly = self.map[self.proj.rfi.regions[current_reg].name]
if self.system_print == True:
print "next Region is " + str(self.proj.rfi.regions[next_reg].name)
print "Current Region is " + str(self.proj.rfi.regions[current_reg].name)
#set to zero velocity before tree is generated
self.drive_handler.setVelocity(0, 0)
if last:
transFace = None
else:
# Determine the mid points on the faces connecting to the next region (one goal point will be picked among all the mid points later in buildTree)
transFace = None
q_gBundle = [[],[]] # list of goal points (midpoints of transition faces)
face_normal = [[],[]] # normal of the transition faces
for i in range(len(self.proj.rfi.transitions[current_reg][next_reg])):
pointArray_transface = [x for x in self.proj.rfi.transitions[current_reg][next_reg][i]]
transFace = asarray(map(self.coordmap_map2lab,pointArray_transface))
bundle_x = (transFace[0,0] +transFace[1,0])/2 #mid-point coordinate x
bundle_y = (transFace[0,1] +transFace[1,1])/2 #mid-point coordinate y
q_gBundle = hstack((q_gBundle,vstack((bundle_x,bundle_y))))
#find the normal vector to the face
face = transFace[0,:] - transFace[1,:]
distance_face = norm(face)
normal = face/distance_face * self.trans_matrix
face_normal = hstack((face_normal,vstack((normal[0,0],normal[0,1]))))
if transFace is None:
print "ERROR: Unable to find transition face between regions %s and %s. Please check the decomposition (try viewing projectname_decomposed.regions in RegionEditor or a text editor)." % (self.proj.rfi.regions[current_reg].name, self.proj.rfi.regions[next_reg].name)
# Run algorithm to build the Rapid-Exploring Random Trees
self.RRT_V = None
self.RRT_E = None
# For plotting
if self.operate_system == 2:
if self.plotting == True:
self.ax.cla()
else:
self.ax = None
else:
self.ax = None
if self.operate_system == 1 and self.plotting == True:
plt.cla()
self.plotMap(self.map)
plt.plot(pose[0],pose[1],'ko')
self.RRT_V,self.RRT_E,self.E_current_column = self.buildTree(\
[pose[0], pose[1]],pose[2],self.currentRegionPoly, self.nextRegionPoly,q_gBundle,face_normal)
"""
# map the lab coordinates back to pixels
V_tosend = array(mat(self.RRT_V[1:,:])).T
V_tosend = map(self.coordmap_lab2map, V_tosend)
V_tosend = mat(V_tosend).T
s = 'RRT:E'+"["+str(list(self.RRT_E[0]))+","+str(list(self.RRT_E[1]))+"]"+':V'+"["+str(list(self.RRT_V[0]))+","+str(list(V_tosend[0]))+","+str(list(V_tosend[1]))+"]"+':T'+"["+str(list(q_gBundle[0]))+","+str(list(q_gBundle[1]))+"]"
#print s
"""
# Run algorithm to find a velocity vector (global frame) to take the robot to the next region
self.Velocity = self.getVelocity([pose[0], pose[1]], self.RRT_V,self.RRT_E)
#self.Node = self.getNode([pose[0], pose[1]], self.RRT_V,self.RRT_E)
self.previous_next_reg = next_reg
# Pass this desired velocity on to the drive handler
self.drive_handler.setVelocity(self.Velocity[0,0], self.Velocity[1,0], pose[2])
#self.drive_handler.setVelocity(self.Node[0,0], self.Node[1,0], pose[2])
RobotPoly = Polygon.Shapes.Circle(self.radius,(pose[0],pose[1]))
# check if robot is inside the current region
departed = not self.currentRegionPoly.overlaps(RobotPoly)
arrived = self.nextRegionPoly.covers(RobotPoly)
if departed and (not arrived) and (time.time()-self.last_warning) > 0.5:
# Figure out what region we think we stumbled into
for r in self.proj.rfi.regions:
pointArray = [self.coordmap_map2lab(x) for x in r.getPoints()]
vertices = mat(pointArray).T
if is_inside([pose[0], pose[1]], vertices):
print "I think I'm in " + r.name
print pose
break
self.last_warning = time.time()
#print "arrived:"+str(arrived)
return arrived
def createRegionPolygon(self,region,hole = None):
"""
This function takes in the region points and makes them into a Polygon.
"""
if hole == None:
pointArray = [x for x in region.getPoints()]
else:
pointArray = [x for x in region.getPoints(hole_id = hole)]
pointArray = map(self.coordmap_map2lab, pointArray)
regionPoints = [(pt[0],pt[1]) for pt in pointArray]
formedPolygon= Polygon.Polygon(regionPoints)
return formedPolygon
def getVelocity(self,p, V, E, last=False):
"""
This function calculates the velocity for the robot with RRT.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of nodes on the tree)
V = points of the tree (2 x No. of vertices)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
#else:
# dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- vstack((V[1,E[0,self.E_current_column]],V[2,E[0,self.E_current_column]]))
Vel = zeros([2,1])
Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Vel
def getNode(self,p, V, E, last=False):
"""
This function returns the next node on the tree for the robot to head toward.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of nodes on the tree)
V = points of the tree (2 x No. of vertices)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
Node = zeros([2,1])
Node[0,0] = V[1,E[1,self.E_current_column]]
Node[1,0] = V[2,E[1,self.E_current_column]]
#Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Node
def buildTree(self,p,theta,regionPoly,nextRegionPoly,q_gBundle,face_normal, last=False):
"""
This function builds the RRT tree.
p : x,y position of the robot
theta : current orientation of the robot
regionPoly : current region polygon
nextRegionPoly : next region polygon
q_gBundle : coordinates of q_goals that the robot can reach
face_normal : the normal vector of each face corresponding to each goal point in q_gBundle
"""
q_init = mat(p).T
V = vstack((0,q_init))
theta = self.orientation_bound(theta)
V_theta = array([theta])
#!!! CONTROL SPACE: generate a list of omega for random sampling
omegaLowerBound = -math.pi/20 # lower bound for the value of omega
omegaUpperBound = math.pi/20 # upper bound for the value of omega
omegaNoOfSteps = 20
self.omega_range = linspace(omegaLowerBound,omegaUpperBound,omegaNoOfSteps)
self.omega_range_escape = linspace(omegaLowerBound*4,omegaUpperBound*4,omegaNoOfSteps*4) # range used when stuck > stuck_thres
regionPolyOld = Polygon.Polygon(regionPoly)
regionPoly += PolyShapes.Circle(self.radius*2.5,(q_init[0,0],q_init[1,0]))
# check faces of the current region for goal points
E = [[],[]] # the tree matrix
Other = [[],[]]
path = False # if path formed then = 1
stuck = 0 # count for changing the range of sampling omega
append_after_latest_node = False # append new nodes to the latest node
if self.system_print == True:
print "plotting in buildTree is " + str(self.plotting)
if self.plotting == True:
if not plt.isinteractive():
plt.ion()
plt.hold(True)
while not path:
#step -1: try connection to q_goal (generate path to goal)
i = 0
if self.system_print == True:
print "Try Connection to the goal points"
# pushing possible q_goals into the current region (ensure path is covered by the current region polygon)
q_pass = [[],[],[]]
q_pass_dist = []
q_gBundle = mat(q_gBundle)
face_normal = mat(face_normal)
while i < q_gBundle.shape[1]:
q_g_original = q_gBundle[:,i]
q_g = q_gBundle[:,i]+face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#q_g = q_gBundle[:,i]+(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
if not regionPolyOld.isInside(q_g[0],q_g[1]):
#q_g = q_gBundle[:,i]-(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
q_g = q_gBundle[:,i]-face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#forming polygon for path checking
EdgePolyGoal = PolyShapes.Circle(self.radius,(q_g[0,0],q_g[1,0])) + PolyShapes.Circle(self.radius,(V[1,shape(V)[1]-1],V[2:,shape(V)[1]-1]))
EdgePolyGoal = PolyUtils.convexHull(EdgePolyGoal)
dist = norm(q_g - V[1:,shape(V)[1]-1])
#check connection to goal
connect_goal = regionPoly.covers(EdgePolyGoal) #check coverage of path from new point to goal
# compare orientation difference
thetaPrev = V_theta[shape(V)[1]-1]
theta_orientation = abs(arctan((q_g[1,0]- V[2,shape(V)[1]-1])/(q_g[0,0]- V[1,shape(V)[1]-1])))
if q_g[1,0] > V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: # second quadrant
theta_orientation = pi - theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # first quadrant
theta_orientation = theta_orientation
elif q_g[1,0] < V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: #third quadrant
theta_orientation = pi + theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # fourth quadrant
theta_orientation = 2*pi - theta_orientation
# check the angle between vector(new goal to goal_original ) and vector( latest node to new goal)
Goal_to_GoalOriginal = q_g_original - q_g
LatestNode_to_Goal = q_g - V[1:,shape(V)[1]-1]
Angle_Goal_LatestNode= arccos(vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))/norm(Goal_to_GoalOriginal)/norm(LatestNode_to_Goal))
# if connection to goal can be established and the max change in orientation of the robot is smaller than max_angle, tree is said to be completed.
if self.orientation_print == True:
print "theta_orientation is " + str(theta_orientation)
print "thetaPrev is " + str(thetaPrev)
print "(theta_orientation - thetaPrev) is " + str(abs(theta_orientation - thetaPrev))
print "self.max_angle_allowed is " + str(self.max_angle_allowed)
print "abs(theta_orientation - thetaPrev) < self.max_angle_allowed" + str(abs(theta_orientation - thetaPrev) < self.max_angle_allowed)
print "Goal_to_GoalOriginal: " + str( array(Goal_to_GoalOriginal)) + "; LatestNode_to_Goal: " + str( array(LatestNode_to_Goal))
print vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))
print "Angle_Goal_LatestNode" + str(Angle_Goal_LatestNode)
if connect_goal and (abs(theta_orientation - thetaPrev) < self.max_angle_allowed) and (Angle_Goal_LatestNode < self.max_angle_allowed):
path = True
q_pass = hstack((q_pass,vstack((i,q_g))))
q_pass_dist = hstack((q_pass_dist,dist))
i = i + 1
if self.system_print == True:
print "checked goal points"
self.E = E
self.V = V
# connection to goal has established
# Obtain the closest goal point that path can be formed.
if path:
if shape(q_pass_dist)[0] == 1:
cols = 0
else:
(cols,) = nonzero(q_pass_dist == min(q_pass_dist))
cols = asarray(cols)[0]
q_g = q_pass[1:,cols]
"""
q_g = q_g-(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*3*self.radius #org 3
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g+(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*6*self.radius #org 3
"""
if self.plotting == True :
if self.operate_system == 1:
plt.suptitle('Rapidly-exploring Random Tree', fontsize=12)
plt.xlabel('x')
plt.ylabel('y')
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
if shape(V)[1] <= 2:
self.ax.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
self.ax.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
# trim the path connecting current node to goal point into pieces if the path is too long now
numOfPoint = floor(norm(V[1:,shape(V)[1]-1]- q_g)/self.step_size)
if numOfPoint < 3:
numOfPoint = 3
x = linspace( V[1,shape(V)[1]-1], q_g[0,0], numOfPoint )
y = linspace( V[2,shape(V)[1]-1], q_g[1,0], numOfPoint )
for i in range(x.shape[0]):
if i != 0:
V = hstack((V,vstack((shape(V)[1],x[i],y[i]))))
E = hstack((E,vstack((shape(V)[1]-2,shape(V)[1]-1))))
#push the goal point to the next region
q_g = q_g+face_normal[:,q_pass[0,cols]]*3*self.radius ##original 2*self.radius
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g-face_normal[:,q_pass[0,cols]]*6*self.radius ##original 2*self.radius
V = hstack((V,vstack((shape(V)[1],q_g[0,0],q_g[1,0]))))
E = hstack((E,vstack((shape(V)[1]-2 ,shape(V)[1]-1))))
if self.plotting == True :
if self.operate_system == 1:
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
plt.figure(1).canvas.draw()
else:
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
self.ax.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
# path is not formed, try to append points onto the tree
if not path:
# connection_to_tree : connection to the tree is successful
if append_after_latest_node:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode(V,V_theta,E,Other,regionPoly,stuck, append_after_latest_node)
else:
connection_to_tree = False
while not connection_to_tree:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode (V,V_theta,E,Other,regionPoly,stuck)
if self.finish_print:
print 'Here is the V matrix:', V, 'Here is the E matrix:',E
print >>sys.__stdout__, 'Here is the V matrix:\n', V, '\nHere is the E matrix:\n',E
#B: trim to a single path
single = 0
while single == 0:
trim = 0
for j in range(shape(V)[1]-3):
(row,col) = nonzero(E == j+1)
if len(col) == 1:
E = delete(E, col[0], 1)
trim = 1
if trim == 0:
single = 1;
####print with matlib
if self.plotting ==True :
if self.operate_system == 1:
plt.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
plt.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
plt.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
self.ax.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
self.ax.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
self.ax.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
#return V, E, and the current node number on the tree
V = array(V)
return V, E, 0
def generateNewNode(self,V,V_theta,E,Other,regionPoly,stuck,append_after_latest_node =False):
"""
Generate a new node on the current tree matrix
V : the node matrix
V_theta : the orientation matrix
E : the tree matrix (or edge matrix)
Other : the matrix containing the velocity and angular velocity(omega) information
regionPoly: the polygon of current region
stuck : count on the number of times failed to generate new node
append_after_latest_node : append new nodes to the latest node (True only if the previous node addition is successful)
"""
if self.system_print == True:
print "In control space generating path,stuck = " + str(stuck)
connection_to_tree = False # True when connection to the tree is successful
if stuck > self.stuck_thres:
# increase the range of omega since a path cannot be generated
omega = random.choice(self.omega_range_escape)
else:
#!!!! CONTROL SPACE STEP 1 - generate random omega
omega = random.choice(self.omega_range)
#!!!! CONTROL SPACE STEP 2 - pick a random point on the tree
if append_after_latest_node:
tree_index = shape(V)[1]-1
else:
if random.choice([1,2]) == 1:
tree_index = random.choice(array(V[0])[0])
else:
tree_index = shape(V)[1]-1
xPrev = V[1,tree_index]
yPrev = V[2,tree_index]
thetaPrev = V_theta[tree_index]
j = 1
#!!!! CONTROL SPACE STEP 3 - Check path of the robot
path_robot = PolyShapes.Circle(self.radius,(xPrev,yPrev))
while j <= self.timeStep:
xOrg = xPrev
yOrg = yPrev
xPrev = xPrev + self.velocity/omega*(sin(omega* 1 + thetaPrev)-sin(thetaPrev))
yPrev = yPrev - self.velocity/omega*(cos(omega* 1 + thetaPrev)-cos(thetaPrev))
thetaPrev = omega* 1 + thetaPrev
path_robot = path_robot + PolyShapes.Circle(self.radius,(xPrev,yPrev))
j = j + 1
thetaPrev = self.orientation_bound(thetaPrev)
path_all = PolyUtils.convexHull(path_robot)
in_bound = regionPoly.covers(path_all)
"""
# plotting
if plotting == True:
self.plotPoly(path_all,'r',1)
"""
stuck = stuck + 1
if in_bound:
robot_new_node = PolyShapes.Circle(self.radius,(xPrev,yPrev))
# check how many nodes on the tree does the new node overlaps with
nodes_overlap_count = 0
for k in range(shape(V)[1]-1):
robot_old_node = PolyShapes.Circle(self.radius,(V[1,k],V[2,k]))
if robot_new_node.overlaps(robot_old_node):
if abs(thetaPrev - V_theta[k]) < self.max_angle_overlap:
nodes_overlap_count += 1
if nodes_overlap_count == 0 or (stuck > self.stuck_thres+1 and nodes_overlap_count < 2) or (stuck > self.stuck_thres+500):
if stuck > self.stuck_thres+1:
append_after_latest_node = False
if (stuck > self.stuck_thres+500):
stuck = 0
stuck = stuck - 20
# plotting
if self.plotting == True:
self.plotPoly(path_all,'b',1)
if self.system_print == True:
print "node connected"
V = hstack((V,vstack((shape(V)[1],xPrev,yPrev))))
V_theta = hstack((V_theta,thetaPrev))
E = hstack((E,vstack((tree_index ,shape(V)[1]-1))))
Other = hstack((Other,vstack((self.velocity,omega))))
##################### E should add omega and velocity
connection_to_tree = True
append_after_latest_node = True
else:
append_after_latest_node = False
if self.system_print == True:
print "node not connected. check goal point"
else:
append_after_latest_node = False
return V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree
def orientation_bound(self,theta):
"""
        make sure the returned angle is between 0 and 2*pi
"""
while theta > 2*pi or theta < 0:
if theta > 2*pi:
theta = theta - 2*pi
else:
theta = theta + 2*pi
return theta
def plotMap(self,mappedRegions):
"""
        Plot regions and obstacles with matplotlib.pyplot
        mappedRegions: dict mapping region names to their polygons
"""
#if not plt.isinteractive():
# plt.ion()
#plt.hold(True)
if self.operate_system == 1:
for regionName,regionPoly in mappedRegions.iteritems():
self.plotPoly(regionPoly,'k')
plt.figure(1).canvas.draw()
def plotPoly(self,c,string,w = 1):
"""
Plot polygons inside the boundary
        c = polygon to be plotted with matplotlib
        string = string that specifies the color
        w = line width used for plotting
"""
if bool(c):
for i in range(len(c)):
#toPlot = Polygon.Polygon(c.contour(i))
toPlot = Polygon.Polygon(c.contour(i)) & self.all
if bool(toPlot):
for j in range(len(toPlot)):
#BoundPolyPoints = asarray(PolyUtils.pointList(toPlot.contour(j)))
BoundPolyPoints = asarray(PolyUtils.pointList(Polygon.Polygon(toPlot.contour(j))))
if self.operate_system == 2:
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
self.ax.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
else:
plt.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
plt.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
plt.figure(1).canvas.draw()
def data_gen(self):
#self.ax.cla()
for regionName,regionPoly in self.map.iteritems():
self.plotPoly(regionPoly,'k')
"""
#for i in range(len(self.V)):
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
"""
pose = self.pose_handler.getPose()
self.ax.plot(pose[0],pose[1],'bo')
"""
self.ax.plot(self.q_g[0],self.q_g[1],'ro')
self.plotPoly(self.overlap,'g')
self.plotPoly(self.m_line,'b')
"""
yield(pose[0],pose[1])
"""
self.ax.plot(self.prev_follow[0],self.prev_follow[1],'ko')
"""
def jplot(self):
ani = animation.FuncAnimation(self.fig, self.scope.update, self.data_gen)
plt.show()
class _Scope:
def __init__(self, ax, motion, maxt=2, dt=0.02):
self.i = 0
self.ax = ax
self.line, = self.ax.plot(1)
self.ax.set_ylim(0, 1)
self.motion = motion
def update(self,data):
(data1) = self.motion.data_gen()
a = data1.next()
self.line.set_data(a)
self.ax.relim()
self.ax.autoscale()
return self.line,
| gpl-3.0 | -7,881,769,386,482,705,000 | 45.484655 | 300 | 0.529798 | false | 3.654463 | true | false | false |
conda/kapsel | examples/quote_api/quote.py | 1 | 4575 | from argparse import ArgumentParser
import falcon
import gunicorn.app.base
import json
import multiprocessing
import sys
# A Falcon resource that returns the same quote every time
class QuoteResource(object):
def on_get(self, req, resp):
"""Handles GET requests"""
quote = {'quote': 'I\'ve always been more interested in the future than in the past.', 'author': 'Grace Hopper'}
resp.body = json.dumps(quote)
# A Falcon resource that explains what this server is
class IndexResource(object):
def __init__(self, prefix):
self.prefix = prefix
def on_get(self, req, resp):
"""Handles GET requests"""
resp.body = """
<html>
<head>
<title>Quote API Server</title>
</head>
<body>
<p>This is a toy JSON API server example.</p>
<p>Make a GET request to <a href="%s/quote">%s/quote</a></p>
</body>
</html>
""" % (self.prefix, self.prefix)
resp.content_type = "text/html"
resp.status = falcon.HTTP_200
# A Falcon middleware to implement validation of the Host header in requests
class HostFilter(object):
def __init__(self, hosts):
# falcon strips the port out of req.host, even if it isn't 80.
# This is probably a bug in Falcon, so we work around it here.
self.hosts = [falcon.util.uri.parse_host(host)[0] for host in hosts]
def process_request(self, req, resp):
# req.host has the port stripped from what the browser
# sent us, even when it isn't 80, which is probably a bug
# in Falcon. We deal with that in __init__ by removing
# ports from self.hosts.
if req.host not in self.hosts:
print("Attempted request with Host header '%s' denied" % req.host)
raise falcon.HTTPForbidden("Bad Host header", "Cannot connect via the provided hostname")
# the gunicorn application
class QuoteApplication(gunicorn.app.base.BaseApplication):
def __init__(self, port, prefix, hosts):
assert prefix is not None
assert port is not None
self.application = falcon.API(middleware=HostFilter(hosts))
# add_route is pedantic about this
if prefix != '' and not prefix.startswith("/"):
prefix = "/" + prefix
self.application.add_route(prefix + '/quote', QuoteResource())
self.application.add_route(prefix + "/", IndexResource(prefix))
self.port = port
super(QuoteApplication, self).__init__()
print("Only connections via these hosts are allowed: " + repr(hosts))
print("Starting API server. Try http://localhost:%s%s" % (self.port, prefix + '/quote'))
def load_config(self):
# Note that --kapsel-host is NOT this address; it is NOT
# the address to listen on. --kapsel-host specifies the
# allowed values of the Host header in an http request,
# which is totally different. Another way to put it is
# that --kapsel-host is the public hostname:port browsers will
# be connecting to.
self.cfg.set('bind', '%s:%s' % ('0.0.0.0', self.port))
self.cfg.set('workers', (multiprocessing.cpu_count() * 2) + 1)
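        # (2 * cores) + 1 is gunicorn's commonly recommended worker count.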
def load(self):
return self.application
# arg parser for the standard kapsel options
parser = ArgumentParser(prog="quote-api", description="API server that returns a quote.")
parser.add_argument('--kapsel-host', action='append', help='Hostname to allow in requests')
parser.add_argument('--kapsel-no-browser', action='store_true', default=False, help='Disable opening in a browser')
parser.add_argument('--kapsel-use-xheaders',
action='store_true',
default=False,
help='Trust X-headers from reverse proxy')
parser.add_argument('--kapsel-url-prefix', action='store', default='', help='Prefix in front of urls')
parser.add_argument('--kapsel-port', action='store', default='8080', help='Port to listen on')
parser.add_argument('--kapsel-iframe-hosts',
action='append',
help='Space-separated hosts which can embed us in an iframe per our Content-Security-Policy')
if __name__ == '__main__':
# This app accepts but ignores --kapsel-no-browser because we never bother to open a browser,
# and accepts but ignores --kapsel-iframe-hosts since iframing an API makes no sense.
args = parser.parse_args(sys.argv[1:])
if not args.kapsel_host:
args.kapsel_host = ['localhost:' + args.kapsel_port]
QuoteApplication(port=args.kapsel_port, prefix=args.kapsel_url_prefix, hosts=args.kapsel_host).run()
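    # Example invocation (hypothetical values):
    #   python quote.py --kapsel-port 8080 --kapsel-url-prefix /api --kapsel-host localhost:8080
    # after which GET http://localhost:8080/api/quote returns the JSON quote.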
| bsd-3-clause | -1,188,982,769,034,152,700 | 41.757009 | 120 | 0.65071 | false | 3.762336 | false | false | false |
parpg/parpg | tools/utilities/convert_dialogue.py | 1 | 3506 | #!/usr/bin/env python
"""Convert YAML dialogue files from the Techdemo1 format to the new Techdemo2
format.
@author: M. George Hansen <[email protected]>
"""
import os.path
import sys
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import shutil
import logging
from optparse import OptionParser
from parpg.dialogueparsers import (OldYamlDialogueParser, YamlDialogueParser,
DialogueFormatError)
def backup_file(filepath):
dirpath = os.path.dirname(filepath)
filename = os.path.basename(filepath)
shutil.copy2(filepath, os.path.join(dirpath,
'.'.join([filename, 'backup'])))
def convert_dialogue_file(filepath, backup):
logging.info('processing {0}...'.format(filepath))
dummy, extension = os.path.splitext(filepath)
if (not extension == '.yaml'):
logging.info(' skipping {0}: not a yaml file'.format(filepath))
return 1
with file(filepath, 'r') as dialogue_file:
old_parser = OldYamlDialogueParser()
new_parser = YamlDialogueParser()
try:
dialogue = old_parser.load(dialogue_file)
except DialogueFormatError as error:
logging.info(
' unable to convert {0}: unrecognized dialogue format'
.format(filepath)
)
return 1
if (backup):
backup_file(filepath)
logging.info(' backed up {0} as {0}.backup'.format(filepath))
with file(filepath, 'w') as dialogue_file:
new_parser.dump(dialogue, dialogue_file)
logging.info(' successfully converted {0}!'.format(filepath))
usage_message = '''\
usage: convert_dialogue.py [-h] [-n] [-v] [-q] file_or_dir
Convert YAML dialogue files written in Techdemo1 syntax to the new Techdemo2
syntax.
If the file_or_dir argument is a directory, then this script will attempt to
convert all .yaml files in the directory that contain valid dialogues.
By default all processed files are first backed up by adding a ".backup" suffix
to the filename + extension. Backups can be disabled by passing the -n option
to the script.
'''
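# Example (assuming a directory of old-format dialogue files):
#   python convert_dialogue.py -v path/to/dialogue/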
def main(argv=sys.argv):
# Options.
backup = True
logging_level = logging.WARNING
parser = OptionParser(usage=usage_message,
description="Convert YAML dialogue files written "
"in Techdemo1 syntax to the new "
"Techdemo2 syntax.")
    parser.add_option('-n', '--no-backup', action='store_false', dest='backup', default=True)
parser.add_option('-v', '--verbose', action='count', default=0)
parser.add_option('-q', '--quiet', action='count', default=0)
    opts, args = parser.parse_args()
    backup = opts.backup
verbosity = opts.verbose * 10
quietness = - (opts.quiet * 10)
logging_level += (verbosity + quietness)
logging.basicConfig(format='%(message)s', level=logging_level)
try:
path = args[0]
except IndexError:
parser.print_help()
sys.exit(1)
if (os.path.isdir(path)):
for filepath in os.listdir(path):
qualified_filepath = os.path.join(path, filepath)
if (not os.path.isfile(qualified_filepath)):
continue
convert_dialogue_file(qualified_filepath, backup=backup)
else:
convert_dialogue_file(path, backup=backup)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,915,518,116,129,695,200 | 34.77551 | 79 | 0.617513 | false | 3.926092 | false | false | false |
hoaibang07/Webscrap | transcripture/sources/crawler_chuongthieu.py | 1 | 7017 | # -*- encoding: utf-8 -*-
import io
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import urllib2
import urlparse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import os.path
zenmatePath = "/home/hbc/.mozilla/firefox/yeyuaq0s.default/extensions/[email protected]"
ffprofile = webdriver.FirefoxProfile()
# ffprofile.set_preference("javascript.enabled", False)
# ffprofile.set_preference('permissions.default.image', 2)
# ffprofile.set_preference('permissions.default.stylesheet', 2)
# ffprofile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
ffprofile.add_extension(zenmatePath)
ffprofile.add_extension('/home/hbc/Downloads/quickjava-2.0.6-fx.xpi')
ffprofile.set_preference("thatoneguydotnet.QuickJava.curVersion", "2.0.6.1") ## Prevents loading the 'thank you for installing screen'
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Images", 2) ## Turns images off
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.AnimatedImage", 2) ## Turns animated images off
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.CSS", 2) ## CSS
# ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Cookies", 2) ## Cookies
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Flash", 2) ## Flash
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Java", 2) ## Java
# ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.JavaScript", 2) ## JavaScript
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Silverlight", 2) ## Silverlight
driver = webdriver.Firefox(ffprofile)
def _remove_div_vdx(soup):
for div in soup.find_all('div', class_='vidx'):
div.extract()
return soup
def get_data(urlchuong_list, i):
filename = 'urlsach/data/bosung/sach' + str(i) + '.txt'
ftmp = io.open(filename, 'w', encoding='utf-8')
try:
# hdrs = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Connection': 'keep-alive',
# 'Cookie': 'ipq_lip=20376774; ipq_set=1453874029; __atuvc=2%7C4; __utma=126044488.676620502.1453787537.1453787537.1453787537.1; __utmz=126044488.1453787537.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); PHPSESSID=ed3f4874b92a29b6ed036adfa5ad6fb3; ipcountry=us',
# 'Host': 'www.transcripture.com',
# 'Referer': 'http://www.transcripture.com/vietnamese-spanish-genesis-1.html',
# 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:43.0) Gecko/20100101 Firefox/43.0'
# }
count = 1
for urlchuong in urlchuong_list:
            print('Fetching chapter %d of book %d' % (count, i))
# urlchuong = 'http://www.transcripture.com/vietnamese-chinese-revelation-3.html'
# print urlchuong
# # create request
# req = urllib2.Request(urlchuong, headers=hdrs)
# # get response
# response = urllib2.urlopen(req)
# soup = BeautifulSoup(response.read())
# Load a page
driver.get(urlchuong)
# delay = 40 # seconds
# try:
# wait = WebDriverWait(driver, delay)
# path = '/html/body/center/div[1]/div[2]/div[4]/table/tbody/tr[2]/td[1]/div/div[1]/form[1]/select/option[66]'
# elem = driver.find_element_by_xpath(path)
# wait.until(EC.visibility_of(elem))
# print "Page is ready!"
# except TimeoutException:
# print "Loading took too much time!"
# #reload page
# body = driver.find_element_by_tag_name("body")
# body.send_keys(Keys.ESCAPE)
# body.send_keys(Keys.F5)
content = driver.page_source
soup = BeautifulSoup(content)
soup = _remove_div_vdx(soup)
# print soup
table_tag = soup.find_all('table', attrs={'width':'100%', 'cellspacing':'0'})[0]
tr_tags = table_tag.find_all('tr')
_len = len(tr_tags)
# in first tr tag:
h2_class = tr_tags[0].find_all('h2', class_='cphd')
ftmp.write(u'' + h2_class[0].get_text() + '|')
ftmp.write(u'' + h2_class[1].get_text() + '\n')
# print table_tag
for x in xrange(1,_len):
data = tr_tags[x].get_text('|')
# print data
# url_ec = url.encode('unicode','utf-8')
ftmp.write(u'' + data + '\n')
count = count + 1
# close file
ftmp.close()
except Exception, e:
print e
# close file
ftmp.close()
def check_numline(filename):
urlsach_list = []
urlsach_file = open(filename, 'r')
for line in urlsach_file:
urlsach_list.append(line.strip())
_len = len(urlsach_list)
return _len
def getsttchuongthieu(sachi):
list_stt = []
urlsach = 'urlsach/sach' + str(sachi) + '.txt'
    # check the number of lines in the book's URL file, which equals the number of chapters
    numline = check_numline(urlsach)
    fname = 'urlsach/data/partcomplete/sach' + str(sachi) + '.txt'
    # read the data from the partially downloaded book data file
    data = open(fname).read()
    # check whether each chapter number already appears in the book data file
    for i in xrange(1,numline + 1):
        key = str(i)
        # print('reached this point')
if key not in data:
list_stt.append(i)
return list_stt
def getlisturlchuongthieu(sachi):
list_chuongthieu = []
list_stt = getsttchuongthieu(sachi)
fname = 'urlsach/sach' + str(sachi) + '.txt'
fp = open(fname)
lines=fp.readlines()
for stt in list_stt:
list_chuongthieu.append(lines[stt-1])
return list_chuongthieu
def main():
for x in xrange(1,67):
        # check whether the book's data file exists in the partcomplete directory
f2name = 'urlsach/data/partcomplete/sach' + str(x) + '.txt'
if os.path.isfile(f2name):
list_urlchuongthieu = getlisturlchuongthieu(x)
get_data(list_urlchuongthieu, x)
if __name__ == '__main__':
# driver = webdriver.Firefox()
driver.get("about:blank")
# open new tab
# body = driver.find_element_by_tag_name("body")
# body.send_keys(Keys.CONTROL + 't')
# time.sleep(15)
    print('Enter any character to continue the program')
key = raw_input()
main()
# close the tab
driver.find_element_by_tag_name('body').send_keys(Keys.COMMAND + 'w')
driver.close()
# urlchuong_list = ['http://www.transcripture.com/vietnamese-chinese-exodus-1.html']
# get_data(urlchuong_list, 2) | gpl-2.0 | -8,064,880,054,819,888,000 | 35.936842 | 282 | 0.619353 | false | 3.148048 | false | false | false |
tonnrueter/pymca_devel | PyMca/EPDL97/GenerateEADLShellNonradiativeRates.py | 1 | 6235 | __doc__= "Generate specfiles with EADL97 shell transition probabilities"
import os
import sys
import EADLParser
Elements = ['H', 'He',
'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',
'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe',
'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se',
'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo',
'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn',
'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce',
'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy',
'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W',
'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb',
'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th',
'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf',
'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg',
'Bh', 'Hs', 'Mt']
def getHeader(filename):
text = '#F %s\n' % filename
text += '#U00 This file is a conversion to specfile format of \n'
text += '#U01 directly extracted EADL97 nonradiative transition probabilities.\n'
text += '#U02 EADL itself can be found at:\n'
text += '#U03 http://www-nds.iaea.org/epdl97/libsall.htm\n'
text += '#U04 The code used to generate this file has been:\n'
text += '#U05 %s\n' % os.path.basename(__file__)
text += '#U06\n'
text += '\n'
return text
shellList = EADLParser.getBaseShellList()
workingShells = ['K', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5']
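# A key such as "K-L2L3" labels a nonradiative (Auger/Coster-Kronig) transition:
# the initial K vacancy is filled by an L2 electron while an L3 electron is ejected.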
for shell in workingShells:
fname = "EADL97_%sShellNonradiativeRates.dat" % shell[0]
print("fname = %s" % fname)
if shell in ['K', 'L1', 'M1']:
if os.path.exists(fname):
os.remove(fname)
nscan = 0
outfile = open(fname, 'wb')
tmpText = getHeader(fname)
if sys.version < '3.0':
outfile.write(tmpText)
else:
outfile.write(tmpText.encode('UTF-8'))
nscan += 1
for i in range(1,101):
print("Z = %d, Element = %s" % (i, Elements[i-1]))
element = Elements[i-1]
ddict = {}
for key0 in shellList:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
if shell in [key0.split()[0], key1.split()[0]]:
continue
ddict[key] = [0.0, 0.0]
try:
ddict = EADLParser.getNonradiativeTransitionProbabilities(\
Elements.index(element)+1,
shell=shell)
print("%s Shell nonradiative emission probabilities " % shell)
except IOError:
#This happens when reading elements not presenting the transitions
pass
#continue
if i == 1:
#generate the labels
nTransitions = 0
tmpText = '#L Z TOTAL'
for key0 in workingShells:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
tmpText += ' %s' % (key)
nTransitions += 1
text = '#S %d %s-Shell nonradiative rates\n' % (nscan, shell)
text += '#N %d\n' % (2 + nTransitions)
text += tmpText + '\n'
else:
text = ''
# this loop calculates the totals, because it cannot be deduced from the subset
# transitions written in the file
total = 0.0
for key0 in shellList:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
total += ddict.get(key, [0.0, 0.0])[0]
text += '%d %.7E' % (i, total)
for key0 in workingShells:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
valueToWrite = ddict.get(key, [0.0, 0.0])[0]
if valueToWrite == 0.0:
text += ' 0.0'
else:
text += ' %.7E' % valueToWrite
text += '\n'
if sys.version < '3.0':
outfile.write(text)
else:
outfile.write(text.encode('UTF-8'))
if sys.version < '3.0':
outfile.write('\n')
else:
outfile.write('\n'.encode('UTF-8'))
if sys.version < '3.0':
outfile.write('\n')
else:
outfile.write('\n'.encode('UTF-8'))
outfile.close()
| gpl-2.0 | 4,806,273,130,956,564,000 | 42.601399 | 87 | 0.455654 | false | 3.479353 | false | false | false |
mangaki/mangaki | mangaki/mangaki/factories.py | 1 | 1591 | import factory
from factory.django import DjangoModelFactory, mute_signals
from .models import Profile, Work, Category
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class ProfileFactory(DjangoModelFactory):
class Meta:
model = Profile
user = factory.SubFactory('mangaki.factories.UserFactory', profile=None)
mal_username = factory.Faker('user_name')
is_shared = factory.Faker('boolean')
nsfw_ok = factory.Faker('boolean')
newsletter_ok = factory.Faker('boolean')
avatar_url = factory.LazyAttribute(lambda o: '{}{}.png'.format(factory.Faker('url').generate({}), o.mal_username))
@mute_signals(post_save)
class UserFactory(DjangoModelFactory):
class Meta:
model = User
username = factory.Faker('user_name')
email = factory.LazyAttribute(lambda o: '{}@mangaki.fr'.format(o.username))
profile = factory.RelatedFactory(ProfileFactory, 'user')
class WorkFactory(DjangoModelFactory):
class Meta:
model = Work
category = factory.Iterator(Category.objects.all())
@factory.iterator
def title():
qs = Work.objects.values_list('title', flat=True).all()[:20]
for title in qs:
yield title
nsfw = factory.Faker('boolean')
synopsis = factory.Faker('text')
def create_user(**kwargs):
return UserFactory.create(**kwargs)
def create_user_with_profile(**kwargs):
profile = kwargs.pop('profile')
user = create_user(**kwargs)
for key, value in profile.items():
setattr(user.profile, key, value)
return user
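# Example usage (values are illustrative):
#   user = create_user_with_profile(username='alice',
#                                   profile={'mal_username': 'alice', 'is_shared': True})
#   work = WorkFactory.create()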
| agpl-3.0 | -2,614,688,259,151,753,700 | 29.018868 | 118 | 0.688875 | false | 3.788095 | false | false | false |
Ebag333/Pyfa | eos/db/gamedata/effect.py | 1 | 2320 | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy import Column, String, Integer, Boolean, Table, ForeignKey
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import mapper, synonym, relation, deferred
from eos.db import gamedata_meta
from eos.types import Effect, EffectInfo
typeeffects_table = Table("dgmtypeeffects", gamedata_meta,
Column("typeID", Integer, ForeignKey("invtypes.typeID"), primary_key=True, index=True),
Column("effectID", Integer, ForeignKey("dgmeffects.effectID"), primary_key=True))
effects_table = Table("dgmeffects", gamedata_meta,
Column("effectID", Integer, primary_key=True),
Column("effectName", String),
Column("description", String),
Column("published", Boolean),
Column("isAssistance", Boolean),
Column("isOffensive", Boolean))
mapper(EffectInfo, effects_table,
properties={"ID": synonym("effectID"),
"name": synonym("effectName"),
"description": deferred(effects_table.c.description)})
mapper(Effect, typeeffects_table,
properties={"ID": synonym("effectID"),
"info": relation(EffectInfo, lazy=False)})
Effect.name = association_proxy("info", "name")
Effect.description = association_proxy("info", "description")
Effect.published = association_proxy("info", "published")
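# The association proxies above forward attribute access to the related
# EffectInfo row, so e.g. effect.name transparently resolves to the
# effectName column of that row.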
| gpl-3.0 | -5,116,466,407,913,318,000 | 45.4 | 113 | 0.625 | false | 4.452975 | false | false | false |
dwayne-randle-sr/various-snippets | centos/6/usr/local/bin/ps_mem.py | 1 | 17569 | #!/usr/bin/env python
# Try to determine how much RAM is currently being used per program.
# Note per _program_, not per process. So for example this script
# will report RAM used by all httpd process together. In detail it reports:
# sum(private RAM for program processes) + sum(Shared RAM for program processes)
# The shared RAM is problematic to calculate, and this script automatically
# selects the most accurate method available for your kernel.
# Licence: LGPLv2
# Author: [email protected]
# Source: http://www.pixelbeat.org/scripts/ps_mem.py
# V1.0 06 Jul 2005 Initial release
# V1.1 11 Aug 2006 root permission required for accuracy
# V1.2 08 Nov 2006 Add total to output
# Use KiB,MiB,... for units rather than K,M,...
# V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for
# 2.6 kernels up to and including 2.6.9.
# There it represented the total file backed extent
# V1.4 23 Nov 2006 Remove total from output as it's meaningless
# (the shared values overlap with other programs).
# Display the shared column. This extra info is
# useful, especially as it overlaps between programs.
# V1.5 26 Mar 2007 Remove redundant recursion from human()
# V1.6 05 Jun 2007 Also report number of processes with a given name.
# Patch from [email protected]
# V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which
# fixes some over-estimation and allows totalling.
# Enumerate the PIDs directly rather than using ps,
# which fixes the possible race between reading
# RSS with ps, and shared memory with this program.
# Also we can show non truncated command names.
# V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps
# as otherwise could match libraries causing a crash.
# Patch from [email protected]
# V1.9 20 Feb 2008 Fix invalid values reported when PSS is available.
# Reported by Andrey Borzenkov <[email protected]>
# V3.3 24 Jun 2014
# http://github.com/pixelb/scripts/commits/master/scripts/ps_mem.py
# Notes:
#
# All interpreted programs where the interpreter is started
# by the shell or with env, will be merged to the interpreter
# (as that's what's given to exec). For e.g. all python programs
# starting with "#!/usr/bin/env python" will be grouped under python.
# You can change this by using the full command line but that will
# have the undesirable effect of splitting up programs started with
# differing parameters (for e.g. mingetty tty[1-6]).
#
# For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels
# (rmap vm without smaps) it can not be accurately determined how many pages
# are shared between processes in general or within a program in our case:
# http://lkml.org/lkml/2005/7/6/250
# A warning is printed if overestimation is possible.
# In addition for 2.6 kernels up to 2.6.9 inclusive, the shared
# value in /proc/$pid/statm is the total file-backed extent of a process.
# We ignore that, introducing more overestimation, again printing a warning.
# Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows
# us to calculate a more accurate value for the total RAM used by programs.
#
# Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming
# they're the only programs that have the same /proc/$PID/smaps file for
# each instance. This will fail if there are multiple real instances of a
# program that then use CLONE_VM without CLONE_THREAD, or if a clone changes
# its memory map while we're checksumming each /proc/$PID/smaps.
#
# I don't take account of memory allocated for a program
# by other programs. For e.g. memory used in the X server for
# a program could be determined, but is not.
#
# FreeBSD is supported if linprocfs is mounted at /compat/linux/proc/
# FreeBSD 8.0 supports up to a level of Linux 2.6.16
import getopt
import time
import errno
import os
import sys
try:
# md5 module is deprecated on python 2.6
# so try the newer hashlib first
import hashlib
md5_new = hashlib.md5
except ImportError:
import md5
md5_new = md5.new
# The following exits cleanly on Ctrl-C or EPIPE
# while treating other exceptions as before.
def std_exceptions(etype, value, tb):
sys.excepthook = sys.__excepthook__
if issubclass(etype, KeyboardInterrupt):
pass
elif issubclass(etype, IOError) and value.errno == errno.EPIPE:
pass
else:
sys.__excepthook__(etype, value, tb)
sys.excepthook = std_exceptions
#
# Define some global variables
#
PAGESIZE = os.sysconf("SC_PAGE_SIZE") / 1024 #KiB
our_pid = os.getpid()
have_pss = 0
class Proc:
def __init__(self):
uname = os.uname()
if uname[0] == "FreeBSD":
self.proc = '/compat/linux/proc'
else:
self.proc = '/proc'
def path(self, *args):
return os.path.join(self.proc, *(str(a) for a in args))
def open(self, *args):
try:
return open(self.path(*args))
except (IOError, OSError):
val = sys.exc_info()[1]
if (val.errno == errno.ENOENT or # kernel thread or process gone
val.errno == errno.EPERM):
raise LookupError
raise
proc = Proc()
#
# Functions
#
def parse_options():
try:
long_options = ['split-args', 'help', 'total']
opts, args = getopt.getopt(sys.argv[1:], "shtp:w:", long_options)
except getopt.GetoptError:
sys.stderr.write(help())
sys.exit(3)
# ps_mem.py options
split_args = False
pids_to_show = None
watch = None
only_total = False
for o, a in opts:
if o in ('-s', '--split-args'):
split_args = True
if o in ('-t', '--total'):
only_total = True
if o in ('-h', '--help'):
sys.stdout.write(help())
sys.exit(0)
if o in ('-p',):
try:
pids_to_show = [int(x) for x in a.split(',')]
except:
sys.stderr.write(help())
sys.exit(3)
if o in ('-w',):
try:
watch = int(a)
except:
sys.stderr.write(help())
sys.exit(3)
return (split_args, pids_to_show, watch, only_total)
def help():
help_msg = 'ps_mem.py - Show process memory usage\n'\
'\n'\
'-h Show this help\n'\
'-w <N> Measure and show process memory every N seconds\n'\
'-p <pid>[,pid2,...pidN] Only show memory usage PIDs in the specified list\n' \
'-s, --split-args Show and separate by, all command line arguments\n' \
'-t, --total Show only the total value\n'
return help_msg
#(major,minor,release)
def kernel_ver():
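    # e.g. an osrelease of "3.10.0-957.el7.x86_64" is parsed to (3, 10, 0);
    # a missing third component is treated as 0.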
kv = proc.open('sys/kernel/osrelease').readline().split(".")[:3]
last = len(kv)
if last == 2:
kv.append('0')
last -= 1
while last > 0:
for char in "-_":
kv[last] = kv[last].split(char)[0]
try:
int(kv[last])
except:
kv[last] = 0
last -= 1
return (int(kv[0]), int(kv[1]), int(kv[2]))
#return Private,Shared
#Note shared is always a subset of rss (trs is not always)
def getMemStats(pid):
global have_pss
mem_id = pid #unique
Private_lines = []
Shared_lines = []
Pss_lines = []
Rss = (int(proc.open(pid, 'statm').readline().split()[1])
* PAGESIZE)
if os.path.exists(proc.path(pid, 'smaps')): #stat
digester = md5_new()
for line in proc.open(pid, 'smaps').readlines(): #open
# Note we checksum smaps as maps is usually but
# not always different for separate processes.
digester.update(line.encode('latin1'))
if line.startswith("Shared"):
Shared_lines.append(line)
elif line.startswith("Private"):
Private_lines.append(line)
elif line.startswith("Pss"):
have_pss = 1
Pss_lines.append(line)
mem_id = digester.hexdigest()
Shared = sum([int(line.split()[1]) for line in Shared_lines])
Private = sum([int(line.split()[1]) for line in Private_lines])
#Note Shared + Private = Rss above
#The Rss in smaps includes video card mem etc.
if have_pss:
        pss_adjust = 0.5 # add 0.5KiB as this is the avg error due to truncation
Pss = sum([float(line.split()[1])+pss_adjust for line in Pss_lines])
Shared = Pss - Private
elif (2,6,1) <= kernel_ver() <= (2,6,9):
Shared = 0 #lots of overestimation, but what can we do?
Private = Rss
else:
Shared = int(proc.open(pid, 'statm').readline().split()[2])
Shared *= PAGESIZE
Private = Rss - Shared
return (Private, Shared, mem_id)
def getCmdName(pid, split_args):
cmdline = proc.open(pid, 'cmdline').read().split("\0")
if cmdline[-1] == '' and len(cmdline) > 1:
cmdline = cmdline[:-1]
path = proc.path(pid, 'exe')
try:
path = os.readlink(path)
# Some symlink targets were seen to contain NULs on RHEL 5 at least
# https://github.com/pixelb/scripts/pull/10, so take string up to NUL
path = path.split('\0')[0]
except OSError:
val = sys.exc_info()[1]
if (val.errno == errno.ENOENT or # either kernel thread or process gone
val.errno == errno.EPERM):
raise LookupError
raise
if split_args:
return " ".join(cmdline)
if path.endswith(" (deleted)"):
path = path[:-10]
if os.path.exists(path):
path += " [updated]"
else:
#The path could be have prelink stuff so try cmdline
#which might have the full path present. This helped for:
#/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted)
if os.path.exists(cmdline[0]):
path = cmdline[0] + " [updated]"
else:
path += " [deleted]"
exe = os.path.basename(path)
cmd = proc.open(pid, 'status').readline()[6:-1]
if exe.startswith(cmd):
cmd = exe #show non truncated version
#Note because we show the non truncated name
#one can have separated programs as follows:
#584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash)
# 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin
return cmd
#The following matches "du -h" output
#see also human.py
def human(num, power="Ki"):
powers = ["Ki", "Mi", "Gi", "Ti"]
while num >= 1000: #4 digits
num /= 1024.0
power = powers[powers.index(power)+1]
return "%.1f %s" % (num, power)
def cmd_with_count(cmd, count):
if count > 1:
return "%s (%u)" % (cmd, count)
else:
return cmd
#Warn of possible inaccuracies
#2 = accurate & can total
#1 = accurate only considering each process in isolation
#0 = some shared mem not reported
#-1= all shared mem not reported
def shared_val_accuracy():
"""http://wiki.apache.org/spamassassin/TopSharedMemoryBug"""
kv = kernel_ver()
if kv[:2] == (2,4):
if proc.open('meminfo').read().find("Inact_") == -1:
return 1
return 0
elif kv[:2] == (2,6):
pid = os.getpid()
if os.path.exists(proc.path(pid, 'smaps')):
if proc.open(pid, 'smaps').read().find("Pss:")!=-1:
return 2
else:
return 1
if (2,6,1) <= kv <= (2,6,9):
return -1
return 0
elif kv[0] > 2:
return 2
else:
return 1
def show_shared_val_accuracy( possible_inacc, only_total=False ):
level = ("Warning","Error")[only_total]
if possible_inacc == -1:
sys.stderr.write(
"%s: Shared memory is not reported by this system.\n" % level
)
sys.stderr.write(
"Values reported will be too large, and totals are not reported\n"
)
elif possible_inacc == 0:
sys.stderr.write(
"%s: Shared memory is not reported accurately by this system.\n" % level
)
sys.stderr.write(
"Values reported could be too large, and totals are not reported\n"
)
elif possible_inacc == 1:
sys.stderr.write(
"%s: Shared memory is slightly over-estimated by this system\n"
"for each program, so totals are not reported.\n" % level
)
sys.stderr.close()
if only_total and possible_inacc != 2:
sys.exit(1)
def get_memory_usage( pids_to_show, split_args, include_self=False, only_self=False ):
cmds = {}
shareds = {}
mem_ids = {}
count = {}
for pid in os.listdir(proc.path('')):
if not pid.isdigit():
continue
pid = int(pid)
# Some filters
if only_self and pid != our_pid:
continue
if pid == our_pid and not include_self:
continue
if pids_to_show is not None and pid not in pids_to_show:
continue
try:
cmd = getCmdName(pid, split_args)
except LookupError:
#operation not permitted
#kernel threads don't have exe links or
#process gone
continue
try:
private, shared, mem_id = getMemStats(pid)
except RuntimeError:
continue #process gone
if shareds.get(cmd):
if have_pss: #add shared portion of PSS together
shareds[cmd] += shared
elif shareds[cmd] < shared: #just take largest shared val
shareds[cmd] = shared
else:
shareds[cmd] = shared
cmds[cmd] = cmds.setdefault(cmd, 0) + private
if cmd in count:
count[cmd] += 1
else:
count[cmd] = 1
mem_ids.setdefault(cmd, {}).update({mem_id:None})
#Add shared mem for each program
total = 0
for cmd in cmds:
cmd_count = count[cmd]
if len(mem_ids[cmd]) == 1 and cmd_count > 1:
# Assume this program is using CLONE_VM without CLONE_THREAD
# so only account for one of the processes
cmds[cmd] /= cmd_count
if have_pss:
shareds[cmd] /= cmd_count
cmds[cmd] = cmds[cmd] + shareds[cmd]
total += cmds[cmd] #valid if PSS available
sorted_cmds = sorted(cmds.items(), key=lambda x:x[1])
sorted_cmds = [x for x in sorted_cmds if x[1]]
return sorted_cmds, shareds, count, total
def print_header():
sys.stdout.write(" Private + Shared = RAM used\tProgram\n\n")
def print_memory_usage(sorted_cmds, shareds, count, total):
for cmd in sorted_cmds:
sys.stdout.write("%8sB + %8sB = %8sB\t%s\n" %
(human(cmd[1]-shareds[cmd[0]]),
human(shareds[cmd[0]]), human(cmd[1]),
cmd_with_count(cmd[0], count[cmd[0]])))
if have_pss:
sys.stdout.write("%s\n%s%8sB\n%s\n" %
("-" * 33, " " * 24, human(total), "=" * 33))
def verify_environment():
if os.geteuid() != 0:
sys.stderr.write("Sorry, root permission required.\n")
if __name__ == '__main__':
sys.stderr.close()
sys.exit(1)
try:
kv = kernel_ver()
except (IOError, OSError):
val = sys.exc_info()[1]
if val.errno == errno.ENOENT:
sys.stderr.write(
"Couldn't access " + proc.path('') + "\n"
"Only GNU/Linux and FreeBSD (with linprocfs) are supported\n")
sys.exit(2)
else:
raise
if __name__ == '__main__':
verify_environment()
split_args, pids_to_show, watch, only_total = parse_options()
if not only_total:
print_header()
if watch is not None:
try:
sorted_cmds = True
while sorted_cmds:
sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
if only_total and have_pss:
sys.stdout.write(human(total).replace(' ','')+'B\n')
elif not only_total:
print_memory_usage(sorted_cmds, shareds, count, total)
time.sleep(watch)
else:
sys.stdout.write('Process does not exist anymore.\n')
except KeyboardInterrupt:
pass
else:
# This is the default behavior
sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
if only_total and have_pss:
sys.stdout.write(human(total).replace(' ','')+'B\n')
elif not only_total:
print_memory_usage(sorted_cmds, shareds, count, total)
# We must close explicitly, so that any EPIPE exception
# is handled by our excepthook, rather than the default
# one which is reenabled after this script finishes.
sys.stdout.close()
vm_accuracy = shared_val_accuracy()
show_shared_val_accuracy( vm_accuracy, only_total )
| gpl-3.0 | -8,928,246,836,031,157,000 | 34.855102 | 97 | 0.575047 | false | 3.679372 | false | false | false |
Azure/azure-sdk-for-python | sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/aio/operations/_test_summaries_operations.py | 1 | 9021 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TestSummariesOperations:
"""TestSummariesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~test_base.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
test_base_account_name: str,
**kwargs: Any
) -> AsyncIterable["_models.TestSummaryListResult"]:
"""Lists the Test Summaries of all the packages under a Test Base Account.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TestSummaryListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~test_base.models.TestSummaryListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TestSummaryListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('TestSummaryListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/testSummaries'} # type: ignore
async def get(
self,
resource_group_name: str,
test_base_account_name: str,
test_summary_name: str,
**kwargs: Any
) -> "_models.TestSummaryResource":
"""Gets a Test Summary with specific name from all the Test Summaries of all the packages under a
Test Base Account.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:param test_summary_name: The name of the Test Summary.
:type test_summary_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TestSummaryResource, or the result of cls(response)
:rtype: ~test_base.models.TestSummaryResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TestSummaryResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
'testSummaryName': self._serialize.url("test_summary_name", test_summary_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TestSummaryResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/testSummaries/{testSummaryName}'} # type: ignore
| mit | -5,264,327,462,107,151,000 | 48.839779 | 212 | 0.652478 | false | 4.452616 | true | false | false |
RalfJung/lilass | qt_frontend.py | 1 | 7267 | # DSL - easy Display Setup for Laptops
# Copyright (C) 2012-2015 Ralf Jung <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys, os
from screen import RelativeScreenPosition, ScreenSetup
try:
    # Be fine with PyQt5 not being installed
from PyQt5 import QtCore, QtWidgets, uic
class PositionSelection(QtWidgets.QDialog):
def __init__(self, situation):
# set up main window
super(PositionSelection, self).__init__()
self._situation = situation
uifile = os.path.join(os.path.dirname(__file__), 'qt_dialogue.ui')
uic.loadUi(uifile, self)
# fill relative position box
for pos in RelativeScreenPosition:
self.relPos.addItem(pos.text, pos)
# keep resolutions in sync when in mirror mode
def syncIfMirror(source, target):
def _slot(idx):
if self.isMirror:
target.setCurrentIndex(idx)
source.currentIndexChanged.connect(_slot)
syncIfMirror(self.intRes, self.extRes)
syncIfMirror(self.extRes, self.intRes)
# if situation has a previousSetup, use its values as initial state
if situation.previousSetup:
p = situation.previousSetup
self.intEnabled.setChecked(p.intResolution is not None)
self.extEnabled.setChecked(p.extResolution is not None)
if p.relPosition:
self.relPos.setCurrentIndex(p.relPosition.value - 1)
if p.extIsPrimary:
self.extPrimary.setChecked(True)
else:
self.intPrimary.setChecked(True)
# Pre-select the previous resolution
self._intDefaultRes = p.intResolution
self._extDefaultRes = p.extResolution
self._mirrorDefaultRes = p.intResolution if p.relPosition == RelativeScreenPosition.MIRROR else None # in case of a mirror, they would be the same anyway
else:
self._intDefaultRes = situation.internalConnector.getPreferredResolution()
self._extDefaultRes = situation.externalConnector.getPreferredResolution()
self._mirrorDefaultRes = None
# connect the update function
self.intEnabled.toggled.connect(self.updateEnabledControls)
self.extEnabled.toggled.connect(self.updateEnabledControls)
self.relPos.currentIndexChanged.connect(self.updateEnabledControls)
# make sure we are in a correct state
self.updateEnabledControls()
def getRelativeScreenPosition(self):
idx = self.relPos.currentIndex()
return self.relPos.itemData(idx)
def fillResolutionBox(self, box, resolutions, select = None):
# if the count did not change, update in-place (this avoids flicker)
if box.count() == len(resolutions):
for idx, res in enumerate(resolutions):
box.setItemText(idx, str(res))
box.setItemData(idx, res)
if res == select:
box.setCurrentIndex(idx)
else:
# first clear it
while box.count() > 0:
box.removeItem(0)
# then fill it
for res in resolutions:
box.addItem(str(res), res)
if res == select:
box.setCurrentIndex(box.count() - 1) # select the most recently added one
def updateEnabledControls(self):
intEnabled = self.intEnabled.isChecked()
extEnabled = self.extEnabled.isChecked()
bothEnabled = intEnabled and extEnabled
self.isMirror = bothEnabled and self.getRelativeScreenPosition() == RelativeScreenPosition.MIRROR # only if both are enabled, we can really mirror
# configure screen controls
self.intRes.setEnabled(intEnabled)
self.intPrimary.setEnabled(intEnabled and not self.isMirror)
self.extRes.setEnabled(extEnabled)
self.extPrimary.setEnabled(extEnabled and not self.isMirror)
if not intEnabled and extEnabled:
self.extPrimary.setChecked(True)
elif not extEnabled and intEnabled:
self.intPrimary.setChecked(True)
# which resolutions do we offer?
if self.isMirror:
commonRes = self._situation.commonResolutions()
self.fillResolutionBox(self.intRes, commonRes, select = self._mirrorDefaultRes)
self.fillResolutionBox(self.extRes, commonRes, select = self._mirrorDefaultRes)
self.intRes.setCurrentIndex(self.extRes.currentIndex())
else:
self.fillResolutionBox(self.intRes, self._situation.internalConnector.getResolutionList(), select = self._intDefaultRes)
self.fillResolutionBox(self.extRes, self._situation.externalConnector.getResolutionList(), select = self._extDefaultRes)
# configure position control
self.posGroup.setEnabled(bothEnabled)
self.posLabel1.setEnabled(bothEnabled)
self.posLabel2.setEnabled(bothEnabled)
self.relPos.setEnabled(bothEnabled)
# avoid having no screen
self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(intEnabled or extEnabled)
def run(self):
self.exec_()
if not self.result(): return None
intRes = self.intRes.itemData(self.intRes.currentIndex()) if self.intEnabled.isChecked() else None
extRes = self.extRes.itemData(self.extRes.currentIndex()) if self.extEnabled.isChecked() else None
return ScreenSetup(intRes, extRes, self.getRelativeScreenPosition(), self.extPrimary.isChecked())
except ImportError:
pass
# Qt frontend
class QtFrontend:
def __init__(self):
from PyQt5 import QtWidgets
self.app = QtWidgets.QApplication(sys.argv)
print("Qt loaded")
def error(self, message):
from PyQt5 import QtWidgets
QtWidgets.QMessageBox.critical(None, 'Fatal error', message)
def setup(self, situation):
return PositionSelection(situation).run()
@staticmethod
def isAvailable():
try:
import PyQt5
return True
except ImportError:
return False
| gpl-2.0 | -1,890,211,412,476,436,700 | 45.583333 | 169 | 0.624329 | false | 4.64345 | false | false | false |
BichenWuUCB/squeezeDet | src/dataset/kitti.py | 1 | 10284 | # Author: Bichen Wu ([email protected]) 08/25/2016
"""Image data base class for kitti"""
import cv2
import os
import numpy as np
import subprocess
from dataset.imdb import imdb
from utils.util import bbox_transform_inv, batch_iou
class kitti(imdb):
def __init__(self, image_set, data_path, mc):
imdb.__init__(self, 'kitti_'+image_set, mc)
self._image_set = image_set
self._data_root_path = data_path
self._image_path = os.path.join(self._data_root_path, 'training', 'image_2')
self._label_path = os.path.join(self._data_root_path, 'training', 'label_2')
self._classes = self.mc.CLASS_NAMES
self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))
# a list of string indices of images in the directory
self._image_idx = self._load_image_set_idx()
# a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
# the image width and height
self._rois = self._load_kitti_annotation()
## batch reader ##
self._perm_idx = None
self._cur_idx = 0
# TODO(bichen): add a random seed as parameter
self._shuffle_image_idx()
self._eval_tool = './src/dataset/kitti-eval/cpp/evaluate_object'
def _load_image_set_idx(self):
image_set_file = os.path.join(
self._data_root_path, 'ImageSets', self._image_set+'.txt')
assert os.path.exists(image_set_file), \
'File does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_idx = [x.strip() for x in f.readlines()]
return image_idx
def _image_path_at(self, idx):
image_path = os.path.join(self._image_path, idx+'.png')
assert os.path.exists(image_path), \
'Image does not exist: {}'.format(image_path)
return image_path
def _load_kitti_annotation(self):
def _get_obj_level(obj):
height = float(obj[7]) - float(obj[5]) + 1
truncation = float(obj[1])
occlusion = float(obj[2])
if height >= 40 and truncation <= 0.15 and occlusion <= 0:
return 1
elif height >= 25 and truncation <= 0.3 and occlusion <= 1:
return 2
elif height >= 25 and truncation <= 0.5 and occlusion <= 2:
return 3
else:
return 4
idx2annotation = {}
for index in self._image_idx:
filename = os.path.join(self._label_path, index+'.txt')
with open(filename, 'r') as f:
lines = f.readlines()
f.close()
bboxes = []
for line in lines:
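        # KITTI label columns: type, truncated, occluded, alpha,
        # bbox (xmin, ymin, xmax, ymax), 3D dimensions, 3D location, rotation_y.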
obj = line.strip().split(' ')
try:
cls = self._class_to_idx[obj[0].lower().strip()]
except:
continue
if self.mc.EXCLUDE_HARD_EXAMPLES and _get_obj_level(obj) > 3:
continue
xmin = float(obj[4])
ymin = float(obj[5])
xmax = float(obj[6])
ymax = float(obj[7])
assert xmin >= 0.0 and xmin <= xmax, \
'Invalid bounding box x-coord xmin {} or xmax {} at {}.txt' \
.format(xmin, xmax, index)
assert ymin >= 0.0 and ymin <= ymax, \
'Invalid bounding box y-coord ymin {} or ymax {} at {}.txt' \
.format(ymin, ymax, index)
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
bboxes.append([x, y, w, h, cls])
idx2annotation[index] = bboxes
return idx2annotation
def evaluate_detections(self, eval_dir, global_step, all_boxes):
"""Evaluate detection results.
Args:
eval_dir: directory to write evaluation logs
global_step: step of the checkpoint
all_boxes: all_boxes[cls][image] = N x 5 arrays of
[xmin, ymin, xmax, ymax, score]
Returns:
aps: array of average precisions.
names: class names corresponding to each ap
"""
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
if not os.path.isdir(det_file_dir):
os.makedirs(det_file_dir)
for im_idx, index in enumerate(self._image_idx):
filename = os.path.join(det_file_dir, index+'.txt')
with open(filename, 'wt') as f:
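        # Each line follows the KITTI label format: type, truncation, occlusion,
        # alpha, 2D bbox (xmin ymin xmax ymax), 3D dimensions, 3D location,
        # rotation_y and finally the detection score.  Fields that are not
        # predicted here are written as -1 or 0.0 placeholders.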
for cls_idx, cls in enumerate(self._classes):
dets = all_boxes[cls_idx][im_idx]
for k in xrange(len(dets)):
f.write(
'{:s} -1 -1 0.0 {:.2f} {:.2f} {:.2f} {:.2f} 0.0 0.0 0.0 0.0 0.0 '
'0.0 0.0 {:.3f}\n'.format(
cls.lower(), dets[k][0], dets[k][1], dets[k][2], dets[k][3],
dets[k][4])
)
cmd = self._eval_tool + ' ' \
+ os.path.join(self._data_root_path, 'training') + ' ' \
+ os.path.join(self._data_root_path, 'ImageSets',
self._image_set+'.txt') + ' ' \
+ os.path.dirname(det_file_dir) + ' ' + str(len(self._image_idx))
print('Running: {}'.format(cmd))
status = subprocess.call(cmd, shell=True)
aps = []
names = []
for cls in self._classes:
det_file_name = os.path.join(
os.path.dirname(det_file_dir), 'stats_{:s}_ap.txt'.format(cls))
if os.path.exists(det_file_name):
with open(det_file_name, 'r') as f:
lines = f.readlines()
assert len(lines) == 3, \
'Line number of {} should be 3'.format(det_file_name)
aps.append(float(lines[0].split('=')[1].strip()))
aps.append(float(lines[1].split('=')[1].strip()))
aps.append(float(lines[2].split('=')[1].strip()))
else:
aps.extend([0.0, 0.0, 0.0])
names.append(cls+'_easy')
names.append(cls+'_medium')
names.append(cls+'_hard')
return aps, names
def do_detection_analysis_in_eval(self, eval_dir, global_step):
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
det_error_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step),
'error_analysis')
if not os.path.exists(det_error_dir):
os.makedirs(det_error_dir)
det_error_file = os.path.join(det_error_dir, 'det_error_file.txt')
stats = self.analyze_detections(det_file_dir, det_error_file)
ims = self.visualize_detections(
image_dir=self._image_path,
image_format='.png',
det_error_file=det_error_file,
output_image_dir=det_error_dir,
num_det_per_type=10
)
return stats, ims
def analyze_detections(self, detection_file_dir, det_error_file):
def _save_detection(f, idx, error_type, det, score):
f.write(
'{:s} {:s} {:.1f} {:.1f} {:.1f} {:.1f} {:s} {:.3f}\n'.format(
idx, error_type,
det[0]-det[2]/2., det[1]-det[3]/2.,
det[0]+det[2]/2., det[1]+det[3]/2.,
self._classes[int(det[4])],
score
)
)
# load detections
self._det_rois = {}
for idx in self._image_idx:
det_file_name = os.path.join(detection_file_dir, idx+'.txt')
with open(det_file_name) as f:
lines = f.readlines()
f.close()
bboxes = []
for line in lines:
obj = line.strip().split(' ')
cls = self._class_to_idx[obj[0].lower().strip()]
xmin = float(obj[4])
ymin = float(obj[5])
xmax = float(obj[6])
ymax = float(obj[7])
score = float(obj[-1])
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
bboxes.append([x, y, w, h, cls, score])
bboxes.sort(key=lambda x: x[-1], reverse=True)
self._det_rois[idx] = bboxes
# do error analysis
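    # A detection counts as correct if it matches a not-yet-detected ground
    # truth box of the same class with IoU >= 0.5.  Same class with
    # 0.1 < IoU < 0.5 is a localization error, IoU > 0.1 with a different class
    # is a classification error, and IoU <= 0.1 with every ground truth box is
    # a background error.  A second match to an already-detected box is a
    # repeated detection; ground truth boxes never matched are missed.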
num_objs = 0.
num_dets = 0.
num_correct = 0.
num_loc_error = 0.
num_cls_error = 0.
num_bg_error = 0.
num_repeated_error = 0.
num_detected_obj = 0.
with open(det_error_file, 'w') as f:
for idx in self._image_idx:
gt_bboxes = np.array(self._rois[idx])
num_objs += len(gt_bboxes)
detected = [False]*len(gt_bboxes)
det_bboxes = self._det_rois[idx]
if len(gt_bboxes) < 1:
continue
for i, det in enumerate(det_bboxes):
if i < len(gt_bboxes):
num_dets += 1
ious = batch_iou(gt_bboxes[:, :4], det[:4])
max_iou = np.max(ious)
gt_idx = np.argmax(ious)
if max_iou > 0.1:
if gt_bboxes[gt_idx, 4] == det[4]:
if max_iou >= 0.5:
if i < len(gt_bboxes):
if not detected[gt_idx]:
num_correct += 1
detected[gt_idx] = True
else:
num_repeated_error += 1
else:
if i < len(gt_bboxes):
num_loc_error += 1
_save_detection(f, idx, 'loc', det, det[5])
else:
if i < len(gt_bboxes):
num_cls_error += 1
_save_detection(f, idx, 'cls', det, det[5])
else:
if i < len(gt_bboxes):
num_bg_error += 1
_save_detection(f, idx, 'bg', det, det[5])
for i, gt in enumerate(gt_bboxes):
if not detected[i]:
_save_detection(f, idx, 'missed', gt, -1.0)
num_detected_obj += sum(detected)
f.close()
print ('Detection Analysis:')
print (' Number of detections: {}'.format(num_dets))
print (' Number of objects: {}'.format(num_objs))
print (' Percentage of correct detections: {}'.format(
num_correct/num_dets))
print (' Percentage of localization error: {}'.format(
num_loc_error/num_dets))
print (' Percentage of classification error: {}'.format(
num_cls_error/num_dets))
print (' Percentage of background error: {}'.format(
num_bg_error/num_dets))
print (' Percentage of repeated detections: {}'.format(
num_repeated_error/num_dets))
print (' Recall: {}'.format(
num_detected_obj/num_objs))
out = {}
out['num of detections'] = num_dets
out['num of objects'] = num_objs
out['% correct detections'] = num_correct/num_dets
out['% localization error'] = num_loc_error/num_dets
out['% classification error'] = num_cls_error/num_dets
out['% background error'] = num_bg_error/num_dets
out['% repeated error'] = num_repeated_error/num_dets
out['% recall'] = num_detected_obj/num_objs
return out
| bsd-2-clause | -1,532,657,115,107,445,500 | 33.743243 | 82 | 0.550661 | false | 3.222814 | false | false | false |
blaiseli/p4-phylogenetics | p4/nexussets.py | 1 | 69575 | import os
import sys
import string
import array
import types
import copy
from var import var
# Don't bother with NexusToken2, cuz sets blocks are small
from nexustoken import nexusSkipPastNextSemiColon, safeNextTok
import func
from p4exceptions import P4Error
# [Examples from the paup manual,
# but note the bad charpartition subset names '1' and '2'. P4 would not allow those names.]
# charset coding = 2-457 660-896;
# charset noncoding = 1 458-659 897-898;
# charpartition gfunc = 1:coding, 2:noncoding;
# Notes from MadSwofMad97.
# TaxSet taxset-name [({Standard | Vector})] = taxon-set; # standard is default
# TaxPartition partition-name [([{[No]Tokens}] # tokens is default
# [{standard|vector}])] # standard is default
# = subset-name:taxon-set [, subset-name:taxon-set...];
# eg TaxSet outgroup=1-4;
# TaxSet beetles=Omma-.;
#
# taxpartition populations=1:1-3, 2:4-6, 3:7 8; # note bad taxpartition names 1, 2, 3
# taxpartition populations (vector notokens) = 11122233;
#
class CaseInsensitiveDict(dict):
"""A dictionary that is case insensitive, for Nexus"""
def __init__(self, default=None):
dict.__init__(self)
self.default = default
#self.keyDict = {}
def __setitem__(self, key, val):
if type(key) != types.StringType:
gm = ["CaseInsensitiveDict()"]
gm.append("The key must be a string. Got '%s'" % key)
raise P4Error(gm)
lowKey = string.lower(key)
dict.__setitem__(self, lowKey, val)
#self.keyDict[string.lower(key)] = key
def __getitem__(self, key):
if type(key) != types.StringType:
gm = ["CaseInsensitiveDict()"]
gm.append("The key must be a string. Got '%s'" % key)
raise P4Error(gm)
lowKey = string.lower(key)
try:
return dict.__getitem__(self, lowKey)
except KeyError:
return self.default
def get(self, key, *args):
if not args:
args = (self.default,)
return dict.get(self, key, *args)
#########################################################################
# CLASS NexusSets
#########################################################################
class NexusSets(object):
"""A container for Nexus CharSet, CharPartition, and TaxSet objects.
When the first Nexus sets block is read, a NexusSets object is
made and saved as ``var.nexusSets``. ``CharSet``, ``TaxSet``, and
``CharPartition`` objects are placed in it, as they are
read/created. TaxPartition commands are not implemented. Here is
a simple nexus sets block that only has charsets::
#nexus
begin sets;
charset pos1 = 1-.\\3;
charset pos2 = 2-.\\3;
charset pos3 = 3-.\\3;
end;
To get the third positions only, you could say::
read('myAlignment.phy')
a = var.alignments[0]
read('mySets.nex') # the sets block above
b = a.subsetUsingCharSet('pos3')
What happens above when the mySets.nex file is read is that a
NexusSets object is created as ``var.nexusSets`` and populated
with the three charsets as CharSet objects. Then when you asked
for a subset, a copy of that NexusSets object was made and applied
to the alignment.
Notice that the length of the alignment is not part of the
information in the sets block, and so things remain undefined
in ``var.nexusSets`` until the nexus sets are applied to a
particular alignment. One consequence of this somewhat awkward
system is that the same charsets could then be applied to another
alignment of a different size::
read('myAlignment.phy')
aA = var.alignments[0]
read('anotherAlignment.nex')
aB = var.alignments[1]
read('mySets.nex') # the sets block above
bA = aA.subsetUsingCharSet('pos3')
bB = aB.subsetUsingCharSet('pos3')
In the above example, ``bA.nexusSets`` and ``bB.nexusSets`` are
both derived from ``var.nexusSets`` but are independent of it, and
different from each other.
So when an Alignment (or Tree object) wants to use ``var.nexusSets``, it
makes a copy of it, and attaches the copy as
theAlignment.nexusSets or theTree.nexusSets
Here is another example, including a ``charPartition`` definition::
begin sets;
charset gene1 = 1-213;
charset gene2 = 214-497;
charPartition cpName = gene1:gene1, gene2:gene2;
end;
For an alignment, you can then set a **character partition** by ::
a.setCharPartition(cpName)
Do this *before* you make a Data object, to partition the alignment.
You can also use charsets to extract subsets, eg via::
b = a.subsetUsingCharSet(csName)
Setting a charPartition or asking for a subset will trigger
applying ``var.nexusSets`` to the alignment, but you can also do
it explicitly, by::
myTree.setNexusSets()
NexusSets knows about predefined 'constant', 'gapped', and
'remainder' charsets. It does not know about 'missambig' or
'uninf' charsets.
NexusSets can either be in the default standard format or in
vector format -- you can change them to vector format with the ::
mySet.vectorize()
method, and you can change them to standard format with the ::
mySet.standardize()
method. For taxSets, you can use actual tax names (rather than
numbers or ranges) by invoking the method::
myTaxSet.setUseTaxNames()
which sets the attribute 'useTaxNames' to True, and puts the
taxNames for the taxSet in the ::
taxSet.taxNames
list, which might be handy.
You can see the current state of a NexusSets object using ::
myNexusSets.dump()
It can also be written out as a nexus sets block. If an Alignment object
has a ``nexusSets`` attribute then if you ask the alignment to write
itself to a nexus file then the Alignment.nexusSets is also
written. If you would rather it not be written, delete it first.
If you would rather it be written to a separate file, do that
first and then delete it.
One nice thing about taxsets is that :meth:`Tree.Tree.tv` and
:meth:`Tree.Tree.btv` know about them and can display them.
"""
def __init__(self):
self.charSets = []
self.charSetsDict = CaseInsensitiveDict()
self.charSetLowNames = []
self.taxSets = []
self.taxSetsDict = CaseInsensitiveDict()
self.taxSetLowNames = []
self.charPartitions = []
self.charPartitionsDict = CaseInsensitiveDict()
self.charPartitionLowNames = []
self.charPartition = None
#self.alignment = None
self.aligNChar = None
self.taxNames = []
self.nTax = None
self.predefinedCharSetLowNames = ['constant', 'gapped']
# The nexus format defines several "predefined" charSets.
# For all datatypes:
# constant
# gapped
# missambig
# remainder
# uninf
# I only have implemented 2-- constant and gapped. The
# 'remainder' charSet is handled by p4, but not as a CharSet
# object, since its content depends on the context.
cS = CharSet(self)
cS.num = -1
cS.name = 'constant'
cS.lowName = 'constant'
cS.format = 'vector'
# self.charSets.append(cS)
self.constant = cS
self.charSetsDict['constant'] = self.constant
cS = CharSet(self)
cS.num = -1
cS.name = 'gapped'
cS.lowName = 'gapped'
cS.format = 'vector'
# self.charSets.append(cS)
self.gapped = cS
self.charSetsDict['gapped'] = self.gapped
def _continueReadingFromNexusFile(self, flob):
gm = ['NexusSets._continueReadingFromNexusFile()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
if 0:
print gm[0]
print ' var.nexus_doFastNextTok = %s' % var.nexus_doFastNextTok
nexusSkipPastNextSemiColon(flob)
commandName = safeNextTok(flob, gm[0])
lowCommandName = string.lower(commandName)
# print 'got lowCommandName = %s' % lowCommandName
while lowCommandName not in [None, 'end', 'endblock']:
# print "Got lowCommandName '%s'" % lowCommandName
if lowCommandName == 'charset':
self._readCharSetCommand(flob)
elif lowCommandName == 'charpartition':
self._readCharPartitionCommand(flob)
elif lowCommandName == 'taxset':
self._readTaxSetCommand(flob)
elif lowCommandName == 'taxpartition':
print
print gm[0]
if len(gm) > 1:
print gm[1]
print " Sorry-- taxpartition is not implemented."
nexusSkipPastNextSemiColon(flob)
else:
gm.append("Got unrecognized sets block command '%s'" %
commandName)
raise P4Error(gm)
commandName = safeNextTok(
flob, 'NexusSets.continueReadingFromNexusFile()')
lowCommandName = string.lower(commandName)
def _readCharSetCommand(self, flob):
# We have just read 'charset'. The next thing we expect is the charset
# name.
gm = ['NexusSets._readCharSetCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(
safeNextTok(flob, 'NexusSets: _readCharSetCommand'))
# print "readCharSetCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad charSet name '%s'" % name)
raise P4Error(gm)
# Check for duped names
if lowName in self.charSetLowNames:
gm.append("Duplicated charSet name '%s'" % name)
raise P4Error(gm)
elif lowName in self.predefinedCharSetLowNames:
gm.append(
"You cannot use the name '%s' -- it is predefined." % name)
raise P4Error(gm)
cs = CharSet(self)
cs.name = name
cs.lowName = lowName
cs.readTaxOrCharSetDefinition(flob)
cs.num = len(self.charSets)
self.charSets.append(cs)
self.charSetsDict[name] = cs
self.charSetLowNames.append(cs.lowName)
def _readTaxSetCommand(self, flob):
# We have just read 'taxset'. The next thing we expect is the taxset
# name.
gm = ['NexusSets._readTaxSetCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(
safeNextTok(flob, 'NexusSets: readTaxSetCommand'))
# print "readTaxSetCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad taxSet name '%s'" % name)
raise P4Error(gm)
# Check for duped names
if lowName in self.taxSetLowNames:
gm.append("Duplicated taxSet name '%s'" % name)
raise P4Error(gm)
ts = TaxSet(self)
ts.name = name
ts.lowName = lowName
ts.readTaxOrCharSetDefinition(flob)
ts.num = len(self.taxSets)
self.taxSets.append(ts)
self.taxSetsDict[name] = ts
self.taxSetLowNames.append(ts.lowName)
def _readCharPartitionCommand(self, flob):
gm = ['NexusSets._readCharPartitionCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
# print "readCharPartitionCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad charPartition name '%s'" % name)
if lowName in self.charPartitionLowNames:
gm.append("Duplicated charPartition name '%s'" % name)
raise P4Error(gm)
cp = CharPartition(self)
cp.name = name
cp.lowName = lowName
cp._readCharPartitionDefinition(flob)
self.charPartitions.append(cp)
self.charPartitionsDict[name] = cp
self.charPartitionLowNames.append(cp.lowName)
def dump(self):
print " NexusSets dump"
if self.constant:
print " Predefined char set 'constant'"
self.constant.dump()
if self.gapped:
print " Predefined char set 'gapped'"
self.gapped.dump()
print " There are %i non-predefined char sets" % len(self.charSets)
for cs in self.charSets:
cs.dump()
print " There are %i tax sets" % len(self.taxSets)
for ts in self.taxSets:
ts.dump()
print " There are %i char partitions" % len(self.charPartitions)
for cp in self.charPartitions:
cp.dump()
if self.charPartition:
print " self.charPartition.name is %s" % func.nexusFixNameIfQuotesAreNeeded(self.charPartition.name)
else:
print " There is no self.charPartition"
def write(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexus(self, fName=None):
"""Write self in Nexus format to stdout or a file."""
if fName:
f = file(fName, 'w')
else:
f = sys.stdout
f.write('#nexus\n\n')
self.writeNexusToOpenFile(f)
if fName:
f.close()
def writeNexusToOpenFile(self, flob):
"""This only writes non-trivial stuff.
Ie if self has only constant and gapped charsets, then it does
not write anything."""
if self.charSets or self.charPartitions or self.taxSets:
flob.write('begin sets;\n')
for cs in self.charSets:
cs.writeNexusToOpenFile(flob)
for cp in self.charPartitions:
cp.writeNexusToOpenFile(flob)
for ts in self.taxSets:
ts.writeNexusToOpenFile(flob)
flob.write('end;\n\n')
def newCharSet(self, name, mask=None):
cs = CharSet(self)
cs.name = name
        cs.lowName = name.lower()
cs.num = len(self.charSets)
if mask:
cs.format = 'vector'
cs.mask = mask
else:
pass
self.charSets.append(cs)
self.charSetsDict[cs.name] = cs
def dupeCharSet(self, existingCharSetName, newName):
theCS = self.charSetsDict.get(existingCharSetName)
if not theCS:
raise P4Error(
"NexusSets.dupeCharSet() -- can't find char set '%s'" % existingCharSetName)
cs = CharSet(self)
cs.name = newName
        cs.lowName = newName.lower()
cs.num = len(self.charSets)
self.charSets.append(cs)
self.charSetsDict[cs.name] = cs
cs.format = theCS.format
cs.triplets = copy.deepcopy(theCS.triplets) # its a list of lists
cs.tokens = theCS.tokens[:]
cs.mask = theCS.mask
cs.aligNChar = theCS.aligNChar
class TaxOrCharSet(object):
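    # Shared base class for CharSet and TaxSet: holds the set's name, the
    # parsed definition (tokens and triplets) and the '0'/'1' mask, plus the
    # conversions between 'standard' and 'vector' formats.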
def __init__(self, theNexusSets):
self.nexusSets = theNexusSets
self.num = -1
self.name = None
self.lowName = None
self._format = 'standard' # or 'vector' So it should be a property.
self.triplets = []
self.tokens = []
self.mask = None
self.className = 'TaxOrCharSet'
self.lowTaxNames = []
self.taxNames = []
self.useTaxNames = None # undecided
def _getFormat(self):
return self._format
def _setFormat(self, newFormat):
assert newFormat in ['standard', 'vector']
self._format = newFormat
format = property(_getFormat, _setFormat)
def dump(self):
print " %s %i" % (self.className, self.num)
print " name: %s" % self.name
if hasattr(self, 'aligNChar'):
print " aligNChar: %s" % self.aligNChar
print " format: %s" % self.format
if hasattr(self, 'useTaxNames'):
print " useTaxNames: %s" % self.useTaxNames
print " triplets: "
for t in self.triplets:
print " %s" % t
if hasattr(self, 'numberTriplets'):
print " numberTriplets: "
for t in self.numberTriplets:
print " %s" % t
print " tokens: %s" % self.tokens
print " mask: %s" % self.mask
if self.mask:
print " mask 1s-count: %s" % self.mask.count('1')
def readTaxOrCharSetDefinition(self, flob):
gm = ['%s.readTaxSetDefinition()' % self.className]
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
tok = safeNextTok(flob, gm[0])
lowTok = string.lower(tok)
# print "readTaxSetDefinition: get tok '%s'" % tok
if lowTok == '=':
pass
elif lowTok == '(':
#['standard', 'vector']:
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
if lowTok == 'standard':
pass
elif lowTok == 'vector':
self.format = 'vector'
else:
gm.append("Unexpected '%s'" % tok)
gm.append("(I was expecting either 'standard' or")
gm.append("'vector' following the parenthesis.)")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
if tok == ')':
pass
else:
gm.append("Unexpected '%s'" % tok)
gm.append(
"(I was expecting an unparentheis after '%s')" % self.format)
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
if tok != '=':
gm.append("Unexpected '%s'" % tok)
gm.append("I was expecting an '=' after '(%s)'" % self.format)
raise P4Error(gm)
else:
gm.append("Unexpected '%s'" % tok)
raise P4Error(gm)
# Now we are on the other side of the '='
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok not in [None, ';', 'end', 'endblock']:
self.tokens.append(tok)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
if self.format == 'vector':
self.mask = string.join(self.tokens, '')
self.tokens = []
for i in range(len(self.mask)):
if self.mask[i] not in ['0', '1']:
gm.append("%s '%s', vector format" %
(self.className, self.name))
gm.append("The vector must be all zeros or ones.")
raise P4Error(gm)
# print self.mask
# do a once-over sanity check, and convert integer strings to ints
# print "xx1 self.tokens is now %s" % self.tokens
for tokNum in range(len(self.tokens)):
tok = self.tokens[tokNum]
lowTok = string.lower(tok)
if lowTok in ['.', 'all', '-', '\\']:
pass
elif self.className == 'CharSet' and lowTok in self.nexusSets.charSetLowNames:
# print " xx3 %s is an existing charSet" % tok
pass
elif self.className == 'CharSet' and lowTok in self.nexusSets.predefinedCharSetLowNames:
# print " xx3 %s is a pre-defined charSet" % tok
pass
elif self.className == 'TaxSet' and lowTok in self.nexusSets.taxSetLowNames:
# print " xx4 %s is an existing taxSet" % tok
pass
else:
# print " xx5"
try:
intTok = int(tok)
self.tokens[tokNum] = intTok
except ValueError:
if self.className == 'TaxSet':
pass
elif self.className == 'CharSet':
gm.append("I don't understand the token '%s'" % tok)
raise P4Error(gm)
# Now I want to make a list of triplets representing eg 23-87\3
# first item = 23, second item = 87, third = 3
# not all will exist for each part of the char definition.
tokNum = 0
self.triplets = []
while tokNum < len(self.tokens):
tok = self.tokens[tokNum]
# print "Considering tok[%i] '%s'" % (tokNum, tok)
if type(tok) == type('str'):
lowTok = string.lower(tok)
else:
lowTok = None
if self.className == 'TaxSet' and lowTok in self.nexusSets.taxSetLowNames or \
                    self.className == 'CharSet' and lowTok in self.nexusSets.charSetLowNames:
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"An existing tax or char set may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"An existing tax or char set may not be followed by a '\\'")
raise P4Error(gm)
elif tok == 'all':
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set 'all' may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set 'all' may not be followed by a '\\'")
raise P4Error(gm)
elif tok == '-':
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Out of place '-'")
raise P4Error(gm)
elif tok == '\\':
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Out of place '\\'")
raise P4Error(gm)
elif tok == '.':
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set '.' may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set '.' may not be followed by a '\\'")
raise P4Error(gm)
elif type(tok) == type(1) or type(tok) == type('str'):
aTriplet = [tok, None, None]
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
tokNum += 1
if tokNum < len(self.tokens):
# maybe '.'
if type(self.tokens[tokNum]) == type('str'):
aTriplet[1] = self.tokens[tokNum]
elif type(self.tokens[tokNum]) == type(1):
if type(aTriplet[0]) == type(1):
if self.tokens[tokNum] > aTriplet[0]:
aTriplet[1] = self.tokens[tokNum]
else:
gm.append(
"%s '%s' definition" % (self.className, self.name))
gm.append(
"If a range is defined by two numbers,")
# gm.append("(as it appears to be -- %s %s %s)" % (
# aTriplet[0], aTriplet[1],
# aTriplet[2]))
gm.append(
"the second number of a range must be bigger than")
gm.append("the first.")
raise P4Error(gm)
else:
aTriplet[1] = self.tokens[tokNum]
else:
raise P4Error(gm)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '\\':
tokNum += 1
if tokNum < len(self.tokens):
if type(self.tokens[tokNum]) == type(1):
aTriplet[2] = self.tokens[tokNum]
else:
gm.append(
"%s '%s' definition" % (self.className, self.name))
gm.append(
"Step value of a range must be a number")
gm.append("(Got '%s')" %
self.tokens[tokNum])
raise P4Error(gm)
tokNum += 1
self.triplets.append(aTriplet)
# print "xxy self.mask = %s" % self.mask
if not self.triplets and not self.mask:
if not var.allowEmptyCharSetsAndTaxSets:
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Got no definition (no triplets or mask)")
gm.append("(Allow this by turning var.allowEmptyCharSetsAndTaxSets on)")
raise P4Error(gm)
if 0:
print gm[0]
print " Got self.triplets %s" % self.triplets
def setMask(self):
"""Set self.mask."""
gm = ["%s.setMask() name='%s'" % (self.className, self.name)]
if self.format == 'vector':
if self.mask:
pass
else:
gm.append("vector format, but no mask?")
raise P4Error(gm)
elif self.format == 'standard':
if 0:
print gm[0]
self.dump()
if not len(self.triplets):
if not var.allowEmptyCharSetsAndTaxSets:
gm.append(
"standard format, but we have no triplets? - no definition?")
gm.append("(Allow this by turning var.allowEmptyCharSetsAndTaxSets on.)")
raise P4Error(gm)
if self.className == 'CharSet':
thisMaskLen = self.aligNChar
existingSetNames = self.nexusSets.charSetLowNames
existingSets = self.nexusSets.charSets
theTriplets = self.triplets
elif self.className == 'TaxSet':
thisMaskLen = self.nexusSets.nTax
existingSetNames = self.nexusSets.taxSetLowNames
existingSets = self.nexusSets.taxSets
theTriplets = self.numberTriplets
mask = array.array('c', thisMaskLen * '0')
for aTriplet in theTriplets:
if 0:
print gm[0]
print " '%s' aTriplet=%s" % (self.name, aTriplet)
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
lowFirst = None
lowSecond = None
if type(first) == type('str'):
lowFirst = string.lower(first)
if type(second) == type('str'):
lowSecond = string.lower(second)
# its a single, or an existing set, not a range
if first and not second:
if lowFirst:
if lowFirst == 'all':
for i in range(thisMaskLen):
mask[i] = '1'
if lowFirst in existingSetNames:
for aSet in existingSets:
if lowFirst == aSet.lowName:
if not aSet.mask:
aSet.setMask()
for j in range(thisMaskLen):
if aSet.mask[j] == '1':
mask[j] = '1'
# Maybe its a predefined charset --- constant or gapped
elif self.className == 'CharSet' and lowFirst in self.nexusSets.predefinedCharSetLowNames:
aSet = None
if lowFirst == 'constant':
aSet = self.nexusSets.constant
elif lowFirst == 'gapped':
aSet = self.nexusSets.gapped
assert aSet
for j in range(thisMaskLen):
if aSet.mask[j] == '1':
mask[j] = '1'
else:
gm.append("I don't know '%s'" % first)
raise P4Error(gm)
elif first == '.':
mask[-1] = '1'
elif type(first) == type(1):
if first > 0 and first <= thisMaskLen:
mask[first - 1] = '1'
else:
# This will have been checked before.
gm.append(
"Component '%s' is out of range of mask len (%s)" % (first, thisMask))
raise P4Error(gm)
elif first and second:
# Its a range.
start = int(first)
if second == '.':
fin = len(mask)
else:
fin = int(second)
if third:
bystep = int(third)
# print "mask len %i, start-1 %i, fin %i, bystep %i" %
# (len(mask), (start-1), fin, bystep)
for spot in range(start - 1, fin, bystep):
mask[spot] = '1'
else:
for spot in range(start - 1, fin):
mask[spot] = '1'
# print " finished incorporating triplet %s into
# '%s' mask." % (aTriplet, self.name)
mask = mask.tostring()
# print "Got char set '%s' mask '%s'" % (self.name, mask)
self.mask = mask
def invertMask(self):
"""Change zeros to ones, and non-zeros to zero."""
gm = ['%s.invertMask()' % self.className]
if not self.mask:
self.dump()
gm.append("The charset has no mask")
raise P4Error(gm)
self.mask = list(self.mask)
for i in range(len(self.mask)):
if self.mask[i] == '0':
self.mask[i] = '1'
else:
self.mask[i] = '0'
self.mask = string.join(self.mask, '')
def write(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexus(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexusToOpenFile(self, flob):
if self.className == 'CharSet':
theSetName = 'charSet'
else:
theSetName = 'taxSet'
if self.format == 'standard':
flob.write(' %s %s =' % (theSetName, self.name))
if self.useTaxNames:
for tN in self.taxNames:
flob.write(" %s" % func.nexusFixNameIfQuotesAreNeeded(tN))
else:
# for i in self.tokens:
# flob.write(' %s' % i)
previousTok = None
for theTok in self.tokens:
if type(theTok) == types.StringType:
if theTok not in ['-', '\\']:
tok = func.nexusFixNameIfQuotesAreNeeded(theTok)
else:
tok = theTok
else:
tok = theTok
if previousTok != None:
# tokens will be either ints or strings
previousType = type(previousTok)
# print "previousTok = %s, previousType = %s" %
# (previousTok, previousType)
# usually put in a space
if type(tok) == previousType:
# except in this case
if tok in ['-'] or previousTok in ['-']:
flob.write('%s' % tok)
else:
flob.write(' %s' % tok)
else: # usually no space
if tok in ['-'] or previousTok in ['-']:
flob.write('%s' % tok)
else: # except in this case
flob.write(' %s' % tok)
previousTok = tok
# print "previousTok = %s, previousType = %s" %
# (previousTok, previousType)
else:
flob.write(' %s' % tok)
previousTok = tok
flob.write(';\n')
elif self.format == 'vector':
flob.write(' %s %s (vector) = ' % (theSetName, self.name))
flob.write('%s;\n' % self.mask)
def vectorize(self):
if self.format == 'vector':
return
if not self.mask:
self.setMask()
#self.triplets = []
#self.tokens = []
self.format = 'vector'
def standardize(self):
if self.format == 'standard':
return
self.triplets = []
self.tokens = []
thisTriplet = []
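        # Walk the vector mask and rebuild ranges.  For example the mask
        # '110100' becomes triplets [[1, 2, None], [4, None, None]] and
        # tokens [1, '-', 2, 4].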
for mPos in range(len(self.mask)):
# print "mPos=%i mask=%s thisTriplet=%s" % (mPos,
# self.mask[mPos], thisTriplet)
if self.mask[mPos] == '0':
if thisTriplet:
if thisTriplet[0] == mPos:
thisTriplet.append(None)
thisTriplet.append(None)
else:
thisTriplet.append(mPos)
thisTriplet.append(None)
# print " finished triplet -- %s" % thisTriplet
self.triplets.append(thisTriplet)
thisTriplet = []
else:
if thisTriplet:
pass
else:
thisTriplet.append(mPos + 1)
# print " started triplet -- %s" % thisTriplet
if thisTriplet:
if thisTriplet[0] == len(self.mask):
thisTriplet.append(None)
thisTriplet.append(None)
else:
thisTriplet.append(mPos + 1)
thisTriplet.append(None)
# print " finished last triplet -- %s" % thisTriplet
self.triplets.append(thisTriplet)
# print self.triplets
for triplet in self.triplets:
if triplet[1] == None:
self.tokens.append(triplet[0])
else:
self.tokens.append(triplet[0])
self.tokens.append('-')
self.tokens.append(triplet[1])
self.format = 'standard'
# self.dump()
class CharSet(TaxOrCharSet):
def __init__(self, theNexusSets):
TaxOrCharSet.__init__(self, theNexusSets)
self.className = 'CharSet'
self.aligNChar = None
def getNChar(self):
self.setMask()
return self.mask.count('1')
def setAligNChar(self, aligNChar):
gm = ['CharSet.setAligNChar()']
# print "CharSet name=%s, format=%s, aligNChar=%i" % (self.name,
# self.format, aligNChar)
self.aligNChar = aligNChar
if self.format == 'standard':
for aTriplet in self.triplets:
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
if first and not second: # its a single
if type(first) == type(1):
if first > 0 and first <= self.aligNChar:
pass
else:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Charset definition element '%s' is out of range" % first)
gm.append("(aligNChar = %i)" % self.aligNChar)
raise P4Error(gm)
pass
elif first and second: # its a range
try:
start = int(first)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % first)
raise P4Error(gm)
if second == '.':
fin = self.aligNChar
else:
try:
fin = int(second)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % second)
raise P4Error(gm)
if third:
try:
bystep = int(third)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % third)
raise P4Error(gm)
elif self.format == 'vector':
# print "charset %s, vector format %s, mask %s" % (self.name,
# self.format, self.mask)
if self.mask:
if len(self.mask) == self.aligNChar:
pass
else:
gm.append("len(self.mask) is %i, but aligNChar is %i" % (
len(self.mask), self.aligNChar))
raise P4Error(gm)
else:
gm.append("bad format %s" % self.format)
raise P4Error(gm)
class TaxSet(TaxOrCharSet):
def __init__(self, theNexusSets):
TaxOrCharSet.__init__(self, theNexusSets)
self.className = 'TaxSet'
self.numberTriplets = []
def setNumberTriplets(self):
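        # Convert self.triplets, which may contain taxon names, '.', or names
        # of other taxSets, into numberTriplets where taxon names are replaced
        # by their 1-based positions in nexusSets.taxNames and '.' means the
        # last taxon (nTax).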
gm = ['TaxSet.setNumberTriplets()']
if not self.nexusSets.lowTaxNames:
self.nexusSets.lowTaxNames = [
string.lower(txName) for txName in self.nexusSets.taxNames]
self.numberTriplets = []
# print "self.triplets = %s" % self.triplets
for tr in self.triplets:
# print "setNumberTriplets() tr=%s" % tr
numTr = []
for itemNum in range(2):
trItem = tr[itemNum]
# print " considering '%s'" % trItem
if trItem == None:
numTr.append(trItem)
elif type(trItem) == type(1):
numTr.append(trItem)
elif trItem == '.':
numTr.append(self.nexusSets.nTax)
else:
assert type(trItem) == type('str')
lowTrItem = string.lower(trItem)
if lowTrItem in self.nexusSets.taxSetLowNames:
numTr.append(trItem)
else:
if lowTrItem not in self.nexusSets.lowTaxNames:
gm.append("Triplet %s" % tr)
gm.append(
"'%s' is a string, but not in the taxNames." % trItem)
raise P4Error(gm)
theIndx = self.nexusSets.lowTaxNames.index(lowTrItem)
theIndx += 1
numTr.append(theIndx)
trItem = tr[2]
if trItem == None:
numTr.append(None)
else:
assert type(trItem) == type(1)
numTr.append(trItem)
assert len(numTr) == 3
# print numTr
first = numTr[0]
# first might be a pre-existing taxSet name
if type(first) == type('str'):
pass
else:
second = numTr[1]
assert type(first) == type(1) and first != 0
if type(second) == type(1):
assert second != 0
if second <= first:
gm.append("Triplet %s" % tr)
gm.append("Triplet expressed as numbers. %s" % numTr)
gm.append(
"This appears to be a range, but the second number")
gm.append("is not bigger than the first.")
raise P4Error(gm)
assert second <= self.nexusSets.nTax
assert first <= self.nexusSets.nTax
self.numberTriplets.append(numTr)
def setUseTaxNames(self):
if self.useTaxNames:
return
# if not self.mask:
# self.setMask()
if not self.taxNames:
for pos in range(len(self.mask)):
c = self.mask[pos]
if c == '1':
self.taxNames.append(self.nexusSets.taxNames[pos])
self.useTaxNames = True
class CharPartitionSubset(object):
def __init__(self):
self.name = None
self.lowName = None
self.tokens = []
self.mask = None
self.triplets = []
def dump(self):
print " -- CharPartitionSubset"
print " name: %s" % func.nexusFixNameIfQuotesAreNeeded(self.name)
print " triplets: "
for t in self.triplets:
print " %s" % t
print " tokens: %s" % self.tokens
# for t in self.tokens:
# print " %s" % t
print " mask: %s" % self.mask
def writeNexusToOpenFile(self, flob):
flob.write('%s:' % self.name)
# print self.tokens
# for i in self.tokens:
# flob.write(' %s' % i)
previousTok = None
for i in self.tokens:
if previousTok != None:
# tokens will be either ints or strings
previousType = type(previousTok)
# print "previousTok = %s, previousType = %s" % (previousTok,
# previousType)
if type(i) == previousType: # put in a space
flob.write(' %s' % i)
else: # no space
flob.write('%s' % i)
previousTok = i
else:
flob.write(' %s' % i)
previousTok = i
class CharPartition(object):
def __init__(self, theNexusSets):
self.nexusSets = theNexusSets
self.name = None
self.lowName = None
self.tokens = []
self.subsets = []
def _readCharPartitionDefinition(self, flob):
gm = ['CharPartition._readCharPartitionDefinition()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok != '=':
if lowTok == '(':
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok != ')':
if lowTok in ['notokens', 'vector']:
gm.append("Got charpartition modifier: '%s'" % tok)
gm.append("It is not implemented.")
gm.append(
"Only 'tokens' and 'standard' are implemented.")
raise P4Error(gm)
elif lowTok in ['tokens', 'standard']:
pass
else:
gm.append("Got charpartition modifier: '%s'" % tok)
gm.append("This is not understood.")
gm.append(
"(Only 'tokens' and 'standard' are implemented.)")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
else:
gm.append("Got unexpected token: '%s'" % tok)
gm.append(
"I was expecting either an '=' or something in parentheses.")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok not in [None, ';', 'end', 'endblock']:
self.tokens.append(tok)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
# print "_readCharPartitionDefinition: tokens %s" % self.tokens
# Divide into CharPartitionSubset instances
i = 0
while i < len(self.tokens):
aSubset = CharPartitionSubset()
aSubset.name = self.tokens[i]
if not func.nexusCheckName(aSubset.name):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Bad subset name (%s, I think)" % aSubset.name)
raise P4Error(gm)
aSubset.lowName = string.lower(aSubset.name)
i += 1
if i >= len(self.tokens):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) should be followed by a colon" % aSubset.name)
raise P4Error(gm)
if self.tokens[i] != ':':
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) should be followed by a colon" % aSubset.name)
raise P4Error(gm)
i += 1
if i >= len(self.tokens):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) and colon should be followed" % aSubset.name)
gm.append(
"by a subset definition (charSet or charSet definition)")
raise P4Error(gm)
while i < len(self.tokens) and self.tokens[i] != ',':
aSubset.tokens.append(self.tokens[i])
i += 1
i += 1
self.subsets.append(aSubset)
# do a once-over sanity check,
# check for duplicated names
# and convert integer strings to ints
existingPartNames = []
for aSubset in self.subsets:
# print "Checking charPartitionPart '%s'" % aSubset.name
# print " existingPartNames '%s'" % existingPartNames
if aSubset.lowName in existingPartNames:
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Duplicated subset name (%s, I think)" %
aSubset.name)
raise P4Error(gm)
existingPartNames.append(aSubset.lowName)
for i in range(len(aSubset.tokens)):
tok = aSubset.tokens[i]
lowTok = string.lower(tok)
# print "considering '%s', ord(lowTok[0])=%i" % (lowTok,
# ord(lowTok[0]))
# Does not pick up '.'!!!!
if lowTok in ['.', 'all', '-', '\\', 'remainder']:
pass
elif lowTok in self.nexusSets.charSetLowNames:
pass
elif lowTok in self.nexusSets.predefinedCharSetLowNames:
pass
else:
# print " lowTok=%s, ord(lowTok[0])=%s, ord('.')=%s" % (
# lowTok, ord(lowTok[0]), ord('.'))
try:
intTok = int(tok)
aSubset.tokens[i] = intTok
except ValueError:
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Can't understand '%s' in subset '%s' definition" %
(tok, aSubset.name))
gm.append(
"(If you are using read('whatever'), and there are backslashes,")
gm.append(
"are you using raw strings, ie read(r'whatever')?)")
raise P4Error(gm)
def setSubsetMasks(self):
"""Make charParititionSubset.mask's appropriate to the Alignment.
This is called by theAlignment.setCharPartition().
"""
gm = ['CharPartition.setSubsetMasks()']
assert self.nexusSets.aligNChar
# Make a list of triplets representing eg 23-87\3
# first item = 23, second item = 87, third = 3
# Not all will exist for each part of the char definition.
for aSubset in self.subsets:
i = 0
aSubset.triplets = []
while i < len(aSubset.tokens):
tok = aSubset.tokens[i]
if type(tok) == type('string'):
lowTok = string.lower(tok)
else:
lowTok = None
# print "Doing triplets: looking at tok '%s'" % tok
if lowTok and lowTok in self.nexusSets.charSetLowNames or \
lowTok in self.nexusSets.predefinedCharSetLowNames:
aTriplet = [lowTok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"An existing char set may not be followed by a '-'")
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"An existing char set may not be followed by a '\\'")
raise P4Error(gm)
elif lowTok in ['all', 'remainder']:
aTriplet = [lowTok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if lowTok == 'remainder' and i < len(aSubset.tokens):
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set 'remainder' must be the last one in the charPartition definition")
raise P4Error(gm)
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '%s' may not be followed by a '-'" % lowTok)
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '%s' may not be followed by a '\\'" % lowTok)
raise P4Error(gm)
elif tok == '-':
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("Out of place '-'")
raise P4Error(gm)
elif tok == '\\':
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("Out of place '\\'")
raise P4Error(gm)
elif tok == '.':
aTriplet = [tok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '.' may not be followed by a '-'")
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '.' may not be followed by a '\\'")
raise P4Error(gm)
elif type(tok) == type(1):
aTriplet = [tok, None, None]
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '.':
aTriplet[1] = aSubset.tokens[i]
elif type(aSubset.tokens[i]) == type(1):
if aSubset.tokens[i] > aTriplet[0]:
aTriplet[1] = aSubset.tokens[i]
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Second number of a character range must be bigger than")
gm.append("the first.")
raise P4Error(gm)
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Second item of a character range must be either a")
gm.append(
"number or a '.'. I got '%s'" % aSubset.tokens[i])
raise P4Error(gm)
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '\\':
i = i + 1
if i < len(aSubset.tokens):
if type(aSubset.tokens[i]) == type(1):
aTriplet[2] = aSubset.tokens[i]
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Step value of a range must be a number")
gm.append(
"(Got '%s')" % aSubset.tokens[i])
raise P4Error(gm)
i = i + 1
aSubset.triplets.append(aTriplet)
else:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("token '%s' is not understood." % tok)
raise P4Error(gm)
if 0:
print gm[0]
print "Got aSubset (%s) triplets %s" % (aSubset.name, aSubset.triplets)
# sys.exit()
aSubset.mask = array.array('c', self.nexusSets.aligNChar * '0')
for aTriplet in aSubset.triplets:
# print "setSubsetMasks() Looking at triplet '%s'" % aTriplet
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
lowFirst = None
lowSecond = None
if type(first) == type('str'):
lowFirst = string.lower(first)
if type(second) == type('str'):
lowSecond = string.lower(second)
if first and not second: # its a single
# print "Got single: %s" % first
if lowFirst == 'all':
for i in range(self.nexusSets.aligNChar):
aSubset.mask[i] = '1'
elif lowFirst in self.nexusSets.predefinedCharSetLowNames:
theCS = None
if lowFirst == 'constant':
theCS = self.nexusSets.constant
elif lowFirst == 'gapped':
theCS = self.nexusSets.gapped
assert theCS
assert theCS.mask
for j in range(self.nexusSets.aligNChar):
if theCS.mask[j] == '1':
aSubset.mask[j] = '1'
elif lowFirst in self.nexusSets.charSetLowNames:
theCS = None
for cs in self.nexusSets.charSets:
if lowFirst == cs.lowName:
theCS = cs
break
assert theCS
assert theCS.mask
for j in range(self.nexusSets.aligNChar):
if theCS.mask[j] == '1':
aSubset.mask[j] = '1'
# Its legit to use this as a single char.
elif first == '.':
aSubset.mask[-1] = '1'
elif type(first) == type(1):
if first > 0 and first <= self.nexusSets.aligNChar:
aSubset.mask[first - 1] = '1'
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Charset definition element '%s' is out of range" % first)
gm.append("(aligNChar = %i)" %
self.nexusSets.aligNChar)
raise P4Error(gm)
elif lowFirst == 'remainder':
# print "Got first == remainder"
for i in range(self.nexusSets.aligNChar):
aSubset.mask[i] = '1'
# print "Got new aSubset.mask = %s" % aSubset.mask
for ss in self.subsets[:-1]:
if ss.mask:
# print "Previous mask: %s" % ss.mask
for j in range(self.nexusSets.aligNChar):
if ss.mask[j] == '1':
aSubset.mask[j] = '0'
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" %
aSubset.name)
gm.append(
"When implementing 'remainder' charset")
gm.append(
"Found that subset '%s' had no mask" % ss)
raise P4Error(gm)
else:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Charset definition element '%s' is not understood" % first)
raise P4Error(gm)
elif first and second: # its a range
try:
start = int(first)
except ValueError:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Can't parse definition element '%s'" % first)
raise P4Error(gm)
if second == '.':
fin = len(aSubset.mask)
else:
try:
fin = int(second)
except ValueError:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Can't parse definition element '%s'" % second)
raise P4Error(gm)
if third:
try:
bystep = int(third)
except ValueError:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Can't parse definition element '%s'" % third)
for spot in range(start - 1, fin, bystep):
aSubset.mask[spot] = '1'
else:
for spot in range(start - 1, fin):
aSubset.mask[spot] = '1'
aSubset.mask = aSubset.mask.tostring()
# print "Got char subset '%s' mask '%s'" % (aSubset.name,
# aSubset.mask)
if aSubset.mask.count('1') == 0:
gm.append(
"The mask for charPartitionSubset '%s' is empty." % aSubset.name)
raise P4Error(gm)
def checkForOverlaps(self):
        gm = ['CharPartition.checkForOverlaps()']
unspanned = 0
for i in range(self.nexusSets.aligNChar):
sum = 0
for aSubset in self.subsets:
if aSubset.mask[i] == '1':
sum += 1
if sum > 1:
gm.append("Char partition '%s'" % self.name)
gm.append(
"The problem is that there are overlapping subsets in this")
gm.append(
"charpartition. The same position is in more than one subset.")
gm.append(
"Zero-based position %i, one-based position %i." % (i, i + 1))
raise P4Error(gm)
if sum < 1:
unspanned = 1
if unspanned:
gm.append("Char partition '%s'" % self.name)
gm.append("You should be aware that this partition does not span")
gm.append("the entire sequence. Hopefully that is intentional.")
def dump(self):
print " CharPartition: name: %s" % func.nexusFixNameIfQuotesAreNeeded(self.name)
# string.join(self.tokens)
print " tokens: %s" % self.tokens
# for t in self.tokens:
# print " %s" % t
print " number of subsets: %s" % len(self.subsets)
for aSubset in self.subsets:
aSubset.dump()
def writeNexusToOpenFile(self, flob):
flob.write(' charPartition %s = ' % self.name)
# print " [ %s subsets ] " % len(self.subsets)
for aSubset in self.subsets[:-1]:
aSubset.writeNexusToOpenFile(flob)
flob.write(', ')
self.subsets[-1].writeNexusToOpenFile(flob)
flob.write(';\n')
def mask(self):
if not self.nexusSets.aligNChar:
self.nexusSets.aligNChar = self.theNexusSets.aligNChar
self.setSubsetMasks()
import array
m = array.array('c', self.nexusSets.aligNChar * '0')
for i in range(self.nexusSets.aligNChar):
for aSubset in self.subsets:
if aSubset.mask[i] == '1':
m[i] = '1'
return m.tostring()
| gpl-2.0 | 7,514,382,857,311,807,000 | 41.579559 | 123 | 0.453856 | false | 4.457079 | false | false | false |
AndrewGYork/tools | zaber.py | 1 | 8750 | import time
import serial
class Stage:
"""Zaber stage(s), attached through the (USB?) serial port."""
def __init__(
self,
port_name, # For example, 'COM3' on Windows
timeout=1,
verbose=True,
very_verbose=False):
"""port_name: which serial port the stage is connected to, e.g. 'COM3'
"""
self.verbose = verbose
self.very_verbose = very_verbose
try:
self.serial = serial.Serial(
port=port_name,
baudrate=9600,
bytesize=8,
parity='N',
stopbits=1,
timeout=timeout)
except serial.serialutil.SerialException:
print('Failed to open serial port for Zaber stage(s).')
print('Sometimes Windows is weird about this!')
print('Consider trying again.')
raise
if self.verbose: print("Renumbering stages:")
self.devices = self.renumber_all_devices()
self.pending_moves = [False for d in self.devices]
if self.verbose:
for d in self.devices:
print(' Axis:', d)
print(' Done renumbering.')
self.restore_settings()
self.default_speed = min([r['speed'] for r in self.get_target_speed()])
if verbose: print(" Default stage speed:", self.default_speed)
self.move_home()
def send(self, instruction):
"""Send an instruction to the Zaber stage.
'instruction' must be a list of 6 integers, 0-255 (no error
checking).
See: http://www.zaber.com/wiki/Manuals/Binary_Protocol_Manual
for a list of instructions.
"""
assert len(instruction) == 6
if self.very_verbose: print("Sending to stage:", instruction)
serial_cmd = bytes(instruction) # 0 <= int(i) < 256 for i in instruction
self.serial.write(serial_cmd)
return None
def receive(self, expected_command_ID=None):
"""Return 6 bytes from the serial port
There must be 6 bytes to receive (no error checking).
"""
response = self.serial.read(6)
if len(response) != 6:
raise UserWarning(
"Zaber stage failed to respond. Is the timeout too short?\n" +
"Is the stage plugged in?")
response = {'device_number': response[0],
'command_ID': response[1],
'data': four_bytes_to_uint(response[2:6])}
if expected_command_ID is not None:
assert response['command_ID'] == expected_command_ID
if self.very_verbose:
print("Response from stage:\n", response)
return response
def get_position(self, axis='all'):
if axis == 'all':
axis = 0
num_responses = len(self.devices)
else:
num_responses = 1
assert axis in range(len(self.devices) + 1)
self.send([axis, 60, 0, 0, 0, 0])
responses = []
for i in range(num_responses):
responses.append(self.receive(expected_command_ID=60))
axis_positions = {}
for r in responses:
axis_positions[r['device_number']] = r['data']
return axis_positions
def move(self, distance, movetype='absolute', response=True, axis='all'):
distance = int(distance)
if self.verbose:
print("Moving axis: ", repr(axis),
" distance ", distance, " (", movetype, ")", sep='')
if axis == 'all':
axis = 0
assert self.pending_moves == [False for d in self.devices]
else:
assert axis in [d['device_number'] for d in self.devices]
assert self.pending_moves[(axis - 1)] == False
if movetype == 'absolute':
instruction = [axis, 20]
elif movetype == 'relative':
instruction = [axis, 21]
else:
raise UserWarning("Move type must be 'relative' or 'absolute'")
# Data conversion and transfer:
instruction.extend(uint_to_four_bytes(distance))
self.send(instruction)
if axis == 0:
self.pending_moves = [True for d in self.devices]
else:
self.pending_moves[axis - 1] = True
if response:
return self.finish_moving()
return None
def finish_moving(self):
response = []
for i in range(len(self.devices)):
if self.pending_moves[i]:
response.append(self.receive())
assert response[-1]['command_ID'] in (1, 20, 21)
self.pending_moves = [False for d in self.devices]
assert self.serial.inWaiting() == 0
return response
def move_home(self, response=True):
if self.verbose: print("Moving stage(s) near home...")
self.move(100)
if self.verbose: print("Moving stage(s) home.")
assert self.pending_moves == [False for d in self.devices]
self.send([0, 1, 0, 0, 0, 0])
self.pending_moves = [True for d in self.devices]
if response:
return self.finish_moving()
return None
def restore_settings(self):
if self.verbose: print("Restoring stage(s) to default settings.")
assert self.pending_moves == [False for d in self.devices]
assert self.serial.inWaiting() == 0
self.send([0, 36, 0, 0, 0, 0]) # Restore to default settings
for d in self.devices:
self.receive(expected_command_ID=36)
self.send([0, 116, 1, 0, 0, 0]) # Disable manual move tracking
for d in self.devices:
self.receive(expected_command_ID=116)
assert self.serial.inWaiting() == 0
return None
def renumber_all_devices(self):
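        # Broadcast the 'renumber' command (ID 2) to all devices; each stage
        # answers with one 6-byte packet, so the number of stages on the chain
        # is inferred from how many bytes arrive.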
self.serial.flushInput()
self.serial.flushOutput()
self.send([0, 2, 0, 0, 0, 0])
# We can't predict the length of the response, since we don't
# yet know how many stages there are. Just wait a healthy amount
# of time for the answer:
time.sleep(.8) # Seems to take a little over 0.5 seconds.
bytes_waiting = self.serial.inWaiting()
assert bytes_waiting % 6 == 0 # Each stage responds with 6 bytes.
num_stages = int(bytes_waiting / 6)
stages = []
for n in range(num_stages):
r = self.receive()
assert (r['device_number'] - 1) in range(num_stages)
assert r.pop('command_ID') == 2
r['device_ID'] = r.pop('data')
assert r['device_ID'] in (# List of devices we've tested; add liberally.
20053,
)
stages.append(r)
assert self.serial.inWaiting() == 0
return stages
def set_target_speed(self, speed, response=True):
min_speed = int(self.default_speed * 0.01)
max_speed = int(2*self.default_speed)
speed = int(speed)
assert min_speed <= speed < max_speed
if self.verbose: print("Setting stage speed to", speed)
inst = [0, 42]
inst.extend(uint_to_four_bytes(speed))
self.send(inst)
if response:
reply = [self.receive(expected_command_ID=42)
for d in self.devices]
return reply
def get_target_speed(self):
inst = [0, 53, 42, 0, 0, 0]
self.send(inst)
reply = []
for d in self.devices:
reply.append(self.receive())
assert reply[-1].pop('command_ID') == 42
reply[-1]['speed'] = reply[-1].pop('data')
return reply
def close(self):
self.move_home()
self.serial.close()
def four_bytes_to_uint(x):
assert len(x) == 4
return int.from_bytes(x, byteorder='little')
def uint_to_four_bytes(x):
assert 0 <= x < 4294967296
return [x >> i & 0xff for i in (0, 8, 16, 24)]
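# Little-endian packing used by the binary protocol, e.g.
#   uint_to_four_bytes(70000)                  -> [112, 17, 1, 0]
#   four_bytes_to_uint(bytes([112, 17, 1, 0])) -> 70000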
if __name__ == '__main__':
my_stage = Stage(port_name='COM3', verbose=True, very_verbose=False)
try:
my_stage.move(0, movetype='absolute', axis='all')
for i in range(len(my_stage.devices)):
my_stage.move(70000, movetype='absolute', axis=i+1)
print("Stage postion:", my_stage.get_position())
my_stage.move(0, movetype='absolute', axis=i+1)
print("Stage postion:", my_stage.get_position())
my_stage.set_target_speed(my_stage.default_speed * 1.3)
my_stage.move(70000, movetype='absolute', axis='all')
print("Stage postion:", my_stage.get_position())
my_stage.move(0, movetype='absolute', axis='all')
print("Stage postion:", my_stage.get_position())
my_stage.set_target_speed(my_stage.default_speed)
finally:
my_stage.close()
| gpl-2.0 | 8,001,192,624,062,251,000 | 37.377193 | 84 | 0.56 | false | 3.888889 | false | false | false |
jfunez/scielo-manager | scielomanager/journalmanager/tests/modelfactories.py | 1 | 7014 | # coding: utf-8
import factory
import datetime
from journalmanager import models
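# Test fixtures for the journalmanager models.  Each factory supplies default
# field values for its FACTORY_FOR model; factory.Sequence generates a unique
# value per instance and factory.SubFactory builds related objects (e.g.
# JournalFactory creates its own creator User, UseLicense and Collection).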
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
front = {
'default-language': 'en',
'title-group': {
'en': u'Article Title',
'pt': u'Título do Artigo',
}
}
xml_url = 'http://xml.url/'
pdf_url = 'http://pdf.url/'
images_url = 'http://img.url/'
class UserFactory(factory.Factory):
FACTORY_FOR = models.User
@classmethod
def _setup_next_sequence(cls):
try:
return cls._associated_class.objects.values_list(
'id', flat=True).order_by('-id')[0] + 1
except IndexError:
return 0
username = factory.Sequence(lambda n: "username%s" % n)
first_name = factory.Sequence(lambda n: "first_name%s" % n)
last_name = factory.Sequence(lambda n: "last_name%s" % n)
email = factory.Sequence(lambda n: "email%[email protected]" % n)
password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'
is_staff = False
is_active = True
is_superuser = False
last_login = datetime.datetime(2000, 1, 1)
date_joined = datetime.datetime(1999, 1, 1)
class SubjectCategoryFactory(factory.Factory):
FACTORY_FOR = models.SubjectCategory
term = 'Acoustics'
class StudyAreaFactory(factory.Factory):
FACTORY_FOR = models.StudyArea
study_area = 'Health Sciences'
class SponsorFactory(factory.Factory):
FACTORY_FOR = models.Sponsor
name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
email = '[email protected]'
complement = ''
class UseLicenseFactory(factory.Factory):
FACTORY_FOR = models.UseLicense
license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
disclaimer = u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'
class CollectionFactory(factory.Factory):
FACTORY_FOR = models.Collection
url = u'http://www.scielo.br/'
name = factory.Sequence(lambda n: 'scielo%s' % n)
address_number = u'430'
country = u'Brasil'
address = u'Rua Machado Bittencourt'
email = u'[email protected]'
name_slug = factory.Sequence(lambda n: 'scl%s' % n)
class JournalFactory(factory.Factory):
FACTORY_FOR = models.Journal
ctrl_vocabulary = u'decs'
frequency = u'Q'
scielo_issn = u'print'
print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
init_vol = u'1'
title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
short_title = u'ABCD.(São Paulo)'
editorial_standard = u'vancouv'
secs_code = u'6633'
init_year = u'1986'
acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
pub_level = u'CT'
    init_num = u'1'
subject_descriptors = u"""
MEDICINA
CIRURGIA
GASTROENTEROLOGIA
GASTROENTEROLOGIA""".strip()
pub_status = u'current'
pub_status_reason = u'Motivo da mudança é...'
publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
publisher_country = u'BR'
publisher_state = u'SP'
publication_city = u'São Paulo'
editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
editor_email = u'[email protected]'
creator = factory.SubFactory(UserFactory)
pub_status_changed_by = factory.SubFactory(UserFactory)
use_license = factory.SubFactory(UseLicenseFactory)
collection = factory.SubFactory(CollectionFactory)
class SectionFactory(factory.Factory):
FACTORY_FOR = models.Section
code = factory.Sequence(lambda n: 'BJCE%s' % n)
journal = factory.SubFactory(JournalFactory)
class LanguageFactory(factory.Factory):
FACTORY_FOR = models.Language
iso_code = 'pt'
name = 'portuguese'
class IssueTitleFactory(factory.Factory):
"""
``issue`` must be provided
"""
FACTORY_FOR = models.IssueTitle
language = factory.SubFactory(LanguageFactory)
title = u'Bla'
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email = factory.Sequence(lambda n: 'email%[email protected]' % n)
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class DataChangeEventFactory(factory.Factory):
FACTORY_FOR = models.DataChangeEvent
user = factory.SubFactory(UserFactory)
content_object = factory.SubFactory(JournalFactory)
collection = factory.SubFactory(CollectionFactory)
event_type = 'added'
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
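# Usage sketch (standard factory_boy behaviour; the values are illustrative):
#   journal = JournalFactory(title=u'Custom Title')  # other fields keep defaults
# Sequence fields (e.g. print_issn, acronym) yield unique values per call and
# SubFactory fields build their related objects automatically.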
| bsd-2-clause | 7,171,025,614,157,088,000 | 29.806167 | 429 | 0.680967 | false | 3.185877 | false | false | false |
daGrevis/squirrel | ware.py | 1 | 2640 | import inspect
class MiddlewareDuplicationError(Exception):
def __init__(self, middleware_name, middleware_names):
message = ("Middleware `{}` was already found in `{}` middlewares!"
.format(middleware_name, middleware_names))
super().__init__(message)
class MiddlewareMissingError(Exception):
def __init__(self, middleware_name, middleware_names):
message = ("Middleware `{}` wasn't found between `{}` middlewares!"
.format(middleware_name, middleware_names))
super().__init__(message)
class MiddlewareOrderError(Exception):
def __init__(self, middleware_name,
names_for_before_middlewares, names_for_after_middlewares):
message = ("Middleware `{}` can't be added before `{}` middlewares"
" and after `{}` middlewares!"
.format(middleware_name,
names_for_before_middlewares,
names_for_after_middlewares))
super().__init__(message)
class MiddlewareArgumentsError(Exception):
def __init__(self, middleware_name):
message = ("Middleware `{}` has wrong count of arguments!"
.format(middleware_name))
super().__init__(message)
class Ware(object):
    def __init__(self, middlewares=None):
        # Avoid the mutable default argument and keep any (name, callable)
        # pairs that were passed in instead of silently discarding them.
        self.middlewares = list(middlewares) if middlewares else []
def get_names_for_middlewares(self):
return [name for name, _ in self.middlewares]
def add(self, middleware_name, middleware_callable):
if len((inspect.getfullargspec(middleware_callable)).args) != 1:
raise MiddlewareArgumentsError(middleware_name)
names_for_middlewares = self.get_names_for_middlewares()
if middleware_name in names_for_middlewares:
raise MiddlewareDuplicationError(middleware_name,
names_for_middlewares)
(self.middlewares).append((middleware_name, middleware_callable, ))
def remove(self, middleware_name):
names_for_middlewares = self.get_names_for_middlewares()
if middleware_name not in names_for_middlewares:
raise MiddlewareMissingError(middleware_name,
names_for_middlewares)
for i, (name, _) in enumerate(self.middlewares):
if name == middleware_name:
(self.middlewares).pop(i)
break
def run(self, initial_context={}):
context = initial_context
for _, middleware_callable in self.middlewares:
context = middleware_callable(context)
return context
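# Usage sketch (middleware names and behaviour here are hypothetical):
#   ware = Ware()
#   ware.add('auth', lambda context: dict(context, user='guest'))
#   ware.add('render', lambda context: dict(context, body='<html/>'))
#   result = ware.run({'path': '/'})  # context flows through both middlewares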
| mit | -4,876,543,742,609,040,000 | 33.285714 | 76 | 0.6 | false | 4.306688 | false | false | false |
ejekt/rigging-system | Modules/System/groupSelected.py | 1 | 9019 | import maya.cmds as mc
from functools import partial
import os
import System.utils as utils
class GroupSelected:
def __init__(self):
self.objectsToGroup = []
def showUI(self):
# build the grouping GUI
self.findSelectionToGroup()
if len(self.objectsToGroup) == 0:
return
self.dUiElements = {}
if mc.window('groupSelected_UI_window', exists=True):
mc.deleteUI('groupSelected_UI_window')
windowWidth = 300
windowHeight = 150
self.dUiElements['window'] = mc.window('groupSelected_UI_window',
w=windowWidth,
h=windowHeight,
t='Blueprint UI',
sizeable=False,)
self.dUiElements['topLevelColumn'] = mc.columnLayout(adj=True, columnAlign='center', rs=3)
self.dUiElements['groupName_rowColumn'] = mc.rowColumnLayout(nc=2, columnAttach=[1,'right',0], columnWidth=[(1,80), (2,windowWidth-90)])
mc.text(label='Group Name :')
self.dUiElements['groupName'] = mc.textField(text='group')
mc.setParent(self.dUiElements['topLevelColumn'])
self.dUiElements['createAt_rowColumn'] = mc.rowColumnLayout(nc=3, columnAttach=(1,'right',0), columnWidth=[(1,80),(2,windowWidth-170),(3,80)])
# row 1
mc.text(label='Position at :')
mc.text(label='')
mc.text(label='')
# row 2
mc.text(label='')
self.dUiElements['createAtBtn_lastSelected'] = mc.button(l='Last Selected', c=self.createAtLastSelected)
mc.text(label='')
# row 3
mc.text(label='')
self.dUiElements['createAveragePosBtn_lastSelected'] = mc.button(l='Average Position', c=self.createAtAveragePosition)
mc.text(label='')
mc.setParent(self.dUiElements['topLevelColumn'])
mc.separator()
# final row of buttons
columnWidth = (windowWidth/2) - 5
self.dUiElements['buttonRowLayout'] = mc.rowLayout(nc=2,
columnAttach=[(1,'both',10),(2,'both',10)],
columnWidth=[(1,columnWidth),(2,columnWidth)],
columnAlign=[(1,'center'),(2,'center')])
self.dUiElements['acceptBtn'] = mc.button(l='Accept', c=self.acceptWindow)
self.dUiElements['cancelBtn'] = mc.button(l='Cancel', c=self.cancelWindow)
mc.showWindow(self.dUiElements['window'])
self.createTempGroupRepresentation()
self.createAtLastSelected()
mc.select(self.tempGrpTransform, r=True)
mc.setToolTo('moveSuperContext')
def findSelectionToGroup(self):
# filters selection to only contain module transform controls
selectedObjects = mc.ls(sl=True, transforms=True)
self.objectsToGroup = []
for obj in selectedObjects:
valid = False
if obj.find('module_transform') != -1:
splitString = obj.rsplit('module_transform')
if splitString[1] == '':
valid = True
if valid == False and obj.find('Group__') == 0:
valid = True
if valid == True:
self.objectsToGroup.append(obj)
def createTempGroupRepresentation(self):
controlGrpFile = os.environ['RIGGING_TOOL_ROOT'] + '/ControlObjects/Blueprint/controlGroup_control.ma'
mc.file(controlGrpFile, i=True)
self.tempGrpTransform = mc.rename('controlGroup_control', 'Group__tempGroupTransform__')
mc.connectAttr(self.tempGrpTransform+'.sy', self.tempGrpTransform+'.sx')
mc.connectAttr(self.tempGrpTransform+'.sy', self.tempGrpTransform+'.sz')
for attr in ['sx','sz','v']:
mc.setAttr(self.tempGrpTransform+'.'+attr, l=True, k=False)
mc.aliasAttr('globalScale', self.tempGrpTransform+'.sy')
def createAtLastSelected(self, *args):
controlPos = mc.xform(self.objectsToGroup[-1], q=True, ws=True, t=True)
mc.xform(self.tempGrpTransform, ws=True, absolute=True, t=controlPos)
def createAtAveragePosition(self, *args):
controlPos = [0.0,0.0,0.0]
for obj in self.objectsToGroup:
objPos = mc.xform(obj, q=True, ws=True, absolute=True, t=True)
controlPos[0] += objPos[0]
controlPos[1] += objPos[1]
controlPos[2] += objPos[2]
numberOfObjects = len(self.objectsToGroup)
controlPos[0] /= numberOfObjects
controlPos[1] /= numberOfObjects
controlPos[2] /= numberOfObjects
mc.xform(self.tempGrpTransform, ws=True, absolute=True, t=controlPos)
def cancelWindow(self, *args):
mc.deleteUI(self.dUiElements['window'])
mc.delete(self.tempGrpTransform)
def acceptWindow(self, *args):
groupName = mc.textField(self.dUiElements['groupName'], q=True, text=True)
if self.createGroup(groupName) != None:
mc.deleteUI(self.dUiElements['window'])
def createGroup(self, sGroupName):
# check that group of that name doesn't exist yet
fullGroupName = 'Group__' + sGroupName
if mc.objExists(fullGroupName):
            mc.confirmDialog(title='Name Conflict', m='Group \''+sGroupName+'\' already exists', button='Accept', db='Accept')
return None
# rename the tempGroup to the user specified name
groupTransform = mc.rename(self.tempGrpTransform, fullGroupName)
groupContainer = 'group_container'
if not mc.objExists(groupContainer):
mc.container(n=groupContainer)
containers = [groupContainer]
for obj in self.objectsToGroup:
if obj.find('Group__') == 0:
continue
objNamespace = utils.stripLeadingNamespace(obj)[0]
containers.append(objNamespace+':module_container')
for c in containers:
mc.lockNode(c, lock=False, lockUnpublished=False)
if len(self.objectsToGroup) != 0:
tempGroup = mc.group(self.objectsToGroup, absolute=True)
groupParent = mc.listRelatives(tempGroup, parent=True)
if groupParent:
mc.parent(groupTransform, groupParent[0], absolute=True)
mc.parent(self.objectsToGroup, groupTransform, absolute=True)
mc.delete(tempGroup)
self.addGroupToContainer(groupTransform)
for c in containers:
mc.lockNode(c, lock=True, lockUnpublished=True)
mc.setToolTo('moveSuperContext')
mc.select(groupTransform, r=True)
return groupTransform
def addGroupToContainer(self, sGroup):
groupContainer = 'group_container'
utils.addNodeToContainer(groupContainer, sGroup, includeShapes=True)
groupName = sGroup.rpartition('Group__')[2]
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.t', groupName+'_T'])
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.r', groupName+'_R'])
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.globalScale', groupName+'_globalScale'])
def createGroupAtSpecified(self, sName, sTargetGroup, sParent):
self.createTempGroupRepresentation()
pCon = mc.parentConstraint(sTargetGroup, self.tempGrpTransform , mo=False)[0]
mc.delete(pCon)
scale = mc.getAttr(sTargetGroup+'.globalScale')
mc.setAttr(self.tempGrpTransform +'.globalScale', scale)
if sParent:
mc.parent(self.tempGrpTransform , sParent, absolute=True)
newGroup = self.createGroup(sName)
return newGroup
###-------------------------------------------------------------------------------------------
### UNGROUPED SELECTED CLASS
class UngroupSelected:
def __init__(self):
selectedObjects = mc.ls(sl=True, transforms=True)
filteredGroups = []
for obj in selectedObjects:
if obj.find('Group__') == 0:
filteredGroups.append(obj)
# no group selected just exit
if len(filteredGroups) == 0:
return
groupContainer = 'group_container'
# find any modules nested under the selected group
modules = []
for group in filteredGroups:
modules.extend(self.findChildModules(group))
# gather all module containers
moduleContainers = [groupContainer]
for module in modules:
moduleContainer = module + ':module_container'
moduleContainers.append(moduleContainer)
# unlock each container
for container in moduleContainers:
mc.lockNode(container, l=False, lockUnpublished=False)
# ungroup
for group in filteredGroups:
numChildren = len(mc.listRelatives(group, children=True))
if numChildren > 1:
mc.ungroup(group, absolute=True)
for attr in ['t','r','globalScale']:
mc.container(groupContainer, e=True, unbindAndUnpublish=group+'.'+attr)
parentGroup = mc.listRelatives(group, parent=True)
mc.delete(group)
# for the case that a group is left empty
if parentGroup != None:
parentGroup = parentGroup[0]
children = mc.listRelatives(parentGroup, children=True)
children = mc.ls(children, transforms=True)
if len(children) == 0:
mc.select(parentGroup, r=True)
UngroupSelected()
# lock the container
for container in moduleContainers:
if mc.objExists(container):
mc.lockNode(container, l=True, lockUnpublished=True)
def findChildModules(self, sGroup):
modules = []
children = mc.listRelatives(sGroup, children = True)
if children != None:
for child in children:
moduleNamespaceInfo = utils.stripLeadingNamespace(child)
if moduleNamespaceInfo:
modules.append(moduleNamespaceInfo[0])
elif child.find('Group__') != -1:
modules.extend(self.findChildModules(child))
return modules
| mit | -222,559,346,205,777,180 | 29.98227 | 144 | 0.684777 | false | 3.263025 | false | false | false |
ai-se/parGALE | algorithms/serial/gale/gale.py | 1 | 6295 | from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
from utils.lib import *
from algorithms.serial.algorithm import Algorithm
from where import Node, sqrt
__author__ = 'panzer'
def default_settings():
"""
  Default settings for GALE
:return: default settings
"""
return O(
pop_size = 100,
gens = 50,
allowDomination = True,
gamma = 0.15
)
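# Usage sketch (the problem class name is illustrative):
#   gale = GALE(MyProblem(), gens=20, pop_size=50)  # kwargs override defaults
#   best_rows = gale.run()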
class GALE(Algorithm):
count = 0
unsatisfied = 0
"""
  .. [Krall2015] Krall, Menzies et al., "
GALE: Geometric Active Learning for Search-Based Software Engineering"
Check References folder for the paper
"""
def __init__(self, problem, **settings):
"""
Initialize GALE algorithm
:param problem: Instance of the problem
    :param settings: keyword overrides for the default settings (e.g. gens, pop_size)
"""
Algorithm.__init__(self, GALE.__name__, problem)
self.select = self._select
self.evolve = self._evolve
self.recombine = self._recombine
self.settings = default_settings().update(**settings)
def run(self, init_pop=None):
if init_pop is None:
init_pop = self.problem.populate(self.settings.pop_size)
population = Node.format(init_pop)
best_solutions = []
gen = 0
while gen < self.settings.gens:
say(".")
total_evals = 0
# SELECTION
selectees, evals = self.select(population)
solutions, evals = self.get_best(selectees)
best_solutions += solutions
total_evals += evals
# EVOLUTION
selectees, evals = self.evolve(selectees)
total_evals += evals
population, evals = self.recombine(selectees, self.settings.pop_size)
total_evals += evals
gen += 1
print("")
return best_solutions
def get_best(self, non_dom_leaves):
"""
Return the best row from all the
non dominated leaves
:param non_dom_leaves:
:return:
"""
bests = []
evals = 0
for leaf in non_dom_leaves:
east = leaf._pop[0]
west = leaf._pop[-1]
if not east.evaluated:
east.evaluate(self.problem)
evals += 1
if not west.evaluated:
west.evaluate(self.problem)
evals += 1
weights = self.problem.directional_weights()
weighted_west = [c*w for c,w in zip(west.objectives, weights)]
weighted_east = [c*w for c,w in zip(east.objectives, weights)]
objs = self.problem.objectives
west_loss = Algorithm.dominates_continuous(weighted_west,
weighted_east,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
east_loss = Algorithm.dominates_continuous(weighted_east,
weighted_west,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
if east_loss < west_loss:
bests.append(east)
else:
bests.append(west)
return bests, evals
def _select(self, pop):
node = Node(self.problem, pop, self.settings.pop_size).divide(sqrt(pop))
non_dom_leafs = node.nonpruned_leaves()
all_leafs = node.leaves()
# Counting number of evals
evals = 0
for leaf in all_leafs:
for row in leaf._pop:
if row.evaluated:
evals+=1
return non_dom_leafs, evals
def _evolve(self, selected):
evals = 0
GAMMA = self.settings.gamma
for leaf in selected:
#Poles
east = leaf._pop[0]
west = leaf._pop[-1]
# Evaluate poles if required
if not east.evaluated:
east.evaluate(self.problem)
evals += 1
if not west.evaluated:
west.evaluate(self.problem)
evals += 1
weights = self.problem.directional_weights()
weighted_west = [c*w for c,w in zip(west.objectives, weights)]
weighted_east = [c*w for c,w in zip(east.objectives, weights)]
objs = self.problem.objectives
west_loss = Algorithm.dominates_continuous(weighted_west,
weighted_east,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
east_loss = Algorithm.dominates_continuous(weighted_east,
weighted_west,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
# Determine better Pole
if east_loss < west_loss:
south_pole,north_pole = east,west
else:
south_pole,north_pole = west,east
# Magnitude of the mutations
g = abs(south_pole.x - north_pole.x)
for row in leaf._pop:
clone = row.clone()
clone_x = row.x
for dec_index in range(len(self.problem.decisions)):
# Few naming shorthands
me = row.decisions[dec_index]
good = south_pole.decisions[dec_index]
bad = north_pole.decisions[dec_index]
dec = self.problem.decisions[dec_index]
if me > good: d = -1
elif me < good: d = +1
else : d = 0
# Mutating towards the better solution
row.decisions[dec_index] = min(dec.high, max(dec.low, me + me * g * d))
# Project the mutant
a = row.dist(self.problem, north_pole, is_obj=False)
b = row.dist(self.problem, south_pole, is_obj=False)
x = (a**2 + row.c**2 - b**2) / (2*row.c+0.00001)
row.x = x
GALE.count += 1
if abs(x - clone_x) > (g * GAMMA) or not self.problem.check_constraints(row.decisions):
GALE.unsatisfied += 1
row.decisions = clone.decisions
row.x = clone_x
pop = []
for leaf in selected:
for row in leaf._pop:
if row.evaluated:
row.evaluate(self.problem) # Re-evaluating
pop.append(row)
return pop, evals
def _recombine(self, mutants, total_size):
remaining = total_size - len(mutants)
pop = []
for _ in range(remaining):
pop.append(self.problem.generate())
return mutants + Node.format(pop), 0
def _test():
from problems.feature_models.webportal import WebPortal
import time
o = WebPortal()
gale = GALE(o)
start = time.time()
gale.run()
print(time.time() - start)
print(GALE.count, GALE.unsatisfied)
if __name__ == "__main__":
  _test()
| unlicense | -9,061,544,763,114,333,000 | 28.980952 | 95 | 0.581732 | false | 3.485604 | false | false | false |
intake/filesystem_spec | fsspec/implementations/tests/test_ftp.py | 1 | 3434 | import os
import subprocess
import sys
import time
import pytest
import fsspec
from fsspec import open_files
from fsspec.implementations.ftp import FTPFileSystem
here = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def ftp():
pytest.importorskip("pyftpdlib")
P = subprocess.Popen(
[sys.executable, "-m", "pyftpdlib", "-d", here],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
)
try:
time.sleep(1)
yield "localhost", 2121
finally:
P.terminate()
P.wait()
def test_basic(ftp):
host, port = ftp
fs = FTPFileSystem(host, port)
assert fs.ls("/", detail=False) == sorted(os.listdir(here))
out = fs.cat("/" + os.path.basename(__file__))
assert out == open(__file__, "rb").read()
def test_not_cached(ftp):
host, port = ftp
fs = FTPFileSystem(host, port)
fs2 = FTPFileSystem(host, port)
assert fs is not fs2
@pytest.mark.parametrize("cache_type", ["bytes", "mmap"])
def test_complex(ftp_writable, cache_type):
from fsspec.core import BytesCache
host, port, user, pw = ftp_writable
files = open_files(
"ftp:///ou*",
host=host,
port=port,
username=user,
password=pw,
block_size=10000,
cache_type=cache_type,
)
assert len(files) == 1
with files[0] as fo:
assert fo.read(10) == b"hellohello"
if isinstance(fo.cache, BytesCache):
assert len(fo.cache.cache) == 10010
assert fo.read(2) == b"he"
assert fo.tell() == 12
def test_write_small(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
with fs.open("/out2", "wb") as f:
f.write(b"oi")
assert fs.cat("/out2") == b"oi"
def test_with_url(ftp_writable):
host, port, user, pw = ftp_writable
fo = fsspec.open("ftp://{}:{}@{}:{}/out".format(user, pw, host, port), "wb")
with fo as f:
f.write(b"hello")
fo = fsspec.open("ftp://{}:{}@{}:{}/out".format(user, pw, host, port), "rb")
with fo as f:
assert f.read() == b"hello"
@pytest.mark.parametrize("cache_type", ["bytes", "mmap"])
def test_write_big(ftp_writable, cache_type):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw, block_size=1000, cache_type=cache_type)
fn = "/bigger"
with fs.open(fn, "wb") as f:
f.write(b"o" * 500)
assert not fs.exists(fn)
f.write(b"o" * 1000)
fs.invalidate_cache()
assert fs.exists(fn)
f.write(b"o" * 200)
f.flush()
assert fs.info(fn)["size"] == 1700
assert fs.cat(fn) == b"o" * 1700
def test_transaction(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
fs.mkdir("/tmp")
fn = "/tr"
with fs.transaction:
with fs.open(fn, "wb") as f:
f.write(b"not")
assert not fs.exists(fn)
assert fs.exists(fn)
assert fs.cat(fn) == b"not"
fs.rm(fn)
assert not fs.exists(fn)
def test_transaction_with_cache(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
fs.mkdir("/tmp")
fs.mkdir("/tmp/dir")
assert "dir" in fs.ls("/tmp", detail=False)
with fs.transaction:
fs.rmdir("/tmp/dir")
assert "dir" not in fs.ls("/tmp", detail=False)
assert not fs.exists("/tmp/dir")
| bsd-3-clause | 1,466,260,043,482,891,300 | 25.015152 | 84 | 0.586488 | false | 3.15625 | true | false | false |
google/importlab | importlab/import_finder.py | 1 | 5424 | # NOTE: Do not add any dependencies to this file - it needs to be run in a
# subprocess by a python version that might not have any installed packages,
# including importlab itself.
from __future__ import print_function
import ast
import json
import os
import sys
# Pytype doesn't recognize the `major` attribute:
# https://github.com/google/pytype/issues/127.
if sys.version_info[0] >= 3:
# Note that `import importlib` does not work: accessing `importlib.util`
# will give an attribute error. This is hard to reproduce in a unit test but
# can be seen by installing importlab in a Python 3 environment and running
# `importlab --tree --trim` on a file that imports one of:
# * jsonschema (`pip install jsonschema`)
# * pytype (`pip install pytype`),
# * dotenv (`pip install python-dotenv`)
# * IPython (`pip install ipython`)
# A correct output will look like:
# Reading 1 files
# Source tree:
# + foo.py
# :: jsonschema/__init__.py
# An incorrect output will be missing the line with the import.
import importlib.util
else:
import imp
class ImportFinder(ast.NodeVisitor):
"""Walk an AST collecting import statements."""
def __init__(self):
# tuples of (name, alias, is_from, is_star)
self.imports = []
def visit_Import(self, node):
for alias in node.names:
self.imports.append((alias.name, alias.asname, False, False))
def visit_ImportFrom(self, node):
module_name = '.'*node.level + (node.module or '')
for alias in node.names:
if alias.name == '*':
self.imports.append((module_name, alias.asname, True, True))
else:
if not module_name.endswith('.'):
module_name = module_name + '.'
name = module_name + alias.name
asname = alias.asname or alias.name
self.imports.append((name, asname, True, False))
def _find_package(parts):
"""Helper function for _resolve_import_versioned."""
for i in range(len(parts), 0, -1):
prefix = '.'.join(parts[0:i])
if prefix in sys.modules:
return i, sys.modules[prefix]
return 0, None
def is_builtin(name):
return name in sys.builtin_module_names or name.startswith("__future__")
# Pytype doesn't recognize the `major` attribute:
# https://github.com/google/pytype/issues/127.
if sys.version_info[0] < 3:
def _resolve_import_versioned(name):
"""Python 2 helper function for resolve_import."""
parts = name.split('.')
i, mod = _find_package(parts)
if mod:
if hasattr(mod, '__file__'):
path = [os.path.dirname(mod.__file__)]
elif hasattr(mod, '__path__'):
path = mod.__path__
else:
path = None
else:
path = None
for part in parts[i:]:
try:
if path:
spec = imp.find_module(part, [path])
else:
spec = imp.find_module(part)
except ImportError:
return None
path = spec[1]
return path
else:
def _resolve_import_versioned(name):
"""Python 3 helper function for resolve_import."""
try:
spec = importlib.util.find_spec(name)
return spec and spec.origin
except Exception:
# find_spec may re-raise an arbitrary exception encountered while
# inspecting a module. Since we aren't able to get the file path in
# this case, we consider the import unresolved.
return None
def _resolve_import(name):
"""Helper function for resolve_import."""
if name in sys.modules:
return getattr(sys.modules[name], '__file__', name + '.so')
return _resolve_import_versioned(name)
def resolve_import(name, is_from, is_star):
"""Use python to resolve an import.
Args:
name: The fully qualified module name.
Returns:
The path to the module source file or None.
"""
# Don't try to resolve relative imports or builtins here; they will be
# handled by resolve.Resolver
if name.startswith('.') or is_builtin(name):
return None
ret = _resolve_import(name)
if ret is None and is_from and not is_star:
package, _ = name.rsplit('.', 1)
ret = _resolve_import(package)
return ret
def get_imports(filename):
"""Get all the imports in a file.
Each import is a tuple of:
(name, alias, is_from, is_star, source_file)
"""
with open(filename, "rb") as f:
src = f.read()
finder = ImportFinder()
finder.visit(ast.parse(src, filename=filename))
imports = []
for i in finder.imports:
name, _, is_from, is_star = i
imports.append(i + (resolve_import(name, is_from, is_star),))
return imports
def print_imports(filename):
"""Print imports in csv format to stdout."""
print(json.dumps(get_imports(filename)))
def read_imports(imports_str):
"""Print imports in csv format to stdout."""
return json.loads(imports_str)
if __name__ == "__main__":
# This is used to parse a file with a different python version, launching a
# subprocess and communicating with it via reading stdout.
filename = sys.argv[1]
print_imports(filename)
| apache-2.0 | 7,626,106,175,522,213,000 | 31.285714 | 80 | 0.599742 | false | 3.994109 | false | false | false |
Drvanon/Game | venv/lib/python3.3/site-packages/sqlalchemy/sql/visitors.py | 1 | 10003 | # sql/visitors.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaption, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
"""
from collections import deque
from .. import util
import operator
from .. import exc
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
"""Metaclass which assigns a `_compiler_dispatch` method to classes
having a `__visit_name__` attribute.
The _compiler_dispatch attribute becomes an instance method which
looks approximately like the following::
def _compiler_dispatch (self, visitor, **kw):
'''Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.'''
visit_attr = 'visit_%s' % self.__visit_name__
return getattr(visitor, visit_attr)(self, **kw)
Classes having no __visit_name__ attribute will remain unaffected.
"""
def __init__(cls, clsname, bases, clsdict):
if cls.__name__ == 'Visitable' or not hasattr(cls, '__visit_name__'):
super(VisitableType, cls).__init__(clsname, bases, clsdict)
return
_generate_dispatch(cls)
super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
"""Return an optimized visit dispatch function for the cls
for use by the compiler.
"""
if '__visit_name__' in cls.__dict__:
visit_name = cls.__visit_name__
if isinstance(visit_name, str):
# There is an optimization opportunity here because the
# the string name of the class's __visit_name__ is known at
# this early stage (import time) so it can be pre-constructed.
getter = operator.attrgetter("visit_%s" % visit_name)
def _compiler_dispatch(self, visitor, **kw):
try:
meth = getter(visitor)
except AttributeError:
raise exc.UnsupportedCompilationError(visitor, cls)
else:
return meth(self, **kw)
else:
# The optimization opportunity is lost for this case because the
# __visit_name__ is not yet a string. As a result, the visit
# string has to be recalculated with each compilation.
def _compiler_dispatch(self, visitor, **kw):
visit_attr = 'visit_%s' % self.__visit_name__
try:
meth = getattr(visitor, visit_attr)
except AttributeError:
raise exc.UnsupportedCompilationError(visitor, cls)
else:
return meth(self, **kw)
_compiler_dispatch.__doc__ = \
"""Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.
"""
cls._compiler_dispatch = _compiler_dispatch
class Visitable(object, metaclass=VisitableType):
"""Base class for visitable objects, applies the
``VisitableType`` metaclass.
"""
class ClauseVisitor(object):
"""Base class for visitor objects which can traverse using
the traverse() function.
"""
__traverse_options__ = {}
def traverse_single(self, obj, **kw):
for v in self._visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(self, obj):
"""traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith('visit_'):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def _visitor_iterator(self):
"""iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, '_next', None)
def chain(self, visitor):
"""'chain' an additional ClauseVisitor onto this ClauseVisitor.
the chained visitor will receive all visit events after this one.
"""
tail = list(self._visitor_iterator)[-1]
tail._next = visitor
return self
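# Usage sketch (a hypothetical collector; ``visit_<name>`` methods are matched
# against each element's __visit_name__):
#   class ColumnCollector(ClauseVisitor):
#       def __init__(self):
#           self.columns = []
#       def visit_column(self, column):
#           self.columns.append(column)
#   ColumnCollector().traverse(some_clause_element)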
class CloningVisitor(ClauseVisitor):
"""Base class for visitor objects which can traverse using
the cloned_traverse() function.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
"""Base class for visitor objects which can traverse using
the replacement_traverse() function.
"""
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
"""traverse and visit the given expression structure."""
def replace(elem):
for v in self._visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be breadth-first.
"""
stack = deque([obj])
while stack:
t = stack.popleft()
yield t
for c in t.get_children(**opts):
stack.append(c)
def iterate_depthfirst(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be depth-first.
"""
stack = deque([obj])
traversal = deque()
while stack:
t = stack.pop()
traversal.appendleft(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
def traverse_using(iterator, obj, visitors):
"""visit the given expression structure using the given iterator of
objects.
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""traverse and visit the given expression structure using the default
iterator.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
"""traverse and visit the given expression structure using the
depth-first iterator.
"""
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""clone the given expression structure, allowing
modifications by visitors."""
cloned = util.column_dict()
stop_on = util.column_set(opts.get('stop_on', []))
def clone(elem):
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
cloned[id(elem)] = newelem = elem._clone()
newelem._copy_internals(clone=clone)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(obj)
return obj
def replacement_traverse(obj, opts, replace):
"""clone the given expression structure, allowing element
replacement by a given replacement function."""
cloned = util.column_dict()
stop_on = util.column_set([id(x) for x in opts.get('stop_on', [])])
def clone(elem, **kw):
if id(elem) in stop_on or \
'no_replacement_traverse' in elem._annotations:
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
if elem not in cloned:
cloned[elem] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
return cloned[elem]
if obj is not None:
obj = clone(obj, **opts)
return obj
| apache-2.0 | 6,985,493,977,411,155,000 | 30.755556 | 84 | 0.615615 | false | 4.356707 | false | false | false |
tedlaz/pyted | tests/pyappgen/pyappgen/qtreports.py | 1 | 3002 | # -*- coding: utf-8 -*-
'''
Created on 2014-01-24
@author: tedlaz
'''
from PyQt4 import QtGui, Qt
class rptDlg(QtGui.QDialog):
def __init__(self,html=u'Δοκιμή',title='Document1',parent=None):
super(rptDlg, self).__init__(parent)
self.setAttribute(Qt.Qt.WA_DeleteOnClose)
self.odtName = '%s.odt' % title
self.pdfName = '%s.pdf' % title
self.setWindowTitle(title)
self.editor = QtGui.QTextEdit(self)
self.editor.setFont(QtGui.QFont('Arial',12))
self.buttonPdf = QtGui.QPushButton(u'Εξαγωγή σε pdf', self)
self.buttonPdf.clicked.connect(self.saveAsPdf)
self.buttonOdt = QtGui.QPushButton(u'Εξαγωγή σε odt', self)
self.buttonOdt.clicked.connect(self.saveAsOdt)
self.buttonPreview = QtGui.QPushButton(u'Προεπισκόπιση', self)
self.buttonPreview.clicked.connect(self.handlePreview)
layout = QtGui.QGridLayout(self)
layout.addWidget(self.editor, 0, 0, 1, 3)
layout.addWidget(self.buttonPdf, 1, 0)
layout.addWidget(self.buttonOdt, 1, 1)
layout.addWidget(self.buttonPreview, 1, 2)
self.editor.setHtml(html)
def handlePrint(self):
dialog = QtGui.QPrintDialog()
if dialog.exec_() == QtGui.QDialog.Accepted:
self.editor.document().print_(dialog.printer())
def saveAsPdf(self):
fname = '%s' % QtGui.QFileDialog.getSaveFileName(self,
u"Αποθήκευση σε μορφή pdf",
self.pdfName,
"pdf (*.pdf)")
if fname:
printer = QtGui.QPrinter()
printer.setOutputFormat(QtGui.QPrinter.PdfFormat)
printer.setOutputFileName(fname)
self.editor.document().print_(printer)
def saveAsOdt(self):
fname = '%s' % QtGui.QFileDialog.getSaveFileName(self,
u"Αποθήκευση σε μορφή Libre Office (odt)",
self.odtName,
"Libre Office (*.odt)")
if fname:
doc = QtGui.QTextDocument()
cursor = QtGui.QTextCursor(doc)
cursor.insertHtml(self.editor.toHtml())
writer = QtGui.QTextDocumentWriter()
odf_format = writer.supportedDocumentFormats()[1]
writer.setFormat(odf_format)
writer.setFileName(fname)
writer.write(doc)
def handlePreview(self):
dialog = QtGui.QPrintPreviewDialog()
dialog.paintRequested.connect(self.editor.print_)
dialog.exec_()
if __name__ == "__main__":
import sys
import test_printHtml
app = QtGui.QApplication(sys.argv)
window = rptDlg(test_printHtml.toHtml(),test_printHtml.reportTitle)
window.resize(640, 480)
window.show()
    sys.exit(app.exec_())
| gpl-3.0 | 1,066,857,860,350,338,000 | 32.104651 | 71 | 0.574207 | false | 3.468639 | false | false | false |
libvirt/autotest | client/common_lib/magic.py | 1 | 58623 | #!/usr/bin/python
"""
Library used to determine a file MIME type by its magic number; it doesn't have
any external dependencies. Based on the work of Jason Petrone ([email protected]),
adapted to autotest.
Command Line Usage: Running as 'python magic.py file_path' will print a
mime string (or just a description) of the file present on file_path.
API Usage:
magic.guess_type(file_path) - Returns a description of what the file on
path 'file' contains. This function name was chosen due to a similar
function on python standard library 'mimetypes'.
@license: GPL v2
@copyright: Jason Petrone ([email protected]) 2000
@copyright: Lucas Meneghel Rodrigues ([email protected]) 2010
@see: http://www.jsnp.net/code/magic.py
"""
import logging, optparse, os, re, sys, string, struct
import logging_config, logging_manager
def _str_to_num(n):
"""
Convert a hex or octal string to a decimal number.
@param n: Hex or octal string to be converted.
@return: Resulting decimal number.
"""
val = 0
col = long(1)
if n[:1] == 'x': n = '0' + n
if n[:2] == '0x':
# hex
n = string.lower(n[2:])
while len(n) > 0:
l = n[len(n) - 1]
val = val + string.hexdigits.index(l) * col
col = col * 16
n = n[:len(n)-1]
elif n[0] == '\\':
# octal
n = n[1:]
while len(n) > 0:
l = n[len(n) - 1]
if ord(l) < 48 or ord(l) > 57:
break
val = val + int(l) * col
col = col * 8
n = n[:len(n)-1]
else:
val = string.atol(n)
return val
class MagicLoggingConfig(logging_config.LoggingConfig):
def configure_logging(self, results_dir=None, verbose=False):
super(MagicLoggingConfig, self).configure_logging(use_console=True,
verbose=verbose)
class MagicTest(object):
"""
Compile a magic database entry so it can be compared with data read from
files.
"""
def __init__(self, offset, t, op, value, msg, mask=None):
"""
Reads magic database data. Maps the list fields into class attributes.
@param offset: Offset from start of the file.
@param t: Type of the magic data.
@param op: Operation to be performed when comparing the data.
@param value: Expected value of the magic data for a given data type.
@param msg: String representing the file mimetype.
"""
if t.count('&') > 0:
mask = _str_to_num(t[t.index('&')+1:])
t = t[:t.index('&')]
if type(offset) == type('a'):
self.offset = _str_to_num(offset)
else:
self.offset = offset
self.type = t
self.msg = msg
self.subTests = []
self.op = op
self.mask = mask
self.value = value
def test(self, data):
"""
Compare data read from file with self.value if operator is '='.
@param data: Data read from the file.
@return: None if no match between data and expected value string. Else,
print matching mime type information.
"""
if self.mask:
data = data & self.mask
if self.op == '=':
if self.value == data:
return self.msg
elif self.op == '<':
pass
elif self.op == '>':
pass
elif self.op == '&':
pass
elif self.op == '^':
pass
return None
def compare(self, data):
"""
Compare data read from the file with the expected data for this
particular mime type register.
@param data: Data read from the file.
"""
try:
if self.type == 'string':
c = ''; s = ''
for i in range(0, len(self.value)+1):
if i + self.offset > len(data) - 1: break
s = s + c
[c] = struct.unpack('c', data[self.offset + i])
data = s
elif self.type == 'short':
[data] = struct.unpack('h', data[self.offset:self.offset + 2])
elif self.type == 'leshort':
[data] = struct.unpack('<h', data[self.offset:self.offset + 2])
elif self.type == 'beshort':
[data] = struct.unpack('>H', data[self.offset:self.offset + 2])
elif self.type == 'long':
[data] = struct.unpack('l', data[self.offset:self.offset + 4])
elif self.type == 'lelong':
[data] = struct.unpack('<l', data[self.offset:self.offset + 4])
elif self.type == 'belong':
[data] = struct.unpack('>l', data[self.offset:self.offset + 4])
else:
pass
except Exception:
return None
return self.test(data)
magic_database = [
[0L, 'leshort', '=', 1538L, 'application/x-alan-adventure-game'],
[0L, 'string', '=', 'TADS', 'application/x-tads-game'],
[0L, 'short', '=', 420L, 'application/x-executable-file'],
[0L, 'short', '=', 421L, 'application/x-executable-file'],
[0L, 'leshort', '=', 603L, 'application/x-executable-file'],
[0L, 'string', '=', 'Core\001', 'application/x-executable-file'],
[0L, 'string', '=', 'AMANDA: TAPESTART DATE', 'application/x-amanda-header'],
[0L, 'belong', '=', 1011L, 'application/x-executable-file'],
[0L, 'belong', '=', 999L, 'application/x-library-file'],
[0L, 'belong', '=', 435L, 'video/mpeg'],
[0L, 'belong', '=', 442L, 'video/mpeg'],
[0L, 'beshort&0xfff0', '=', 65520L, 'audio/mpeg'],
[4L, 'leshort', '=', 44817L, 'video/fli'],
[4L, 'leshort', '=', 44818L, 'video/flc'],
[0L, 'string', '=', 'MOVI', 'video/x-sgi-movie'],
[4L, 'string', '=', 'moov', 'video/quicktime'],
[4L, 'string', '=', 'mdat', 'video/quicktime'],
[0L, 'long', '=', 100554L, 'application/x-apl-workspace'],
[0L, 'string', '=', 'FiLeStArTfIlEsTaRt', 'text/x-apple-binscii'],
[0L, 'string', '=', '\012GL', 'application/data'],
[0L, 'string', '=', 'v\377', 'application/data'],
[0L, 'string', '=', 'NuFile', 'application/data'],
[0L, 'string', '=', 'N\365F\351l\345', 'application/data'],
[0L, 'belong', '=', 333312L, 'application/data'],
[0L, 'belong', '=', 333319L, 'application/data'],
[257L, 'string', '=', 'ustar\000', 'application/x-tar'],
[257L, 'string', '=', 'ustar \000', 'application/x-gtar'],
[0L, 'short', '=', 70707L, 'application/x-cpio'],
[0L, 'short', '=', 143561L, 'application/x-bcpio'],
[0L, 'string', '=', '070707', 'application/x-cpio'],
[0L, 'string', '=', '070701', 'application/x-cpio'],
[0L, 'string', '=', '070702', 'application/x-cpio'],
[0L, 'string', '=', '!<arch>\012debian', 'application/x-dpkg'],
[0L, 'string', '=', '\xed\xab\xee\xdb', 'application/x-rpm'],
[0L, 'long', '=', 177555L, 'application/x-ar'],
[0L, 'short', '=', 177555L, 'application/data'],
[0L, 'long', '=', 177545L, 'application/data'],
[0L, 'short', '=', 177545L, 'application/data'],
[0L, 'long', '=', 100554L, 'application/x-apl-workspace'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'string', '=', '!<arch>\012__________E', 'application/x-ar'],
[0L, 'string', '=', '-h-', 'application/data'],
[0L, 'string', '=', '!<arch>', 'application/x-ar'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'belong', '=', 1711210496L, 'application/x-ar'],
[0L, 'belong', '=', 1013019198L, 'application/x-ar'],
[0L, 'long', '=', 557605234L, 'application/x-ar'],
[0L, 'lelong', '=', 177555L, 'application/data'],
[0L, 'leshort', '=', 177555L, 'application/data'],
[0L, 'lelong', '=', 177545L, 'application/data'],
[0L, 'leshort', '=', 177545L, 'application/data'],
[0L, 'lelong', '=', 236525L, 'application/data'],
[0L, 'lelong', '=', 236526L, 'application/data'],
[0L, 'lelong&0x8080ffff', '=', 2074L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 2330L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 538L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 794L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 1050L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 1562L, 'application/x-arc'],
[0L, 'string', '=', '\032archive', 'application/data'],
[0L, 'leshort', '=', 60000L, 'application/x-arj'],
[0L, 'string', '=', 'HPAK', 'application/data'],
[0L, 'string', '=', '\351,\001JAM application/data', ''],
[2L, 'string', '=', '-lh0-', 'application/x-lha'],
[2L, 'string', '=', '-lh1-', 'application/x-lha'],
[2L, 'string', '=', '-lz4-', 'application/x-lha'],
[2L, 'string', '=', '-lz5-', 'application/x-lha'],
[2L, 'string', '=', '-lzs-', 'application/x-lha'],
[2L, 'string', '=', '-lh -', 'application/x-lha'],
[2L, 'string', '=', '-lhd-', 'application/x-lha'],
[2L, 'string', '=', '-lh2-', 'application/x-lha'],
[2L, 'string', '=', '-lh3-', 'application/x-lha'],
[2L, 'string', '=', '-lh4-', 'application/x-lha'],
[2L, 'string', '=', '-lh5-', 'application/x-lha'],
[0L, 'string', '=', 'Rar!', 'application/x-rar'],
[0L, 'string', '=', 'SQSH', 'application/data'],
[0L, 'string', '=', 'UC2\032', 'application/data'],
[0L, 'string', '=', 'PK\003\004', 'application/zip'],
[20L, 'lelong', '=', 4257523676L, 'application/x-zoo'],
[10L, 'string', '=', '# This is a shell archive', 'application/x-shar'],
[0L, 'string', '=', '*STA', 'application/data'],
[0L, 'string', '=', '2278', 'application/data'],
[0L, 'beshort', '=', 560L, 'application/x-executable-file'],
[0L, 'beshort', '=', 561L, 'application/x-executable-file'],
[0L, 'string', '=', '\000\004\036\212\200', 'application/core'],
[0L, 'string', '=', '.snd', 'audio/basic'],
[0L, 'lelong', '=', 6583086L, 'audio/basic'],
[0L, 'string', '=', 'MThd', 'audio/midi'],
[0L, 'string', '=', 'CTMF', 'audio/x-cmf'],
[0L, 'string', '=', 'SBI', 'audio/x-sbi'],
[0L, 'string', '=', 'Creative Voice File', 'audio/x-voc'],
[0L, 'belong', '=', 1314148939L, 'audio/x-multitrack'],
[0L, 'string', '=', 'RIFF', 'audio/x-wav'],
[0L, 'string', '=', 'EMOD', 'audio/x-emod'],
[0L, 'belong', '=', 779248125L, 'audio/x-pn-realaudio'],
[0L, 'string', '=', 'MTM', 'audio/x-multitrack'],
[0L, 'string', '=', 'if', 'audio/x-669-mod'],
[0L, 'string', '=', 'FAR', 'audio/mod'],
[0L, 'string', '=', 'MAS_U', 'audio/x-multimate-mod'],
[44L, 'string', '=', 'SCRM', 'audio/x-st3-mod'],
[0L, 'string', '=', 'GF1PATCH110\000ID#000002\000', 'audio/x-gus-patch'],
[0L, 'string', '=', 'GF1PATCH100\000ID#000002\000', 'audio/x-gus-patch'],
[0L, 'string', '=', 'JN', 'audio/x-669-mod'],
[0L, 'string', '=', 'UN05', 'audio/x-mikmod-uni'],
[0L, 'string', '=', 'Extended Module:', 'audio/x-ft2-mod'],
[21L, 'string', '=', '!SCREAM!', 'audio/x-st2-mod'],
[1080L, 'string', '=', 'M.K.', 'audio/x-protracker-mod'],
[1080L, 'string', '=', 'M!K!', 'audio/x-protracker-mod'],
[1080L, 'string', '=', 'FLT4', 'audio/x-startracker-mod'],
[1080L, 'string', '=', '4CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', '6CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', '8CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', 'CD81', 'audio/x-oktalyzer-mod'],
[1080L, 'string', '=', 'OKTA', 'audio/x-oktalyzer-mod'],
[1080L, 'string', '=', '16CN', 'audio/x-taketracker-mod'],
[1080L, 'string', '=', '32CN', 'audio/x-taketracker-mod'],
[0L, 'string', '=', 'TOC', 'audio/x-toc'],
[0L, 'short', '=', 3401L, 'application/x-executable-file'],
[0L, 'long', '=', 406L, 'application/x-executable-file'],
[0L, 'short', '=', 406L, 'application/x-executable-file'],
[0L, 'short', '=', 3001L, 'application/x-executable-file'],
[0L, 'lelong', '=', 314L, 'application/x-executable-file'],
[0L, 'string', '=', '//', 'text/cpp'],
[0L, 'string', '=', '\\\\1cw\\', 'application/data'],
[0L, 'string', '=', '\\\\1cw', 'application/data'],
[0L, 'belong&0xffffff00', '=', 2231440384L, 'application/data'],
[0L, 'belong&0xffffff00', '=', 2231487232L, 'application/data'],
[0L, 'short', '=', 575L, 'application/x-executable-file'],
[0L, 'short', '=', 577L, 'application/x-executable-file'],
[4L, 'string', '=', 'pipe', 'application/data'],
[4L, 'string', '=', 'prof', 'application/data'],
[0L, 'string', '=', ': shell', 'application/data'],
[0L, 'string', '=', '#!/bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#!/bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#!/bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#! /bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#! /bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#!/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#!/usr/local/bin/ash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/ash', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/ash', 'application/x-zsh'],
[0L, 'string', '=', '#!/usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#! /usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#! /usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#!/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', 'BEGIN', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/bin/env python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/env python', 'application/x-python'],
[0L, 'string', '=', '#!/bin/rc', 'text/script'],
[0L, 'string', '=', '#! /bin/rc', 'text/script'],
[0L, 'string', '=', '#! /bin/rc', 'text/script'],
[0L, 'string', '=', '#!/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#!/usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /', 'text/script'],
[0L, 'string', '=', '#! /', 'text/script'],
[0L, 'string', '=', '#!/', 'text/script'],
[0L, 'string', '=', '#! text/script', ''],
[0L, 'string', '=', '\037\235', 'application/compress'],
[0L, 'string', '=', '\037\213', 'application/x-gzip'],
[0L, 'string', '=', '\037\036', 'application/data'],
[0L, 'short', '=', 17437L, 'application/data'],
[0L, 'short', '=', 8191L, 'application/data'],
[0L, 'string', '=', '\377\037', 'application/data'],
[0L, 'short', '=', 145405L, 'application/data'],
[0L, 'string', '=', 'BZh', 'application/x-bzip2'],
[0L, 'leshort', '=', 65398L, 'application/data'],
[0L, 'leshort', '=', 65142L, 'application/data'],
[0L, 'leshort', '=', 64886L, 'application/x-lzh'],
[0L, 'string', '=', '\037\237', 'application/data'],
[0L, 'string', '=', '\037\236', 'application/data'],
[0L, 'string', '=', '\037\240', 'application/data'],
[0L, 'string', '=', 'BZ', 'application/x-bzip'],
[0L, 'string', '=', '\211LZO\000\015\012\032\012', 'application/data'],
[0L, 'belong', '=', 507L, 'application/x-object-file'],
[0L, 'belong', '=', 513L, 'application/x-executable-file'],
[0L, 'belong', '=', 515L, 'application/x-executable-file'],
[0L, 'belong', '=', 517L, 'application/x-executable-file'],
[0L, 'belong', '=', 70231L, 'application/core'],
[24L, 'belong', '=', 60011L, 'application/data'],
[24L, 'belong', '=', 60012L, 'application/data'],
[24L, 'belong', '=', 60013L, 'application/data'],
[24L, 'belong', '=', 60014L, 'application/data'],
[0L, 'belong', '=', 601L, 'application/x-object-file'],
[0L, 'belong', '=', 607L, 'application/data'],
[0L, 'belong', '=', 324508366L, 'application/x-gdbm'],
[0L, 'lelong', '=', 324508366L, 'application/x-gdbm'],
[0L, 'string', '=', 'GDBM', 'application/x-gdbm'],
[0L, 'belong', '=', 398689L, 'application/x-db'],
[0L, 'belong', '=', 340322L, 'application/x-db'],
[0L, 'string', '=', '<list>\012<protocol bbn-m', 'application/data'],
[0L, 'string', '=', 'diff text/x-patch', ''],
[0L, 'string', '=', '*** text/x-patch', ''],
[0L, 'string', '=', 'Only in text/x-patch', ''],
[0L, 'string', '=', 'Common subdirectories: text/x-patch', ''],
[0L, 'string', '=', '!<arch>\012________64E', 'application/data'],
[0L, 'leshort', '=', 387L, 'application/x-executable-file'],
[0L, 'leshort', '=', 392L, 'application/x-executable-file'],
[0L, 'leshort', '=', 399L, 'application/x-object-file'],
[0L, 'string', '=', '\377\377\177', 'application/data'],
[0L, 'string', '=', '\377\377|', 'application/data'],
[0L, 'string', '=', '\377\377~', 'application/data'],
[0L, 'string', '=', '\033c\033', 'application/data'],
[0L, 'long', '=', 4553207L, 'image/x11'],
[0L, 'string', '=', '!<PDF>!\012', 'application/x-prof'],
[0L, 'short', '=', 1281L, 'application/x-locale'],
[24L, 'belong', '=', 60012L, 'application/x-dump'],
[24L, 'belong', '=', 60011L, 'application/x-dump'],
[24L, 'lelong', '=', 60012L, 'application/x-dump'],
[24L, 'lelong', '=', 60011L, 'application/x-dump'],
[0L, 'string', '=', '\177ELF', 'application/x-executable-file'],
[0L, 'short', '=', 340L, 'application/data'],
[0L, 'short', '=', 341L, 'application/x-executable-file'],
[1080L, 'leshort', '=', 61267L, 'application/x-linux-ext2fs'],
[0L, 'string', '=', '\366\366\366\366', 'application/x-pc-floppy'],
[774L, 'beshort', '=', 55998L, 'application/data'],
[510L, 'leshort', '=', 43605L, 'application/data'],
[1040L, 'leshort', '=', 4991L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 5007L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 9320L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 9336L, 'application/x-filesystem'],
[0L, 'string', '=', '-rom1fs-\000', 'application/x-filesystem'],
[395L, 'string', '=', 'OS/2', 'application/x-bootable'],
[0L, 'string', '=', 'FONT', 'font/x-vfont'],
[0L, 'short', '=', 436L, 'font/x-vfont'],
[0L, 'short', '=', 17001L, 'font/x-vfont'],
[0L, 'string', '=', '%!PS-AdobeFont-1.0', 'font/type1'],
[6L, 'string', '=', '%!PS-AdobeFont-1.0', 'font/type1'],
[0L, 'belong', '=', 4L, 'font/x-snf'],
[0L, 'lelong', '=', 4L, 'font/x-snf'],
[0L, 'string', '=', 'STARTFONT font/x-bdf', ''],
[0L, 'string', '=', '\001fcp', 'font/x-pcf'],
[0L, 'string', '=', 'D1.0\015', 'font/x-speedo'],
[0L, 'string', '=', 'flf', 'font/x-figlet'],
[0L, 'string', '=', 'flc', 'application/x-font'],
[0L, 'belong', '=', 335698201L, 'font/x-libgrx'],
[0L, 'belong', '=', 4282797902L, 'font/x-dos'],
[7L, 'belong', '=', 4540225L, 'font/x-dos'],
[7L, 'belong', '=', 5654852L, 'font/x-dos'],
[4098L, 'string', '=', 'DOSFONT', 'font/x-dos'],
[0L, 'string', '=', '<MakerFile', 'application/x-framemaker'],
[0L, 'string', '=', '<MIFFile', 'application/x-framemaker'],
[0L, 'string', '=', '<MakerDictionary', 'application/x-framemaker'],
[0L, 'string', '=', '<MakerScreenFont', 'font/x-framemaker'],
[0L, 'string', '=', '<MML', 'application/x-framemaker'],
[0L, 'string', '=', '<BookFile', 'application/x-framemaker'],
[0L, 'string', '=', '<Maker', 'application/x-framemaker'],
[0L, 'lelong&0377777777', '=', 41400407L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400410L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400413L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400314L, 'application/x-executable-file'],
[7L, 'string', '=', '\357\020\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000', 'application/core'],
[0L, 'lelong', '=', 11421044151L, 'application/data'],
[0L, 'string', '=', 'GIMP Gradient', 'application/x-gimp-gradient'],
[0L, 'string', '=', 'gimp xcf', 'application/x-gimp-image'],
[20L, 'string', '=', 'GPAT', 'application/x-gimp-pattern'],
[20L, 'string', '=', 'GIMP', 'application/x-gimp-brush'],
[0L, 'string', '=', '\336\022\004\225', 'application/x-locale'],
[0L, 'string', '=', '\225\004\022\336', 'application/x-locale'],
[0L, 'beshort', '=', 627L, 'application/x-executable-file'],
[0L, 'beshort', '=', 624L, 'application/x-executable-file'],
[0L, 'string', '=', '\000\001\000\000\000', 'font/ttf'],
[0L, 'long', '=', 1203604016L, 'application/data'],
[0L, 'long', '=', 1702407010L, 'application/data'],
[0L, 'long', '=', 1003405017L, 'application/data'],
[0L, 'long', '=', 1602007412L, 'application/data'],
[0L, 'belong', '=', 34603270L, 'application/x-object-file'],
[0L, 'belong', '=', 34603271L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603272L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603275L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603278L, 'application/x-library-file'],
[0L, 'belong', '=', 34603277L, 'application/x-library-file'],
[0L, 'belong', '=', 34865414L, 'application/x-object-file'],
[0L, 'belong', '=', 34865415L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865416L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865419L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865422L, 'application/x-library-file'],
[0L, 'belong', '=', 34865421L, 'application/x-object-file'],
[0L, 'belong', '=', 34275590L, 'application/x-object-file'],
[0L, 'belong', '=', 34275591L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275592L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275595L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275598L, 'application/x-library-file'],
[0L, 'belong', '=', 34275597L, 'application/x-library-file'],
[0L, 'belong', '=', 557605234L, 'application/x-ar'],
[0L, 'long', '=', 34078982L, 'application/x-executable-file'],
[0L, 'long', '=', 34078983L, 'application/x-executable-file'],
[0L, 'long', '=', 34078984L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341128L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341127L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341131L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341126L, 'application/x-executable-file'],
[0L, 'belong', '=', 34210056L, 'application/x-executable-file'],
[0L, 'belong', '=', 34210055L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341134L, 'application/x-library-file'],
[0L, 'belong', '=', 34341133L, 'application/x-library-file'],
[0L, 'long', '=', 65381L, 'application/x-library-file'],
[0L, 'long', '=', 34275173L, 'application/x-library-file'],
[0L, 'long', '=', 34406245L, 'application/x-library-file'],
[0L, 'long', '=', 34144101L, 'application/x-library-file'],
[0L, 'long', '=', 22552998L, 'application/core'],
[0L, 'long', '=', 1302851304L, 'font/x-hp-windows'],
[0L, 'string', '=', 'Bitmapfile', 'image/unknown'],
[0L, 'string', '=', 'IMGfile', 'CIS image/unknown'],
[0L, 'long', '=', 34341132L, 'application/x-lisp'],
[0L, 'string', '=', 'msgcat01', 'application/x-locale'],
[0L, 'string', '=', 'HPHP48-', 'HP48 binary'],
[0L, 'string', '=', '%%HP:', 'HP48 text'],
[0L, 'beshort', '=', 200L, 'hp200 (68010) BSD'],
[0L, 'beshort', '=', 300L, 'hp300 (68020+68881) BSD'],
[0L, 'beshort', '=', 537L, '370 XA sysV executable'],
[0L, 'beshort', '=', 532L, '370 XA sysV pure executable'],
[0L, 'beshort', '=', 54001L, '370 sysV pure executable'],
[0L, 'beshort', '=', 55001L, '370 XA sysV pure executable'],
[0L, 'beshort', '=', 56401L, '370 sysV executable'],
[0L, 'beshort', '=', 57401L, '370 XA sysV executable'],
[0L, 'beshort', '=', 531L, 'SVR2 executable (Amdahl-UTS)'],
[0L, 'beshort', '=', 534L, 'SVR2 pure executable (Amdahl-UTS)'],
[0L, 'beshort', '=', 530L, 'SVR2 pure executable (USS/370)'],
[0L, 'beshort', '=', 535L, 'SVR2 executable (USS/370)'],
[0L, 'beshort', '=', 479L, 'executable (RISC System/6000 V3.1) or obj module'],
[0L, 'beshort', '=', 260L, 'shared library'],
[0L, 'beshort', '=', 261L, 'ctab data'],
[0L, 'beshort', '=', 65028L, 'structured file'],
[0L, 'string', '=', '0xabcdef', 'AIX message catalog'],
[0L, 'belong', '=', 505L, 'AIX compiled message catalog'],
[0L, 'string', '=', '<aiaff>', 'archive'],
[0L, 'string', '=', 'FORM', 'IFF data'],
[0L, 'string', '=', 'P1', 'image/x-portable-bitmap'],
[0L, 'string', '=', 'P2', 'image/x-portable-graymap'],
[0L, 'string', '=', 'P3', 'image/x-portable-pixmap'],
[0L, 'string', '=', 'P4', 'image/x-portable-bitmap'],
[0L, 'string', '=', 'P5', 'image/x-portable-graymap'],
[0L, 'string', '=', 'P6', 'image/x-portable-pixmap'],
[0L, 'string', '=', 'IIN1', 'image/tiff'],
[0L, 'string', '=', 'MM\000*', 'image/tiff'],
[0L, 'string', '=', 'II*\000', 'image/tiff'],
[0L, 'string', '=', '\211PNG', 'image/x-png'],
[1L, 'string', '=', 'PNG', 'image/x-png'],
[0L, 'string', '=', 'GIF8', 'image/gif'],
[0L, 'string', '=', '\361\000@\273', 'image/x-cmu-raster'],
[0L, 'string', '=', 'id=ImageMagick', 'MIFF image data'],
[0L, 'long', '=', 1123028772L, 'Artisan image data'],
[0L, 'string', '=', '#FIG', 'FIG image text'],
[0L, 'string', '=', 'ARF_BEGARF', 'PHIGS clear text archive'],
[0L, 'string', '=', '@(#)SunPHIGS', 'SunPHIGS'],
[0L, 'string', '=', 'GKSM', 'GKS Metafile'],
[0L, 'string', '=', 'BEGMF', 'clear text Computer Graphics Metafile'],
[0L, 'beshort&0xffe0', '=', 32L, 'binary Computer Graphics Metafile'],
[0L, 'beshort', '=', 12320L, 'character Computer Graphics Metafile'],
[0L, 'string', '=', 'yz', 'MGR bitmap, modern format, 8-bit aligned'],
[0L, 'string', '=', 'zz', 'MGR bitmap, old format, 1-bit deep, 16-bit aligned'],
[0L, 'string', '=', 'xz', 'MGR bitmap, old format, 1-bit deep, 32-bit aligned'],
[0L, 'string', '=', 'yx', 'MGR bitmap, modern format, squeezed'],
[0L, 'string', '=', '%bitmap\000', 'FBM image data'],
[1L, 'string', '=', 'PC Research, Inc', 'group 3 fax data'],
[0L, 'beshort', '=', 65496L, 'image/jpeg'],
[0L, 'string', '=', 'hsi1', 'image/x-jpeg-proprietary'],
[0L, 'string', '=', 'BM', 'image/x-bmp'],
[0L, 'string', '=', 'IC', 'image/x-ico'],
[0L, 'string', '=', 'PI', 'PC pointer image data'],
[0L, 'string', '=', 'CI', 'PC color icon data'],
[0L, 'string', '=', 'CP', 'PC color pointer image data'],
[0L, 'string', '=', '/* XPM */', 'X pixmap image text'],
[0L, 'leshort', '=', 52306L, 'RLE image data,'],
[0L, 'string', '=', 'Imagefile version-', 'iff image data'],
[0L, 'belong', '=', 1504078485L, 'x/x-image-sun-raster'],
[0L, 'beshort', '=', 474L, 'x/x-image-sgi'],
[0L, 'string', '=', 'IT01', 'FIT image data'],
[0L, 'string', '=', 'IT02', 'FIT image data'],
[2048L, 'string', '=', 'PCD_IPI', 'x/x-photo-cd-pack-file'],
[0L, 'string', '=', 'PCD_OPA', 'x/x-photo-cd-overview-file'],
[0L, 'string', '=', 'SIMPLE =', 'FITS image data'],
[0L, 'string', '=', 'This is a BitMap file', 'Lisp Machine bit-array-file'],
[0L, 'string', '=', '!!', 'Bennet Yee\'s "face" format'],
[0L, 'beshort', '=', 4112L, 'PEX Binary Archive'],
[3000L, 'string', '=', 'Visio (TM) Drawing', '%s'],
[0L, 'leshort', '=', 502L, 'basic-16 executable'],
[0L, 'leshort', '=', 503L, 'basic-16 executable (TV)'],
[0L, 'leshort', '=', 510L, 'application/x-executable-file'],
[0L, 'leshort', '=', 511L, 'application/x-executable-file'],
[0L, 'leshort', '=', 512L, 'application/x-executable-file'],
[0L, 'leshort', '=', 522L, 'application/x-executable-file'],
[0L, 'leshort', '=', 514L, 'application/x-executable-file'],
[0L, 'string', '=', '\210OPS', 'Interleaf saved data'],
[0L, 'string', '=', '<!OPS', 'Interleaf document text'],
[4L, 'string', '=', 'pgscriptver', 'IslandWrite document'],
[13L, 'string', '=', 'DrawFile', 'IslandDraw document'],
[0L, 'leshort&0xFFFC', '=', 38400L, 'little endian ispell'],
[0L, 'beshort&0xFFFC', '=', 38400L, 'big endian ispell'],
[0L, 'belong', '=', 3405691582L, 'compiled Java class data,'],
[0L, 'beshort', '=', 44269L, 'Java serialization data'],
[0L, 'string', '=', 'KarmaRHD', 'Version Karma Data Structure Version'],
[0L, 'string', '=', 'lect', 'DEC SRC Virtual Paper Lectern file'],
[53L, 'string', '=', 'yyprevious', 'C program text (from lex)'],
[21L, 'string', '=', 'generated by flex', 'C program text (from flex)'],
[0L, 'string', '=', '%{', 'lex description text'],
[0L, 'short', '=', 32768L, 'lif file'],
[0L, 'lelong', '=', 6553863L, 'Linux/i386 impure executable (OMAGIC)'],
[0L, 'lelong', '=', 6553864L, 'Linux/i386 pure executable (NMAGIC)'],
[0L, 'lelong', '=', 6553867L, 'Linux/i386 demand-paged executable (ZMAGIC)'],
[0L, 'lelong', '=', 6553804L, 'Linux/i386 demand-paged executable (QMAGIC)'],
[0L, 'string', '=', '\007\001\000', 'Linux/i386 object file'],
[0L, 'string', '=', '\001\003\020\004', 'Linux-8086 impure executable'],
[0L, 'string', '=', '\001\003 \004', 'Linux-8086 executable'],
[0L, 'string', '=', '\243\206\001\000', 'Linux-8086 object file'],
[0L, 'string', '=', '\001\003\020\020', 'Minix-386 impure executable'],
[0L, 'string', '=', '\001\003 \020', 'Minix-386 executable'],
[0L, 'string', '=', '*nazgul*', 'Linux compiled message catalog'],
[216L, 'lelong', '=', 421L, 'Linux/i386 core file'],
[2L, 'string', '=', 'LILO', 'Linux/i386 LILO boot/chain loader'],
[0L, 'string', '=', '0.9', ''],
[0L, 'leshort', '=', 1078L, 'font/linux-psf'],
[4086L, 'string', '=', 'SWAP-SPACE', 'Linux/i386 swap file'],
[0L, 'leshort', '=', 387L, 'ECOFF alpha'],
[514L, 'string', '=', 'HdrS', 'Linux kernel'],
[0L, 'belong', '=', 3099592590L, 'Linux kernel'],
[0L, 'string', '=', 'Begin3', 'Linux Software Map entry text'],
[0L, 'string', '=', ';;', 'Lisp/Scheme program text'],
[0L, 'string', '=', '\012(', 'byte-compiled Emacs-Lisp program data'],
[0L, 'string', '=', ';ELC\023\000\000\000', 'byte-compiled Emacs-Lisp program data'],
[0L, 'string', '=', "(SYSTEM::VERSION '", 'CLISP byte-compiled Lisp program text'],
[0L, 'long', '=', 1886817234L, 'CLISP memory image data'],
[0L, 'long', '=', 3532355184L, 'CLISP memory image data, other endian'],
[0L, 'long', '=', 3725722773L, 'GNU-format message catalog data'],
[0L, 'long', '=', 2500072158L, 'GNU-format message catalog data'],
[0L, 'belong', '=', 3405691582L, 'mach-o fat file'],
[0L, 'belong', '=', 4277009102L, 'mach-o'],
[11L, 'string', '=', 'must be converted with BinHex', 'BinHex binary text'],
[0L, 'string', '=', 'SIT!', 'StuffIt Archive (data)'],
[65L, 'string', '=', 'SIT!', 'StuffIt Archive (rsrc + data)'],
[0L, 'string', '=', 'SITD', 'StuffIt Deluxe (data)'],
[65L, 'string', '=', 'SITD', 'StuffIt Deluxe (rsrc + data)'],
[0L, 'string', '=', 'Seg', 'StuffIt Deluxe Segment (data)'],
[65L, 'string', '=', 'Seg', 'StuffIt Deluxe Segment (rsrc + data)'],
[0L, 'string', '=', 'APPL', 'Macintosh Application (data)'],
[65L, 'string', '=', 'APPL', 'Macintosh Application (rsrc + data)'],
[0L, 'string', '=', 'zsys', 'Macintosh System File (data)'],
[65L, 'string', '=', 'zsys', 'Macintosh System File(rsrc + data)'],
[0L, 'string', '=', 'FNDR', 'Macintosh Finder (data)'],
[65L, 'string', '=', 'FNDR', 'Macintosh Finder(rsrc + data)'],
[0L, 'string', '=', 'libr', 'Macintosh Library (data)'],
[65L, 'string', '=', 'libr', 'Macintosh Library(rsrc + data)'],
[0L, 'string', '=', 'shlb', 'Macintosh Shared Library (data)'],
[65L, 'string', '=', 'shlb', 'Macintosh Shared Library(rsrc + data)'],
[0L, 'string', '=', 'cdev', 'Macintosh Control Panel (data)'],
[65L, 'string', '=', 'cdev', 'Macintosh Control Panel(rsrc + data)'],
[0L, 'string', '=', 'INIT', 'Macintosh Extension (data)'],
[65L, 'string', '=', 'INIT', 'Macintosh Extension(rsrc + data)'],
[0L, 'string', '=', 'FFIL', 'font/ttf'],
[65L, 'string', '=', 'FFIL', 'font/ttf'],
[0L, 'string', '=', 'LWFN', 'font/type1'],
[65L, 'string', '=', 'LWFN', 'font/type1'],
[0L, 'string', '=', 'PACT', 'Macintosh Compact Pro Archive (data)'],
[65L, 'string', '=', 'PACT', 'Macintosh Compact Pro Archive(rsrc + data)'],
[0L, 'string', '=', 'ttro', 'Macintosh TeachText File (data)'],
[65L, 'string', '=', 'ttro', 'Macintosh TeachText File(rsrc + data)'],
[0L, 'string', '=', 'TEXT', 'Macintosh TeachText File (data)'],
[65L, 'string', '=', 'TEXT', 'Macintosh TeachText File(rsrc + data)'],
[0L, 'string', '=', 'PDF', 'Macintosh PDF File (data)'],
[65L, 'string', '=', 'PDF', 'Macintosh PDF File(rsrc + data)'],
[0L, 'string', '=', '# Magic', 'magic text file for file(1) cmd'],
[0L, 'string', '=', 'Relay-Version:', 'old news text'],
[0L, 'string', '=', '#! rnews', 'batched news text'],
[0L, 'string', '=', 'N#! rnews', 'mailed, batched news text'],
[0L, 'string', '=', 'Forward to', 'mail forwarding text'],
[0L, 'string', '=', 'Pipe to', 'mail piping text'],
[0L, 'string', '=', 'Return-Path:', 'message/rfc822'],
[0L, 'string', '=', 'Path:', 'message/news'],
[0L, 'string', '=', 'Xref:', 'message/news'],
[0L, 'string', '=', 'From:', 'message/rfc822'],
[0L, 'string', '=', 'Article', 'message/news'],
[0L, 'string', '=', 'BABYL', 'message/x-gnu-rmail'],
[0L, 'string', '=', 'Received:', 'message/rfc822'],
[0L, 'string', '=', 'MIME-Version:', 'MIME entity text'],
[0L, 'string', '=', 'Content-Type: ', ''],
[0L, 'string', '=', 'Content-Type:', ''],
[0L, 'long', '=', 31415L, 'Mirage Assembler m.out executable'],
[0L, 'string', '=', '\311\304', 'ID tags data'],
[0L, 'string', '=', '\001\001\001\001', 'MMDF mailbox'],
[4L, 'string', '=', 'Research,', 'Digifax-G3-File'],
[0L, 'short', '=', 256L, 'raw G3 data, byte-padded'],
[0L, 'short', '=', 5120L, 'raw G3 data'],
[0L, 'string', '=', 'RMD1', 'raw modem data'],
[0L, 'string', '=', 'PVF1\012', 'portable voice format'],
[0L, 'string', '=', 'PVF2\012', 'portable voice format'],
[0L, 'beshort', '=', 520L, 'mc68k COFF'],
[0L, 'beshort', '=', 521L, 'mc68k executable (shared)'],
[0L, 'beshort', '=', 522L, 'mc68k executable (shared demand paged)'],
[0L, 'beshort', '=', 554L, '68K BCS executable'],
[0L, 'beshort', '=', 555L, '88K BCS executable'],
[0L, 'string', '=', 'S0', 'Motorola S-Record; binary data in text format'],
[0L, 'string', '=', '@echo off', 'MS-DOS batch file text'],
[128L, 'string', '=', 'PE\000\000', 'MS Windows PE'],
[0L, 'leshort', '=', 332L, 'MS Windows COFF Intel 80386 object file'],
[0L, 'leshort', '=', 358L, 'MS Windows COFF MIPS R4000 object file'],
[0L, 'leshort', '=', 388L, 'MS Windows COFF Alpha object file'],
[0L, 'leshort', '=', 616L, 'MS Windows COFF Motorola 68000 object file'],
[0L, 'leshort', '=', 496L, 'MS Windows COFF PowerPC object file'],
[0L, 'leshort', '=', 656L, 'MS Windows COFF PA-RISC object file'],
[0L, 'string', '=', 'MZ', 'application/x-ms-dos-executable'],
[0L, 'string', '=', 'LZ', 'MS-DOS executable (built-in)'],
[0L, 'string', '=', 'regf', 'Windows NT Registry file'],
[2080L, 'string', '=', 'Microsoft Word 6.0 Document', 'text/vnd.ms-word'],
[2080L, 'string', '=', 'Documento Microsoft Word 6', 'text/vnd.ms-word'],
[2112L, 'string', '=', 'MSWordDoc', 'text/vnd.ms-word'],
[0L, 'belong', '=', 834535424L, 'text/vnd.ms-word'],
[0L, 'string', '=', 'PO^Q`', 'text/vnd.ms-word'],
[2080L, 'string', '=', 'Microsoft Excel 5.0 Worksheet', 'application/vnd.ms-excel'],
[2114L, 'string', '=', 'Biff5', 'application/vnd.ms-excel'],
[0L, 'belong', '=', 6656L, 'Lotus 1-2-3'],
[0L, 'belong', '=', 512L, 'Lotus 1-2-3'],
[1L, 'string', '=', 'WPC', 'text/vnd.wordperfect'],
[0L, 'beshort', '=', 610L, 'Tower/XP rel 2 object'],
[0L, 'beshort', '=', 615L, 'Tower/XP rel 2 object'],
[0L, 'beshort', '=', 620L, 'Tower/XP rel 3 object'],
[0L, 'beshort', '=', 625L, 'Tower/XP rel 3 object'],
[0L, 'beshort', '=', 630L, 'Tower32/600/400 68020 object'],
[0L, 'beshort', '=', 640L, 'Tower32/800 68020'],
[0L, 'beshort', '=', 645L, 'Tower32/800 68010'],
[0L, 'lelong', '=', 407L, 'NetBSD little-endian object file'],
[0L, 'belong', '=', 407L, 'NetBSD big-endian object file'],
[0L, 'belong&0377777777', '=', 41400413L, 'NetBSD/i386 demand paged'],
[0L, 'belong&0377777777', '=', 41400410L, 'NetBSD/i386 pure'],
[0L, 'belong&0377777777', '=', 41400407L, 'NetBSD/i386'],
[0L, 'belong&0377777777', '=', 41400507L, 'NetBSD/i386 core'],
[0L, 'belong&0377777777', '=', 41600413L, 'NetBSD/m68k demand paged'],
[0L, 'belong&0377777777', '=', 41600410L, 'NetBSD/m68k pure'],
[0L, 'belong&0377777777', '=', 41600407L, 'NetBSD/m68k'],
[0L, 'belong&0377777777', '=', 41600507L, 'NetBSD/m68k core'],
[0L, 'belong&0377777777', '=', 42000413L, 'NetBSD/m68k4k demand paged'],
[0L, 'belong&0377777777', '=', 42000410L, 'NetBSD/m68k4k pure'],
[0L, 'belong&0377777777', '=', 42000407L, 'NetBSD/m68k4k'],
[0L, 'belong&0377777777', '=', 42000507L, 'NetBSD/m68k4k core'],
[0L, 'belong&0377777777', '=', 42200413L, 'NetBSD/ns32532 demand paged'],
[0L, 'belong&0377777777', '=', 42200410L, 'NetBSD/ns32532 pure'],
[0L, 'belong&0377777777', '=', 42200407L, 'NetBSD/ns32532'],
[0L, 'belong&0377777777', '=', 42200507L, 'NetBSD/ns32532 core'],
[0L, 'belong&0377777777', '=', 42400413L, 'NetBSD/sparc demand paged'],
[0L, 'belong&0377777777', '=', 42400410L, 'NetBSD/sparc pure'],
[0L, 'belong&0377777777', '=', 42400407L, 'NetBSD/sparc'],
[0L, 'belong&0377777777', '=', 42400507L, 'NetBSD/sparc core'],
[0L, 'belong&0377777777', '=', 42600413L, 'NetBSD/pmax demand paged'],
[0L, 'belong&0377777777', '=', 42600410L, 'NetBSD/pmax pure'],
[0L, 'belong&0377777777', '=', 42600407L, 'NetBSD/pmax'],
[0L, 'belong&0377777777', '=', 42600507L, 'NetBSD/pmax core'],
[0L, 'belong&0377777777', '=', 43000413L, 'NetBSD/vax demand paged'],
[0L, 'belong&0377777777', '=', 43000410L, 'NetBSD/vax pure'],
[0L, 'belong&0377777777', '=', 43000407L, 'NetBSD/vax'],
[0L, 'belong&0377777777', '=', 43000507L, 'NetBSD/vax core'],
[0L, 'lelong', '=', 459141L, 'ECOFF NetBSD/alpha binary'],
[0L, 'belong&0377777777', '=', 43200507L, 'NetBSD/alpha core'],
[0L, 'belong&0377777777', '=', 43400413L, 'NetBSD/mips demand paged'],
[0L, 'belong&0377777777', '=', 43400410L, 'NetBSD/mips pure'],
[0L, 'belong&0377777777', '=', 43400407L, 'NetBSD/mips'],
[0L, 'belong&0377777777', '=', 43400507L, 'NetBSD/mips core'],
[0L, 'belong&0377777777', '=', 43600413L, 'NetBSD/arm32 demand paged'],
[0L, 'belong&0377777777', '=', 43600410L, 'NetBSD/arm32 pure'],
[0L, 'belong&0377777777', '=', 43600407L, 'NetBSD/arm32'],
[0L, 'belong&0377777777', '=', 43600507L, 'NetBSD/arm32 core'],
[0L, 'string', '=', 'StartFontMetrics', 'font/x-sunos-news'],
[0L, 'string', '=', 'StartFont', 'font/x-sunos-news'],
[0L, 'belong', '=', 326773060L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773063L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773072L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773073L, 'font/x-sunos-news'],
[8L, 'belong', '=', 326773573L, 'font/x-sunos-news'],
[8L, 'belong', '=', 326773576L, 'font/x-sunos-news'],
[0L, 'string', '=', 'Octave-1-L', 'Octave binary data (little endian)'],
[0L, 'string', '=', 'Octave-1-B', 'Octave binary data (big endian)'],
[0L, 'string', '=', '\177OLF', 'OLF'],
[0L, 'beshort', '=', 34765L, 'OS9/6809 module:'],
[0L, 'beshort', '=', 19196L, 'OS9/68K module:'],
[0L, 'long', '=', 61374L, 'OSF/Rose object'],
[0L, 'short', '=', 565L, 'i386 COFF object'],
[0L, 'short', '=', 10775L, '"compact bitmap" format (Poskanzer)'],
[0L, 'string', '=', '%PDF-', 'PDF document'],
[0L, 'lelong', '=', 101555L, 'PDP-11 single precision APL workspace'],
[0L, 'lelong', '=', 101554L, 'PDP-11 double precision APL workspace'],
[0L, 'leshort', '=', 407L, 'PDP-11 executable'],
[0L, 'leshort', '=', 401L, 'PDP-11 UNIX/RT ldp'],
[0L, 'leshort', '=', 405L, 'PDP-11 old overlay'],
[0L, 'leshort', '=', 410L, 'PDP-11 pure executable'],
[0L, 'leshort', '=', 411L, 'PDP-11 separate I&D executable'],
[0L, 'leshort', '=', 437L, 'PDP-11 kernel overlay'],
[0L, 'beshort', '=', 39168L, 'PGP key public ring'],
[0L, 'beshort', '=', 38145L, 'PGP key security ring'],
[0L, 'beshort', '=', 38144L, 'PGP key security ring'],
[0L, 'beshort', '=', 42496L, 'PGP encrypted data'],
[0L, 'string', '=', '-----BEGIN PGP', 'PGP armored data'],
[0L, 'string', '=', '# PaCkAgE DaTaStReAm', 'pkg Datastream (SVR4)'],
[0L, 'short', '=', 601L, 'mumps avl global'],
[0L, 'short', '=', 602L, 'mumps blt global'],
[0L, 'string', '=', '%!', 'application/postscript'],
[0L, 'string', '=', '\004%!', 'application/postscript'],
[0L, 'belong', '=', 3318797254L, 'DOS EPS Binary File'],
[0L, 'string', '=', '*PPD-Adobe:', 'PPD file'],
[0L, 'string', '=', '\033%-12345X@PJL', 'HP Printer Job Language data'],
[0L, 'string', '=', '\033%-12345X@PJL', 'HP Printer Job Language data'],
[0L, 'string', '=', '\033E\033', 'image/x-pcl-hp'],
[0L, 'string', '=', '@document(', 'Imagen printer'],
[0L, 'string', '=', 'Rast', 'RST-format raster font data'],
[0L, 'belong&0xff00ffff', '=', 1442840576L, 'ps database'],
[0L, 'long', '=', 1351614727L, 'Pyramid 90x family executable'],
[0L, 'long', '=', 1351614728L, 'Pyramid 90x family pure executable'],
[0L, 'long', '=', 1351614731L, 'Pyramid 90x family demand paged pure executable'],
[0L, 'beshort', '=', 60843L, ''],
[0L, 'string', '=', '{\\\\rtf', 'Rich Text Format data,'],
[38L, 'string', '=', 'Spreadsheet', 'sc spreadsheet file'],
[8L, 'string', '=', '\001s SCCS', 'archive data'],
[0L, 'byte', '=', 46L, 'Sendmail frozen configuration'],
[0L, 'short', '=', 10012L, 'Sendmail frozen configuration'],
[0L, 'lelong', '=', 234L, 'BALANCE NS32000 .o'],
[0L, 'lelong', '=', 4330L, 'BALANCE NS32000 executable (0 @ 0)'],
[0L, 'lelong', '=', 8426L, 'BALANCE NS32000 executable (invalid @ 0)'],
[0L, 'lelong', '=', 12522L, 'BALANCE NS32000 standalone executable'],
[0L, 'leshort', '=', 4843L, 'SYMMETRY i386 .o'],
[0L, 'leshort', '=', 8939L, 'SYMMETRY i386 executable (0 @ 0)'],
[0L, 'leshort', '=', 13035L, 'SYMMETRY i386 executable (invalid @ 0)'],
[0L, 'leshort', '=', 17131L, 'SYMMETRY i386 standalone executable'],
[0L, 'string', '=', 'kbd!map', 'kbd map file'],
[0L, 'belong', '=', 407L, 'old SGI 68020 executable'],
[0L, 'belong', '=', 410L, 'old SGI 68020 pure executable'],
[0L, 'beshort', '=', 34661L, 'disk quotas file'],
[0L, 'beshort', '=', 1286L, 'IRIS Showcase file'],
[0L, 'beshort', '=', 550L, 'IRIS Showcase template'],
[0L, 'belong', '=', 1396917837L, 'IRIS Showcase file'],
[0L, 'belong', '=', 1413695053L, 'IRIS Showcase template'],
[0L, 'belong', '=', 3735927486L, 'IRIX Parallel Arena'],
[0L, 'beshort', '=', 352L, 'MIPSEB COFF executable'],
[0L, 'beshort', '=', 354L, 'MIPSEL COFF executable'],
[0L, 'beshort', '=', 24577L, 'MIPSEB-LE COFF executable'],
[0L, 'beshort', '=', 25089L, 'MIPSEL-LE COFF executable'],
[0L, 'beshort', '=', 355L, 'MIPSEB MIPS-II COFF executable'],
[0L, 'beshort', '=', 358L, 'MIPSEL MIPS-II COFF executable'],
[0L, 'beshort', '=', 25345L, 'MIPSEB-LE MIPS-II COFF executable'],
[0L, 'beshort', '=', 26113L, 'MIPSEL-LE MIPS-II COFF executable'],
[0L, 'beshort', '=', 320L, 'MIPSEB MIPS-III COFF executable'],
[0L, 'beshort', '=', 322L, 'MIPSEL MIPS-III COFF executable'],
[0L, 'beshort', '=', 16385L, 'MIPSEB-LE MIPS-III COFF executable'],
[0L, 'beshort', '=', 16897L, 'MIPSEL-LE MIPS-III COFF executable'],
[0L, 'beshort', '=', 384L, 'MIPSEB Ucode'],
[0L, 'beshort', '=', 386L, 'MIPSEL Ucode'],
[0L, 'belong', '=', 3735924144L, 'IRIX core dump'],
[0L, 'belong', '=', 3735924032L, 'IRIX 64-bit core dump'],
[0L, 'belong', '=', 3133063355L, 'IRIX N32 core dump'],
[0L, 'string', '=', 'CrshDump', 'IRIX vmcore dump of'],
[0L, 'string', '=', 'SGIAUDIT', 'SGI Audit file'],
[0L, 'string', '=', 'WNGZWZSC', 'Wingz compiled script'],
[0L, 'string', '=', 'WNGZWZSS', 'Wingz spreadsheet'],
[0L, 'string', '=', 'WNGZWZHP', 'Wingz help file'],
[0L, 'string', '=', '\\#Inventor', 'V IRIS Inventor 1.0 file'],
[0L, 'string', '=', '\\#Inventor', 'V2 Open Inventor 2.0 file'],
[0L, 'string', '=', 'glfHeadMagic();', 'GLF_TEXT'],
[4L, 'belong', '=', 1090584576L, 'GLF_BINARY_LSB_FIRST'],
[4L, 'belong', '=', 321L, 'GLF_BINARY_MSB_FIRST'],
[0L, 'string', '=', '<!DOCTYPE HTML', 'text/html'],
[0L, 'string', '=', '<!doctype html', 'text/html'],
[0L, 'string', '=', '<HEAD', 'text/html'],
[0L, 'string', '=', '<head', 'text/html'],
[0L, 'string', '=', '<TITLE', 'text/html'],
[0L, 'string', '=', '<title', 'text/html'],
[0L, 'string', '=', '<html', 'text/html'],
[0L, 'string', '=', '<HTML', 'text/html'],
[0L, 'string', '=', '<?xml', 'application/xml'],
[0L, 'string', '=', '<!DOCTYPE', 'exported SGML document text'],
[0L, 'string', '=', '<!doctype', 'exported SGML document text'],
[0L, 'string', '=', '<!SUBDOC', 'exported SGML subdocument text'],
[0L, 'string', '=', '<!subdoc', 'exported SGML subdocument text'],
[0L, 'string', '=', '<!--', 'exported SGML document text'],
[0L, 'string', '=', 'RTSS', 'NetMon capture file'],
[0L, 'string', '=', 'TRSNIFF data \032', 'Sniffer capture file'],
[0L, 'string', '=', 'XCP\000', 'NetXRay capture file'],
[0L, 'ubelong', '=', 2712847316L, 'tcpdump capture file (big-endian)'],
[0L, 'ulelong', '=', 2712847316L, 'tcpdump capture file (little-endian)'],
[0L, 'string', '=', '<!SQ DTD>', 'Compiled SGML rules file'],
[0L, 'string', '=', '<!SQ A/E>', 'A/E SGML Document binary'],
[0L, 'string', '=', '<!SQ STS>', 'A/E SGML binary styles file'],
[0L, 'short', '=', 49374L, 'Compiled PSI (v1) data'],
[0L, 'short', '=', 49370L, 'Compiled PSI (v2) data'],
[0L, 'short', '=', 125252L, 'SoftQuad DESC or font file binary'],
[0L, 'string', '=', 'SQ BITMAP1', 'SoftQuad Raster Format text'],
[0L, 'string', '=', 'X SoftQuad', 'troff Context intermediate'],
[0L, 'belong&077777777', '=', 600413L, 'sparc demand paged'],
[0L, 'belong&077777777', '=', 600410L, 'sparc pure'],
[0L, 'belong&077777777', '=', 600407L, 'sparc'],
[0L, 'belong&077777777', '=', 400413L, 'mc68020 demand paged'],
[0L, 'belong&077777777', '=', 400410L, 'mc68020 pure'],
[0L, 'belong&077777777', '=', 400407L, 'mc68020'],
[0L, 'belong&077777777', '=', 200413L, 'mc68010 demand paged'],
[0L, 'belong&077777777', '=', 200410L, 'mc68010 pure'],
[0L, 'belong&077777777', '=', 200407L, 'mc68010'],
[0L, 'belong', '=', 407L, 'old sun-2 executable'],
[0L, 'belong', '=', 410L, 'old sun-2 pure executable'],
[0L, 'belong', '=', 413L, 'old sun-2 demand paged executable'],
[0L, 'belong', '=', 525398L, 'SunOS core file'],
[0L, 'long', '=', 4197695630L, 'SunPC 4.0 Hard Disk'],
[0L, 'string', '=', '#SUNPC_CONFIG', 'SunPC 4.0 Properties Values'],
[0L, 'string', '=', 'snoop', 'Snoop capture file'],
[36L, 'string', '=', 'acsp', 'Kodak Color Management System, ICC Profile'],
[0L, 'string', '=', '#!teapot\012xdr', 'teapot work sheet (XDR format)'],
[0L, 'string', '=', '\032\001', 'Compiled terminfo entry'],
[0L, 'short', '=', 433L, 'Curses screen image'],
[0L, 'short', '=', 434L, 'Curses screen image'],
[0L, 'string', '=', '\367\002', 'TeX DVI file'],
[0L, 'string', '=', '\367\203', 'font/x-tex'],
[0L, 'string', '=', '\367Y', 'font/x-tex'],
[0L, 'string', '=', '\367\312', 'font/x-tex'],
[0L, 'string', '=', 'This is TeX,', 'TeX transcript text'],
[0L, 'string', '=', 'This is METAFONT,', 'METAFONT transcript text'],
[2L, 'string', '=', '\000\021', 'font/x-tex-tfm'],
[2L, 'string', '=', '\000\022', 'font/x-tex-tfm'],
[0L, 'string', '=', '\\\\input\\', 'texinfo Texinfo source text'],
[0L, 'string', '=', 'This is Info file', 'GNU Info text'],
[0L, 'string', '=', '\\\\input', 'TeX document text'],
[0L, 'string', '=', '\\\\section', 'LaTeX document text'],
[0L, 'string', '=', '\\\\setlength', 'LaTeX document text'],
[0L, 'string', '=', '\\\\documentstyle', 'LaTeX document text'],
[0L, 'string', '=', '\\\\chapter', 'LaTeX document text'],
[0L, 'string', '=', '\\\\documentclass', 'LaTeX 2e document text'],
[0L, 'string', '=', '\\\\relax', 'LaTeX auxiliary file'],
[0L, 'string', '=', '\\\\contentsline', 'LaTeX table of contents'],
[0L, 'string', '=', '\\\\indexentry', 'LaTeX raw index file'],
[0L, 'string', '=', '\\\\begin{theindex}', 'LaTeX sorted index'],
[0L, 'string', '=', '\\\\glossaryentry', 'LaTeX raw glossary'],
[0L, 'string', '=', '\\\\begin{theglossary}', 'LaTeX sorted glossary'],
[0L, 'string', '=', 'This is makeindex', 'Makeindex log file'],
[0L, 'string', '=', '**TI82**', 'TI-82 Graphing Calculator'],
[0L, 'string', '=', '**TI83**', 'TI-83 Graphing Calculator'],
[0L, 'string', '=', '**TI85**', 'TI-85 Graphing Calculator'],
[0L, 'string', '=', '**TI92**', 'TI-92 Graphing Calculator'],
[0L, 'string', '=', '**TI80**', 'TI-80 Graphing Calculator File.'],
[0L, 'string', '=', '**TI81**', 'TI-81 Graphing Calculator File.'],
[0L, 'string', '=', 'TZif', 'timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\002\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\003\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\004\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\005\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\006\000', 'old timezone data'],
[0L, 'string', '=', '.\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\'\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\'.\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', 'x T', 'ditroff text'],
[0L, 'string', '=', '@\357', 'very old (C/A/T) troff output data'],
[0L, 'string', '=', 'Interpress/Xerox', 'Xerox InterPress data'],
[0L, 'short', '=', 263L, 'unknown machine executable'],
[0L, 'short', '=', 264L, 'unknown pure executable'],
[0L, 'short', '=', 265L, 'PDP-11 separate I&D'],
[0L, 'short', '=', 267L, 'unknown pure executable'],
[0L, 'long', '=', 268L, 'unknown demand paged pure executable'],
[0L, 'long', '=', 269L, 'unknown demand paged pure executable'],
[0L, 'long', '=', 270L, 'unknown readable demand paged pure executable'],
[0L, 'string', '=', 'begin uuencoded', 'or xxencoded text'],
[0L, 'string', '=', 'xbtoa Begin', "btoa'd text"],
[0L, 'string', '=', '$\012ship', "ship'd binary text"],
[0L, 'string', '=', 'Decode the following with bdeco', 'bencoded News text'],
[11L, 'string', '=', 'must be converted with BinHex', 'BinHex binary text'],
[0L, 'short', '=', 610L, 'Perkin-Elmer executable'],
[0L, 'beshort', '=', 572L, 'amd 29k coff noprebar executable'],
[0L, 'beshort', '=', 1572L, 'amd 29k coff prebar executable'],
[0L, 'beshort', '=', 160007L, 'amd 29k coff archive'],
[6L, 'beshort', '=', 407L, 'unicos (cray) executable'],
[596L, 'string', '=', 'X\337\377\377', 'Ultrix core file'],
[0L, 'string', '=', 'Joy!peffpwpc', 'header for PowerPC PEF executable'],
[0L, 'lelong', '=', 101557L, 'VAX single precision APL workspace'],
[0L, 'lelong', '=', 101556L, 'VAX double precision APL workspace'],
[0L, 'lelong', '=', 407L, 'VAX executable'],
[0L, 'lelong', '=', 410L, 'VAX pure executable'],
[0L, 'lelong', '=', 413L, 'VAX demand paged pure executable'],
[0L, 'leshort', '=', 570L, 'VAX COFF executable'],
[0L, 'leshort', '=', 575L, 'VAX COFF pure executable'],
[0L, 'string', '=', 'LBLSIZE=', 'VICAR image data'],
[43L, 'string', '=', 'SFDU_LABEL', 'VICAR label file'],
[0L, 'short', '=', 21845L, 'VISX image file'],
[0L, 'string', '=', '\260\0000\000', 'VMS VAX executable'],
[0L, 'belong', '=', 50331648L, 'VMS Alpha executable'],
[1L, 'string', '=', 'WPC', '(Corel/WP)'],
[0L, 'string', '=', 'core', 'core file (Xenix)'],
[0L, 'byte', '=', 128L, '8086 relocatable (Microsoft)'],
[0L, 'leshort', '=', 65381L, 'x.out'],
[0L, 'leshort', '=', 518L, 'Microsoft a.out'],
[0L, 'leshort', '=', 320L, 'old Microsoft 8086 x.out'],
[0L, 'lelong', '=', 518L, 'b.out'],
[0L, 'leshort', '=', 1408L, 'XENIX 8086 relocatable or 80286 small model'],
[0L, 'long', '=', 59399L, 'object file (z8000 a.out)'],
[0L, 'long', '=', 59400L, 'pure object file (z8000 a.out)'],
[0L, 'long', '=', 59401L, 'separate object file (z8000 a.out)'],
[0L, 'long', '=', 59397L, 'overlay object file (z8000 a.out)'],
[0L, 'string', '=', 'ZyXEL\002', 'ZyXEL voice data'],
]
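# Pre-compile every record of the database into a MagicTest object so each
# lookup only has to walk a list of ready-made comparisons.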
magic_tests = []
for record in magic_database:
magic_tests.append(MagicTest(record[0], record[1], record[2], record[3],
record[4]))
def guess_type(filename):
"""
    Guess the mimetype of a file by examining its leading bytes (magic numbers).
    @param filename: Path of the file to inspect.
    @return: Mimetype string, or a description when no appropriate mimetype is
            available.
"""
if not os.path.isfile(filename):
logging.debug('%s is not a file', filename)
return None
try:
data = open(filename, 'r').read(8192)
except Exception, e:
logging.error(str(e))
return None
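    # Walk the magic tests in database order; the first matching test decides the type.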
for test in magic_tests:
type = test.compare(data)
if type:
return type
    # No matching magic number in the database. Is it binary or text?
for c in data:
if ord(c) > 128:
# Non ASCII (binary) data
return 'Data'
# ASCII, do some text tests
    if string.find(data, 'The', 0, 8192) > -1:
        return 'English text'
    if string.find(data, 'def', 0, 8192) > -1:
return 'Python Source'
return 'ASCII text'
if __name__ == '__main__':
parser = optparse.OptionParser("usage: %prog [options] [filenames]")
options, args = parser.parse_args()
logging_manager.configure_logging(MagicLoggingConfig(), verbose=True)
if not args:
parser.print_help()
sys.exit(1)
for arg in args:
msg = None
if os.path.isfile(arg):
msg = guess_type(arg)
if msg:
logging.info('%s: %s', arg, msg)
else:
logging.info('%s: unknown', arg)
| gpl-2.0 | -6,769,077,660,113,957,000 | 53.685634 | 163 | 0.562151 | false | 2.693082 | false | false | false |
lauromoraes/redes | MyTCPRequestHandler.py | 1 | 1443 | import logging
import socket
import threading
import SocketServer
import time
from recvall import *
from calc import *
logging.basicConfig( level = logging.DEBUG, format = "%(name)s: %(message)s", )
class MyTCPRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.logger = logging.getLogger('MyTCPRequestHandler')
self.logger.debug('__init__')
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
return
def setup(self):
self.logger.debug('setup')
return SocketServer.BaseRequestHandler.setup(self)
def handle(self):
self.logger.debug('handle')
data = recvall(self.request, 2)
#print(self.request.accept()[1])
#current_thread = threading.currentThread()
#resp = "%s, %s" % (current_thread.getName(), data)
#self.logger.debug('Thread: %s | recv()->"%s"', current_thread.getName(), data)
#self.logger.debug('Threads: %s' % str( [ t.getName() for t in threading.enumerate()] ) )
resp = calc(data)
sent = 0
size = 1024*5
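		# Send the reply in 5 KB chunks, pausing briefly after each write so the client can drain the socket.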
while(sent < len(resp)):
if(sent+size <= len(resp)):
sent += self.request.send(resp[sent:sent+size])
else:
sent += self.request.send(resp[sent:])
time.sleep(0.1)
#self.request.sendall("data")
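		# Half-close the write side so the client sees EOF, then close the socket.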
self.request.shutdown(socket.SHUT_WR)
self.request.close()
#time.sleep(3)
return
def finish(self):
self.logger.debug('finish')
return SocketServer.BaseRequestHandler.finish(self)
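# Example usage (an assumption, not part of the original module): the handler is
# meant to be plugged into a threading TCP server, e.g.
#   server = SocketServer.ThreadingTCPServer(('0.0.0.0', 9000), MyTCPRequestHandler)
#   server.serve_forever()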
| gpl-2.0 | -1,314,863,412,308,185,900 | 29.0625 | 91 | 0.699931 | false | 3.164474 | false | false | false |
ensemblr/llvm-project-boilerplate | include/llvm/projects/compiler-rt/lib/asan/scripts/asan_symbolize.py | 1 | 18097 | #!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import argparse
import bisect
import getopt
import os
import re
import subprocess
import sys
symbolizers = {}
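# Global state and configuration flags; the flags below are overridden from the
# command line when the script runs as __main__.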
DEBUG = False
demangle = False
binutils_prefix = None
sysroot_path = None
binary_name_filter = None
fix_filename_patterns = None
logfile = sys.stdin
allow_system_symbolizer = True
force_system_symbolizer = False
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
if fix_filename_patterns:
for path_to_cut in fix_filename_patterns:
file_name = re.sub('.*' + path_to_cut, '', file_name)
file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
return file_name
def sysroot_path_filter(binary_name):
return sysroot_path + binary_name
def is_valid_arch(s):
return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s",
"armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390"]
def guess_arch(addr):
# Guess which arch we're running. 10 = len('0x') + 8 hex digits.
if len(addr) > 10:
return 'x86_64'
else:
return 'i386'
class Symbolizer(object):
def __init__(self):
pass
def symbolize(self, addr, binary, offset):
"""Symbolize the given address (pair of binary and offset).
    Overridden in subclasses.
Args:
addr: virtual address of an instruction.
binary: path to executable/shared object containing this instruction.
offset: instruction offset in the @binary.
Returns:
list of strings (one string for each inlined frame) describing
the code locations for this instruction (that is, function name, file
name, line and column numbers).
"""
return None
class LLVMSymbolizer(Symbolizer):
def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
self.default_arch = default_arch
self.system = system
self.dsym_hints = dsym_hints
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
cmd = [self.symbolizer_path,
'--use-symbol-table=true',
'--demangle=%s' % demangle,
'--functions=linkage',
'--inlining=true',
'--default-arch=%s' % self.default_arch]
if self.system == 'Darwin':
for hint in self.dsym_hints:
cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
print ' '.join(cmd)
try:
result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError:
result = None
return result
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if not self.pipe:
return None
result = []
try:
symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
print symbolizer_input
print >> self.pipe.stdin, symbolizer_input
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
break
file_name = self.pipe.stdout.readline().rstrip()
file_name = fix_filename(file_name)
if (not function_name.startswith('??') or
not file_name.startswith('??')):
# Append only non-trivial frames.
result.append('%s in %s %s' % (addr, function_name,
file_name))
except Exception:
result = []
if not result:
result = None
return result
def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
class Addr2LineSymbolizer(Symbolizer):
def __init__(self, binary):
super(Addr2LineSymbolizer, self).__init__()
self.binary = binary
self.pipe = self.open_addr2line()
self.output_terminator = -1
def open_addr2line(self):
addr2line_tool = 'addr2line'
if binutils_prefix:
addr2line_tool = binutils_prefix + addr2line_tool
cmd = [addr2line_tool, '-fi']
if demangle:
cmd += ['--demangle']
cmd += ['-e', self.binary]
if DEBUG:
print ' '.join(cmd)
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
lines = []
try:
print >> self.pipe.stdin, offset
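      # After the real offset, feed the sentinel (-1); addr2line cannot resolve it
      # and answers with '??', which ends the frame-reading loop below.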
print >> self.pipe.stdin, self.output_terminator
is_first_frame = True
while True:
function_name = self.pipe.stdout.readline().rstrip()
file_name = self.pipe.stdout.readline().rstrip()
if is_first_frame:
is_first_frame = False
elif function_name in ['', '??']:
assert file_name == function_name
break
lines.append((function_name, file_name));
except Exception:
lines.append(('??', '??:0'))
return ['%s in %s %s' % (addr, function, fix_filename(file)) for (function, file) in lines]
class UnbufferedLineConverter(object):
"""
Wrap a child process that responds to each line of input with one line of
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
# Local imports so that the script can start on Windows.
import pty
import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
if close_stderr:
dev_null = os.open('/dev/null', 0)
os.dup2(dev_null, 2)
os.execvp(args[0], args)
else:
# Disable echoing.
attr = termios.tcgetattr(fd)
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, attr)
# Set up a file()-like interface to the child process
self.r = os.fdopen(fd, "r", 1)
self.w = os.fdopen(os.dup(fd), "w", 1)
def convert(self, line):
self.w.write(line + "\n")
return self.readline()
def readline(self):
return self.r.readline().rstrip()
class DarwinSymbolizer(Symbolizer):
def __init__(self, addr, binary, arch):
super(DarwinSymbolizer, self).__init__()
self.binary = binary
self.arch = arch
self.open_atos()
def open_atos(self):
if DEBUG:
print 'atos -o %s -arch %s' % (self.binary, self.arch)
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
atos_line = self.atos.convert('0x%x' % int(offset, 16))
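    # atos may print informational "got symbolicator for ..." lines first; skip them.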
while "got symbolicator for" in atos_line:
atos_line = self.atos.readline()
# A well-formed atos response looks like this:
# foo(type1, type2) (in object.name) (filename.cc:80)
match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
print 'atos_line: ', atos_line
if match:
function_name = match.group(1)
function_name = re.sub('\(.*?\)', '', function_name)
file_name = fix_filename(match.group(3))
return ['%s in %s %s' % (addr, function_name, file_name)]
else:
return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
def __init__(self, symbolizer_list):
super(ChainSymbolizer, self).__init__()
self.symbolizer_list = symbolizer_list
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
for symbolizer in self.symbolizer_list:
if symbolizer:
result = symbolizer.symbolize(addr, binary, offset)
if result:
return result
return None
def append_symbolizer(self, symbolizer):
self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
suffix = os.getenv('BREAKPAD_SUFFIX')
if suffix:
filename = binary + suffix
if os.access(filename, os.F_OK):
return BreakpadSymbolizer(filename)
return None
def SystemSymbolizerFactory(system, addr, binary, arch):
if system == 'Darwin':
return DarwinSymbolizer(addr, binary, arch)
elif system == 'Linux' or system == 'FreeBSD':
return Addr2LineSymbolizer(binary)
class BreakpadSymbolizer(Symbolizer):
def __init__(self, filename):
super(BreakpadSymbolizer, self).__init__()
self.filename = filename
lines = file(filename).readlines()
self.files = []
self.symbols = {}
self.address_list = []
self.addresses = {}
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
fragments = lines[0].rstrip().split()
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
self.parse_lines(lines[1:])
def parse_lines(self, lines):
cur_function_addr = ''
for line in lines:
fragments = line.split()
if fragments[0] == 'FILE':
assert int(fragments[1]) == len(self.files)
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] in ['CFI', 'STACK']:
pass
elif fragments[0] == 'FUNC':
cur_function_addr = int(fragments[1], 16)
if not cur_function_addr in self.symbols.keys():
self.symbols[cur_function_addr] = ' '.join(fragments[4:])
else:
# Line starting with an address.
addr = int(fragments[0], 16)
self.address_list.append(addr)
# Tuple of symbol address, size, line, file number.
self.addresses[addr] = (cur_function_addr,
int(fragments[1], 16),
int(fragments[2]),
int(fragments[3]))
self.address_list.sort()
def get_sym_file_line(self, addr):
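    # Locate the record covering addr: an exact match, or the closest preceding
    # address found by binary search in the sorted address list.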
key = None
if addr in self.addresses.keys():
key = addr
else:
index = bisect.bisect_left(self.address_list, addr)
if index == 0:
return None
else:
key = self.address_list[index - 1]
sym_id, size, line_no, file_no = self.addresses[key]
symbol = self.symbols[sym_id]
filename = self.files[file_no]
if addr < key + size:
return symbol, filename, line_no
else:
return None
def symbolize(self, addr, binary, offset):
if self.binary != binary:
return None
res = self.get_sym_file_line(int(offset, 16))
if res:
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
print result
return result
else:
return None
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
if sys.platform == 'win32':
# ASan on Windows uses dbghelp.dll to symbolize in-process, which works
# even in sandboxed processes. Nothing needs to be done here.
self.process_line = self.process_line_echo
else:
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset, arch):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
# a single symbolizer binary.
# On Darwin, if the dsym hint producer is present:
# 1. check whether we've seen this binary already; if so,
# use |llvm_symbolizers[binary]|, which has already loaded the debug
# info for this binary (might not be the case for
# |last_llvm_symbolizer|);
# 2. otherwise check if we've seen all the hints for this binary already;
# if so, reuse |last_llvm_symbolizer| which has the full set of hints;
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
result = None
if not force_system_symbolizer:
if not binary in self.llvm_symbolizers:
use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
self.system, arch, self.dsym_hints)
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
result = symbolizers[binary].symbolize(addr, binary, offset)
else:
symbolizers[binary] = ChainSymbolizer([])
if result is None:
if not allow_system_symbolizer:
raise Exception('Failed to launch or use llvm-symbolizer.')
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary, arch))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def get_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
return [self.current_line]
else:
result = []
for symbolized_frame in symbolized_lines:
result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
self.frame_no += 1
return result
def process_logfile(self):
self.frame_no = 0
for line in logfile:
processed = self.process_line(line)
print '\n'.join(processed)
def process_line_echo(self, line):
return [line.rstrip()]
def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
return [self.current_line]
if DEBUG:
print line
_, frameno_str, addr, binary, offset = match.groups()
arch = ""
# Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
colon_pos = binary.rfind(":")
if colon_pos != -1:
maybe_arch = binary[colon_pos+1:]
if is_valid_arch(maybe_arch):
arch = maybe_arch
binary = binary[0:colon_pos]
if arch == "":
arch = guess_arch(addr)
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
if not symbolized_line:
if original_binary != binary:
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
return self.get_symbolized_lines(symbolized_line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='Example of use:\n'
'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
'-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin,
type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
parser.add_argument('--force-system-symbolizer', action='store_true',
help='don\'t use llvm-symbolizer')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
if args.demangle:
demangle = True
if args.s:
binary_name_filter = sysroot_path_filter
sysroot_path = args.s
if args.c:
binutils_prefix = args.c
if args.logfile:
logfile = args.logfile
else:
logfile = sys.stdin
if args.force_system_symbolizer:
force_system_symbolizer = True
if force_system_symbolizer:
assert(allow_system_symbolizer)
loop = SymbolizationLoop(binary_name_filter)
loop.process_logfile()
| mit | 6,929,749,312,844,605,000 | 34.139806 | 95 | 0.62375 | false | 3.463541 | false | false | false |
yoseforb/lollypop | src/fullscreen.py | 1 | 9247 | #!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk, GLib
from cgi import escape
from gettext import gettext as _
from lollypop.define import Lp, ArtSize, Type
from lollypop.utils import seconds_to_string
# Show a fullscreen window showing current track context
class FullScreen(Gtk.Window):
"""
Init window and set transient for parent
@param: parent as Gtk.window
"""
def __init__(self, parent):
Gtk.Window.__init__(self)
self._timeout = None
self._seeking = False
self._signal1_id = None
self._signal2_id = None
self.set_transient_for(parent)
self.set_skip_taskbar_hint(True)
self.set_skip_pager_hint(True)
builder = Gtk.Builder()
builder.add_from_resource('/org/gnome/Lollypop/FullScreen.ui')
main_widget = builder.get_object('fs')
self.add(main_widget)
self._prev_btn = builder.get_object('prev_btn')
self._prev_btn.connect('clicked', self._on_prev_btn_clicked)
self._play_btn = builder.get_object('play_btn')
self._play_btn.connect('clicked', self._on_play_btn_clicked)
self._next_btn = builder.get_object('next_btn')
self._next_btn.connect('clicked', self._on_next_btn_clicked)
self._play_image = builder.get_object('play_image')
self._pause_image = builder.get_object('pause_image')
close_btn = builder.get_object('close_btn')
close_btn.connect('clicked', self._destroy)
self._cover = builder.get_object('cover')
self._title = builder.get_object('title')
self._artist = builder.get_object('artist')
self._album = builder.get_object('album')
self._next = builder.get_object('next')
self._next_cover = builder.get_object('next_cover')
self._progress = builder.get_object('progress_scale')
self._progress.connect('button-release-event',
self._on_progress_release_button)
self._progress.connect('button-press-event',
self._on_progress_press_button)
self._timelabel = builder.get_object('playback')
self._total_time_label = builder.get_object('duration')
self.connect('key-release-event', self._on_key_release_event)
"""
Init signals, set color and go party mode if nothing is playing
"""
def do_show(self):
is_playing = Lp.player.is_playing()
self._signal1_id = Lp.player.connect('current-changed',
self._on_current_changed)
self._signal2_id = Lp.player.connect('status-changed',
self._on_status_changed)
if is_playing:
self._change_play_btn_status(self._pause_image, _('Pause'))
self._on_current_changed(Lp.player)
else:
Lp.player.set_party(True)
if not self._timeout:
self._timeout = GLib.timeout_add(1000, self._update_position)
Gtk.Window.do_show(self)
self._update_position()
self.fullscreen()
"""
Remove signals and unset color
"""
def do_hide(self):
if self._signal1_id:
Lp.player.disconnect(self._signal1_id)
self._signal1_id = None
if self._signal2_id:
Lp.player.disconnect(self._signal2_id)
self._signal2_id = None
if self._timeout:
GLib.source_remove(self._timeout)
self._timeout = None
#######################
# PRIVATE #
#######################
"""
Update View for current track
- Cover
- artist/title
- reset progress bar
- update time/total labels
@param player as Player
"""
def _on_current_changed(self, player):
if player.current_track.id is None:
pass # Impossible as we force play on show
else:
if Lp.player.current_track.id == Type.RADIOS:
self._prev_btn.set_sensitive(False)
self._next_btn.set_sensitive(False)
self._timelabel.hide()
self._total_time_label.hide()
self._progress.hide()
cover = Lp.art.get_radio(player.current_track.artist,
ArtSize.MONSTER)
else:
self._prev_btn.set_sensitive(True)
self._next_btn.set_sensitive(True)
self._timelabel.show()
self._total_time_label.show()
self._progress.show()
cover = Lp.art.get_album(player.current_track.album_id,
ArtSize.MONSTER)
self._cover.set_from_pixbuf(cover)
del cover
album = player.current_track.album
if player.current_track.year != '':
album += " (%s)" % player.current_track.year
self._title.set_text(player.current_track.title)
self._artist.set_text(player.current_track.artist)
self._album.set_text(album)
next_cover = Lp.art.get_album(player.next_track.album_id,
ArtSize.MEDIUM)
self._next_cover.set_from_pixbuf(next_cover)
del next_cover
self._next.set_markup("<b>%s</b> - %s" %
(escape(player.next_track.artist),
escape(player.next_track.title)))
self._progress.set_value(1.0)
self._progress.set_range(0.0, player.current_track.duration * 60)
self._total_time_label.set_text(
seconds_to_string(player.current_track.duration))
self._timelabel.set_text("0:00")
"""
Destroy window if Esc
@param widget as Gtk.Widget
@param event as Gdk.event
"""
def _on_key_release_event(self, widget, event):
if event.keyval == Gdk.KEY_Escape:
self.destroy()
"""
Go to prev track
@param widget as Gtk.Button
"""
def _on_prev_btn_clicked(self, widget):
Lp.player.prev()
"""
Play/pause
@param widget as Gtk.Button
"""
def _on_play_btn_clicked(self, widget):
if Lp.player.is_playing():
Lp.player.pause()
widget.set_image(self._play_image)
else:
Lp.player.play()
widget.set_image(self._pause_image)
"""
Go to next track
@param widget as Gtk.Button
"""
def _on_next_btn_clicked(self, widget):
Lp.player.next()
"""
Update buttons and progress bar
@param obj as unused
"""
def _on_status_changed(self, obj):
is_playing = Lp.player.is_playing()
if is_playing and not self._timeout:
self._timeout = GLib.timeout_add(1000, self._update_position)
self._change_play_btn_status(self._pause_image, _("Pause"))
elif not is_playing and self._timeout:
GLib.source_remove(self._timeout)
self._timeout = None
self._change_play_btn_status(self._play_image, _("Play"))
"""
On press, mark player as seeking
@param unused
"""
def _on_progress_press_button(self, scale, data):
self._seeking = True
"""
Callback for scale release button
Seek player to scale value
@param scale as Gtk.Scale, data as unused
"""
def _on_progress_release_button(self, scale, data):
value = scale.get_value()
self._seeking = False
self._update_position(value)
Lp.player.seek(value/60)
"""
Update play button with image and status as tooltip
@param image as Gtk.Image
@param status as str
"""
def _change_play_btn_status(self, image, status):
self._play_btn.set_image(image)
self._play_btn.set_tooltip_text(status)
"""
Update progress bar position
@param value as int
"""
def _update_position(self, value=None):
if not self._seeking and self._progress.is_visible():
if value is None:
value = Lp.player.get_position_in_track()/1000000
self._progress.set_value(value)
self._timelabel.set_text(seconds_to_string(value/60))
return True
"""
Destroy self
@param widget as Gtk.Button
"""
def _destroy(self, widget):
self.destroy()
| gpl-3.0 | 6,808,737,461,966,250,000 | 35.988 | 77 | 0.571753 | false | 3.931548 | false | false | false |
vfuse/nixstatsagent | nixstatsagent/plugins/mdstat.py | 2 | 1272 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import plugins
import json
class Plugin(plugins.BasePlugin):
__name__ = 'mdstat'
def run(self, config):
'''
Monitor software raid status using mdadm
pip install mdstat
'''
data = os.popen('sudo mdjson').read()
results = {}
try:
data = json.loads(data)
except Exception:
return "Could not load mdstat data"
for key, value in data['devices'].items():
device = {}
if(value['active'] is not True):
device['active'] = 0
else:
device['active'] = 1
if(value['read_only'] is not False):
device['read_only'] = 1
else:
device['read_only'] = 0
if(value['resync'] is not None):
device['resync'] = 1
else:
device['resync'] = 0
device['faulty'] = 0
for disk, diskvalue in value['disks'].items():
if diskvalue['faulty'] is not False:
device['faulty'] = device['faulty'] + 1
results[key] = device
return results
if __name__ == '__main__':
Plugin().execute()
| bsd-3-clause | 1,352,827,415,557,494,500 | 26.652174 | 59 | 0.474843 | false | 4.311864 | false | false | false |
mcheatham/computationalEnvironmentODP | bin/kernels.py | 1 | 3923 | #!/usr/bin/env python3
from bs4 import BeautifulSoup
from requests import get
from bs4.element import Tag
ignore = {'Lisp Machines, Inc.', 'Symbolics', 'Texas Instruments', 'Xerox'}
levels = {}
levels['Apple Inc.'] = {3}
levels['On S/360, S/370, and successor mainframes'] = {3}
levels['On other hardware platforms'] = {3}
def before(tag1, tag2, startTag):
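    # Walks the previous siblings of startTag and reports whether heading text tag1
    # is encountered before tag2; an empty tag1 yields False, an empty tag2 yields True.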
if len(tag1) == 0: return False;
if len(tag2) == 0 :return True;
tempTag = startTag
while tempTag and tempTag.previous_sibling:
tempTag = tempTag.previous_sibling
if isinstance(tempTag, Tag):
if tag1 in tempTag.getText():
return True
elif tag2 in tempTag.getText():
return False
return True
def includeLI(tag):
for p in tag.parents:
# ignores tags in the page's table of contents, navigation header, and footer
if 'id' in p.attrs.keys() and ('toc' in p['id'] or 'mw-navigation' in p['id'] or 'footer' in p['id']):
return False;
# ignores links to external references and wikipedia categories
if 'class' in p.attrs.keys() and ('references' in p['class'] or 'reference' in p['class'] or 'catlinks' in p['class']):
return False;
# ignores navigation links
if 'role' in p.attrs.keys() and 'navigation' in p['role']:
return False;
# ignores the 'See also' links
if tag.parent and tag.parent.find_previous_sibling('h2') and 'See also' in tag.parent.find_previous_sibling('h2').text:
return False;
# ignores the external links
if tag.parent and tag.parent.find_previous_sibling('h2') and 'External links' in tag.parent.find_previous_sibling('h2').text:
return False;
return True;
def includeA(tag):
# ignores tags specified directly in the ignore list
if tag.text in ignore:
return False;
# ignores links to external references and wikipedia categories
p = tag.parent
if p and 'class' in p.attrs.keys() and 'reference' in p['class']:
return False;
    # this page displays operating systems at various levels of specificity, from kernel down to
    # particular distributions in some cases. the script allows the user to specify the correct
    # level(s) of each list to pull using the 'levels' dictionary defined above. the code below
    # ensures that the tag is at an acceptable level. if the level is not specified, top-level
# items are pulled.
h4Depth = -1 # -1 because it takes one move to get out of the <a> tag itself
h4Heading = ''
temp = tag
while temp and not temp.find_previous_sibling('h4'):
h4Depth += 1
temp = temp.parent
if temp and temp.find_previous_sibling('h4') and temp.find_previous_sibling('h4').select('span'):
h4Heading = temp.find_previous_sibling('h4').select('span')[0].getText()
h3Depth = -1
h3Heading = ''
temp = tag
while temp and not temp.find_previous_sibling('h3'):
h3Depth += 1
temp = temp.parent
if temp and temp.find_previous_sibling('h3') and temp.find_previous_sibling('h3').select('span'):
h3Heading = temp.find_previous_sibling('h3').select('span')[0].getText()
if h4Depth < h3Depth or before(h4Heading, h3Heading, temp) and h4Heading in levels:
return h4Depth in levels[h4Heading]
elif h3Heading in levels:
return h3Depth in levels[h3Heading];
else:
return h3Depth == 1
baseUrl = 'https://en.wikipedia.org/wiki/List_of_operating_systems'
doc = get(baseUrl).text
soup = BeautifulSoup(doc, 'html.parser')
listItems = soup.select('li')
answers = set()
for i in listItems:
if not includeLI(i): continue
links = i.select('a')
if links and includeA(links[0]) and not links[0].getText() in answers:
answers.add(links[0].getText())
for answer in sorted(answers):
print(answer)
| mit | -7,770,033,104,525,160,000 | 29.889764 | 129 | 0.647973 | false | 3.666355 | false | false | false |
dariox2/CADL | test/testyida6b.py | 1 | 4901 |
#
# test shuffle_batch - 6b
#
# generates a pair of files (color+bn)
# pending: make the tuple match
#
print("Loading tensorflow...")
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from libs import utils
import datetime
tf.set_random_seed(1)
def create_input_pipeline_yida(files1, files2, batch_size, n_epochs, shape, crop_shape=None,
crop_factor=1.0, n_threads=1, seed=None):
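    # Builds two parallel (non-shuffled) file queues so each color image stays paired
    # with its matching b/w image, then resizes/crops both and batches them together.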
producer1 = tf.train.string_input_producer(
files1, capacity=len(files1), shuffle=False)
producer2 = tf.train.string_input_producer(
files2, capacity=len(files2), shuffle=False)
# We need something which can open the files and read its contents.
reader = tf.WholeFileReader()
# We pass the filenames to this object which can read the file's contents.
# This will create another queue running which dequeues the previous queue.
keys1, vals1 = reader.read(producer1)
keys2, vals2 = reader.read(producer2)
# And then have to decode its contents as we know it is a jpeg image
imgs1 = tf.image.decode_jpeg(vals1, channels=3)
imgs2 = tf.image.decode_jpeg(vals2, channels=3)
# We have to explicitly define the shape of the tensor.
# This is because the decode_jpeg operation is still a node in the graph
# and doesn't yet know the shape of the image. Future operations however
# need explicit knowledge of the image's shape in order to be created.
imgs1.set_shape(shape)
imgs2.set_shape(shape)
    # Next we'll centrally crop the image to the requested crop shape.
    # This operation requires explicit knowledge of the image's shape.
if shape[0] > shape[1]:
rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
int(crop_shape[1] / crop_factor)]
else:
rsz_shape = [int(crop_shape[0] / crop_factor),
int(shape[1] / shape[0] * crop_shape[1] / crop_factor)]
rszs1 = tf.image.resize_images(imgs1, rsz_shape[0], rsz_shape[1])
rszs2 = tf.image.resize_images(imgs2, rsz_shape[0], rsz_shape[1])
crops1 = (tf.image.resize_image_with_crop_or_pad(
rszs1, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs1)
crops2 = (tf.image.resize_image_with_crop_or_pad(
rszs2, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs2)
# Now we'll create a batch generator that will also shuffle our examples.
# We tell it how many it should have in its buffer when it randomly
# permutes the order.
min_after_dequeue = len(files1) // 5
# The capacity should be larger than min_after_dequeue, and determines how
# many examples are prefetched. TF docs recommend setting this value to:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
capacity = min_after_dequeue + (n_threads + 1) * batch_size
# Randomize the order and output batches of batch_size.
batch = tf.train.shuffle_batch([crops1, crops2],
enqueue_many=False,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=n_threads,
#seed=seed,
)#shapes=(64,64,3))
# alternatively, we could use shuffle_batch_join to use multiple reader
# instances, or set shuffle_batch's n_threads to higher than 1.
return batch
def CELEByida(path):
fs = [os.path.join(path, f)
for f in os.listdir(path) if f.endswith('.jpg')]
fs=sorted(fs)
return fs
print("Loading celebrities...")
from libs.datasets import CELEB
files1 = CELEByida("../session-1/img_align_celeba/") # only 100
files2 = CELEByida("../session-1/img_align_celeba_n/") # only 100
from libs.dataset_utils import create_input_pipeline
batch_size = 8
n_epochs = 3
input_shape = [218, 178, 3]
crop_shape = [64, 64, 3]
crop_factor = 0.8
seed=15
batch1 = create_input_pipeline_yida(
files1=files1, files2=files2,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape,
seed=seed)
mntg=[]
sess = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
batres = sess.run(batch1)
batch_xs1=np.array(batres[0])
batch_xs2=np.array(batres[1])
for i in range(0,len(batch_xs1)):
img=batch_xs1[i] / 255.0
mntg.append(img)
img=batch_xs2[i] / 255.0
mntg.append(img)
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
m=utils.montage(mntg, saveto="montage_"+TID+".png")
# mntg[0]=color
# mntg[1]=b/n
plt.figure(figsize=(5, 5))
plt.imshow(m)
plt.show()
# eop
| apache-2.0 | 4,011,695,546,058,428,000 | 29.823899 | 98 | 0.64293 | false | 3.254316 | false | false | false |
CartoDB/cartoframes | cartoframes/viz/layout.py | 1 | 9384 | from . import constants
from .map import Map
from .html import HTMLLayout
from ..utils.utils import get_center, get_credentials
from ..utils.metrics import send_metrics
from .kuviz import KuvizPublisher
class Layout:
"""Create a layout of visualizations in order to compare them.
Args:
maps (list of :py:class:`Map <cartoframes.viz.Map>`): List of
maps. Zero or more of :py:class:`Map <cartoframes.viz.Map>`.
n_size (number, optional): Number of columns of the layout
m_size (number, optional): Number of rows of the layout
viewport (dict, optional): Properties for display of the maps viewport.
Keys can be `bearing` or `pitch`.
        is_static (boolean, optional): Default is False, so all the maps in each visualization
            are interactive. To render them as static images for performance reasons,
            set `is_static` to True.
map_height (number, optional): Height in pixels for each visualization.
Default is 250.
full_height (boolean, optional): When a layout visualization is published, it
will fit the screen height. Otherwise, each visualization height will be
`map_height`. Default True.
Raises:
ValueError: if the input elements are not instances of :py:class:`Map <cartoframes.viz.Map>`.
Examples:
Basic usage.
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ])
Display a 2x2 layout.
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ], 2, 2)
Custom Titles.
>>> Layout([
... Map(Layer('table_in_your_account'), title="Visualization 1 custom title"),
... Map(Layer('table_in_your_account'), title="Visualization 2 custom title")),
>>> ])
Viewport.
>>> Layout([
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account'))
>>> ], viewport={ 'zoom': 2 })
>>> Layout([
... Map(Layer('table_in_your_account'), viewport={ 'zoom': 0.5 }),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account'))
>>> ], viewport={ 'zoom': 2 })
Create an static layout
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ], is_static=True)
"""
def __init__(self,
maps,
n_size=None,
m_size=None,
viewport=None,
map_height=250,
full_height=True,
is_static=False,
**kwargs):
self._maps = maps
self._layout = _init_layout(self._maps, is_static, viewport)
self._n_size = n_size if n_size is not None else len(self._layout)
self._m_size = m_size if m_size is not None else constants.DEFAULT_LAYOUT_M_SIZE
self._viewport = viewport
self._is_static = is_static
self._map_height = map_height
self._full_height = full_height
self._publisher = None
self._carto_vl_path = kwargs.get('_carto_vl_path', None)
self._airship_path = kwargs.get('_airship_path', None)
def _repr_html_(self):
self._html_layout = HTMLLayout()
map_height = '100%' if self._full_height else '{}px'.format(self._map_height)
self._html_layout.set_content(
maps=self._layout,
size=['100%', self._map_height * self._m_size],
n_size=self._n_size,
m_size=self._m_size,
is_static=self._is_static,
map_height=map_height,
full_height=self._full_height,
_carto_vl_path=self._carto_vl_path,
_airship_path=self._airship_path
)
return self._html_layout.html
@send_metrics('map_published')
def publish(self, name, password, credentials=None, if_exists='fail', maps_api_key=None):
"""Publish the layout visualization as a CARTO custom visualization.
Args:
name (str): The visualization name on CARTO.
password (str): By setting it, your visualization will be protected by
password. When someone tries to show the visualization, the password
will be requested. To disable password you must set it to None.
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
A Credentials instance. If not provided, the credentials will be automatically
obtained from the default credentials if available. It is used to create the
publication and also to save local data (if exists) into your CARTO account.
if_exists (str, optional): 'fail' or 'replace'. Behavior in case a publication with
the same name already exists in your account. Default is 'fail'.
maps_api_key (str, optional): The Maps API key used for private datasets.
Example:
Publishing the map visualization.
>>> tlayout = Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ])
>>> tlayout.publish('Custom Map Title', password=None)
"""
_credentials = get_credentials(credentials)
layers = []
for viz_map in self._maps:
for layer in viz_map.layers:
layers.append(layer)
self._publisher = _get_publisher(_credentials)
self._publisher.set_layers(layers, maps_api_key)
html = self._get_publication_html()
return self._publisher.publish(html, name, password, if_exists)
def update_publication(self, name, password, if_exists='fail'):
"""Update the published layout visualization.
Args:
name (str): The visualization name on CARTO.
password (str): setting it your visualization will be protected by
password and using `None` the visualization will be public.
if_exists (str, optional): 'fail' or 'replace'. Behavior in case a publication with the same name already
exists in your account. Default is 'fail'.
Raises:
PublishError: if the map has not been published yet.
"""
html = self._get_publication_html()
return self._publisher.update(html, name, password, if_exists)
def _get_publication_html(self):
if not self._publisher:
_credentials = get_credentials(None)
self._publisher = _get_publisher(_credentials)
html_layout = HTMLLayout('templates/viz/main_layout.html.j2')
layers = self._publisher.get_layers()
layer_index = 0
for viz_map in self._maps:
for layer in viz_map.layers:
layer.credentials = layers[layer_index].credentials
layer_index += 1
maps = _init_layout(self._maps, self._is_static, self._viewport)
map_height = '100%' if self._full_height else '{}px'.format(self._map_height)
html_layout.set_content(
maps=maps,
size=['100%', self._map_height * self._m_size],
n_size=self._n_size,
m_size=self._m_size,
is_static=self._is_static,
is_embed=True,
map_height=map_height
)
return html_layout.html
def _init_layout(maps, is_static, viewport):
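    # Validate that every entry is a Map and propagate the layout-level
    # is_static/viewport settings down to each map before collecting its content.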
layout = []
for map_index, viz in enumerate(maps):
if not isinstance(viz, Map):
raise ValueError('All the elements in the Layout should be an instance of Map.')
viz.is_static = _get_is_static(viz.is_static, is_static)
viz.viewport = _get_viewport(viz.viewport, viewport)
viz.camera = _get_camera(viz.viewport)
for layer in viz.layers:
layer.map_index = map_index
layer.reset_ui(viz)
layout.append(viz.get_content())
return layout
def _get_viewport(map_settings_viewport, layout_viewport):
if map_settings_viewport is not None:
return map_settings_viewport
return layout_viewport
def _get_camera(viewport):
camera = None
if viewport is not None:
camera = {
'center': get_center(viewport),
'zoom': viewport.get('zoom'),
'bearing': viewport.get('bearing'),
'pitch': viewport.get('pitch')
}
return camera
def _get_is_static(map_settings_is_static, layout_is_static):
if map_settings_is_static is not None:
return map_settings_is_static
return layout_is_static
def _get_publisher(credentials):
return KuvizPublisher(credentials)
| bsd-3-clause | 7,968,791,184,678,179,000 | 36.090909 | 117 | 0.586104 | false | 4 | false | false | false |
alphacsc/alphacsc | examples/csc/plot_lfp_data.py | 1 | 3791 | """
==============================
CSC to learn LFP spiking atoms
==============================
Here, we show how CSC can be used to learn spiking
atoms from Local Field Potential (LFP) data [1].
[1] Hitziger, Sebastian, et al.
Adaptive Waveform Learning: A Framework for Modeling Variability in
Neurophysiological Signals. IEEE Transactions on Signal Processing (2017).
"""
###############################################################################
# First, let us fetch the data (~14 MB)
import os
from mne.utils import _fetch_file
url = ('https://github.com/hitziger/AWL/raw/master/Experiments/data/'
'LFP_data_contiguous_1250_Hz.mat')
fname = './LFP_data_contiguous_1250_Hz.mat'
if not os.path.exists(fname):
_fetch_file(url, fname)
###############################################################################
# It is a mat file, so we use scipy to load it
from scipy import io
data = io.loadmat(fname)
X, sfreq = data['X'].T, float(data['sfreq'])
###############################################################################
# And now let us look at the data
import numpy as np
import matplotlib.pyplot as plt
start, stop = 11000, 15000
times = np.arange(start, stop) / sfreq
plt.plot(times, X[0, start:stop], color='b')
plt.xlabel('Time (s)')
plt.ylabel(r'$\mu$ V')
plt.xlim([9., 12.])
###############################################################################
# and filter it using a convenient function from MNE. This will remove low
# frequency drifts, but we keep the high frequencies
from mne.filter import filter_data
X = filter_data(
X.astype(np.float64), sfreq, l_freq=1, h_freq=None, fir_design='firwin')
###############################################################################
# Now, we define the parameters of our model.
reg = 6.0
n_times = 2500
n_times_atom = 350
n_trials = 100
n_atoms = 3
n_iter = 60
###############################################################################
# Let's stick to one random state for now, but if you want to learn how to
# select the random state, consult :ref:`this example
# <sphx_glr_auto_examples_plot_simulate_randomstate.py>`.
random_state = 10
###############################################################################
# Now, we epoch the trials
overlap = 0
starts = np.arange(0, X.shape[1] - n_times, n_times - overlap)
stops = np.arange(n_times, X.shape[1], n_times - overlap)
X_new = []
for idx, (start, stop) in enumerate(zip(starts, stops)):
if idx >= n_trials:
break
X_new.append(X[0, start:stop])
X_new = np.vstack(X_new)
del X
###############################################################################
# We remove the mean and scale to unit variance.
X_new -= np.mean(X_new)
X_new /= np.std(X_new)
###############################################################################
# The convolutions can result in edge artifacts at the edges of the trials.
# Therefore, we discount the contributions from the edges by windowing the
# trials.
from numpy import hamming
X_new *= hamming(n_times)[None, :]
###############################################################################
# Of course, in a data-limited setting we want to use as much of the data as
# possible. If this is the case, you can set `overlap` to non-zero (for example
# half the epoch length).
#
# Now, we run regular CSC since the trials are not too noisy
from alphacsc import learn_d_z
pobj, times, d_hat, z_hat, reg = learn_d_z(X_new, n_atoms, n_times_atom,
reg=reg, n_iter=n_iter,
random_state=random_state, n_jobs=1)
###############################################################################
# Let's look at the atoms now.
plt.figure()
plt.plot(d_hat.T)
plt.show()
| bsd-3-clause | -4,877,237,652,640,226,000 | 34.101852 | 79 | 0.512002 | false | 3.837045 | false | false | false |
AlessioCasco/gandi-dyndns | gandi-dyndns.py | 1 | 9763 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gandi-dyndns
@author: AlessioCasco
"""
from bottle import route, run, request, response
from optparse import OptionParser
import logging as log
import xmlrpclib
import json
import sys
import re
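# Cache of the last IP stored at Gandi for each fqdn, so the API is only
# contacted when the received IP actually differs from the one on record.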
gandi_fqdn_ip = {}
@route('/ping', method=['GET', 'POST'])
def ping():
'''Function for monitoring/ping'''
response.headers['Server'] = 'gandi-dyndns'
response.status = 200
    return('I\'m alive!\n')
@route('/nic_update', method=['GET', 'POST'])
def gandi_dyndns():
'''Main function'''
response.headers['Server'] = 'gandi-dyndns'
# dictionary gandi_fqdn_ip, has fqdn:ip key:value from all the legit requests
global gandi_fqdn_ip
    # dictionary new_fqdn_ip, has fqdn:ip key:value from the current request
new_fqdn_ip = {}
    # define the action to perform in the gandi_api function
action = ''
try:
fqdn, new_ip, fqdn_match = fetch_parameters()
except TypeError:
response.status = 400
return
# create new dictionary with the info we got from the webserver
new_fqdn_ip[fqdn] = new_ip
# check if we need to fetch the ip from gandi
try:
if new_fqdn_ip[fqdn] != gandi_fqdn_ip[fqdn]:
log.debug('Received IP differs from the one saved on Gandi, will update it')
action = 'update'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
return
except KeyError:
log.debug('Do not know the current Gandi IP for fqdn %s, will fetch it' % fqdn)
try:
action = 'fetch'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
if new_fqdn_ip[fqdn] != gandi_fqdn_ip[fqdn]:
action = 'update'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
return
except ValueError:
response.status = 404
return
log.debug('Nothing to do, received IP is same as the one configured on gandi for %s' % fqdn)
return
def fetch_parameters():
'''Fetch parameters from the GET request'''
new_ip = ''
method = request.environ.get('REQUEST_METHOD')
# check for missing parameters
if not request.params.ip and not request.params.fqdn:
log.error('Received malformed request, both parameters (fqdn & ip) are missing. Got: \"%s\"' % request.url)
return
elif not request.params.ip:
new_ip = request.environ.get('REMOTE_ADDR')
log.debug('IP parameter is missing, will use client source one: %s' % new_ip)
elif not request.params.fqdn:
log.error('Received malformed request, fqdn parameter is missing. Got: \"%s\"' % request.url)
return
if not new_ip:
new_ip = request.params.ip
fqdn = request.params.fqdn
# check if parameters have correct informations
fqdn_match = re.match(r'^([a-zA-Z0-9][a-zA-Z0-9-]{1,61})\.([a-zA-Z0-9][a-zA-Z0-9-]{1,61}\.[a-zA-Z]{2,}$)', fqdn)
ip_match = re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', new_ip)
priv_ip_match = re.match(r'^(?:10|127|172\.(?:1[6-9]|2[0-9]|3[01])|192\.168)\..*', new_ip)
if not fqdn_match and not ip_match:
log.error('Received invalid values on both parameters. Got fqdn:\"%s\" & IP: %s' % (fqdn, new_ip))
return
elif not ip_match:
log.error('Received invalid ip value. Got %s' % new_ip)
return
elif priv_ip_match:
log.error('Received IP is not a public one. Got %s' % new_ip)
return
elif not fqdn_match:
log.error('Received invalid fqdn value. Got \"%s\"' % fqdn)
return
log.debug('Received %s request: fqdn:\"%s\" & IP: %s' % (method, fqdn, new_ip))
return fqdn, new_ip, fqdn_match
def gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action):
    '''Function for managing the Gandi API'''
# define some variables about gandi
api = xmlrpclib.ServerProxy('https://rpc.gandi.net/xmlrpc/')
apikey = config['apikey']
hostname = (fqdn_match.group(1))
domain = (fqdn_match.group(2))
# check if the domain is managed by the apikey provided
if not (api.domain.list(apikey, {'~fqdn': domain})):
log.error('Apikey provided does not manage %s domain' % domain)
raise ValueError('Apikey provided does not manage %s domain' % domain)
# check available zones
zones = api.domain.zone.list(apikey)
for zone in zones:
if (zone['name']) == domain:
zone_id = zone['id']
log.debug('Zone id %s found, for domain %s' % (zone_id, domain))
break
else:
log.error('Could not find zone file called %s, you must have a zone having same name as the domain you want to manage' % domain)
raise ValueError('Could not find zone file called %s, you must have a zone having same name as the domain you want to manage' % domain)
# check if we have to fetch the gandi api
if action == 'fetch':
        # check & retrieve information from records in the zone
records = api.domain.zone.record.list(apikey, zone_id, 0)
for record in records:
if (record['name'] == hostname and record['type'].lower() == 'a'):
# add fqdn/ip to the gandi_fqdn_ip dictionary
gandi_fqdn_ip[fqdn] = record['value']
log.debug('DNS \'A\' record found for subdomain \'%s\' having value %s' % (hostname, gandi_fqdn_ip[fqdn]))
break
else:
log.error('Unable to find a DNS \'A\' record for subdomain \'%s\'' % hostname)
raise ValueError('Unable to find a DNS \'A\' record for subdomain \'%s\'' % hostname)
return gandi_fqdn_ip
# check if we have to update the the ip
elif action == 'update':
# create a new zone from the existing one
zone_version = api.domain.zone.version.new(apikey, zone_id)
log.debug('New zone created, new version: %s' % zone_version)
# delete the A record from the new version
api.domain.zone.record.delete(apikey, zone_id, zone_version, {"type": ["A"], "name": [hostname]})
log.debug('Deleted \'A\' record from new zone version %s' % zone_version)
# add the A record we want
new_record = api.domain.zone.record.add(apikey, zone_id, zone_version, {"type": "A", "name": hostname, "value": new_fqdn_ip[fqdn], "ttl": 300})
log.debug('New \'A\' record added as follow: %s' % new_record)
# active the new zone version
if api.domain.zone.version.set(apikey, zone_id, zone_version):
            log.info('New IP %s for fqdn %s updated successfully.' % (new_fqdn_ip[fqdn], fqdn))
else:
log.error('Unable to update IP %s for fqdn %s' % (new_fqdn_ip[fqdn], fqdn))
return
# update gandi_fqdn_ip with the value just saved in the new zone version
gandi_fqdn_ip[fqdn] = new_fqdn_ip[fqdn]
return gandi_fqdn_ip
def init_application():
def get_options():
'''Load options from the command line'''
default_config = "config.json"
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option(
"-c",
"--config",
dest="configfile",
default=default_config,
help='Config file relative or absolute path. Default is %s' % default_config)
(options, args) = parser.parse_args()
if options.configfile is not None:
options.configfile = options.configfile.strip(' ')
return options
def read_config_file(configfile):
'''Loads the config file from disk'''
try:
with open(configfile) as f:
config = validate_config(json.load(f))
return config
# catch if file doesn't exist
except IOError:
print('Config file %s not found' % configfile)
sys.exit(1)
# catch if json file is not formatted corectly
except ValueError:
print('Json file is not formatted properly')
sys.exit(1)
def validate_config(raw_config):
'''Checks the config file.'''
# check if required patameters are present inside the config
if 'port' not in raw_config or 'bind' not in raw_config or 'apikey' not in raw_config or 'logging' not in raw_config:
print('Config file has missing parameters')
sys.exit(1)
else:
return raw_config
def configure_logging(config):
'''Configure logging'''
if config['logging']['log_enable'] == "false":
log.disable('CRITICAL')
return
elif config['logging']['log_enable'] == "true":
try:
log.basicConfig(
format='%(asctime)-15s [%(levelname)s] %(message)s',
filename=config['logging']['log_file'],
level=config['logging']['log_level'])
except ValueError:
print('Log level is not set with a correct value, check the README.md for the full list')
sys.exit(1)
except IOError:
print('Unable to create the log file, check if gandi-dyndns has write permissions')
sys.exit(1)
return
else:
            print('Bad config file, log_enable is not set with a correct value, (true|false) are the only two options')
sys.exit(1)
options = get_options()
config = read_config_file(options.configfile)
configure_logging(config)
return config
if __name__ == "__main__":
config = init_application()
# init webserver
run(host=config["bind"], port=config["port"], quiet=True)
| mit | -3,330,188,730,535,708,700 | 40.021008 | 151 | 0.597767 | false | 3.691115 | true | false | false |
gappleto97/Senior-Project | common/peers.py | 1 | 16088 | from multiprocessing import Queue
import multiprocessing, os, pickle, select, socket, sys, time, rsa, traceback
from common.safeprint import safeprint
from common.bounty import *
global ext_port
global ext_ip
global port
global myPriv
global myPub
global propQueue
ext_port = -1
ext_ip = ""
port = 44565
myPub, myPriv = rsa.newkeys(1024)
propQueue = multiprocessing.Queue()
seedlist = [("127.0.0.1", 44565), ("localhost", 44565),
("10.132.80.128", 44565)]
peerlist = [("24.10.111.111", 44565)]
remove = []
bounties = []
# constants
peers_file = "data" + os.sep + "peerlist.pickle"
key_request = "Key Request".encode('utf-8')
close_signal = "Close Signal".encode("utf-8")
peer_request = "Requesting Peers".encode("utf-8")
bounty_request = "Requesting Bounties".encode("utf-8")
incoming_bounties = "Incoming Bounties".encode("utf-8")
incoming_bounty = "Incoming Bounty".encode("utf-8")
valid_signal = "Bounty was valid".encode("utf-8")
invalid_signal = "Bounty was invalid".encode("utf-8")
end_of_message = "End of message".encode("utf-8")
sig_length = len(max(
close_signal, peer_request, bounty_request, incoming_bounties,
incoming_bounty, valid_signal, invalid_signal, key=len))
def pad(string):
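    # Pad the string with spaces so its length is a whole multiple of sig_length
    # (the length of the longest signal), giving every signal a fixed packet size.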
return string + " ".encode('utf-8') * (sig_length - (((len(string) - 1) % sig_length) + 1))
close_signal = pad(close_signal)
peer_request = pad(peer_request)
bounty_request = pad(bounty_request)
incoming_bounties = pad(incoming_bounties)
incoming_bounty = pad(incoming_bounty)
valid_signal = pad(valid_signal)
invalid_signal = pad(invalid_signal)
end_of_message = pad(end_of_message)
signals = [close_signal, peer_request, bounty_request, incoming_bounty, valid_signal, invalid_signal]
def send(msg, conn, key):
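    # Encrypts msg for the peer's public key and sends it in 117-byte chunks
    # (the PKCS#1 v1.5 limit for a 1024-bit RSA key), requesting the key first if needed.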
while key is None:
safeprint("Key not found. Requesting key")
conn.send(key_request)
try:
key = pickle.loads(conn.recv(1024))
key = rsa.PublicKey(key[0], key[1])
safeprint("Key received")
except EOFError:
continue
if not isinstance(msg, type("a".encode('utf-8'))):
msg = msg.encode('utf-8')
x = 0
while x < len(msg) - 117:
conn.sendall(rsa.encrypt(msg[x:x+117], key))
x += 117
conn.sendall(rsa.encrypt(msg[x:], key))
conn.sendall(rsa.encrypt(end_of_message, key))
return key
def recv(conn):
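    # Reads 128-byte ciphertext blocks, decrypts them with our private key and
    # concatenates the plaintext until the end_of_message marker arrives.
    # A peer asking for our key (key_request) is answered inline.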
received = "".encode('utf-8')
a = ""
try:
while True:
a = conn.recv(128)
if a == key_request:
safeprint("Key requested. Sending key")
conn.sendall(pickle.dumps((myPriv.n, myPriv.e), 0))
continue
a = rsa.decrypt(a, myPriv)
safeprint("Packet = " + str(a), verbosity=3)
if a == end_of_message:
return received
received += a
except rsa.pkcs1.DecryptionError as error:
safeprint("Decryption error---Content: " + str(a))
return "".encode('utf-8')
def get_lan_ip():
"""Retrieves the LAN ip. Expanded from http://stackoverflow.com/a/28950776"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('8.8.8.8', 23))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
def getFromFile():
"""Load peerlist from a file"""
if os.path.exists(peers_file):
try:
peerlist.extend(pickle.load(open(peers_file, "rb")))
trimPeers()
except:
safeprint("Could not load peerlist from file")
def saveToFile():
"""Save peerlist to a file"""
if not os.path.exists(peers_file.split(os.sep)[0]):
os.mkdir(peers_file.split(os.sep)[0])
pickle.dump(peerlist[:], open(peers_file, "wb"), 0)
def getFromSeeds():
"""Make peer requests to each address on the seedlist"""
for seed in seedlist:
safeprint(seed, verbosity=1)
peerlist.extend(requestPeerlist(seed))
time.sleep(1)
def requestPeerlist(address):
"""Request the peerlist of another node. Currently has additional test commands"""
conn = socket.socket()
conn.settimeout(5)
safeprint(address, verbosity=1)
try:
conn.connect(address)
key = send(peer_request, conn, None)
received = recv(conn)
safeprint(pickle.loads(received), verbosity=2)
if recv(conn) == peer_request:
handlePeerRequest(conn, False, key=key, received=pickle.loads(received))
recv(conn)
conn.close()
return pickle.loads(received)
except Exception as error:
safeprint("Failed:" + str(type(error)))
safeprint(error)
remove.extend([address])
return []
def requestBounties(address):
"""Request the bountylist of another node"""
conn = socket.socket()
conn.settimeout(5)
safeprint(address, verbosity=1)
try:
conn.connect(address)
key = send(bounty_request, conn, None)
received = recv(conn)
if recv(conn) == bounty_request:
handleBountyRequest(conn, False, key=key, received=pickle.loads(received))
safeprint(recv(conn))
conn.close()
addBounties(pickle.loads(received))
except Exception as error:
safeprint("Failed:" + str(type(error)))
safeprint(error)
remove.extend([address])
def initializePeerConnections(newPort, newip, newport):
"""Populate the peer list from a previous session, seeds, and from the peer list if its size is less than 12. Then save this new list to a file"""
port = newPort # Does this affect the global variable?
ext_ip = newip # Does this affect the global variable?
ext_port = newport # Does this affect the global variable?
safeprint([ext_ip, ext_port])
getFromFile()
safeprint("peers fetched from file", verbosity=1)
getFromSeeds()
safeprint("peers fetched from seedlist", verbosity=1)
trimPeers()
if len(peerlist) < 12:
safeprint(len(peerlist))
newlist = []
for peer in peerlist:
newlist.extend(requestPeerlist(peer))
peerlist.extend(newlist)
trimPeers()
safeprint("getting bounties from peers and seeds", verbosity=1)
for peer in peerlist[:] + seedlist[:]:
requestBounties(peer)
safeprint("peer network extended", verbosity=1)
saveToFile()
safeprint("peer network saved to file", verbosity=1)
safeprint(peerlist)
safeprint([ext_ip, ext_port])
def trimPeers():
"""Trim the peerlist to a single set, and remove any that were marked as erroneous before"""
temp = list(set(peerlist[:]))
for peer in remove:
try:
del temp[temp.index(peer)]
except:
continue
del remove[:]
del peerlist[:]
peerlist.extend(temp)
def listen(port, outbound, q, v, serv):
"""BLOCKING function which should only be run in a daemon thread. Listens and responds to other nodes"""
if serv:
from server.bounty import verify, addBounty
server = socket.socket()
server.bind(("0.0.0.0", port))
server.listen(10)
server.settimeout(5)
if sys.version_info[0] < 3 and sys.platform == "win32":
server.setblocking(True)
global ext_ip, ext_port
if outbound is True:
safeprint("UPnP mode is disabled")
else:
safeprint("UPnP mode is enabled")
if not portForward(port):
outbound = True
safeprint([outbound, ext_ip, ext_port])
q.put([outbound, ext_ip, ext_port])
while v.value: # is True is implicit
safeprint("listening on " + str(get_lan_ip()) + ":" + str(port), verbosity=3)
if not outbound:
safeprint("forwarded from " + ext_ip + ":" + str(ext_port), verbosity=3)
try:
conn, addr = server.accept()
server.setblocking(True)
conn.setblocking(True)
safeprint("connection accepted")
packet = recv(conn)
safeprint("Received: " + packet.decode(), verbosity=3)
key = None
if packet == peer_request:
key = handlePeerRequest(conn, True, key=key)
elif packet == bounty_request:
key = handleBountyRequest(conn, True, key=key)
elif packet == incoming_bounty:
key = handleIncomingBounty(conn, key=key)
send(close_signal, conn, key)
conn.close()
server.settimeout(5)
safeprint("connection closed")
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
traceback.print_exc()
def handlePeerRequest(conn, exchange, key=None, received=[]):
"""Given a socket, send the proper messages to complete a peer request"""
if ext_port != -1:
unfiltered = peerlist[:] + [((ext_ip, ext_port), myPub.n, myPub.e)]
unfiltered = peerlist[:]
filtered = list(set(unfiltered) - set(received))
safeprint("Unfiltered: " + str(unfiltered), verbosity=3)
safeprint("Filtered: " + str(filtered), verbosity=3)
toSend = pickle.dumps(filtered, 0)
safeprint("Sending")
key = send(toSend, conn, key)
if exchange:
send(peer_request, conn, key)
received = recv(conn)
safeprint("Received exchange", verbosity=1)
safeprint(pickle.loads(received), verbosity=3)
peerlist.extend(pickle.loads(received))
trimPeers()
return key
def handleBountyRequest(conn, exchange, key=None, received=[]):
"""Given a socket, send the proper messages to complete a bounty request"""
unfiltered = getBountyList()
filtered = list(set(unfiltered) - set(received))
toSend = pickle.dumps(filtered, 0)
safeprint("Sending")
key = send(toSend, conn, key)
if exchange:
send(bounty_request, conn, key)
received = recv(conn)
safeprint("Received exchange")
try:
safeprint(pickle.loads(received), verbosity=2)
bounties = pickle.loads(received)
valids = addBounties(bounties)
toSend = []
for i in range(len(bounties)):
if valids[i] >= 0: # If the bounty is valid and not a duplicate, add it to propagation list
toSend.append(bounties[i])
propQueue.put((incoming_bounties, toSend))
except Exception as error:
safeprint("Could not add bounties")
safeprint(type(error))
traceback.print_exc()
# later add function to request without charity bounties
return key
def handleIncomingBounty(conn, key=None):
"""Given a socket, store an incoming bounty & report it valid or invalid"""
received = recv(conn)
safeprint("Adding bounty: " + received.decode())
try:
valid = addBounty(received)
if valid >= -1: # If valid, even if a duplicate, send valid signal
safeprint("Sending valid signal")
send(valid_signal, conn, key)
if valid >= 0: # If valid and not already received, propagate
propQueue.put((incoming_bounty, received))
else:
send(invalid_signal, conn, key)
except Exception as error:
send(invalid_signal, conn, key)
safeprint("Incoming failed: " + str(type(error)))
safeprint(error)
traceback.print_exc()
return key
def propagate(tup):
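    # tup is (bounty, address): forward the pickled bounty to a single peer.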
try:
conn = socket.socket()
address = tup[1]
conn.connect(address)
key = send(incoming_bounty, conn, None)
send(pickle.dumps(tup[0], 0), conn, key)
recv(conn)
conn.close()
except socket.error as Error:
safeprint("Connection to " + str(address) + " failed; cannot propagate")
def portForward(port):
"""Attempt to forward a port on your router to the specified local port. Prints lots of debug info."""
try:
import miniupnpc
u = miniupnpc.UPnP(None, None, 200, port)
# Begin Debug info
        safeprint('initial (default) values :')
safeprint(' discoverdelay' + str(u.discoverdelay))
safeprint(' lanaddr' + str(u.lanaddr))
safeprint(' multicastif' + str(u.multicastif))
safeprint(' minissdpdsocket' + str(u.minissdpdsocket))
safeprint('Discovering... delay=%ums' % u.discoverdelay)
safeprint(str(u.discover()) + 'device(s) detected')
# End Debug info
u.selectigd()
global ext_ip
ext_ip = u.externalipaddress()
safeprint("external ip is: " + str(ext_ip))
for i in range(0, 20):
try:
safeprint("Port forward try: " + str(i), verbosity=1)
if u.addportmapping(port+i, 'TCP', get_lan_ip(), port, 'Bounty Net', ''):
global ext_port
ext_port = port + i
safeprint("External port is " + str(ext_port))
return True
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
return False
def listenp(port, v):
"""BLOCKING function which should only be run in a daemon thread. Listens and responds to other nodes"""
import time
while v.value: # is True is implicit
safeprint("listenp-ing", verbosity=3)
try:
while propQueue.empty() and v.value:
time.sleep(0.01)
packet = propQueue.get()
safeprint("Received: " + str(packet), verbosity=3)
if packet[0] == incoming_bounty:
bounty = pickle.loads(packet[1])
if bounty.isValid():
from multiprocessing.pool import ThreadPool
ThreadPool().map(propagate, [(bounty, x) for x in peerlist[:]])
elif packet[0] == incoming_bounties:
for bounty in packet[1]:
if bounty.isValid():
from multiprocessing.pool import ThreadPool
ThreadPool().map(propagate, [(bounty, x) for x in peerlist[:]])
safeprint("Packet processed")
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
def sync(items):
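    # Re-import shared state (config, peer list, bounty list/lock, propagation queue)
    # from a dict handed to a child process, since globals are not shared across processes.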
if items.get('config'):
from common import settings
settings.config = items.get('config')
if items.get('peerList'):
global peerlist
        peerlist = items.get('peerList')
if items.get('bountyList'):
from common import bounty
bounty.bountyList = items.get('bountyList')
if items.get('bountyLock'):
from common import bounty
bounty.bountyLock = items.get('bountyLock')
if items.get('propQueue'):
global propQueue
propQueue = items.get('propQueue')
class listener(multiprocessing.Process): # pragma: no cover
"""A class to deal with the listener method"""
def __init__(self, port, outbound, q, v, serv):
multiprocessing.Process.__init__(self)
self.outbound = outbound
self.port = port
self.q = q
self.v = v
self.serv = serv
def run(self):
safeprint("listener started")
sync(self.items)
listen(self.port, self.outbound, self.q, self.v, self.serv)
safeprint("listener stopped")
class propagator(multiprocessing.Process): # pragma: no cover
"""A class to deal with the listener method"""
def __init__(self, port, v):
multiprocessing.Process.__init__(self)
self.port = port
self.v = v
def run(self):
safeprint("propagator started")
sync(self.items)
listenp(self.port, self.v)
safeprint("propagator stopped")
| mit | -2,676,792,061,939,969,500 | 34.203501 | 150 | 0.599018 | false | 3.725799 | false | false | false |
pascalmouret/treeio-achievements | achievements/views.py | 1 | 13734 | """
Here are the functions which actually prepare the data and render the pages.
Most of the functions here are very similar since tree.io is, more or less, following
the CRUD (Create, Retrieve, Update, Delete) pattern.
The only special things are the MassForms, which are quite common in tree.io and I only
adapted the code to fit my purposes.
Also: The forms.py file is in many ways more important since all forms are defined there.
"""
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from treeio.core.models import User
from treeio.core.rendering import render_to_response
from treeio.core.decorators import treeio_login_required, handle_response_format
from achievements.forms import MassActionUserForm, MassActionUserAchievementsForm, MassActionAchievementsForm, \
PrototypeForm, AchievementForm
from achievements.models import Prototype, Achievement
def _get_default_context(request, type):
"""
This function generates a context with a prepared massform.
Arguments:
request -- a Django Request object
type -- the type of MassForm you want
"""
context = {}
massform = type(request.user.get_profile())
context.update({'massform': massform})
return context
def _process_mass_form(f):
"""
This decorator checks if and which mass-form type is received and reacts in a proper fashion. (read: saves)
    By excluding this, the views themselves get a bit less crowded. And it is the way it is in every other module
as well.
Arguments:
f -- the function that is decorated
"""
def wrap(request, *args, **kwargs):
"""
        Checks first which MassForm we are dealing with, then checks if the user has the necessary permission.
        If that all checks out, executes the save() action.
Arguments:
request -- the Django-request
*args -- catch args to pass them on afterwards
**kwargs -- catch kwargs to pass them on afterwards
"""
user = request.user.get_profile()
# check for massform and check permission
if 'massform' in request.POST and request.user.get_profile().is_admin(module_name='achievements'):
for key in request.POST:
if 'mass-user' in key:
try:
user = User.objects.get(pk=request.POST[key])
form = MassActionUserForm(request.user.get_profile(), request.POST, instance=user)
if form.is_valid():
form.save()
except Exception:
pass
if 'mass-achievement' in key:
try:
prototype = Prototype.objects.get(pk=request.POST[key])
form = MassActionAchievementsForm(request.user.get_profile(), request.POST, instance=prototype)
if form.is_valid():
form.save()
except Exception:
pass
if 'mass-userachievement' in key:
try:
achievement = Achievement.objects.get(pk=request.POST[key])
form = MassActionUserAchievementsForm(request.user.get_profile(),
request.POST, instance=achievement)
if form.is_valid():
form.save()
except Exception:
pass
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
@handle_response_format
@treeio_login_required
@_process_mass_form
def index(request, response_format='html'):
"""
    This view displays a list of users, with their achievements (icons). Has a MassForm.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
users = User.objects.all()
context = _get_default_context(request, MassActionUserForm)
context.update({'users': users})
return render_to_response('achievements/index', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def user(request, user_id, response_format='html'):
"""
This just displays one user and his achievements. Has a MassForm.
Arguments:
request -- a Django Request object
user_id -- the id of the requested User object
response_format -- defines which format the response should be
"""
user = User.objects.get(pk=user_id)
achievements = Achievement.objects.filter(user=user)
context = _get_default_context(request, MassActionUserAchievementsForm)
context.update({'u': user, 'achievements': achievements})
return render_to_response('achievements/user', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def prototypes(request, response_format='html'):
"""
    Gives an overview of all available Achievements, with their descriptions. Has a MassForm.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
prototypes = Prototype.objects.filter(trash=False)
context = _get_default_context(request, MassActionAchievementsForm)
context.update({'protos': prototypes})
return render_to_response('achievements/prototypes', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_add(request, response_format='html'):
"""
This delivers a view to create a new Prototype.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
if request.POST:
if not 'cancel' in request.POST:
form = PrototypeForm(request.user.get_profile(), request.POST, files=request.FILES)
if form.is_valid():
prototype = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
else:
return HttpResponseRedirect(reverse('achievements_prototypes'))
else:
form = PrototypeForm(request.user)
return render_to_response('achievements/prototype_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_edit(request, prototype_id, response_format='html'):
"""
Opens a form to edit a Prototype.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
if not request.user.get_profile().has_permission(prototype, mode='w'):
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
if request.POST:
if not 'cancel' in request.POST:
form = PrototypeForm(request.user.get_profile(), request.POST, files=request.FILES, instance=prototype)
if form.is_valid():
prototype = form.save()
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
else:
return HttpResponseRedirect(reverse('achievements_prototypes'))
else:
form = PrototypeForm(request.user, instance=prototype)
return render_to_response('achievements/prototype_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_detail(request, prototype_id, response_format='html'):
"""
Opens a simple overview for one Prototype.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
return render_to_response('achievements/prototype_detail', {'prototype': prototype},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_delete(request, prototype_id, response_format='html'):
"""
Simply deletes a Prototype and redirects to the list. If the permissions are alright, of course.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
    if request.user.get_profile().has_permission(prototype, mode='w'):
prototype.delete()
else:
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
return HttpResponseRedirect(reverse('achievements_prototypes'))
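# Illustrative only, not part of the original module: the reverse() names used in
# these views ('achievements_prototypes', 'achievements_prototype_detail', ...) are
# assumed to be wired up in a urls.py roughly like this (module path is a guess):
#
#   urlpatterns = patterns('achievements.views',
#       url(r'^prototypes/$', 'prototypes', name='achievements_prototypes'),
#       url(r'^prototype/add/$', 'prototype_add', name='achievements_prototype_add'),
#       url(r'^prototype/(?P<prototype_id>\d+)/$', 'prototype_detail',
#           name='achievements_prototype_detail'),
#   )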
@handle_response_format
@treeio_login_required
def achievement_add(request, response_format='html'):
"""
Opens an empty form for a new Achievement.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
if request.POST:
        if 'cancel' not in request.POST:
form = AchievementForm(request.user.get_profile(), request.POST, files=request.FILES)
if form.is_valid():
achievement = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
else:
return HttpResponseRedirect(reverse('achievements'))
else:
        form = AchievementForm(request.user.get_profile())
return render_to_response('achievements/achievement_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_edit(request, achievement_id, response_format='html'):
"""
Opens a form to edit a specific Achievement.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
if request.POST:
        if 'cancel' not in request.POST:
form = AchievementForm(request.user.get_profile(), request.POST, files=request.FILES, instance=achievement)
if form.is_valid():
achievement = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
else:
return HttpResponseRedirect(reverse('achievements'))
else:
        form = AchievementForm(request.user.get_profile(), instance=achievement)
return render_to_response('achievements/achievement_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_detail(request, achievement_id, response_format='html'):
"""
Opens a simple overview for one Achievement.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
return render_to_response('achievements/achievement_detail', {'achievement': achievement},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_delete(request, achievement_id, response_format='html'):
"""
    Simply deletes an Achievement and redirects to the list. If the permissions are alright, of course.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
    if request.user.get_profile().has_permission(achievement, mode='w'):
achievement.delete()
else:
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
return HttpResponseRedirect(reverse('achievements'))
@handle_response_format
@treeio_login_required
def widget_achievement_stream(request, response_format='html'):
"""
Gets the last three Achievements and gives them to the widget template. This will be rendered as the Widget.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
achievements = Achievement.objects.all()[:3]
return render_to_response('achievements/widgets/newest', {'achievements': achievements},
context_instance=RequestContext(request), response_format=response_format)
| bsd-2-clause | 7,595,113,110,143,997,000 | 39.040816 | 119 | 0.66754 | false | 4.232357 | false | false | false |
gdestuynder/MozDef | alerts/cloudtrail_logging_disabled.py | 1 | 1132 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch
class AlertCloudtrailLoggingDisabled(AlertTask):
def main(self):
search_query = SearchQuery(minutes=30)
search_query.add_must([
TermMatch('source', 'cloudtrail'),
TermMatch('eventname', 'StopLogging')
])
search_query.add_must_not(TermMatch('errorcode', 'AccessDenied'))
self.filtersManual(search_query)
self.searchEventsSimple()
self.walkEvents()
def onEvent(self, event):
category = 'AWSCloudtrail'
tags = ['cloudtrail', 'aws', 'cloudtrailpagerduty']
severity = 'CRITICAL'
summary = 'Cloudtrail Logging Disabled: ' + event['_source']['requestparameters']['name']
return self.createAlertDict(summary, category, tags, [event], severity)
| mpl-2.0 | -3,568,129,083,789,123,600 | 32.294118 | 97 | 0.671378 | false | 3.903448 | false | false | false |
Thortoise/Super-Snake | Blender/animation_nodes-master/operators/profiling.py | 1 | 2009 | import bpy
import cProfile
from bpy.props import *
from io import StringIO
from contextlib import redirect_stdout
class ProfileAnimationNodes(bpy.types.Operator):
bl_idname = "an.profile"
bl_label = "Profile"
function = StringProperty()
output = StringProperty()
sort = StringProperty()
def execute(self, context):
result = self.getProfilingResult()
if self.output == "CONSOLE":
print(result)
elif self.output == "TEXT_BLOCK":
textBlock = self.getOutputTextBlock()
textBlock.clear()
textBlock.write(result)
return {"FINISHED"}
def getProfilingResult(self):
resultBuffer = StringIO()
with redirect_stdout(resultBuffer):
d = {"function" : self.executeFunction}
cProfile.runctx("function()", d, d, sort = self.sort)
self.executeFunction()
return resultBuffer.getvalue()
def executeFunction(self):
if self.function == "EXECUTION":
            execute_TreeExecution()
elif self.function == "TREE_ANALYSIS":
execute_TreeAnalysis()
elif self.function == "UPDATE_EVERYTHING":
execute_UpdateEverything()
elif self.function == "SCRIPT_GENERATION":
execute_ScriptGeneration()
def getOutputTextBlock(self):
textBlockName = "Profiling"
if textBlockName in bpy.data.texts:
return bpy.data.texts[textBlockName]
else:
return bpy.data.texts.new(textBlockName)
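# Illustrative only: once registered, the operator above would typically be invoked
# from UI or script code roughly like this. The function/output values are among
# those handled in execute() and executeFunction() above; "cumtime" is a standard
# cProfile sort key.
#
#   bpy.ops.an.profile(function = "EXECUTION", output = "TEXT_BLOCK", sort = "cumtime")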
def execute_TreeExecution():
bpy.context.space_data.edit_tree.execute()
def execute_TreeAnalysis():
from .. import tree_info
tree_info.update()
def execute_UpdateEverything():
from .. import update
update.updateEverything()
def execute_ScriptGeneration():
from .. execution import units
from .. utils.nodes import createNodeByIdDict
nodeByID = createNodeByIdDict()
units.createExecutionUnits(nodeByID)
nodeByID.clear()
| gpl-3.0 | -5,066,411,222,461,000,000 | 28.115942 | 65 | 0.645595 | false | 4.185417 | false | false | false |
eepgwde/pyeg0 | eg/dispatch0.py | 1 | 1483 | ## @file dispatch0.py
# @author weaves
# @brief Demonstrate extrinsic visitor pattern.
#
# The visitor pattern (or double dispatch) is well-known. In languages
# that support reflection you can use a single dispatch like this
# method.
#
# You can test this package on the command-line with
# <code>python dispatch0.py</code>
#
# @note
# This implementation uses PEAK. And this can be loaded using the
# python-peak.rules packages. Guido van Rossum (Python originator)
# recommends multimethod. In the posting there is a mention of using
# decorators called @when. This version is similar and uses a
# standard package.
#
# @note
# Python doesn't have function overloading. It is interpreted and
# loosely typed, so the concept isn't directly applicable; instead you
# can achieve run-time overload resolution based on type (and other
# conditions) using these rules. It is probably a form of late-binding
# similar to that of SmallTalk and CLOS.
#
# @see
# www.artima.com/forums/flat.jsp?forum=106&thread=101605
from __future__ import print_function
from peak.rules import abstract, when, around, before, after
@abstract()
def pprint(ob):
"""A pretty-printing generic function"""
@when(pprint, (list,))
def pprint_list(ob):
print("pretty-printing a list")
@when(pprint, "isinstance(ob,list) and len(ob)>=4")
def pprint_long_list(ob):
print("pretty-printing a long list")
if __name__ == '__main__':
pprint(['this', 'that', 'those'])
pprint(['this', 'that', 'those', 'these'])
| gpl-3.0 | -5,117,124,997,730,889,000 | 30.553191 | 70 | 0.727579 | false | 3.370455 | false | false | false |
allenlavoie/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py | 1 | 11315 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
tfd = distributions
class DistributionTest(test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
tfd.Normal,
tfd.Bernoulli,
tfd.Beta,
tfd.Chi2,
tfd.Exponential,
tfd.Gamma,
tfd.InverseGamma,
tfd.Laplace,
tfd.StudentT,
tfd.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.test_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, random_ops.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape,
array_ops.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
array_ops.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.test_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = tfd.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.test_session():
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
unused_normal_copy = normal.copy(validate_args=False)
base_params = normal.parameters.copy()
copy_params = normal.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop("validate_args"), copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.test_session():
mu = 1.
sigma = 2.
normal = tfd.Normal(mu, sigma, validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
normal = tfd.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([mu], [sigma], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertTrue(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertFalse(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
# Test case 3.
x = array_ops.placeholder(dtype=dtypes.int32)
is_scalar = normal._is_scalar_helper(x.get_shape(),
lambda: array_ops.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
def _GetFakeDistribution(self):
class FakeDistribution(tfd.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
return FakeDistribution
def testSampleShapeHints(self):
fake_distribution = self._GetFakeDistribution()
with self.test_session():
# Make a new session since we're playing with static shapes. [And below.]
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
# We use as_list since TensorShape comparison does not work correctly for
# unknown values, ie, Dimension(None).
self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
def testStrWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("tf.distributions.Normal("
"\"Normal\", "
"batch_shape=(), "
"event_shape=(), "
"dtype=float16)"), # Got the dtype right.
str(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("tf.distributions.Chi2("
"\"silly\", " # What a silly name that is!
"batch_shape=(2,), "
"event_shape=(), "
"dtype=float32)"),
str(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("tf.distributions.Exponential(\"Exponential\", "
# No batch shape.
"event_shape=(), "
"dtype=float32)"),
str(exp))
def testStrWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("tf.distributions.MultivariateNormalDiag("
"\"MVN\", "
"batch_shape=(2,), "
"event_shape=(2,), "
"dtype=float64)"),
str(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
self.assertEqual(
("tf.distributions.MultivariateNormalDiag("
"\"MVN2\", "
"batch_shape=(?,), " # Partially known.
"event_shape=(3,), "
"dtype=float32)"),
str(mvn_dynamic))
def testReprWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("<tf.distributions.Normal"
" 'Normal'"
" batch_shape=()"
" event_shape=()"
" dtype=float16>"), # Got the dtype right.
repr(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("<tf.distributions.Chi2"
" 'silly'" # What a silly name that is!
" batch_shape=(2,)"
" event_shape=()"
" dtype=float32>"),
repr(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("<tf.distributions.Exponential"
" 'Exponential'"
" batch_shape=<unknown>"
" event_shape=()"
" dtype=float32>"),
repr(exp))
def testReprWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("<tf.distributions.MultivariateNormalDiag"
" 'MVN'"
" batch_shape=(2,)"
" event_shape=(2,)"
" dtype=float64>"),
repr(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
self.assertEqual(
("<tf.distributions.MultivariateNormalDiag"
" 'MVN2'"
" batch_shape=(?,)" # Partially known.
" event_shape=(3,)"
" dtype=float32>"),
repr(mvn_dynamic))
if __name__ == "__main__":
test.main()
| apache-2.0 | -136,941,286,766,181,460 | 37.486395 | 80 | 0.62563 | false | 3.703764 | true | false | false |
aamirmajeedkhan/P4-conference-central | main.py | 1 | 2470 | #!/usr/bin/env python
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail,memcache
from conference import ConferenceApi,MEMCACHE_FEATURED_SPEAKER_KEY
from google.appengine.ext import ndb
from models import Session, Speaker
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
ConferenceApi._cacheAnnouncement()
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
'Hi, you have created a following ' # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
class SetFeaturedSpeaker(webapp2.RequestHandler):
def post(self):
"""Set featured speaker in Memcache.
Note:
The featured speaker is updated if there is more than
one session by the given speaker in the provided conference (websafeConferenceKey)
Params:
- websafeConferenceKey
The conference to check for the given speaker
- speaker
The possibly new featured speaker name
"""
# get conference key
key = ndb.Key(urlsafe=self.request.get('websafeConferenceKey'))
# get speaker
speaker = Speaker(name=self.request.get('speaker'))
# get all sessions in the given conference filtered by speaker
featured_sessions = Session.query(ancestor=key).filter(Session.speaker == speaker).fetch()
# If speaker is registered to more than one session, update featured speaker
if len(featured_sessions) >= 2:
session_names = [session.name for session in featured_sessions]
message = speaker.name + ': ' + ', '.join(session_names)
memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, message)
self.response.set_status(204)
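# Illustrative only: this handler is expected to be enqueued from the API layer
# roughly as follows (the URL matches the route registered below; the parameter
# names match the self.request.get() calls above):
#
#   taskqueue.add(url='/tasks/set_featured_speaker',
#                 params={'websafeConferenceKey': wsck, 'speaker': speaker_name})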
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/set_featured_speaker', SetFeaturedSpeaker)
], debug=True) | apache-2.0 | -3,581,929,336,231,128,000 | 40.881356 | 98 | 0.639271 | false | 4.151261 | false | false | false |
yuwen41200/biodiversity-analysis | utils/roadkill-packer/htmlGenerator.py | 1 | 4419 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
html_beginning = '''<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.11.0/bootstrap-table.min.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css">
</head>
<body>
<table data-toggle="table" data-pagination="true">
<thead>
<tr>
<th data-sortable="true">id</th>
<th data-sortable="true">family</th>
<th data-sortable="true">taxonRemarks</th>
<th data-sortable="true">scientificName</th>
<th data-sortable="true">vernacularName</th>
<th data-sortable="true">previousIdentifications</th>
<th data-sortable="true">individualCount</th>
<th data-sortable="true">occurrenceRemarks</th>
<th data-sortable="true">modified</th>
<th data-sortable="true">eventRemarks</th>
<th data-sortable="true">institutionCode</th>
<th data-sortable="true">eventDate</th>
<th data-sortable="true">recordedBy</th>
<th data-sortable="true">rightsHolder</th>
<th data-sortable="true">municipality</th>
<th data-sortable="true">rights</th>
<th data-sortable="true">decimalLongitude</th>
<th data-sortable="true">decimalLatitude</th>
<th data-sortable="true">fieldNotes</th>
<th data-sortable="true">identificationVerificationStatus</th>
<th data-sortable="true">recordNumber</th>
<th data-sortable="true">materialSampleID</th>
<th data-sortable="true">locationRemarks</th>
<th data-sortable="true">associatedReferences</th>
<th data-sortable="true">associatedMedia</th>
<th data-sortable="true">basisOfRecord</th>
<th data-sortable="true">language</th>
<th data-sortable="true">continent</th>
<th data-sortable="true">country</th>
<th data-sortable="true">countryCode</th>
</tr>
</thead>
<tbody>
'''
html_ending = ''' </tbody>
</table>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.0/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.11.0/bootstrap-table.min.js"></script>
</body>
</html>
'''
def generate(input_path, output_path):
"""
Generate an HTML web page from a CSV file.
:param input_path: Path of input (CSV) file.
:param output_path: Path of output (HTML) file.
:return: None.
"""
with open(input_path, newline='') as input_file:
rows = csv.reader(input_file)
next(rows, None)
with open(output_path, 'w') as output_file:
output_file.write(html_beginning)
for row in rows:
output_file.write(' <tr>\n')
for no, column in enumerate(row):
if no < 23:
output_file.write(' <td>' + column + '</td>\n')
else:
link = 'http:' + column if column.startswith('//') else column
link = link.replace('http://', 'https://', 1) if 'facebook' in column else link
output_file.write(' <td><a href="' + link + '" target="_blank">' + column + '</a></td>\n')
output_file.write(' <td>HumanObservation</td>\n')
output_file.write(' <td>zh-Hant-TW</td>\n')
output_file.write(' <td>Asia</td>\n')
output_file.write(' <td>Taiwan</td>\n')
output_file.write(' <td>TW</td>\n')
output_file.write(' </tr>\n')
output_file.write(html_ending)
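if __name__ == '__main__':
    # Minimal illustrative invocation; the file names below are assumptions,
    # not part of the original module.
    generate('occurrences.csv', 'occurrences.html')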
| gpl-3.0 | -7,277,954,826,530,736,000 | 48.1 | 133 | 0.505318 | false | 3.845953 | false | false | false |
grupodyd/python-filapp | setup.py | 1 | 1785 | #!/usr/bin/env python
#
# Copyright 2015 DyD Dinámica y Desarrollo SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The setup and build script for the python-filapp library.'''
import os
from setuptools import setup, find_packages
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name='python-filapp',
version='0.1',
author='DyD Dinamica y Desarrollo SAS',
author_email='[email protected]',
license='Apache License 2.0',
url='https://github.com/grupodyd/python-filapp',
keywords='filapp api',
description='A Python wrapper around the Filapp API',
long_description=(read('CHANGES')),
packages=find_packages(exclude=['tests*']),
install_requires=['requests'],
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
)
| apache-2.0 | 4,565,196,317,821,026,300 | 32.660377 | 74 | 0.67657 | false | 3.929515 | false | false | false |
wtsi-hgi/CoGS-Webapp | cogs/db/interface.py | 1 | 14479 | """
Copyright (c) 2017, 2018 Genome Research Ltd.
Authors:
* Christopher Harrison <[email protected]>
* Simon Beal <[email protected]>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import atexit
from datetime import datetime
from typing import Dict, List, Optional, overload
from sqlalchemy import create_engine, desc
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy.exc import ProgrammingError
from cogs.common import logging
from cogs.common.constants import PERMISSIONS, ROTATION_TEMPLATE_IDS
from .models import Base, EmailTemplate, Project, ProjectGroup, User
import cogs.mail.postman
class Database(logging.LogWriter):
""" Database interface """
_engine:Engine
_session:Session
def __init__(self, config:Dict) -> None:
"""
Constructor: Connect to and initialise the database session
:param config:
:return:
"""
# Connect to database and instantiate models
self.log(logging.DEBUG, "Connecting to PostgreSQL database \"{name}\" at {host}:{port}".format(**config))
self._engine = create_engine("postgresql://{user}:{passwd}@{host}:{port}/{name}".format(**config))
Base.metadata.create_all(self._engine)
# Start session (and register close on exit)
Session = sessionmaker(bind=self._engine)
self._session = Session()
atexit.register(self._session.close)
self._create_minimal()
def _create_minimal(self) -> None:
"""
Create minimal data in the database for a working system
"""
# Set up the e-mail template placeholders for rotation
# invitations, if they don't already exist
all_db_templates = [template.name for template in self.get_all_templates()]
for template in ROTATION_TEMPLATE_IDS:
if template not in all_db_templates:
self._session.add(EmailTemplate(name=template,
subject=f"Placeholder subject for {template}",
content=f"Placeholder content for {template}"))
for name, subject, content in cogs.mail.postman.get_filesystem_templates(exclude=all_db_templates):
self._session.add(EmailTemplate(name=name,
subject=subject,
content=content))
# TODO Tidy the below up / set the defaults more appropriately
if not self.get_all_users():
self.log(logging.INFO, "No users found. Adding admins.")
_admin_args = {"user_type": "grad_office", "priority": 0, "email_personal": None}
self._session.add(User(name="Simon Beal", email="[email protected]", **_admin_args))
self._session.add(User(name="Carl Anderson", email="[email protected]", **_admin_args))
self._session.add(User(name="Christopher Harrison", email="[email protected]", **_admin_args))
if not self._session.query(ProjectGroup).all():
self.log(logging.INFO, "No groups found. Adding rotation 1 2017.")
self._session.add(ProjectGroup(series=2017,
part=1,
supervisor_submit=datetime.strptime("18/07/2017", "%d/%m/%Y"),
student_invite=datetime.strptime("08/08/2017", "%d/%m/%Y"),
student_choice=datetime.strptime("30/08/2017", "%d/%m/%Y"),
student_complete=datetime.strptime("20/12/2017", "%d/%m/%Y"),
marking_complete=datetime.strptime("15/01/2018", "%d/%m/%Y"),
student_viewable=True,
student_choosable=True,
student_uploadable=False,
can_finalise=True,
read_only=False))
self._session.commit()
def reset_all(self) -> None:
"""
Reset everything in the database
For debugging use only!
"""
for table in Base.metadata.tables.values():
try:
self.engine.execute(f"DROP TABLE {table} CASCADE;")
except ProgrammingError:
try:
self.engine.execute(f'DROP TABLE "{table}" CASCADE;')
except ProgrammingError:
pass
Base.metadata.create_all(self._engine)
self._create_minimal()
## Convenience methods and properties ##############################
@property
def engine(self) -> Engine:
return self._engine
@property
def session(self) -> Session:
return self._session
def add(self, model:Base) -> None:
self._session.add(model)
def commit(self) -> None:
self._session.commit()
## E-Mail Template Methods #########################################
def get_template_by_name(self, name:str) -> Optional[EmailTemplate]:
"""
Get an e-mail template by its name
:param name:
:return:
"""
q = self._session.query(EmailTemplate)
return q.filter(EmailTemplate.name == name) \
.first()
def get_all_templates(self) -> List[EmailTemplate]:
"""
Get all e-mail templates in the system
:return:
"""
return self._session.query(EmailTemplate) \
.order_by(EmailTemplate.name) \
.all()
## Project Methods #################################################
def get_project_by_id(self, project_id:int) -> Optional[Project]:
"""
Get a project by its ID
:param project_id:
:return:
"""
q = self._session.query(Project)
return q.filter(Project.id == project_id) \
.first()
def get_project_by_name(self, project_name:str) -> Optional[Project]:
"""
Get the newest project by its name
TODO Do we need this? Fetching something by an arbitrary string
(i.e., non-key) seems like a bit of an antipattern...
:param project_name:
:return:
"""
q = self._session.query(Project)
return q.filter(Project.title == project_name) \
.order_by(desc(Project.id)) \
.first()
@overload
def get_projects_by_student(self, student:User, group:None = None) -> List[Project]:
...
@overload
def get_projects_by_student(self, student:User, group:ProjectGroup) -> Optional[Project]:
...
def get_projects_by_student(self, student, group = None):
"""
Get the list of projects for the specified student or, if a
project group is specified, that student's project in that group
:param student:
:param group:
:return:
"""
q = self._session.query(Project)
attr = "all"
clause = (Project.student == student)
if group:
clause &= (Project.group == group)
attr = "first"
return getattr(q.filter(clause) \
.order_by(Project.group_id), attr)()
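    # Illustrative only, not part of the original interface: the overloads above
    # mean both call styles are valid, e.g.
    #   db.get_projects_by_student(student)            # -> all of a student's projects
    #   db.get_projects_by_student(student, rotation)  # -> that student's project in one rotation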
def get_projects_by_supervisor(self, supervisor:User, group:Optional[ProjectGroup] = None) -> List[Project]:
"""
Get the list of projects set by the specified supervisor,
optionally restricted to a given project group
:param supervisor:
:param group:
:return:
"""
q = self._session.query(Project)
clause = (Project.supervisor == supervisor)
if group:
clause &= (Project.group == group)
return q.filter(clause) \
.order_by(Project.id) \
.all()
def get_projects_by_cogs_marker(self, cogs_marker:User, group:Optional[ProjectGroup] = None) -> List[Project]:
"""
Get the list of projects set by the specified CoGS marker,
optionally restricted to a given project group
:param cogs_marker:
:param group:
:return:
"""
q = self._session.query(Project)
clause = (Project.cogs_marker == cogs_marker)
if group:
clause &= (Project.group == group)
return q.filter(clause) \
.order_by(Project.id) \
.all()
## Project Group Methods ###########################################
def get_project_group(self, series:int, part:int) -> Optional[ProjectGroup]:
"""
Get the project group for the specified series and part
:param series:
:param part:
:return:
"""
q = self._session.query(ProjectGroup)
return q.filter(
(ProjectGroup.series == series) & (ProjectGroup.part == part)
).first()
def get_project_groups_by_series(self, series:int) -> List[ProjectGroup]:
"""
Get all project groups for the specified series
:param series:
:return:
"""
q = self._session.query(ProjectGroup)
return q.filter(ProjectGroup.series == series) \
.order_by(ProjectGroup.part) \
.all()
def get_most_recent_group(self) -> Optional[ProjectGroup]:
"""
Get the most recently created project group
:return ProjectGroup:
"""
q = self._session.query(ProjectGroup)
return q.order_by(desc(ProjectGroup.id)) \
.first()
## Series Methods ##################################################
# FIXME "Series" broadly represents academic years (i.e., a set of
# rotations/project groups). Currently these don't exist as a
# database entity; they just implicitly exist by virtue of their ID
# corresponding to the calendar year at the start of the series.
# This comes with a lot of assumptions, that could be done away with
# by explicitly defining series. This would have the additional
# benefit of defining a proper object hierarchy, which is where most
# of these methods belong (rather than in this database God-object).
def get_students_in_series(self, series:int) -> List[User]:
"""
Get the list of all students who are enrolled on projects in the
given series
:param series:
:return:
"""
# TODO This would be better implemented as a join in the
# database, rather than rolling our own.
return list({
project.student
for rotation in self.get_project_groups_by_series(series)
for project in rotation.projects
if project.student is not None})
def get_all_years(self) -> List[int]:
"""
Get the complete, sorted list of years
:return:
"""
q = self._session.query(ProjectGroup)
return [
group.series
for group in q.distinct(ProjectGroup.series) \
.order_by(desc(ProjectGroup.series)) \
.all()]
def get_all_series(self) -> List[ProjectGroup]:
"""
Get every series
:return ProjectGroup:
"""
q = self._session.query(ProjectGroup)
return q.order_by(desc(ProjectGroup.id)) \
.all()
## User Methods ####################################################
def get_user_by_id(self, uid:int) -> Optional[User]:
"""
Get a user by their ID
:param uid:
:return:
"""
q = self._session.query(User)
return q.filter(User.id == uid) \
.first()
def get_user_by_email(self, email:str) -> Optional[User]:
"""
Get a user by their e-mail address
:param email:
:return:
"""
q = self._session.query(User)
return q.filter((User.email == email) | (User.email_personal == email)) \
.first()
def get_users_by_permission(self, *permissions:str) -> List[User]:
"""
Return the users who have any of the specified permissions
:param permissions:
:return:
"""
# We must have at least one permission and our given permissions
# must be a subset of the valid permissions
assert permissions
assert set(permissions) <= set(PERMISSIONS)
return [
user
for user in self.get_all_users()
if any(getattr(user.role, p) for p in permissions)]
def get_all_users(self) -> List[User]:
"""
Get all users in the system
:return:
"""
return self._session.query(User).all()
def can_student_choose_project(self, user:User, project:Project) -> bool:
"""
Can the given user (student) choose the specified project? Only
if their role allows and, for their final project, they've done
at least one computational and wetlab project
:param user:
:param project:
:return:
"""
if user.role.join_projects:
if project.group.part != 3:
# If it's not the final rotation,
# then the student can pick any project
return True
all_projects = [project] + [
p for p in self.get_projects_by_student(user)
if p.group.series == project.group.series]
done_computational = any(p.is_computational for p in all_projects)
done_wetlab = any(p.is_wetlab for p in all_projects)
return done_computational and done_wetlab
return False
| agpl-3.0 | -7,453,707,756,658,393,000 | 33.889157 | 114 | 0.558602 | false | 4.411639 | false | false | false |
sloria/osf.io | api/nodes/permissions.py | 1 | 9092 | # -*- coding: utf-8 -*-
from rest_framework import permissions
from rest_framework import exceptions
from addons.base.models import BaseAddonSettings
from osf.models import (
AbstractNode,
Contributor,
DraftRegistration,
Institution,
Node,
NodeRelation,
OSFUser,
PreprintService,
PrivateLink,
)
from osf.utils import permissions as osf_permissions
from website.project.metadata.utils import is_prereg_admin
from api.base.utils import get_user_auth, is_deprecated
class ContributorOrPublic(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
from api.nodes.views import NodeProvider
if isinstance(obj, BaseAddonSettings):
obj = obj.owner
if isinstance(obj, (NodeProvider, PreprintService)):
obj = obj.node
assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node, NodeProvider, NodeRelation, PreprintService, or AddonSettings; got {}'.format(obj)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_public or obj.can_view(auth)
else:
return obj.can_edit(auth)
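# Illustrative only, not part of the original module: permission classes such as
# ContributorOrPublic are attached to DRF views roughly like this (the view name
# and companion classes here are assumptions):
#
#   class NodeDetail(generics.RetrieveUpdateDestroyAPIView):
#       permission_classes = (
#           drf_permissions.IsAuthenticatedOrReadOnly,
#           ContributorOrPublic,
#           ExcludeWithdrawals,
#       )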
class IsPublic(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, AbstractNode), 'obj must be an Node got {}'.format(obj)
auth = get_user_auth(request)
return obj.is_public or obj.can_view(auth)
class IsAdmin(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, AbstractNode), 'obj must be an Node, got {}'.format(obj)
auth = get_user_auth(request)
return obj.has_permission(auth.user, osf_permissions.ADMIN)
class IsContributor(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, AbstractNode), 'obj must be an Node, got {}'.format(obj)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_contributor(auth.user)
else:
return obj.has_permission(auth.user, 'write')
class IsAdminOrReviewer(permissions.BasePermission):
"""
Prereg admins can update draft registrations.
"""
def has_object_permission(self, request, view, obj):
assert isinstance(obj, (AbstractNode, DraftRegistration, PrivateLink)), 'obj must be an Node, Draft Registration, or PrivateLink, got {}'.format(obj)
auth = get_user_auth(request)
if request.method != 'DELETE' and is_prereg_admin(auth.user):
return True
return obj.has_permission(auth.user, osf_permissions.ADMIN)
class AdminOrPublic(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, (AbstractNode, OSFUser, Institution, BaseAddonSettings, DraftRegistration, PrivateLink)), 'obj must be an Node, User, Institution, Draft Registration, PrivateLink, or AddonSettings; got {}'.format(obj)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_public or obj.can_view(auth)
else:
return obj.has_permission(auth.user, osf_permissions.ADMIN)
class ExcludeWithdrawals(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if isinstance(obj, Node):
node = obj
else:
context = request.parser_context['kwargs']
node = AbstractNode.load(context[view.node_lookup_url_kwarg])
if node.is_retracted:
return False
return True
class ContributorDetailPermissions(permissions.BasePermission):
"""Permissions for contributor detail page."""
def has_object_permission(self, request, view, obj):
assert isinstance(obj, (AbstractNode, OSFUser, Contributor)), 'obj must be User, Contributor, or Node, got {}'.format(obj)
auth = get_user_auth(request)
context = request.parser_context['kwargs']
node = AbstractNode.load(context[view.node_lookup_url_kwarg])
user = OSFUser.load(context['user_id'])
if request.method in permissions.SAFE_METHODS:
return node.is_public or node.can_view(auth)
elif request.method == 'DELETE':
return node.has_permission(auth.user, osf_permissions.ADMIN) or auth.user == user
else:
return node.has_permission(auth.user, osf_permissions.ADMIN)
class ContributorOrPublicForPointers(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node or NodeRelation, got {}'.format(obj)
auth = get_user_auth(request)
parent_node = AbstractNode.load(request.parser_context['kwargs']['node_id'])
pointer_node = NodeRelation.load(request.parser_context['kwargs']['node_link_id']).child
if request.method in permissions.SAFE_METHODS:
has_parent_auth = parent_node.can_view(auth)
has_pointer_auth = pointer_node.can_view(auth)
public = pointer_node.is_public
has_auth = public or (has_parent_auth and has_pointer_auth)
return has_auth
else:
has_auth = parent_node.can_edit(auth)
return has_auth
class ContributorOrPublicForRelationshipPointers(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, dict)
auth = get_user_auth(request)
parent_node = obj['self']
if request.method in permissions.SAFE_METHODS:
return parent_node.can_view(auth)
elif request.method == 'DELETE':
return parent_node.can_edit(auth)
else:
has_parent_auth = parent_node.can_edit(auth)
if not has_parent_auth:
return False
pointer_nodes = []
for pointer in request.data.get('data', []):
node = AbstractNode.load(pointer['id'])
if not node or node.is_collection:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(pointer['id']))
pointer_nodes.append(node)
has_pointer_auth = True
for pointer in pointer_nodes:
if not pointer.can_view(auth):
has_pointer_auth = False
break
return has_pointer_auth
class RegistrationAndPermissionCheckForPointers(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
node_link = NodeRelation.load(request.parser_context['kwargs']['node_link_id'])
node = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
auth = get_user_auth(request)
        if request.method == 'DELETE' and node.is_registration:
raise exceptions.MethodNotAllowed(method=request.method)
if node.is_collection or node.is_registration:
raise exceptions.NotFound
if node != node_link.parent:
raise exceptions.NotFound
if request.method == 'DELETE' and not node.can_edit(auth):
return False
return True
class WriteOrPublicForRelationshipInstitutions(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, dict)
auth = get_user_auth(request)
node = obj['self']
if request.method in permissions.SAFE_METHODS:
return node.is_public or node.can_view(auth)
else:
return node.has_permission(auth.user, osf_permissions.WRITE)
class ReadOnlyIfRegistration(permissions.BasePermission):
"""Makes PUT and POST forbidden for registrations."""
def has_object_permission(self, request, view, obj):
if not isinstance(obj, AbstractNode):
obj = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
assert isinstance(obj, AbstractNode), 'obj must be an Node'
if obj.is_registration:
return request.method in permissions.SAFE_METHODS
return True
class ShowIfVersion(permissions.BasePermission):
def __init__(self, min_version, max_version, deprecated_message):
super(ShowIfVersion, self).__init__()
self.min_version = min_version
self.max_version = max_version
self.deprecated_message = deprecated_message
def has_object_permission(self, request, view, obj):
if is_deprecated(request.version, self.min_version, self.max_version):
raise exceptions.NotFound(detail=self.deprecated_message)
return True
class NodeLinksShowIfVersion(ShowIfVersion):
def __init__(self):
min_version = '2.0'
max_version = '2.0'
deprecated_message = 'This feature is deprecated as of version 2.1'
super(NodeLinksShowIfVersion, self).__init__(min_version, max_version, deprecated_message)
| apache-2.0 | -5,648,631,933,092,123,000 | 39.408889 | 232 | 0.66366 | false | 4.088129 | false | false | false |
rbn920/feebb | feebb/test.py | 1 | 1640 | from feebb import *
import matplotlib.pyplot as plt
pre = Preprocessor()
pre.load_json('ex_json/test2.json')
elems = [Element(elem) for elem in pre.elements]
print(pre.supports)
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2m.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2mm.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2mmm.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
| mit | -3,904,780,058,405,213,000 | 25.885246 | 48 | 0.714634 | false | 2.827586 | false | false | false |
sradanov/flyingpigeon | setup.py | 1 | 1385 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
'cdo',
'bokeh',
'ocgis',
'pandas',
'nose',
]
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(name='flyingpigeon',
version='0.2.0',
      description='Processes for climate data, indices and extreme events',
long_description=README + '\n\n' + CHANGES,
classifiers=classifiers,
author='Nils Hempelmann',
author_email='[email protected]',
url='http://www.lsce.ipsl.fr/',
license = "http://www.apache.org/licenses/LICENSE-2.0",
keywords='wps flyingpigeon pywps malleefowl ipsl birdhouse conda anaconda',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=requires,
      entry_points={
          'console_scripts': []
      },
)
| apache-2.0 | 871,063,087,530,311,400 | 29.108696 | 81 | 0.607942 | false | 3.524173 | false | false | false |
apurtell/phoenix | bin/psql.py | 1 | 2718 | #!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
import os
import subprocess
import sys
import phoenix_utils
phoenix_utils.setPath()
args = phoenix_utils.shell_quote(sys.argv[1:])
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
java_home = os.getenv('JAVA_HOME')
# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
hbase_env_path = None
hbase_env_cmd = None
if os.name == 'posix':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
elif os.name == 'nt':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
if not hbase_env_path or not hbase_env_cmd:
print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
sys.exit(-1)
hbase_env = {}
if os.path.isfile(hbase_env_path):
p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
for x in p.stdout:
(k, _, v) = x.partition('=')
hbase_env[k.strip()] = v.strip()
if hbase_env.has_key('JAVA_HOME'):
java_home = hbase_env['JAVA_HOME']
if java_home:
java = os.path.join(java_home, 'bin', 'java')
else:
java = 'java'
java_cmd = java + ' $PHOENIX_OPTS ' + \
' -cp "' + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_client_jar + \
os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + phoenix_utils.hadoop_classpath + '" -Dlog4j.configuration=file:' + \
os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
" org.apache.phoenix.util.PhoenixRuntime " + args
os.execl("/bin/sh", "/bin/sh", "-c", java_cmd)
| apache-2.0 | -4,959,389,883,573,509,000 | 37.28169 | 126 | 0.654157 | false | 3.10984 | false | false | false |
BenWiederhake/House-Of-Tweets | backend/vomit.py | 1 | 1212 | #!/usr/bin/env python3
import json
import mq
from sys import argv
from time import sleep
def pull(filename):
with open(filename, 'r') as fp:
return json.load(fp)
def check(tweets):
assert len(tweets) > 0
first_batch = tweets[0]
assert len(first_batch) > 0
first_tweet = first_batch[0]
EXPECT_KEYS = {'content', 'hashtags', 'id', 'image', 'name',
'partycolor', 'retweet', 'sound', 'time', 'twitterName'}
# Implicit assertion: first_tweet is a dict
assert EXPECT_KEYS.issubset(first_tweet.keys()), first_tweet.keys()
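# Illustrative only: a minimal batch that would pass check(); the values are made
# up, but the keys match EXPECT_KEYS above.
#
#   [{"id": "0", "name": "Jane Doe", "twitterName": "jdoe", "content": "example",
#     "hashtags": [], "image": "", "partycolor": "#aabbcc", "retweet": False,
#     "sound": "", "time": 0}]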
# Waiting period, in milliseconds, between each sent batch
PERIOD_MS = 3000
def vomit(tweets):
print('Now vomiting {} tweet-batches all over the place.'.format(len(tweets)))
q = mq.RealQueue('tweets')
for batch in tweets:
q.post(batch)
sleep(PERIOD_MS / 1000.0)
def transfer_file(filename):
tweets = pull(filename)
check(tweets)
vomit(tweets)
if __name__ == '__main__':
if len(argv) != 1 + 1:
print('{}: need precisely one argument: the name of the tweets JSON file.'.format(argv[0]))
exit(1)
else:
transfer_file(argv[1]) # argv[0] is the program name
| gpl-3.0 | 2,856,715,746,268,283,000 | 24.25 | 99 | 0.626238 | false | 3.320548 | false | false | false |
zzqcn/wireshark | tools/make-plugin-reg.py | 1 | 5547 | #!/usr/bin/env python3
#
# Looks for registration routines in the plugins
# and assembles C code to call all the routines.
#
import os
import sys
import re
#
# The first argument is the directory in which the source files live.
#
srcdir = sys.argv[1]
#
# The second argument is either "plugin", "plugin_wtap" or "plugin_codec".
#
registertype = sys.argv[2]
#
# All subsequent arguments are the files to scan.
#
files = sys.argv[3:]
final_filename = "plugin.c"
preamble = """\
/*
* Do not modify this file. Changes will be overwritten.
*
* Generated automatically from %s.
*/
""" % (sys.argv[0])
# Create the proper list of filenames
filenames = []
for file in files:
if os.path.isfile(file):
filenames.append(file)
else:
filenames.append(os.path.join(srcdir, file))
if len(filenames) < 1:
print("No files found")
sys.exit(1)
# Look through all files, applying the regex to each line.
# If the pattern matches, save the "symbol" section to the
# appropriate set.
regs = {
'proto_reg': set(),
'handoff_reg': set(),
'wtap_register': set(),
'codec_register': set(),
}
# For those that don't know Python, r"" indicates a raw string,
# devoid of Python escapes.
proto_regex = r"\bproto_register_(?P<symbol>[_A-Za-z0-9]+)\s*\(\s*void\s*\)[^;]*$"
handoff_regex = r"\bproto_reg_handoff_(?P<symbol>[_A-Za-z0-9]+)\s*\(\s*void\s*\)[^;]*$"
wtap_reg_regex = r"\bwtap_register_(?P<symbol>[_A-Za-z0-9]+)\s*\([^;]+$"
codec_reg_regex = r"\bcodec_register_(?P<symbol>[_A-Za-z0-9]+)\s*\([^;]+$"
# This table drives the pattern-matching and symbol-harvesting
patterns = [
( 'proto_reg', re.compile(proto_regex, re.MULTILINE) ),
( 'handoff_reg', re.compile(handoff_regex, re.MULTILINE) ),
( 'wtap_register', re.compile(wtap_reg_regex, re.MULTILINE) ),
( 'codec_register', re.compile(codec_reg_regex, re.MULTILINE) ),
]
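# For illustration, these regexes are meant to pick up declarations of the
# following shape in the scanned sources (names are placeholders; per the regexes,
# the wtap/codec registration routines may take an arbitrary, non-empty argument
# list rather than void):
#
#   void proto_register_foo(void)
#   void proto_reg_handoff_foo(void)
#   void wtap_register_bar( ... )
#   void codec_register_baz( ... )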
# Grep
for filename in filenames:
file = open(filename)
# Read the whole file into memory
contents = file.read()
for action in patterns:
regex = action[1]
for match in regex.finditer(contents):
symbol = match.group("symbol")
sym_type = action[0]
regs[sym_type].add(symbol)
# We're done with the file contents
del contents
file.close()
# Make sure we actually processed something
if (len(regs['proto_reg']) < 1 and len(regs['wtap_register']) < 1 and len(regs['codec_register']) < 1):
print("No plugin registrations found")
sys.exit(1)
# Convert the sets into sorted lists to make the output pretty
regs['proto_reg'] = sorted(regs['proto_reg'])
regs['handoff_reg'] = sorted(regs['handoff_reg'])
regs['wtap_register'] = sorted(regs['wtap_register'])
regs['codec_register'] = sorted(regs['codec_register'])
reg_code = ""
reg_code += preamble
reg_code += """
#include "config.h"
#include <gmodule.h>
/* plugins are DLLs */
#define WS_BUILD_DLL
#include "ws_symbol_export.h"
"""
if registertype == "plugin":
reg_code += "#include \"epan/proto.h\"\n\n"
if registertype == "plugin_wtap":
reg_code += "#include \"wiretap/wtap.h\"\n\n"
if registertype == "plugin_codec":
reg_code += "#include \"wsutil/codecs.h\"\n\n"
for symbol in regs['proto_reg']:
reg_code += "void proto_register_%s(void);\n" % (symbol)
for symbol in regs['handoff_reg']:
reg_code += "void proto_reg_handoff_%s(void);\n" % (symbol)
for symbol in regs['wtap_register']:
reg_code += "void wtap_register_%s(void);\n" % (symbol)
for symbol in regs['codec_register']:
reg_code += "void codec_register_%s(void);\n" % (symbol)
reg_code += """
WS_DLL_PUBLIC_DEF const gchar plugin_version[] = PLUGIN_VERSION;
WS_DLL_PUBLIC_DEF const int plugin_want_major = VERSION_MAJOR;
WS_DLL_PUBLIC_DEF const int plugin_want_minor = VERSION_MINOR;
WS_DLL_PUBLIC void plugin_register(void);
void plugin_register(void)
{
"""
if registertype == "plugin":
for symbol in regs['proto_reg']:
reg_code +=" static proto_plugin plug_%s;\n\n" % (symbol)
reg_code +=" plug_%s.register_protoinfo = proto_register_%s;\n" % (symbol, symbol)
if symbol in regs['handoff_reg']:
reg_code +=" plug_%s.register_handoff = proto_reg_handoff_%s;\n" % (symbol, symbol)
else:
reg_code +=" plug_%s.register_handoff = NULL;\n" % (symbol)
reg_code += " proto_register_plugin(&plug_%s);\n" % (symbol)
if registertype == "plugin_wtap":
for symbol in regs['wtap_register']:
reg_code += " static wtap_plugin plug_%s;\n\n" % (symbol)
reg_code += " plug_%s.register_wtap_module = wtap_register_%s;\n" % (symbol, symbol)
reg_code += " wtap_register_plugin(&plug_%s);\n" % (symbol)
if registertype == "plugin_codec":
for symbol in regs['codec_register']:
reg_code += " static codecs_plugin plug_%s;\n\n" % (symbol)
reg_code += " plug_%s.register_codec_module = codec_register_%s;\n" % (symbol, symbol)
reg_code += " codecs_register_plugin(&plug_%s);\n" % (symbol)
reg_code += "}\n"
try:
fh = open(final_filename, 'w')
fh.write(reg_code)
fh.close()
print('Generated {} for {}.'.format(final_filename, os.path.basename(srcdir)))
except OSError:
sys.exit('Unable to write ' + final_filename + '.\n')
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#
| gpl-2.0 | 3,218,243,372,919,853,000 | 29.478022 | 103 | 0.628989 | false | 3.05957 | false | false | false |
alphagov/notifications-api | app/dao/api_key_dao.py | 1 | 1564 | import uuid
from datetime import datetime, timedelta
from sqlalchemy import func, or_
from app import db
from app.dao.dao_utils import autocommit, version_class
from app.models import ApiKey
@autocommit
@version_class(ApiKey)
def save_model_api_key(api_key):
if not api_key.id:
api_key.id = uuid.uuid4() # must be set now so version history model can use same id
api_key.secret = uuid.uuid4()
db.session.add(api_key)
@autocommit
@version_class(ApiKey)
def expire_api_key(service_id, api_key_id):
api_key = ApiKey.query.filter_by(id=api_key_id, service_id=service_id).one()
api_key.expiry_date = datetime.utcnow()
db.session.add(api_key)
def get_model_api_keys(service_id, id=None):
if id:
return ApiKey.query.filter_by(id=id, service_id=service_id, expiry_date=None).one()
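    # include keys that have not expired, or that expired within the last seven days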
seven_days_ago = datetime.utcnow() - timedelta(days=7)
return ApiKey.query.filter(
or_(ApiKey.expiry_date == None, func.date(ApiKey.expiry_date) > seven_days_ago), # noqa
ApiKey.service_id == service_id
).all()
def get_unsigned_secrets(service_id):
"""
    This method should only be used by the authentication of the API calls.
"""
api_keys = ApiKey.query.filter_by(service_id=service_id, expiry_date=None).all()
keys = [x.secret for x in api_keys]
return keys
def get_unsigned_secret(key_id):
"""
    This method should only be used by the authentication of the API calls.
"""
api_key = ApiKey.query.filter_by(id=key_id, expiry_date=None).one()
return api_key.secret
| mit | 3,697,148,109,029,428,000 | 29.076923 | 96 | 0.686061 | false | 3.146881 | false | false | false |
moreati/pydgin | scripts/build.py | 1 | 4989 | #!/usr/bin/env python
#=========================================================================
# build.py
#=========================================================================
# Builds pydgin.
import multiprocessing
import os
import shutil
import sys
import subprocess
import distutils.spawn
usage = """Usage:
./build.py [flags] [targets]
Flags: -h,--help this help message
-jN parallelize for N cores (omit N for # of processors)
"""
all_targets = [ "pydgin-parc-jit", "pydgin-parc-nojit-debug",
"pydgin-arm-jit", "pydgin-arm-nojit-debug" ]
def build_target( name, pypy_dir, build_dir ):
# use the name to determine the arch, jit and debug
arch = None
if "parc" in name:
arch = "parc"
if "arm" in name:
assert arch is None, "conflicting arch definitions {} and {}" \
.format( arch, "arm" )
arch = "arm"
assert arch is not None, "could not determine arch from name"
if "jit" in name and "nojit" not in name:
jit = True
elif "nojit" in name:
jit = False
else:
# default behavior if neither jit or nojit in name
jit = True
if "debug" in name and "nodebug" not in name:
debug = True
elif "nodebug" in name:
debug = False
else:
# default behavior if neither debug or nodebug in name
debug = False
print "Building {}\n arch: {}\n jit: {}\n debug: {}\n" \
.format( name, arch, jit, debug )
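  # e.g. name "pydgin-parc-nojit-debug" yields arch="parc", jit=False, debug=True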
# check for the pypy executable, if it doesn't exist warn
python_bin = distutils.spawn.find_executable('pypy')
if not python_bin:
print ('WARNING: Cannot find a pypy executable!\n'
' Proceeding to translate with CPython.\n'
' Note that this will be *much* slower than using pypy.\n'
' Please install pypy for faster translation times!\n')
python_bin = 'python'
# create the translation command and execute it
os.chdir('../{}'.format( arch ) )
cmd = ( '{4} {1}/rpython/bin/rpython {2} {0}-sim.py {3}'
.format( arch, pypy_dir,
"--opt=jit" if jit else "",
"--debug" if debug else "",
python_bin )
)
print cmd
ret = subprocess.call( cmd, shell=True )
# check for success and cleanup
if ret != 0:
print "{} failed building, aborting!".format( name )
sys.exit( ret )
shutil.copy( name, '../scripts/{}'.format( build_dir ) )
symlink_name = '../scripts/builds/{}'.format( name )
if os.path.lexists( symlink_name ):
os.remove( symlink_name )
os.symlink( '../{}/{}'.format( build_dir, name ), symlink_name )
def setup_environment():
# assume if arg starts with a dash, it's a flag
args = sys.argv[1:]
flags = filter( lambda x: x.startswith('-'), args )
targets = filter( lambda x: not x.startswith('-'), args )
# don't parallelize by default
num_processes = 1
for flag in flags:
if flag == '-h' or flag == '--help':
print usage
sys.exit( 1 )
elif flag.startswith( '-j' ):
if flag == '-j':
# get the cpu count
num_processes = multiprocessing.cpu_count()
else:
num_processes = int( flag[2:] )
else:
print "Unknown flag:", flag
print usage
sys.exit( 1 )
# ensure we know where the pypy source code is
try:
pypy_dir = os.environ['PYDGIN_PYPY_SRC_DIR']
except KeyError as e:
raise ImportError( 'Please define the PYDGIN_PYPY_SRC_DIR '
'environment variable!')
# all includes all_targets
if "all" in targets:
targets += all_targets
targets.remove( "all" )
# unique-ify
targets = list( set( targets ) )
# if there are no targets, we add all
if len( targets ) == 0:
targets = all_targets
# get the version number
pydgin_ver = subprocess.check_output(
"./vcs-version.sh", shell=True ).rstrip()
print "Building Pydgin..."
print "Version: {}".format( pydgin_ver )
print "PyPy source: {}".format( pypy_dir )
print "Targets: {}".format( targets )
print "Number of processes: {}".format( num_processes )
# create build dir
build_dir = "builds/pydgin-{}/bin".format( pydgin_ver )
subprocess.call( "mkdir -p {}".format( build_dir ), shell=True )
return targets, pypy_dir, build_dir, num_processes
def main():
# get targets and environment
targets, pypy_dir, build_dir, num_processes = setup_environment()
# don't parallelize for 1 process
if num_processes <= 1:
for target in targets:
build_target( target, pypy_dir, build_dir )
else:
# build targets in parallel
pool = multiprocessing.Pool( processes=num_processes )
try:
for target in targets:
pool.apply_async( build_target, [target, pypy_dir, build_dir])
pool.close()
pool.join()
except KeyboardInterrupt:
print "Terminating workers!"
pool.terminate()
pool.join()
print 'Parallel builds complete.'
if __name__ == "__main__":
main()
| bsd-3-clause | -7,807,380,443,629,281,000 | 26.871508 | 74 | 0.588495 | false | 3.679204 | false | false | false |
alpodrezov/ordering_lunch | xavchik/settings.py | 1 | 2241 | """
Django settings for xavchik project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(#@6cjvgq1^pp0*o*^8hs20ozo!27do1&-^nqc92ol%4d8)(5l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'xavchik',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'xavchik.urls'
WSGI_APPLICATION = 'xavchik.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
('static', '/Volumes/Macintosh HD 2 2/work_projects/python/ordering_lunch/xavchik/static'),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
) | unlicense | -7,114,326,989,939,682,000 | 23.637363 | 95 | 0.720214 | false | 3.192308 | false | false | false |
splotz90/urh | src/urh/signalprocessing/SimulatorItem.py | 1 | 2278 | class SimulatorItem(object):
protocol_manager = None
expression_parser = None
def __init__(self):
self.__parentItem = None
self.__childItems = []
self.logging_active = True
self.is_valid = True
def check(self):
return True
def get_pos(self):
if self.parent() is not None:
return self.parent().children.index(self)
return 0
def index(self):
if self.parent() is None:
return ""
item = self
result = str(item.get_pos() + 1)
while item.parent().parent() is not None:
item = item.parent()
result = str(item.get_pos() + 1) + "." + result
return result
def insert_child(self, pos, child):
child.set_parent(self)
self.children.insert(pos, child)
def delete(self):
for child in self.children[:]:
child.set_parent(None)
self.set_parent(None)
def parent(self):
return self.__parentItem
def set_parent(self, value):
if self.parent() is not None:
self.parent().children.remove(self)
self.__parentItem = value
@property
def children(self):
return self.__childItems
def child_count(self) -> int:
return len(self.children)
def next_sibling(self):
result = None
index = self.get_pos()
if self.parent() and index < self.parent().child_count() - 1:
result = self.parent().children[index + 1]
return result
def prev_sibling(self):
result = None
index = self.get_pos()
if self.parent() and index > 0:
result = self.parent().children[index - 1]
return result
def next(self):
if self.child_count():
return self.children[0]
curr = self
while curr is not None:
if curr.next_sibling() is not None:
return curr.next_sibling()
curr = curr.parent()
return None
def prev(self):
if self.prev_sibling() is not None:
curr = self.prev_sibling()
else:
return self.parent()
while curr.child_count():
curr = curr.children[-1]
return curr | gpl-3.0 | 3,334,993,551,861,423,000 | 21.79 | 69 | 0.53863 | false | 4.172161 | false | false | false |
JacobFischer/Joueur.py | games/anarchy/building.py | 1 | 3963 | # Building: A basic building. It does nothing besides burn down. Other Buildings inherit from this class.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from games.anarchy.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Building(GameObject):
"""The class representing the Building in the Anarchy game.
A basic building. It does nothing besides burn down. Other Buildings inherit from this class.
"""
def __init__(self):
"""Initializes a Building with basic logic as provided by the Creer code generator."""
GameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._bribed = False
self._building_east = None
self._building_north = None
self._building_south = None
self._building_west = None
self._fire = 0
self._health = 0
self._is_headquarters = False
self._owner = None
self._x = 0
self._y = 0
@property
def bribed(self):
"""When True this building has already been bribed this turn and cannot be bribed again this turn.
:rtype: bool
"""
return self._bribed
@property
def building_east(self):
"""The Building directly to the east of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_east
@property
def building_north(self):
"""The Building directly to the north of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_north
@property
def building_south(self):
"""The Building directly to the south of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_south
@property
def building_west(self):
"""The Building directly to the west of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_west
@property
def fire(self):
"""How much fire is currently burning the building, and thus how much damage it will take at the end of its owner's turn. 0 means no fire.
:rtype: int
"""
return self._fire
@property
def health(self):
"""How much health this building currently has. When this reaches 0 the Building has been burned down.
:rtype: int
"""
return self._health
@property
def is_headquarters(self):
"""True if this is the Headquarters of the owning player, False otherwise. Burning this down wins the game for the other Player.
:rtype: bool
"""
return self._is_headquarters
@property
def owner(self):
"""The player that owns this building. If it burns down (health reaches 0) that player gets an additional bribe(s).
:rtype: games.anarchy.player.Player
"""
return self._owner
@property
def x(self):
"""The location of the Building along the x-axis.
:rtype: int
"""
return self._x
@property
def y(self):
"""The location of the Building along the y-axis.
:rtype: int
"""
return self._y
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
| mit | -4,873,034,364,806,907,000 | 30.452381 | 146 | 0.633611 | false | 4.189218 | false | false | false |
gento/dionaea | modules/python/scripts/pptp/include/packets.py | 1 | 4630 | #********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2015 Tan Kean Siong
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact [email protected]
#*
#*******************************************************************************/
from dionaea.smb.include.packet import *
from dionaea.smb.include.fieldtypes import *
#PPTP Control Message Types
PPTP_CTRMSG_TYPE_STARTCTRCON_REQUEST = 0x01
PPTP_CTRMSG_TYPE_STARTCTRCON_REPLY = 0x02
PPTP_CTRMSG_TYPE_OUTGOINGCALL_REQUEST = 0x07
PPTP_CTRMSG_TYPE_OUTGOINGCALL_REPLY = 0x08
#PPP Link Control Protocol Types
PPP_LCP_Configuration_Request = 0x01
# https://www.ietf.org/rfc/rfc2637.txt
class PPTP_StartControlConnection_Request(Packet):
name="PPTP Start-Control-Connection-Request"
controlmessage_type = PPTP_CTRMSG_TYPE_STARTCTRCON_REQUEST
fields_desc =[
XShortField("Length",0),
XShortField("MessageType",0),
XIntField("MagicCookie",0),
XShortField("ControlMessageType",0),
XShortField("Reserved",0),
XShortField("ProtocolVersion",0),
XShortField("Reserved",0),
XIntField("FramingCapabilites",0),
XIntField("BearerCapabilites",0),
XShortField("MaxChannels",0),
XShortField("FirmwareRevision",0),
StrFixedLenField("HostName", "", 64),
StrFixedLenField("VendorName", "", 64),
]
class PPTP_StartControlConnection_Reply(Packet):
name="PPTP Start-Control-Connection-Reply"
controlmessage_type = PPTP_CTRMSG_TYPE_STARTCTRCON_REPLY
fields_desc =[
XShortField("Length",0x9c),
XShortField("MessageType",0x01),
XIntField("MagicCookie",0x1a2b3c4d),
XShortField("ControlMessageType",0x02),
XShortField("Reserved",0),
LEShortField("ProtocolVersion",0x01),
ByteField("ResultCode",0x01),
ByteField("ErrorCode",0x00),
LEIntField("FramingCapabilites",0),
LEIntField("BearerCapabilites",0),
XShortField("MaxChannels",1),
XShortField("FirmwareRevision",1),
StrFixedLenField("HostName", "", 64),
StrFixedLenField("VendorName", "", 64),
]
class PPTP_OutgoingCall_Request(Packet):
name="PPTP Outgoing-Call-Request"
controlmessage_type = PPTP_CTRMSG_TYPE_OUTGOINGCALL_REQUEST
fields_desc =[
XShortField("Length",0),
XShortField("MessageType",0),
XIntField("MagicCookie",0),
XShortField("ControlMessageType",0),
XShortField("Reserved",0),
XShortField("CallID",0),
XShortField("CallSerialNumber",0),
XIntField("MinBPS",0),
XIntField("MaxBPS",0),
XIntField("BearerType",0),
XIntField("FramingType",0),
XShortField("PacketWindowSize",0),
XShortField("PacketProcessingDelay",0),
XShortField("PacketNumberLength",0),
XShortField("Reserved",0),
StrFixedLenField("PhoneNumber", "", 64),
StrFixedLenField("Subaddress", "", 64),
]
class PPTP_OutgoingCall_Reply(Packet):
name="PPTP Outgoing-Call-Reply"
controlmessage_type = PPTP_CTRMSG_TYPE_OUTGOINGCALL_REPLY
fields_desc =[
XShortField("Length",0x20),
XShortField("MessageType",0x01),
XIntField("MagicCookie",0x1a2b3c4d),
XShortField("ControlMessageType",0x08),
XShortField("Reserved",0),
XShortField("CallID",0x480),
XShortField("PeerCallID",0),
ByteField("ResultCode",0x01),
ByteField("ErrorCode",0x00),
XShortField("CauseCode",0),
XIntField("ConnectSpeed",0x05F5E100),
XShortField("PacketWindowSize",0x2000),
XShortField("PacketProcessingDelay",0),
XShortField("PacketNumberLength",0),
XShortField("PhysicalChannelID",0),
]
class PPTP(Packet):
name="PPTP"
fields_desc =[
ByteField("Address",0),
ByteField("Control",0),
XShortField("Protocol",0),
]
class PPP_LCP_Configuration_Request(Packet):
name="PPP LCP_Configuration_Request"
controlmessage_type = PPP_LCP_Configuration_Request
fields_desc =[
ByteField("Code",0),
ByteField("Identifier",0),
XShortField("Length",0),
StrFixedLenField("Options", b"", length_from=lambda pkt: pkt.Length-4),
]
| gpl-2.0 | -748,663,431,541,799,300 | 31.605634 | 82 | 0.700432 | false | 3.012362 | false | false | false |
dmilith/SublimeText3-dmilith | Packages/pyte/all/pyte/graphics.py | 1 | 3441 | # -*- coding: utf-8 -*-
"""
pyte.graphics
~~~~~~~~~~~~~
This module defines graphic-related constants, mostly taken from
:manpage:`console_codes(4)` and
http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html.
:copyright: (c) 2011-2012 by Selectel.
:copyright: (c) 2012-2017 by pyte authors and contributors,
see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
from __future__ import unicode_literals
#: A mapping of ANSI text style codes to style names. "+" means the
#: attribute is set, "-" -- reset; example:
#:
#: >>> text[1]
#: '+bold'
#: >>> text[9]
#: '+strikethrough'
TEXT = {
1: "+bold",
3: "+italics",
4: "+underscore",
7: "+reverse",
9: "+strikethrough",
22: "-bold",
23: "-italics",
24: "-underscore",
27: "-reverse",
29: "-strikethrough",
}
#: A mapping of ANSI foreground color codes to color names.
#:
#: >>> FG_ANSI[30]
#: 'black'
#: >>> FG_ANSI[38]
#: 'default'
FG_ANSI = {
30: "black",
31: "red",
32: "green",
33: "brown",
34: "blue",
35: "magenta",
36: "cyan",
37: "white",
39: "default" # white.
}
#: An alias to :data:`~pyte.graphics.FG_ANSI` for compatibility.
FG = FG_ANSI
#: A mapping of non-standard ``aixterm`` foreground color codes to
#: color names. These are high intensity colors and thus should be
#: complemented by ``+bold``.
FG_AIXTERM = {
90: "black",
91: "red",
92: "green",
93: "brown",
94: "blue",
95: "magenta",
96: "cyan",
97: "white"
}
#: A mapping of ANSI background color codes to color names.
#:
#: >>> BG_ANSI[40]
#: 'black'
#: >>> BG_ANSI[48]
#: 'default'
BG_ANSI = {
40: "black",
41: "red",
42: "green",
43: "brown",
44: "blue",
45: "magenta",
46: "cyan",
47: "white",
49: "default" # black.
}
#: An alias to :data:`~pyte.graphics.BG_ANSI` for compatibility.
BG = BG_ANSI
#: A mapping of non-standard ``aixterm`` background color codes to
#: color names. These are high intensity colors and thus should be
#: complemented by ``+bold``.
BG_AIXTERM = {
100: "black",
101: "red",
102: "green",
103: "brown",
104: "blue",
105: "magenta",
106: "cyan",
107: "white"
}
#: SGR code for foreground in 256 or True color mode.
FG_256 = 38
#: SGR code for background in 256 or True color mode.
BG_256 = 48
#: A table of 256 foreground or background colors.
# The following code is part of the Pygments project (BSD licensed).
FG_BG_256 = [
(0x00, 0x00, 0x00), # 0
(0xcd, 0x00, 0x00), # 1
(0x00, 0xcd, 0x00), # 2
(0xcd, 0xcd, 0x00), # 3
(0x00, 0x00, 0xee), # 4
(0xcd, 0x00, 0xcd), # 5
(0x00, 0xcd, 0xcd), # 6
(0xe5, 0xe5, 0xe5), # 7
(0x7f, 0x7f, 0x7f), # 8
(0xff, 0x00, 0x00), # 9
(0x00, 0xff, 0x00), # 10
(0xff, 0xff, 0x00), # 11
(0x5c, 0x5c, 0xff), # 12
(0xff, 0x00, 0xff), # 13
(0x00, 0xff, 0xff), # 14
(0xff, 0xff, 0xff), # 15
]
# colors 16..231: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(216):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
FG_BG_256.append((r, g, b))
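# e.g. cube index 180 (palette color 196) maps to (0xff, 0x00, 0x00) -- pure red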
# colors 232..255: grayscale
for i in range(24):
v = 8 + i * 10
FG_BG_256.append((v, v, v))
FG_BG_256 = ["{0:02x}{1:02x}{2:02x}".format(r, g, b) for r, g, b in FG_BG_256]
| mit | 1,003,358,980,977,163,300 | 22.09396 | 78 | 0.561465 | false | 2.681995 | false | false | false |
BrainTech/openbci | obci/logic/logic_speller_peer.py | 1 | 1099 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:
# Mateusz Kruszyński <[email protected]>
#
import time
from obci.utils import tags_helper
from multiplexer.multiplexer_constants import peers, types
from obci.logic import logic_helper
from obci.logic.logic_decision_peer import LogicDecision
from obci.logic.engines.speller_engine import SpellerEngine
from obci.utils import context as ctx
from obci.configs import settings, variables_pb2
from obci.utils.openbci_logging import log_crash
class LogicSpeller(LogicDecision, SpellerEngine):
"""A class for creating a manifest file with metadata."""
@log_crash
def __init__(self, addresses):
LogicDecision.__init__(self, addresses=addresses)
context = ctx.get_new_context()
context['logger'] = self.logger
SpellerEngine.__init__(self, self.config.param_values(), context)
self.ready()
self._update_letters()
def _run_post_actions(self, p_decision):
self._update_letters()
if __name__ == "__main__":
LogicSpeller(settings.MULTIPLEXER_ADDRESSES).loop()
| gpl-3.0 | -3,186,616,234,418,498,600 | 33.3125 | 73 | 0.711293 | false | 3.409938 | false | false | false |
kevinsung/OpenFermion | src/openfermion/utils/_testing_utils.py | 1 | 13228 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions useful for tests."""
import collections
import itertools
import numpy
from scipy.linalg import qr
from openfermion.ops import (DiagonalCoulombHamiltonian,
InteractionOperator,
QuadraticHamiltonian,
QubitOperator)
def random_qubit_operator(n_qubits=16,
max_num_terms=16,
max_many_body_order=16,
seed=None):
prng = numpy.random.RandomState(seed)
op = QubitOperator()
num_terms = prng.randint(1, max_num_terms+1)
for _ in range(num_terms):
many_body_order = prng.randint(max_many_body_order+1)
term = []
for _ in range(many_body_order):
index = prng.randint(n_qubits)
action = prng.choice(('X', 'Y', 'Z'))
term.append((index, action))
coefficient = prng.randn()
op += QubitOperator(term, coefficient)
return op
def haar_random_vector(n, seed=None):
"""Generate an n dimensional Haar randomd vector."""
if seed is not None:
numpy.random.seed(seed)
vector = numpy.random.randn(n).astype(complex)
vector += 1.j * numpy.random.randn(n).astype(complex)
normalization = numpy.sqrt(vector.dot(numpy.conjugate(vector)))
return vector / normalization
def random_antisymmetric_matrix(n, real=False, seed=None):
"""Generate a random n x n antisymmetric matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
antisymmetric_mat = rand_mat - rand_mat.T
return antisymmetric_mat
def random_diagonal_coulomb_hamiltonian(n_qubits, real=False, seed=None):
"""Generate a random instance of DiagonalCoulombHamiltonian.
Args:
n_qubits: The number of qubits
real: Whether to use only real numbers in the one-body term
"""
if seed is not None:
numpy.random.seed(seed)
one_body = random_hermitian_matrix(n_qubits, real=real)
two_body = random_hermitian_matrix(n_qubits, real=True)
constant = numpy.random.randn()
return DiagonalCoulombHamiltonian(one_body, two_body, constant)
def random_hermitian_matrix(n, real=False, seed=None):
"""Generate a random n x n Hermitian matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
hermitian_mat = rand_mat + rand_mat.T.conj()
return hermitian_mat
def random_interaction_operator(
n_orbitals, expand_spin=False, real=True, seed=None):
"""Generate a random instance of InteractionOperator.
Args:
n_orbitals: The number of orbitals.
expand_spin: Whether to expand each orbital symmetrically into two
spin orbitals. Note that if this option is set to True, then
the total number of orbitals will be doubled.
real: Whether to use only real numbers.
seed: A random number generator seed.
"""
if seed is not None:
numpy.random.seed(seed)
if real:
dtype = float
else:
dtype = complex
# The constant has to be real.
constant = numpy.random.randn()
# The one-body tensor is a random Hermitian matrix.
one_body_coefficients = random_hermitian_matrix(n_orbitals, real)
# Generate random two-body coefficients.
two_body_coefficients = numpy.zeros((n_orbitals, n_orbitals,
n_orbitals, n_orbitals), dtype)
for p, q, r, s in itertools.product(range(n_orbitals), repeat=4):
coeff = numpy.random.randn()
if not real and len(set([p,q,r,s])) >= 3:
coeff += 1.j * numpy.random.randn()
# Four point symmetry.
two_body_coefficients[p, q, r, s] = coeff
two_body_coefficients[q, p, s, r] = coeff
two_body_coefficients[s, r, q, p] = coeff.conjugate()
two_body_coefficients[r, s, p, q] = coeff.conjugate()
# Eight point symmetry.
if real:
two_body_coefficients[r, q, p, s] = coeff
two_body_coefficients[p, s, r, q] = coeff
two_body_coefficients[s, p, q, r] = coeff
two_body_coefficients[q, r, s, p] = coeff
# If requested, expand to spin orbitals.
if expand_spin:
n_spin_orbitals = 2 * n_orbitals
# Expand one-body tensor.
one_body_coefficients = numpy.kron(one_body_coefficients, numpy.eye(2))
# Expand two-body tensor.
new_two_body_coefficients = numpy.zeros((
n_spin_orbitals, n_spin_orbitals,
n_spin_orbitals, n_spin_orbitals), dtype=complex)
for p, q, r, s in itertools.product(range(n_orbitals), repeat=4):
coefficient = two_body_coefficients[p, q, r, s]
# Mixed spin.
new_two_body_coefficients[2 * p, 2 * q + 1, 2 * r + 1, 2 * s] = (
coefficient)
new_two_body_coefficients[2 * p + 1, 2 * q, 2 * r, 2 * s + 1] = (
coefficient)
# Same spin.
new_two_body_coefficients[2 * p, 2 * q, 2 * r, 2 * s] = coefficient
new_two_body_coefficients[2 * p + 1, 2 * q + 1,
2 * r + 1, 2 * s + 1] = coefficient
two_body_coefficients = new_two_body_coefficients
# Create the InteractionOperator.
interaction_operator = InteractionOperator(
constant, one_body_coefficients, two_body_coefficients)
return interaction_operator
def random_quadratic_hamiltonian(n_orbitals,
conserves_particle_number=False,
real=False,
expand_spin=False,
seed=None):
"""Generate a random instance of QuadraticHamiltonian.
Args:
n_orbitals(int): the number of orbitals
conserves_particle_number(bool): whether the returned Hamiltonian
should conserve particle number
real(bool): whether to use only real numbers
expand_spin: Whether to expand each orbital symmetrically into two
spin orbitals. Note that if this option is set to True, then
the total number of orbitals will be doubled.
Returns:
QuadraticHamiltonian
"""
if seed is not None:
numpy.random.seed(seed)
constant = numpy.random.randn()
chemical_potential = numpy.random.randn()
hermitian_mat = random_hermitian_matrix(n_orbitals, real)
if conserves_particle_number:
antisymmetric_mat = None
else:
antisymmetric_mat = random_antisymmetric_matrix(n_orbitals, real)
if expand_spin:
hermitian_mat = numpy.kron(hermitian_mat, numpy.eye(2))
if antisymmetric_mat is not None:
antisymmetric_mat = numpy.kron(antisymmetric_mat, numpy.eye(2))
return QuadraticHamiltonian(hermitian_mat, antisymmetric_mat,
constant, chemical_potential)
def random_unitary_matrix(n, real=False, seed=None):
"""Obtain a random n x n unitary matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
Q, _ = qr(rand_mat)
return Q
class EqualsTester(object):
"""Tests equality against user-provided disjoint equivalence groups."""
def __init__(self, test_case):
self.groups = [(_ClassUnknownToSubjects(),)]
self.test_case = test_case
def add_equality_group(self, *group_items):
"""Tries to add a disjoint equivalence group to the equality tester.
This methods asserts that items within the group must all be equal to
each other, but not equal to any items in other groups that have been
or will be added.
Args:
*group_items: The items making up the equivalence group.
Raises:
AssertError: Items within the group are not equal to each other, or
items in another group are equal to items within the new group,
or the items violate the equals-implies-same-hash rule.
"""
self.test_case.assertIsNotNone(group_items)
# Check that group items are equivalent to each other.
for v1, v2 in itertools.product(group_items, repeat=2):
# Binary operators should always work.
self.test_case.assertTrue(v1 == v2)
self.test_case.assertTrue(not v1 != v2)
# __eq__ and __ne__ should both be correct or not implemented.
self.test_case.assertTrue(
hasattr(v1, '__eq__') == hasattr(v1, '__ne__'))
# Careful: python2 int doesn't have __eq__ or __ne__.
if hasattr(v1, '__eq__'):
eq = v1.__eq__(v2)
ne = v1.__ne__(v2)
self.test_case.assertIn(
(eq, ne),
[(True, False),
(NotImplemented, False),
(NotImplemented, NotImplemented)])
# Check that this group's items don't overlap with other groups.
for other_group in self.groups:
for v1, v2 in itertools.product(group_items, other_group):
# Binary operators should always work.
self.test_case.assertTrue(not v1 == v2)
self.test_case.assertTrue(v1 != v2)
# __eq__ and __ne__ should both be correct or not implemented.
self.test_case.assertTrue(
hasattr(v1, '__eq__') == hasattr(v1, '__ne__'))
# Careful: python2 int doesn't have __eq__ or __ne__.
if hasattr(v1, '__eq__'):
eq = v1.__eq__(v2)
ne = v1.__ne__(v2)
self.test_case.assertIn(
(eq, ne),
[(False, True),
(NotImplemented, True),
(NotImplemented, NotImplemented)])
# Check that group items hash to the same thing, or are all unhashable.
hashes = [hash(v) if isinstance(v, collections.Hashable) else None
for v in group_items]
if len(set(hashes)) > 1:
examples = ((v1, h1, v2, h2)
for v1, h1 in zip(group_items, hashes)
for v2, h2 in zip(group_items, hashes)
if h1 != h2)
example = next(examples)
raise AssertionError(
'Items in the same group produced different hashes. '
'Example: hash({}) is {} but hash({}) is {}.'.format(*example))
# Remember this group, to enable disjoint checks vs later groups.
self.groups.append(group_items)
def make_equality_pair(self, factory):
"""Tries to add a disjoint (item, item) group to the equality tester.
Uses the factory method to produce two different objects containing
equal items. Asserts that the two object are equal, but not equal to
any items in other groups that have been or will be added. Adds the
pair as a group.
Args:
factory (Callable[[], Any]): A method for producing independent
copies of an item.
Raises:
AssertError: The factory produces items not equal to each other, or
items in another group are equal to items from the factory, or
the items violate the equal-implies-same-hash rule.
"""
self.add_equality_group(factory(), factory())
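# Illustrative usage sketch (hypothetical MyValue class, inside a unittest.TestCase):
#
#     eq_tester = EqualsTester(self)
#     eq_tester.add_equality_group(MyValue(1), MyValue(1))   # mutually equal items
#     eq_tester.add_equality_group(MyValue(2))               # disjoint from earlier groups
#     eq_tester.make_equality_pair(lambda: MyValue(3))       # factory-produced pair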
class _ClassUnknownToSubjects(object):
"""Equality methods should be able to deal with the unexpected."""
def __eq__(self, other):
return isinstance(other, _ClassUnknownToSubjects)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(_ClassUnknownToSubjects)
def module_importable(module):
"""Without importing it, returns whether python module is importable.
Args:
module (string): Name of module.
Returns:
bool
"""
import sys
if sys.version_info >= (3, 4):
from importlib import util
plug_spec = util.find_spec(module)
else:
import pkgutil
plug_spec = pkgutil.find_loader(module)
if plug_spec is None:
return False
else:
return True
| apache-2.0 | 2,406,659,572,818,670,000 | 35.541436 | 79 | 0.590036 | false | 3.914768 | true | false | false |
kdart/pycopia | core/pycopia/stringmatch.py | 1 | 4548 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Match plain strings like they were re module objects.
The StringExpression object implements a subset of re compile expressions.
This allows for a more consistent interface for the match types. Since
string.find is about 10 times faster than an RE search with a plain string,
this should speed up matches in that case by about that much, while
keeping a consistent interface.
"""
from __future__ import print_function
from __future__ import division
class StringMatchObject(object):
def __init__(self, start, end, string, pos, endpos, re):
self._start = start
self._end = end
self.string = string
self.pos = pos
self.endpos = endpos
self.lastgroup = None
self.lastindex = None
self.re = re # not really an RE.
def __repr__(self):
return "{0}(start={1!r}, end={2!r}, string={3!r}, pos={4!r}, endpos={5!r}, re={6!r})".format(self.__class__.__name__,
self._start, self._end, self.string, self.pos, self.endpos, self.re)
def expand(self, template):
raise NotImplementedError
def group(self, *args):
if args and args[0] == 0:
return self.string[self._start:self._end]
else:
raise IndexError("no such group")
def groups(self, default=None):
return ()
def groupdict(self, default=None):
return {}
def start(self, group=0):
if group == 0:
return self._start
else:
raise IndexError("no such group")
def end(self, group=0):
if group == 0:
return self._end
else:
raise IndexError("no such group")
def span(self, group=0):
if group == 0:
return self._start, self._end
else:
return -1, -1
def __nonzero__(self):
return 1
# an object that looks like a compiled regular expression, but does exact
# string matching. should be much faster in that case.
class StringExpression(object):
def __init__(self, patt, flags=0):
self.pattern = patt
# bogus attributes to simulate compiled REs from re module.
self.flags = flags
self.groupindex = {}
def __repr__(self):
return "{0}(patt={1!r}, flags={2!r})".format(self.__class__.__name__,
self.pattern, self.flags)
def search(self, text, pos=0, endpos=2147483647):
n = text.find(self.pattern, pos, endpos)
if n >= 0:
return StringMatchObject(n, n+len(self.pattern), text, pos, endpos, self)
else:
return None
match = search # match is same as search for strings
def split(self, text, maxsplit=0):
return text.split(self.pattern, maxsplit)
def findall(self, string, pos=0, endpos=2147483647):
rv = []
        i = pos
        while i >= 0:
            i = string.find(self.pattern, i, endpos)
            if i >= 0:
                rv.append(self.pattern)
                i += len(self.pattern) or 1  # advance past the match to avoid an infinite loop
        return rv
def finditer(self, string, pos=0, endpos=2147483647):
while 1:
mo = self.search(string, pos, endpos)
if mo:
yield mo
else:
return
def sub(self, repl, string, count=2147483647):
return string.replace(self.pattern, repl, count)
    def subn(self, repl, string, count=2147483647):
        i = 0
        N = 0
        while i >= 0:
            i = string.find(self.pattern, i)
            if i >= 0:
                N += 1
                i += len(self.pattern) or 1  # advance past the match to avoid an infinite loop
        return string.replace(self.pattern, repl, count), min(N, count)
# factory function to "compile" EXACT patterns (which are strings)
def compile_exact(string, flags=0):
return StringExpression(string, flags)
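# Illustrative usage sketch -- compile_exact() stands in for re.compile() when the
# pattern is a plain string, returning objects with a compatible search/match API:
#
#     cs = compile_exact("me")
#     mo = cs.search("matchme")   # StringMatchObject or None
#     if mo is not None:
#         print(mo.span())        # (5, 7)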
def _test(argv):
cs = compile_exact("me")
mo = cs.search("matchme")
assert mo is not None
print(mo.span())
assert mo.span() == (5,7)
if __name__ == "__main__":
import sys
_test(sys.argv)
| apache-2.0 | -7,621,170,586,221,871,000 | 28.921053 | 125 | 0.599824 | false | 3.867347 | false | false | false |
germanovm/vdsm | vdsm/v2v.py | 1 | 26018 | # Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
When importing a VM, a thread starts a new virt-v2v process.
Progress and status of the process (i.e. the job) are reported back via
getVdsStats() in the fields 'progress' and 'status'.
'progress' is a number representing the percentage of a single disk copy,
and 'status' reports the state of the job (init, error, etc.).
"""
from collections import namedtuple
from contextlib import closing, contextmanager
import errno
import logging
import os
import re
import signal
import threading
import xml.etree.ElementTree as ET
import libvirt
from vdsm.constants import P_VDSM_RUN
from vdsm.define import errCode, doneCode
from vdsm import libvirtconnection, response
from vdsm.infra import zombiereaper
from vdsm.utils import traceback, CommandPath, execCmd, NICENESS, IOCLASS
import caps
_lock = threading.Lock()
_jobs = {}
_V2V_DIR = os.path.join(P_VDSM_RUN, 'v2v')
_VIRT_V2V = CommandPath('virt-v2v', '/usr/bin/virt-v2v')
_OVF_RESOURCE_CPU = 3
_OVF_RESOURCE_MEMORY = 4
_OVF_RESOURCE_NETWORK = 10
# OVF Specification:
# https://www.iso.org/obp/ui/#iso:std:iso-iec:17203:ed-1:v1:en
_OVF_NS = 'http://schemas.dmtf.org/ovf/envelope/1'
_RASD_NS = 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' \
'CIM_ResourceAllocationSettingData'
ImportProgress = namedtuple('ImportProgress',
['current_disk', 'disk_count', 'description'])
DiskProgress = namedtuple('DiskProgress', ['progress'])
class STATUS:
'''
    STARTING: request granted and the import process is starting
    COPYING_DISK: disk copy in progress
    ABORTED: abort initiated by the user
    FAILED: error during the import process
    DONE: conversion process finished successfully
'''
STARTING = 'starting'
COPYING_DISK = 'copying_disk'
ABORTED = 'aborted'
FAILED = 'error'
DONE = 'done'
class V2VError(Exception):
''' Base class for v2v errors '''
class ClientError(Exception):
''' Base class for client error '''
class InvalidVMConfiguration(ValueError):
''' Unexpected error while parsing libvirt domain xml '''
class OutputParserError(V2VError):
''' Error while parsing virt-v2v output '''
class JobExistsError(ClientError):
''' Job already exists in _jobs collection '''
err_name = 'JobExistsError'
class VolumeError(ClientError):
''' Error preparing volume '''
class NoSuchJob(ClientError):
    ''' Job does not exist in the _jobs collection '''
err_name = 'NoSuchJob'
class JobNotDone(ClientError):
''' Import process still in progress '''
err_name = 'JobNotDone'
class NoSuchOvf(V2VError):
    ''' Ovf path does not exist in /var/run/vdsm/v2v/ '''
err_name = 'V2VNoSuchOvf'
class V2VProcessError(V2VError):
''' virt-v2v process had error in execution '''
class InvalidInputError(ClientError):
''' Invalid input received '''
def supported():
return not (caps.getos() in (caps.OSName.RHEVH, caps.OSName.RHEL)
and caps.osversion()['version'].startswith('6'))
def get_external_vms(uri, username, password):
if not supported():
return errCode["noimpl"]
try:
conn = libvirtconnection.open_connection(uri=uri,
username=username,
passwd=password)
except libvirt.libvirtError as e:
logging.error('error connection to hypervisor: %r', e.message)
return {'status': {'code': errCode['V2VConnection']['status']['code'],
'message': e.message}}
with closing(conn):
vms = []
for vm in conn.listAllDomains():
root = ET.fromstring(vm.XMLDesc(0))
params = {}
_add_vm_info(vm, params)
try:
_add_general_info(root, params)
except InvalidVMConfiguration as e:
logging.error('error parsing domain xml, msg: %s xml: %s',
e.message, vm.XMLDesc(0))
continue
_add_networks(root, params)
_add_disks(root, params)
for disk in params['disks']:
_add_disk_info(conn, disk)
vms.append(params)
return {'status': doneCode, 'vmList': vms}
def convert_external_vm(uri, username, password, vminfo, job_id, irs):
job = ImportVm.from_libvirt(uri, username, password, vminfo, job_id, irs)
job.start()
_add_job(job_id, job)
return {'status': doneCode}
def convert_ova(ova_path, vminfo, job_id, irs):
job = ImportVm.from_ova(ova_path, vminfo, job_id, irs)
job.start()
_add_job(job_id, job)
return response.success()
def get_ova_info(ova_path):
ns = {'ovf': _OVF_NS, 'rasd': _RASD_NS}
try:
root = ET.fromstring(_read_ovf_from_ova(ova_path))
except ET.ParseError as e:
raise V2VError('Error reading ovf from ova, position: %r' % e.position)
vm = {}
_add_general_ovf_info(vm, root, ns)
_add_disks_ovf_info(vm, root, ns)
_add_networks_ovf_info(vm, root, ns)
return response.success(vmList=vm)
def get_converted_vm(job_id):
try:
job = _get_job(job_id)
_validate_job_done(job)
ovf = _read_ovf(job_id)
except ClientError as e:
logging.info('Converted VM error %s', e)
return errCode[e.err_name]
except V2VError as e:
logging.error('Converted VM error %s', e)
return errCode[e.err_name]
return {'status': doneCode, 'ovf': ovf}
def delete_job(job_id):
try:
job = _get_job(job_id)
_validate_job_finished(job)
_remove_job(job_id)
except ClientError as e:
logging.info('Cannot delete job, error: %s', e)
return errCode[e.err_name]
return {'status': doneCode}
def abort_job(job_id):
try:
job = _get_job(job_id)
job.abort()
except ClientError as e:
logging.info('Cannot abort job, error: %s', e)
return errCode[e.err_name]
return {'status': doneCode}
def get_jobs_status():
ret = {}
with _lock:
items = tuple(_jobs.items())
for job_id, job in items:
ret[job_id] = {
'status': job.status,
'description': job.description,
'progress': job.progress
}
return ret
def _add_job(job_id, job):
with _lock:
if job_id in _jobs:
raise JobExistsError("Job %r exists" % job_id)
_jobs[job_id] = job
def _get_job(job_id):
with _lock:
if job_id not in _jobs:
raise NoSuchJob("No such job %r" % job_id)
return _jobs[job_id]
def _remove_job(job_id):
with _lock:
if job_id not in _jobs:
raise NoSuchJob("No such job %r" % job_id)
del _jobs[job_id]
def _validate_job_done(job):
if job.status != STATUS.DONE:
raise JobNotDone("Job %r is %s" % (job.id, job.status))
def _validate_job_finished(job):
if job.status not in (STATUS.DONE, STATUS.FAILED, STATUS.ABORTED):
raise JobNotDone("Job %r is %s" % (job.id, job.status))
def _read_ovf(job_id):
file_name = os.path.join(_V2V_DIR, "%s.ovf" % job_id)
try:
with open(file_name, 'r') as f:
return f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise NoSuchOvf("No such ovf %r" % file_name)
def get_storage_domain_path(path):
'''
    prepareImage returns /prefix/sdUUID/images/imgUUID/volUUID;
    we need the storage domain's absolute path, so we go up 3 levels.
'''
return path.rsplit(os.sep, 3)[0]
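# e.g. '/prefix/sdUUID/images/imgUUID/volUUID' -> '/prefix/sdUUID'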
@contextmanager
def password_file(job_id, file_name, password):
fd = os.open(file_name, os.O_WRONLY | os.O_CREAT, 0o600)
try:
os.write(fd, password.value)
finally:
os.close(fd)
try:
yield
finally:
try:
os.remove(file_name)
except Exception:
logging.exception("Job %r error removing passwd file: %s",
job_id, file_name)
class ImportVm(object):
TERM_DELAY = 30
PROC_WAIT_TIMEOUT = 30
def __init__(self, vminfo, job_id, irs):
'''
do not use directly, use a factory method instead!
'''
self._vminfo = vminfo
self._id = job_id
self._irs = irs
self._status = STATUS.STARTING
self._description = ''
self._disk_progress = 0
self._disk_count = 1
self._current_disk = 1
self._aborted = False
self._prepared_volumes = []
self._uri = None
self._username = None
self._password = None
self._passwd_file = None
self._create_command = None
self._run_command = None
self._ova_path = None
@classmethod
def from_libvirt(cls, uri, username, password, vminfo, job_id, irs):
obj = cls(vminfo, job_id, irs)
obj._uri = uri
obj._username = username
obj._password = password
obj._passwd_file = os.path.join(_V2V_DIR, "%s.tmp" % job_id)
obj._create_command = obj._from_libvirt_command
obj._run_command = obj._run_with_password
return obj
@classmethod
def from_ova(cls, ova_path, vminfo, job_id, irs):
obj = cls(vminfo, job_id, irs)
obj._ova_path = ova_path
obj._create_command = obj._from_ova_command
obj._run_command = obj._run
return obj
def start(self):
t = threading.Thread(target=self._run_command)
t.daemon = True
t.start()
@property
def id(self):
return self._id
@property
def status(self):
return self._status
@property
def description(self):
return self._description
@property
def progress(self):
'''
        Overall progress aggregates the per-disk progress. It is flat and
        not 100% accurate - each disk takes an equal portion, i.e. if we
        have 2 disks the first covers 0-50 and the second 50-100.
        '''
        completed = (self._current_disk - 1) * 100
        return (completed + self._disk_progress) / self._disk_count
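        # e.g. with 2 disks, copying disk 2 at 40% yields (100 + 40) / 2 == 70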
def _run_with_password(self):
with password_file(self._id, self._passwd_file, self._password):
self._run()
@traceback(msg="Error importing vm")
def _run(self):
try:
self._import()
except Exception as ex:
if self._aborted:
logging.debug("Job %r was aborted", self._id)
else:
logging.exception("Job %r failed", self._id)
self._status = STATUS.FAILED
self._description = ex.message
try:
self._abort()
except Exception as e:
logging.exception('Job %r, error trying to abort: %r',
self._id, e)
finally:
self._teardown_volumes()
def _import(self):
# TODO: use the process handling http://gerrit.ovirt.org/#/c/33909/
self._prepare_volumes()
cmd = self._create_command()
logging.info('Job %r starting import', self._id)
# This is the way we run qemu-img convert jobs. virt-v2v is invoking
# qemu-img convert to perform the migration.
self._proc = execCmd(cmd, sync=False, deathSignal=signal.SIGTERM,
nice=NICENESS.HIGH, ioclass=IOCLASS.IDLE,
env=self._execution_environments())
self._proc.blocking = True
self._watch_process_output()
self._wait_for_process()
if self._proc.returncode != 0:
raise V2VProcessError('Job %r process failed exit-code: %r'
', stderr: %s' %
(self._id, self._proc.returncode,
self._proc.stderr.read(1024)))
if self._status != STATUS.ABORTED:
self._status = STATUS.DONE
logging.info('Job %r finished import successfully', self._id)
def _execution_environments(self):
env = {'LIBGUESTFS_BACKEND': 'direct'}
if 'virtio_iso_path' in self._vminfo:
env['VIRTIO_WIN'] = self._vminfo['virtio_iso_path']
return env
def _wait_for_process(self):
if self._proc.returncode is not None:
return
logging.debug("Job %r waiting for virt-v2v process", self._id)
if not self._proc.wait(timeout=self.PROC_WAIT_TIMEOUT):
raise V2VProcessError("Job %r timeout waiting for process pid=%s",
self._id, self._proc.pid)
def _watch_process_output(self):
parser = OutputParser()
for event in parser.parse(self._proc.stdout):
if isinstance(event, ImportProgress):
self._status = STATUS.COPYING_DISK
logging.info("Job %r copying disk %d/%d",
self._id, event.current_disk, event.disk_count)
self._disk_progress = 0
self._current_disk = event.current_disk
self._disk_count = event.disk_count
self._description = event.description
elif isinstance(event, DiskProgress):
self._disk_progress = event.progress
if event.progress % 10 == 0:
logging.info("Job %r copy disk %d progress %d/100",
self._id, self._current_disk, event.progress)
else:
raise RuntimeError("Job %r got unexpected parser event: %s" %
(self._id, event))
def _from_libvirt_command(self):
cmd = [_VIRT_V2V.cmd,
'-ic', self._uri,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower()]
cmd.extend(self._generate_disk_parameters())
cmd.extend(['--password-file',
self._passwd_file,
'--vdsm-vm-uuid',
self._id,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
get_storage_domain_path(self._prepared_volumes[0]['path']),
self._vminfo['vmName']])
return cmd
def _from_ova_command(self):
cmd = [_VIRT_V2V.cmd,
'-i', 'ova', self._ova_path,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower(),
'--vdsm-vm-uuid',
self._id,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
get_storage_domain_path(self._prepared_volumes[0]['path'])]
cmd.extend(self._generate_disk_parameters())
return cmd
def abort(self):
self._status = STATUS.ABORTED
logging.info('Job %r aborting...', self._id)
self._abort()
def _abort(self):
self._aborted = True
if self._proc.returncode is None:
logging.debug('Job %r killing virt-v2v process', self._id)
try:
self._proc.kill()
except OSError as e:
if e.errno != errno.ESRCH:
raise
logging.debug('Job %r virt-v2v process not running',
self._id)
else:
logging.debug('Job %r virt-v2v process was killed',
self._id)
finally:
zombiereaper.autoReapPID(self._proc.pid)
def _get_disk_format(self):
fmt = self._vminfo.get('format', 'raw').lower()
if fmt == 'cow':
return 'qcow2'
return fmt
def _generate_disk_parameters(self):
parameters = []
for disk in self._vminfo['disks']:
try:
parameters.append('--vdsm-image-uuid')
parameters.append(disk['imageID'])
parameters.append('--vdsm-vol-uuid')
parameters.append(disk['volumeID'])
except KeyError as e:
raise InvalidInputError('Job %r missing required property: %s'
% (self._id, e))
return parameters
def _prepare_volumes(self):
if len(self._vminfo['disks']) < 1:
raise InvalidInputError('Job %r cannot import vm with no disk',
self._id)
for disk in self._vminfo['disks']:
drive = {'poolID': self._vminfo['poolID'],
'domainID': self._vminfo['domainID'],
'volumeID': disk['volumeID'],
'imageID': disk['imageID']}
res = self._irs.prepareImage(drive['domainID'],
drive['poolID'],
drive['imageID'],
drive['volumeID'])
if res['status']['code']:
raise VolumeError('Job %r bad volume specification: %s' %
(self._id, drive))
drive['path'] = res['path']
self._prepared_volumes.append(drive)
def _teardown_volumes(self):
for drive in self._prepared_volumes:
try:
self._irs.teardownImage(drive['domainID'],
drive['poolID'],
drive['imageID'])
except Exception as e:
logging.error('Job %r error tearing down drive: %s',
self._id, e)
class OutputParser(object):
COPY_DISK_RE = re.compile(r'.*(Copying disk (\d+)/(\d+)).*')
DISK_PROGRESS_RE = re.compile(r'\s+\((\d+).*')
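    # Illustrative (assumed) virt-v2v output lines matched by these patterns:
    #   "Copying disk 1/3 ..."  -> yields an ImportProgress event
    #   "    (42/100%)"         -> yields a DiskProgress event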
def parse(self, stream):
for line in stream:
if 'Copying disk' in line:
description, current_disk, disk_count = self._parse_line(line)
yield ImportProgress(int(current_disk), int(disk_count),
description)
for chunk in self._iter_progress(stream):
progress = self._parse_progress(chunk)
yield DiskProgress(progress)
if progress == 100:
break
def _parse_line(self, line):
m = self.COPY_DISK_RE.match(line)
if m is None:
raise OutputParserError('unexpected format in "Copying disk"'
', line: %r' % line)
return m.group(1), m.group(2), m.group(3)
def _iter_progress(self, stream):
chunk = ''
while True:
            c = stream.read(1)
            if not c:  # end of stream; stop instead of spinning forever
                return
            chunk += c
            if c == '\r':
                yield chunk
                chunk = ''
def _parse_progress(self, chunk):
m = self.DISK_PROGRESS_RE.match(chunk)
if m is None:
raise OutputParserError('error parsing progress, chunk: %r'
% chunk)
try:
return int(m.group(1))
except ValueError:
raise OutputParserError('error parsing progress regex: %r'
% m.groups)
def _mem_to_mib(size, unit):
lunit = unit.lower()
if lunit in ('bytes', 'b'):
return size / 1024 / 1024
elif lunit in ('kib', 'k'):
return size / 1024
elif lunit in ('mib', 'm'):
return size
elif lunit in ('gib', 'g'):
return size * 1024
elif lunit in ('tib', 't'):
return size * 1024 * 1024
else:
raise InvalidVMConfiguration("Invalid currentMemory unit attribute:"
" %r" % unit)
def _add_vm_info(vm, params):
params['vmName'] = vm.name()
if vm.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
params['status'] = "Down"
else:
params['status'] = "Up"
def _add_general_info(root, params):
e = root.find('./uuid')
if e is not None:
params['vmId'] = e.text
e = root.find('./currentMemory')
if e is not None:
try:
size = int(e.text)
except ValueError:
raise InvalidVMConfiguration("Invalid 'currentMemory' value: %r"
% e.text)
unit = e.get('unit', 'KiB')
params['memSize'] = _mem_to_mib(size, unit)
e = root.find('./vcpu')
if e is not None:
try:
params['smp'] = int(e.text)
except ValueError:
raise InvalidVMConfiguration("Invalid 'vcpu' value: %r" % e.text)
e = root.find('./os/type/[@arch]')
if e is not None:
params['arch'] = e.get('arch')
def _add_disk_info(conn, disk):
if 'alias' in disk.keys():
try:
vol = conn.storageVolLookupByPath(disk['alias'])
_, capacity, alloc = vol.info()
except libvirt.libvirtError:
logging.exception("Error getting disk size")
        else:
            disk['capacity'] = str(capacity)
            disk['allocation'] = str(alloc)
def _add_disks(root, params):
params['disks'] = []
disks = root.findall('.//disk[@type="file"]')
for disk in disks:
d = {}
device = disk.get('device')
if device is not None:
d['type'] = device
target = disk.find('./target/[@dev]')
if target is not None:
d['dev'] = target.get('dev')
source = disk.find('./source/[@file]')
if source is not None:
d['alias'] = source.get('file')
params['disks'].append(d)
def _add_networks(root, params):
params['networks'] = []
interfaces = root.findall('.//interface')
for iface in interfaces:
i = {}
if 'type' in iface.attrib:
i['type'] = iface.attrib['type']
mac = iface.find('./mac/[@address]')
if mac is not None:
i['macAddr'] = mac.get('address')
source = iface.find('./source/[@bridge]')
if source is not None:
i['bridge'] = source.get('bridge')
target = iface.find('./target/[@dev]')
if target is not None:
i['dev'] = target.get('dev')
model = iface.find('./model/[@type]')
if model is not None:
i['model'] = model.get('type')
params['networks'].append(i)
def _read_ovf_from_ova(ova_path):
# FIXME: change to tarfile package when support --to-stdout
cmd = ['/usr/bin/tar', 'xf', ova_path, '*.ovf', '--to-stdout']
rc, output, error = execCmd(cmd)
if rc:
raise V2VError(error)
return ''.join(output)
def _add_general_ovf_info(vm, node, ns):
vm['status'] = 'Down'
vmName = node.find('./ovf:VirtualSystem/ovf:Name', ns)
if vmName is not None:
vm['vmName'] = vmName.text
else:
raise V2VError('Error parsing ovf information: no ovf:Name')
memSize = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
'rasd:VirtualQuantity' % _OVF_RESOURCE_MEMORY, ns)
if memSize is not None:
vm['memSize'] = int(memSize.text)
else:
raise V2VError('Error parsing ovf information: no memory size')
smp = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
'rasd:VirtualQuantity' % _OVF_RESOURCE_CPU, ns)
if smp is not None:
vm['smp'] = int(smp.text)
else:
raise V2VError('Error parsing ovf information: no cpu info')
def _add_disks_ovf_info(vm, node, ns):
vm['disks'] = []
for d in node.findall(".//ovf:DiskSection/ovf:Disk", ns):
disk = {'type': 'disk'}
capacity = d.attrib.get('{%s}capacity' % _OVF_NS)
disk['capacity'] = str(int(capacity) * 1024 * 1024 * 1024)
fileref = d.attrib.get('{%s}fileRef' % _OVF_NS)
alias = node.find('.//ovf:References/ovf:File[@ovf:id="%s"]' %
fileref, ns)
if alias is not None:
disk['alias'] = alias.attrib.get('{%s}href' % _OVF_NS)
disk['allocation'] = str(alias.attrib.get('{%s}size' % _OVF_NS))
else:
raise V2VError('Error parsing ovf information: disk href info')
vm['disks'].append(disk)
def _add_networks_ovf_info(vm, node, ns):
vm['networks'] = []
for n in node.findall('.//ovf:Item[rasd:ResourceType="%d"]'
% _OVF_RESOURCE_NETWORK, ns):
net = {}
dev = n.find('./rasd:ElementName', ns)
if dev is not None:
net['dev'] = dev.text
else:
raise V2VError('Error parsing ovf information: '
'network element name')
model = n.find('./rasd:ResourceSubType', ns)
if model is not None:
net['model'] = model.text
else:
raise V2VError('Error parsing ovf information: network model')
bridge = n.find('./rasd:Connection', ns)
if bridge is not None:
net['bridge'] = bridge.text
net['type'] = 'bridge'
else:
net['type'] = 'interface'
vm['networks'].append(net)
| gpl-2.0 | 1,767,331,554,670,716,700 | 31.360697 | 79 | 0.547352 | false | 3.807698 | false | false | false |
omriabnd/UCCA-App | Server/uccaApp/models/Users.py | 1 | 1828 | from datetime import datetime
from rest_framework.exceptions import ValidationError
from uccaApp.models import Tabs, Constants, Roles
from django.db import models
from django.contrib.auth.models import User, Group
class Users(models.Model):
id = models.AutoField(primary_key=True)
user_auth = models.OneToOneField(User,null=False, related_name="base_user", default=1, on_delete=models.CASCADE,unique=True)
# user_group = models.OneToOneField(Group,null=False, related_name="base_user", default=1, on_delete=models.CASCADE,unique=True)
first_name = models.CharField(max_length=100, default='')
last_name = models.CharField(max_length=100, default='')
email = models.EmailField(max_length=100,unique=True)
organization = models.CharField(max_length=Constants.ORGANIZATION_MAX_LENGTH)
affiliation = models.CharField(max_length=Constants.ORGANIZATION_MAX_LENGTH)
role = models.ForeignKey(Roles,max_length=256,db_column="role")
created_by = models.ForeignKey(User,null=True,blank=True, related_name="created_by_user",db_column="created_by")
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(default=datetime.now, blank=True)
updated_at = models.DateTimeField(auto_now=True, blank=True)
def __unicode__(self):
return self.first_name
class Meta:
db_table="users"
def set_group(self,user_id,new_role_name):
# remove users permissions
User.objects.get(pk=user_id).groups.clear()
# grant new group to user
Group.objects.get(name=new_role_name).user_set.add(User.objects.get(pk=user_id))
def validate_email_unique(email):
exists = User.objects.filter(email=email)
if exists:
raise ValidationError("Email address %s already exits, must be unique" % email) | gpl-3.0 | -3,633,067,282,102,347,000 | 41.534884 | 132 | 0.721007 | false | 3.619802 | false | false | false |
skosukhin/spack | lib/spack/spack/cmd/compiler.py | 1 | 7883 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import print_function
import argparse
import sys
from six import iteritems
import llnl.util.tty as tty
import spack.compilers
import spack.config
import spack.spec
from llnl.util.lang import index_by
from llnl.util.tty.colify import colify
from llnl.util.tty.color import colorize
from spack.spec import CompilerSpec, ArchSpec
description = "manage compilers"
section = "system"
level = "long"
def setup_parser(subparser):
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='compiler_command')
scopes = spack.config.config_scopes
# Find
find_parser = sp.add_parser(
'find', aliases=['add'],
help='search the system for compilers to add to Spack configuration')
find_parser.add_argument('add_paths', nargs=argparse.REMAINDER)
find_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_modify_scope,
help="configuration scope to modify")
# Remove
remove_parser = sp.add_parser(
'remove', aliases=['rm'], help='remove compiler by spec')
remove_parser.add_argument(
'-a', '--all', action='store_true',
help='remove ALL compilers that match spec')
remove_parser.add_argument('compiler_spec')
remove_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_modify_scope,
help="configuration scope to modify")
# List
list_parser = sp.add_parser('list', help='list available compilers')
list_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_list_scope,
help="configuration scope to read from")
# Info
info_parser = sp.add_parser('info', help='show compiler paths')
info_parser.add_argument('compiler_spec')
info_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_list_scope,
help="configuration scope to read from")
def compiler_find(args):
"""Search either $PATH or a list of paths OR MODULES for compilers and
add them to Spack's configuration.
"""
paths = args.add_paths
# Don't initialize compilers config via compilers.get_compiler_config.
# Just let compiler_find do the
# entire process and return an empty config from all_compilers
# Default for any other process is init_config=True
compilers = [c for c in spack.compilers.find_compilers(*paths)]
new_compilers = []
for c in compilers:
arch_spec = ArchSpec(None, c.operating_system, c.target)
same_specs = spack.compilers.compilers_for_spec(
c.spec, arch_spec, init_config=False)
if not same_specs:
new_compilers.append(c)
if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers,
scope=args.scope,
init_config=False)
n = len(new_compilers)
s = 's' if n > 1 else ''
filename = spack.config.get_config_filename(args.scope, 'compilers')
tty.msg("Added %d new compiler%s to %s" % (n, s, filename))
colify(reversed(sorted(c.spec for c in new_compilers)), indent=4)
else:
tty.msg("Found no new compilers")
tty.msg("Compilers are defined in the following files:")
colify(spack.compilers.compiler_config_files(), indent=4)
def compiler_remove(args):
cspec = CompilerSpec(args.compiler_spec)
compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
if not compilers:
tty.die("No compilers match spec %s" % cspec)
elif not args.all and len(compilers) > 1:
tty.error("Multiple compilers match spec %s. Choose one:" % cspec)
colify(reversed(sorted([c.spec for c in compilers])), indent=4)
tty.msg("Or, use `spack compiler remove -a` to remove all of them.")
sys.exit(1)
for compiler in compilers:
spack.compilers.remove_compiler_from_config(
compiler.spec, scope=args.scope)
tty.msg("Removed compiler %s" % compiler.spec)
def compiler_info(args):
"""Print info about all compilers matching a spec."""
cspec = CompilerSpec(args.compiler_spec)
compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
if not compilers:
tty.error("No compilers match spec %s" % cspec)
else:
for c in compilers:
print(str(c.spec) + ":")
print("\ttarget: " + c.target)
print("\toperating_system: " + c.operating_system)
print("\tpaths:")
for cpath in ['cc', 'cxx', 'f77', 'fc']:
print("\t\t%s: %s" % (cpath, getattr(c, cpath, None)))
if any(c.flags):
print("\tflags:")
for flag, flag_value in iteritems(c.flags):
print("\t\t%s: %s" % (flag, flag_value))
else:
print("\tflags: " + str(type(c.flags)()))
if any(c.environment):
print("\tenvironment:")
for command in c.environment:
print("\t\t%s" % command)
else:
print("\tenvironment: " + str(type(c.environment)()))
if any(c.extra_rpaths):
print("\tExtra RPATHs:")
for extra_rpath in c.extra_rpaths:
print("\t\t" + extra_rpath)
else:
print("\tExtra RPATHs: " + str(type(c.extra_rpaths)()))
if any(c.modules):
print("\tmodules:")
for module in c.modules:
print("\t\t" + module)
else:
print("\tmodules: " + str(type(c.modules)()))
def compiler_list(args):
tty.msg("Available compilers")
index = index_by(spack.compilers.all_compilers(scope=args.scope),
lambda c: (c.spec.name, c.operating_system, c.target))
ordered_sections = sorted(index.items(), key=lambda item: item[0])
for i, (key, compilers) in enumerate(ordered_sections):
if i >= 1:
print()
name, os, target = key
os_str = os
if target:
os_str += "-%s" % target
cname = "%s{%s} %s" % (spack.spec.compiler_color, name, os_str)
tty.hline(colorize(cname), char='-')
colify(reversed(sorted(c.spec for c in compilers)))
def compiler(parser, args):
action = {'add': compiler_find,
'find': compiler_find,
'remove': compiler_remove,
'rm': compiler_remove,
'info': compiler_info,
'list': compiler_list}
action[args.compiler_command](args)
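# Typical invocations of the subcommands defined above (for reference; the compiler spec is just an example):
#   spack compiler find            # detect compilers on $PATH and add them to the configuration
#   spack compiler list            # list all configured compilers
#   spack compiler info gcc@7.2.0  # show paths, flags and modules for matching compilers
#   spack compiler remove gcc@7.2.0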
| lgpl-2.1 | 1,675,584,116,106,376,700 | 36.899038 | 78 | 0.608905 | false | 3.866111 | true | false | false |
380wmda999/sphinx2.2.11-string-4G | api/sphinxapi.py | 1 | 35093 | #
# $Id$
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006, Mike Osadnik
# Copyright (c) 2006-2016, Andrew Aksyonoff
# Copyright (c) 2008-2016, Sphinx Technologies Inc
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License. You should
# have received a copy of the LGPL license along with this program; if you
# did not, you can find it at http://www.gnu.org/
#
# WARNING!!!
#
# As of 2015, we strongly recommend to use either SphinxQL or REST APIs
# rather than the native SphinxAPI.
#
# While both the native SphinxAPI protocol and the existing APIs will
# continue to exist, and perhaps should not even break (too much), exposing
# all the new features via multiple different native API implementations
# is too much of a support complication for us.
#
# That said, you're welcome to overtake the maintenance of any given
# official API, and remove this warning ;)
#
import sys
import select
import socket
import re
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS = 3
SEARCHD_COMMAND_PERSIST = 4
SEARCHD_COMMAND_STATUS = 5
SEARCHD_COMMAND_FLUSHATTRS = 7
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x11E
VER_COMMAND_EXCERPT = 0x104
VER_COMMAND_UPDATE = 0x103
VER_COMMAND_KEYWORDS = 0x100
VER_COMMAND_STATUS = 0x101
VER_COMMAND_FLUSHATTRS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
SPH_RANK_PROXIMITY = 4
SPH_RANK_MATCHANY = 5
SPH_RANK_FIELDMASK = 6
SPH_RANK_SPH04 = 7
SPH_RANK_EXPR = 8
SPH_RANK_TOTAL = 9
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
SPH_FILTER_STRING = 3
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_BIGINT = 6
SPH_ATTR_STRING = 7
SPH_ATTR_FACTORS = 1001
SPH_ATTR_MULTI = 0X40000001L
SPH_ATTR_MULTI64 = 0X40000002L
SPH_ATTR_TYPES = (SPH_ATTR_NONE,
SPH_ATTR_INTEGER,
SPH_ATTR_TIMESTAMP,
SPH_ATTR_ORDINAL,
SPH_ATTR_BOOL,
SPH_ATTR_FLOAT,
SPH_ATTR_BIGINT,
SPH_ATTR_STRING,
SPH_ATTR_MULTI,
SPH_ATTR_MULTI64)
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
SPH_GROUPBY_ATTRPAIR = 5
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 9312 # searchd port (default is 9312)
self._path = None # searchd unix-domain socket path
self._socket = None
		self._offset = 0 # how many records to skip from the start of the result set (default is 0)
		self._limit = 20 # how many records to return from the result set, starting at offset (default is 20)
self._mode = SPH_MATCH_EXTENDED2 # query matching mode (default is SPH_MATCH_EXTENDED2)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
		self._sortby = '' # attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0 # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._rankexpr = '' # ranking expression for SPH_RANK_EXPR
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._timeout = 1.0 # connection timeout
self._fieldweights = {} # per-field-name weights
self._overrides = {} # per-query attribute values overrides
self._select = '*' # select-list (attributes or expressions, with optional aliases)
self._query_flags = SetBit ( 0, 6, True ) # default idf=tfidf_normalized
self._predictedtime = 0 # per-query max_predicted_time
self._outerorderby = '' # outer match sort by
self._outeroffset = 0 # outer offset
self._outerlimit = 0 # outer limit
self._hasouter = False # sub-select enabled
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
def __del__ (self):
if self._socket:
self._socket.close()
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port = None):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
if host.startswith('/'):
self._path = host
return
elif host.startswith('unix://'):
self._path = host[7:]
return
self._host = host
if isinstance(port, int):
assert(port>0 and port<65536)
self._port = port
self._path = None
def SetConnectTimeout ( self, timeout ):
"""
Set connection timeout ( float second )
"""
assert (isinstance(timeout, float))
		# setting the timeout to 0 would make the connection non-blocking, which is wrong, so the timeout is clipped to a reasonable minimum
self._timeout = max ( 0.001, timeout )
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
if self._socket:
# we have a socket, but is it still alive?
sr, sw, _ = select.select ( [self._socket], [self._socket], [], 0 )
# this is how alive socket should look
if len(sr)==0 and len(sw)==1:
return self._socket
# oops, looks like it was closed, lets reopen
self._socket.close()
self._socket = None
try:
if self._path:
af = socket.AF_UNIX
addr = self._path
desc = self._path
else:
af = socket.AF_INET
addr = ( self._host, self._port )
desc = '%s;%s' % addr
sock = socket.socket ( af, socket.SOCK_STREAM )
sock.settimeout ( self._timeout )
sock.connect ( addr )
except socket.error, msg:
if sock:
sock.close()
self._error = 'connection to %s failed (%s)' % ( desc, msg )
return
		v = unpack('>L', sock.recv(4))[0]
if v<1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
if not self._socket:
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if status==SEARCHD_WARNING:
wend = 4 + unpack ( '>L', response[0:4] )[0]
self._warning = response[4:wend]
return response[wend:]
if status==SEARCHD_ERROR:
self._error = 'searchd error: '+response[4:]
return None
if status==SEARCHD_RETRY:
self._error = 'temporary searchd error: '+response[4:]
return None
if status!=SEARCHD_OK:
self._error = 'unknown status code %d' % status
return None
# check version
if ver<client_ver:
self._warning = 'searchd command v.%d.%d older than client\'s v.%d.%d, some options might not work' \
% (ver>>8, ver&0xff, client_ver>>8, client_ver&0xff)
return response
def _Send ( self, sock, req ):
"""
INTERNAL METHOD, DO NOT CALL. send request to searchd server.
"""
total = 0
while True:
sent = sock.send ( req[total:] )
if sent<=0:
break
total = total + sent
return total
def SetLimits (self, offset, limit, maxmatches=0, cutoff=0):
"""
Set offset and count into result set, and optionally set max-matches and cutoff limits.
"""
assert ( type(offset) in [int,long] and 0<=offset<16777216 )
assert ( type(limit) in [int,long] and 0<limit<16777216 )
assert(maxmatches>=0)
self._offset = offset
self._limit = limit
if maxmatches>0:
self._maxmatches = maxmatches
if cutoff>=0:
self._cutoff = cutoff
def SetMaxQueryTime (self, maxquerytime):
"""
Set maximum query time, in milliseconds, per-index. 0 means 'do not limit'.
"""
assert(isinstance(maxquerytime,int) and maxquerytime>0)
self._maxquerytime = maxquerytime
def SetMatchMode (self, mode):
"""
Set matching mode.
"""
print >> sys.stderr, 'DEPRECATED: Do not call this method or, even better, use SphinxQL instead of an API'
assert(mode in [SPH_MATCH_ALL, SPH_MATCH_ANY, SPH_MATCH_PHRASE, SPH_MATCH_BOOLEAN, SPH_MATCH_EXTENDED, SPH_MATCH_FULLSCAN, SPH_MATCH_EXTENDED2])
self._mode = mode
def SetRankingMode ( self, ranker, rankexpr='' ):
"""
Set ranking mode.
"""
assert(ranker>=0 and ranker<SPH_RANK_TOTAL)
self._ranker = ranker
self._rankexpr = rankexpr
def SetSortMode ( self, mode, clause='' ):
"""
Set sorting mode.
"""
assert ( mode in [SPH_SORT_RELEVANCE, SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC, SPH_SORT_TIME_SEGMENTS, SPH_SORT_EXTENDED, SPH_SORT_EXPR] )
assert ( isinstance ( clause, str ) )
self._sort = mode
self._sortby = clause
def SetFieldWeights (self, weights):
"""
Bind per-field weights by name; expects (name,field_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32 ( val )
self._fieldweights = weights
def SetIndexWeights (self, weights):
"""
Bind per-index weights by name; expects (name,index_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32(val)
self._indexweights = weights
def SetIDRange (self, minid, maxid):
"""
Set IDs range to match.
		Only match records if document ID is between 'minid' and 'maxid' (inclusive).
"""
assert(isinstance(minid, (int, long)))
assert(isinstance(maxid, (int, long)))
assert(minid<=maxid)
self._min_id = minid
self._max_id = maxid
def SetFilter ( self, attribute, values, exclude=0 ):
"""
Set values set filter.
Only match records where 'attribute' value is in given 'values' set.
"""
assert(isinstance(attribute, str))
assert iter(values)
for value in values:
AssertInt32 ( value )
self._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )
def SetFilterString ( self, attribute, value, exclude=0 ):
"""
Set string filter.
		Only match records where 'attribute' value is equal to the given string value.
"""
assert(isinstance(attribute, str))
assert(isinstance(value, str))
print ( "attr='%s' val='%s' " % ( attribute, value ) )
self._filters.append ( { 'type':SPH_FILTER_STRING, 'attr':attribute, 'exclude':exclude, 'value':value } )
def SetFilterRange (self, attribute, min_, max_, exclude=0 ):
"""
Set range filter.
		Only match records if 'attribute' value is between 'min_' and 'max_' (inclusive).
"""
assert(isinstance(attribute, str))
AssertInt32(min_)
AssertInt32(max_)
assert(min_<=max_)
self._filters.append ( { 'type':SPH_FILTER_RANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_ } )
def SetFilterFloatRange (self, attribute, min_, max_, exclude=0 ):
assert(isinstance(attribute,str))
assert(isinstance(min_,float))
assert(isinstance(max_,float))
assert(min_ <= max_)
self._filters.append ( {'type':SPH_FILTER_FLOATRANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_} )
def SetGeoAnchor (self, attrlat, attrlong, latitude, longitude):
assert(isinstance(attrlat,str))
assert(isinstance(attrlong,str))
assert(isinstance(latitude,float))
assert(isinstance(longitude,float))
self._anchor['attrlat'] = attrlat
self._anchor['attrlong'] = attrlong
self._anchor['lat'] = latitude
self._anchor['long'] = longitude
def SetGroupBy ( self, attribute, func, groupsort='@group desc' ):
"""
Set grouping attribute and function.
"""
assert(isinstance(attribute, str))
assert(func in [SPH_GROUPBY_DAY, SPH_GROUPBY_WEEK, SPH_GROUPBY_MONTH, SPH_GROUPBY_YEAR, SPH_GROUPBY_ATTR, SPH_GROUPBY_ATTRPAIR] )
assert(isinstance(groupsort, str))
self._groupby = attribute
self._groupfunc = func
self._groupsort = groupsort
def SetGroupDistinct (self, attribute):
assert(isinstance(attribute,str))
self._groupdistinct = attribute
def SetRetries (self, count, delay=0):
assert(isinstance(count,int) and count>=0)
assert(isinstance(delay,int) and delay>=0)
self._retrycount = count
self._retrydelay = delay
def SetOverride (self, name, type, values):
print >> sys.stderr, 'DEPRECATED: Do not call this method. Use SphinxQL REMAP() function instead.'
assert(isinstance(name, str))
assert(type in SPH_ATTR_TYPES)
assert(isinstance(values, dict))
self._overrides[name] = {'name': name, 'type': type, 'values': values}
def SetSelect (self, select):
assert(isinstance(select, str))
self._select = select
def SetQueryFlag ( self, name, value ):
known_names = [ "reverse_scan", "sort_method", "max_predicted_time", "boolean_simplify", "idf", "global_idf" ]
flags = { "reverse_scan":[0, 1], "sort_method":["pq", "kbuffer"],"max_predicted_time":[0], "boolean_simplify":[True, False], "idf":["normalized", "plain", "tfidf_normalized", "tfidf_unnormalized"], "global_idf":[True, False] }
assert ( name in known_names )
assert ( value in flags[name] or ( name=="max_predicted_time" and isinstance(value, (int, long)) and value>=0))
if name=="reverse_scan":
self._query_flags = SetBit ( self._query_flags, 0, value==1 )
if name=="sort_method":
self._query_flags = SetBit ( self._query_flags, 1, value=="kbuffer" )
if name=="max_predicted_time":
self._query_flags = SetBit ( self._query_flags, 2, value>0 )
self._predictedtime = int(value)
if name=="boolean_simplify":
self._query_flags= SetBit ( self._query_flags, 3, value )
if name=="idf" and ( value=="plain" or value=="normalized" ) :
self._query_flags = SetBit ( self._query_flags, 4, value=="plain" )
if name=="global_idf":
self._query_flags= SetBit ( self._query_flags, 5, value )
if name=="idf" and ( value=="tfidf_normalized" or value=="tfidf_unnormalized" ) :
self._query_flags = SetBit ( self._query_flags, 6, value=="tfidf_normalized" )
def SetOuterSelect ( self, orderby, offset, limit ):
assert(isinstance(orderby, str))
assert(isinstance(offset, (int, long)))
assert(isinstance(limit, (int, long)))
assert ( offset>=0 )
assert ( limit>0 )
self._outerorderby = orderby
self._outeroffset = offset
self._outerlimit = limit
self._hasouter = True
def ResetOverrides (self):
self._overrides = {}
def ResetFilters (self):
"""
Clear all filters (for multi-queries).
"""
self._filters = []
self._anchor = {}
def ResetGroupBy (self):
"""
Clear groupby settings (for multi-queries).
"""
self._groupby = ''
self._groupfunc = SPH_GROUPBY_DAY
self._groupsort = '@group desc'
self._groupdistinct = ''
def ResetQueryFlag (self):
self._query_flags = SetBit ( 0, 6, True ) # default idf=tfidf_normalized
self._predictedtime = 0
def ResetOuterSelect (self):
self._outerorderby = ''
self._outeroffset = 0
self._outerlimit = 0
self._hasouter = False
def Query (self, query, index='*', comment=''):
"""
Connect to searchd server and run given search query.
Returns None on failure; result set hash on success (see documentation for details).
"""
assert(len(self._reqs)==0)
self.AddQuery(query,index,comment)
results = self.RunQueries()
self._reqs = [] # we won't re-run erroneous batch
if not results or len(results)==0:
return None
self._error = results[0]['error']
self._warning = results[0]['warning']
if results[0]['status'] == SEARCHD_ERROR:
return None
return results[0]
def AddQuery (self, query, index='*', comment=''):
"""
Add query to batch.
"""
# build request
req = []
req.append(pack('>5L', self._query_flags, self._offset, self._limit, self._mode, self._ranker))
if self._ranker==SPH_RANK_EXPR:
req.append(pack('>L', len(self._rankexpr)))
req.append(self._rankexpr)
req.append(pack('>L', self._sort))
req.append(pack('>L', len(self._sortby)))
req.append(self._sortby)
if isinstance(query,unicode):
query = query.encode('utf-8')
assert(isinstance(query,str))
req.append(pack('>L', len(query)))
req.append(query)
req.append(pack('>L', len(self._weights)))
for w in self._weights:
req.append(pack('>L', w))
assert(isinstance(index,str))
req.append(pack('>L', len(index)))
req.append(index)
req.append(pack('>L',1)) # id64 range marker
req.append(pack('>Q', self._min_id))
req.append(pack('>Q', self._max_id))
# filters
req.append ( pack ( '>L', len(self._filters) ) )
for f in self._filters:
req.append ( pack ( '>L', len(f['attr'])) + f['attr'])
filtertype = f['type']
req.append ( pack ( '>L', filtertype))
if filtertype == SPH_FILTER_VALUES:
req.append ( pack ('>L', len(f['values'])))
for val in f['values']:
req.append ( pack ('>q', val))
elif filtertype == SPH_FILTER_RANGE:
req.append ( pack ('>2q', f['min'], f['max']))
elif filtertype == SPH_FILTER_FLOATRANGE:
req.append ( pack ('>2f', f['min'], f['max']))
elif filtertype == SPH_FILTER_STRING:
req.append ( pack ( '>L', len(f['value']) ) )
req.append ( f['value'] )
req.append ( pack ( '>L', f['exclude'] ) )
# group-by, max-matches, group-sort
req.append ( pack ( '>2L', self._groupfunc, len(self._groupby) ) )
req.append ( self._groupby )
req.append ( pack ( '>2L', self._maxmatches, len(self._groupsort) ) )
req.append ( self._groupsort )
req.append ( pack ( '>LLL', self._cutoff, self._retrycount, self._retrydelay))
req.append ( pack ( '>L', len(self._groupdistinct)))
req.append ( self._groupdistinct)
# anchor point
if len(self._anchor) == 0:
req.append ( pack ('>L', 0))
else:
attrlat, attrlong = self._anchor['attrlat'], self._anchor['attrlong']
latitude, longitude = self._anchor['lat'], self._anchor['long']
req.append ( pack ('>L', 1))
req.append ( pack ('>L', len(attrlat)) + attrlat)
req.append ( pack ('>L', len(attrlong)) + attrlong)
req.append ( pack ('>f', latitude) + pack ('>f', longitude))
# per-index weights
req.append ( pack ('>L',len(self._indexweights)))
for indx,weight in self._indexweights.items():
req.append ( pack ('>L',len(indx)) + indx + pack ('>L',weight))
# max query time
req.append ( pack ('>L', self._maxquerytime) )
# per-field weights
req.append ( pack ('>L',len(self._fieldweights) ) )
for field,weight in self._fieldweights.items():
req.append ( pack ('>L',len(field)) + field + pack ('>L',weight) )
# comment
comment = str(comment)
req.append ( pack('>L',len(comment)) + comment )
# attribute overrides
req.append ( pack('>L', len(self._overrides)) )
for v in self._overrides.values():
req.extend ( ( pack('>L', len(v['name'])), v['name'] ) )
req.append ( pack('>LL', v['type'], len(v['values'])) )
for id, value in v['values'].iteritems():
req.append ( pack('>Q', id) )
if v['type'] == SPH_ATTR_FLOAT:
req.append ( pack('>f', value) )
elif v['type'] == SPH_ATTR_BIGINT:
req.append ( pack('>q', value) )
else:
req.append ( pack('>l', value) )
# select-list
req.append ( pack('>L', len(self._select)) )
req.append ( self._select )
if self._predictedtime>0:
req.append ( pack('>L', self._predictedtime ) )
# outer
req.append ( pack('>L',len(self._outerorderby)) + self._outerorderby )
req.append ( pack ( '>2L', self._outeroffset, self._outerlimit ) )
if self._hasouter:
req.append ( pack('>L', 1) )
else:
req.append ( pack('>L', 0) )
# send query, get response
req = ''.join(req)
self._reqs.append(req)
return
def RunQueries (self):
"""
Run queries batch.
Returns None on network IO failure; or an array of result set hashes on success.
"""
if len(self._reqs)==0:
self._error = 'no queries defined, issue AddQuery() first'
return None
sock = self._Connect()
if not sock:
return None
req = ''.join(self._reqs)
length = len(req)+8
req = pack('>HHLLL', SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, length, 0, len(self._reqs))+req
self._Send ( sock, req )
response = self._GetResponse(sock, VER_COMMAND_SEARCH)
if not response:
return None
nreqs = len(self._reqs)
# parse response
max_ = len(response)
p = 0
results = []
for i in range(0,nreqs,1):
result = {}
results.append(result)
result['error'] = ''
result['warning'] = ''
status = unpack('>L', response[p:p+4])[0]
p += 4
result['status'] = status
if status != SEARCHD_OK:
length = unpack('>L', response[p:p+4])[0]
p += 4
message = response[p:p+length]
p += length
if status == SEARCHD_WARNING:
result['warning'] = message
else:
result['error'] = message
continue
# read schema
fields = []
attrs = []
nfields = unpack('>L', response[p:p+4])[0]
p += 4
while nfields>0 and p<max_:
nfields -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
fields.append(response[p:p+length])
p += length
result['fields'] = fields
nattrs = unpack('>L', response[p:p+4])[0]
p += 4
while nattrs>0 and p<max_:
nattrs -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
attr = response[p:p+length]
p += length
type_ = unpack('>L', response[p:p+4])[0]
p += 4
attrs.append([attr,type_])
result['attrs'] = attrs
# read match count
count = unpack('>L', response[p:p+4])[0]
p += 4
id64 = unpack('>L', response[p:p+4])[0]
p += 4
# read matches
result['matches'] = []
while count>0 and p<max_:
count -= 1
if id64:
doc, weight = unpack('>QL', response[p:p+12])
p += 12
else:
doc, weight = unpack('>2L', response[p:p+8])
p += 8
match = { 'id':doc, 'weight':weight, 'attrs':{} }
for i in range(len(attrs)):
if attrs[i][1] == SPH_ATTR_FLOAT:
match['attrs'][attrs[i][0]] = unpack('>f', response[p:p+4])[0]
elif attrs[i][1] == SPH_ATTR_BIGINT:
match['attrs'][attrs[i][0]] = unpack('>q', response[p:p+8])[0]
p += 4
elif attrs[i][1] == SPH_ATTR_STRING:
slen = unpack('>L', response[p:p+4])[0]
p += 4
match['attrs'][attrs[i][0]] = ''
if slen>0:
match['attrs'][attrs[i][0]] = response[p:p+slen]
p += slen-4
elif attrs[i][1] == SPH_ATTR_FACTORS:
slen = unpack('>L', response[p:p+4])[0]
p += 4
match['attrs'][attrs[i][0]] = ''
if slen>0:
match['attrs'][attrs[i][0]] = response[p:p+slen-4]
p += slen-4
p -= 4
elif attrs[i][1] == SPH_ATTR_MULTI:
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>L', response[p:p+4])[0])
p += 4
p -= 4
elif attrs[i][1] == SPH_ATTR_MULTI64:
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
nvals = nvals/2
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>q', response[p:p+8])[0])
p += 8
p -= 4
else:
match['attrs'][attrs[i][0]] = unpack('>L', response[p:p+4])[0]
p += 4
result['matches'].append ( match )
result['total'], result['total_found'], result['time'], words = unpack('>4L', response[p:p+16])
result['time'] = '%.3f' % (result['time']/1000.0)
p += 16
result['words'] = []
while words>0:
words -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
word = response[p:p+length]
p += length
docs, hits = unpack('>2L', response[p:p+8])
p += 8
result['words'].append({'word':word, 'docs':docs, 'hits':hits})
self._reqs = []
return results
def BuildExcerpts (self, docs, index, words, opts=None):
"""
		Connect to searchd server and generate excerpts from given documents.
"""
if not opts:
opts = {}
if isinstance(words,unicode):
words = words.encode('utf-8')
assert(isinstance(docs, list))
assert(isinstance(index, str))
assert(isinstance(words, str))
assert(isinstance(opts, dict))
sock = self._Connect()
if not sock:
return None
# fixup options
opts.setdefault('before_match', '<b>')
opts.setdefault('after_match', '</b>')
opts.setdefault('chunk_separator', ' ... ')
opts.setdefault('html_strip_mode', 'index')
opts.setdefault('limit', 256)
opts.setdefault('limit_passages', 0)
opts.setdefault('limit_words', 0)
opts.setdefault('around', 5)
opts.setdefault('start_passage_id', 1)
opts.setdefault('passage_boundary', 'none')
# build request
# v.1.0 req
flags = 1 # (remove spaces)
if opts.get('exact_phrase'): flags |= 2
if opts.get('single_passage'): flags |= 4
if opts.get('use_boundaries'): flags |= 8
if opts.get('weight_order'): flags |= 16
if opts.get('query_mode'): flags |= 32
if opts.get('force_all_words'): flags |= 64
if opts.get('load_files'): flags |= 128
if opts.get('allow_empty'): flags |= 256
if opts.get('emit_zones'): flags |= 512
if opts.get('load_files_scattered'): flags |= 1024
# mode=0, flags
req = [pack('>2L', 0, flags)]
# req index
req.append(pack('>L', len(index)))
req.append(index)
# req words
req.append(pack('>L', len(words)))
req.append(words)
# options
req.append(pack('>L', len(opts['before_match'])))
req.append(opts['before_match'])
req.append(pack('>L', len(opts['after_match'])))
req.append(opts['after_match'])
req.append(pack('>L', len(opts['chunk_separator'])))
req.append(opts['chunk_separator'])
req.append(pack('>L', int(opts['limit'])))
req.append(pack('>L', int(opts['around'])))
req.append(pack('>L', int(opts['limit_passages'])))
req.append(pack('>L', int(opts['limit_words'])))
req.append(pack('>L', int(opts['start_passage_id'])))
req.append(pack('>L', len(opts['html_strip_mode'])))
req.append((opts['html_strip_mode']))
req.append(pack('>L', len(opts['passage_boundary'])))
req.append((opts['passage_boundary']))
# documents
req.append(pack('>L', len(docs)))
for doc in docs:
if isinstance(doc,unicode):
doc = doc.encode('utf-8')
assert(isinstance(doc, str))
req.append(pack('>L', len(doc)))
req.append(doc)
req = ''.join(req)
# send query, get response
length = len(req)
# add header
req = pack('>2HL', SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, length)+req
self._Send ( sock, req )
response = self._GetResponse(sock, VER_COMMAND_EXCERPT )
if not response:
return []
# parse response
pos = 0
res = []
rlen = len(response)
for i in range(len(docs)):
length = unpack('>L', response[pos:pos+4])[0]
pos += 4
if pos+length > rlen:
self._error = 'incomplete reply'
return []
res.append(response[pos:pos+length])
pos += length
return res
def UpdateAttributes ( self, index, attrs, values, mva=False, ignorenonexistent=False ):
"""
Update given attribute values on given documents in given indexes.
Returns amount of updated documents (0 or more) on success, or -1 on failure.
'attrs' must be a list of strings.
'values' must be a dict with int key (document ID) and list of int values (new attribute values).
optional boolean parameter 'mva' points that there is update of MVA attributes.
In this case the 'values' must be a dict with int key (document ID) and list of lists of int values
(new MVA attribute values).
		Optional boolean parameter 'ignorenonexistent' indicates that the update should silently ignore any warnings about
		trying to update a column which does not exist in the current index schema.
Example:
res = cl.UpdateAttributes ( 'test1', [ 'group_id', 'date_added' ], { 2:[123,1000000000], 4:[456,1234567890] } )
"""
assert ( isinstance ( index, str ) )
assert ( isinstance ( attrs, list ) )
assert ( isinstance ( values, dict ) )
for attr in attrs:
assert ( isinstance ( attr, str ) )
for docid, entry in values.items():
AssertUInt32(docid)
assert ( isinstance ( entry, list ) )
assert ( len(attrs)==len(entry) )
for val in entry:
if mva:
assert ( isinstance ( val, list ) )
for vals in val:
AssertInt32(vals)
else:
AssertInt32(val)
# build request
req = [ pack('>L',len(index)), index ]
req.append ( pack('>L',len(attrs)) )
ignore_absent = 0
if ignorenonexistent: ignore_absent = 1
req.append ( pack('>L', ignore_absent ) )
mva_attr = 0
if mva: mva_attr = 1
for attr in attrs:
req.append ( pack('>L',len(attr)) + attr )
req.append ( pack('>L', mva_attr ) )
req.append ( pack('>L',len(values)) )
for docid, entry in values.items():
req.append ( pack('>Q',docid) )
for val in entry:
val_len = val
if mva: val_len = len ( val )
req.append ( pack('>L',val_len ) )
if mva:
for vals in val:
req.append ( pack ('>L',vals) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, length ) + req
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_UPDATE )
if not response:
return -1
# parse response
updated = unpack ( '>L', response[0:4] )[0]
return updated
def BuildKeywords ( self, query, index, hits ):
"""
Connect to searchd server, and generate keywords list for a given query.
Returns None on failure, or a list of keywords on success.
"""
assert ( isinstance ( query, str ) )
assert ( isinstance ( index, str ) )
assert ( isinstance ( hits, int ) )
# build request
req = [ pack ( '>L', len(query) ) + query ]
req.append ( pack ( '>L', len(index) ) + index )
req.append ( pack ( '>L', hits ) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, length ) + req
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_KEYWORDS )
if not response:
return None
# parse response
res = []
nwords = unpack ( '>L', response[0:4] )[0]
p = 4
max_ = len(response)
while nwords>0 and p<max_:
nwords -= 1
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
tokenized = response[p:p+length]
p += length
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
normalized = response[p:p+length]
p += length
entry = { 'tokenized':tokenized, 'normalized':normalized }
if hits:
entry['docs'], entry['hits'] = unpack ( '>2L', response[p:p+8] )
p += 8
res.append ( entry )
if nwords>0 or p>max_:
self._error = 'incomplete reply'
return None
return res
def Status ( self, session=False ):
"""
Get the status
"""
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
sess = 1
if session:
sess = 0
req = pack ( '>2HLL', SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, sess )
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_STATUS )
if not response:
return None
# parse response
res = []
p = 8
max_ = len(response)
while p<max_:
length = unpack ( '>L', response[p:p+4] )[0]
k = response[p+4:p+length+4]
p += 4+length
length = unpack ( '>L', response[p:p+4] )[0]
v = response[p+4:p+length+4]
p += 4+length
res += [[k, v]]
return res
### persistent connections
def Open(self):
if self._socket:
self._error = 'already connected'
return None
server = self._Connect()
if not server:
return None
# command, command version = 0, body length = 4, body = 1
request = pack ( '>hhII', SEARCHD_COMMAND_PERSIST, 0, 4, 1 )
self._Send ( server, request )
self._socket = server
return True
def Close(self):
if not self._socket:
self._error = 'not connected'
return
self._socket.close()
self._socket = None
def EscapeString(self, string):
return re.sub(r"([=\(\)|\-!@~\"&/\\\^\$\=\<])", r"\\\1", string)
def FlushAttributes(self):
sock = self._Connect()
if not sock:
return -1
request = pack ( '>hhI', SEARCHD_COMMAND_FLUSHATTRS, VER_COMMAND_FLUSHATTRS, 0 ) # cmd, ver, bodylen
self._Send ( sock, request )
response = self._GetResponse ( sock, VER_COMMAND_FLUSHATTRS )
if not response or len(response)!=4:
self._error = 'unexpected response length'
return -1
tag = unpack ( '>L', response[0:4] )[0]
return tag
def AssertInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=-2**32-1 and value<=2**32-1)
def AssertUInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=0 and value<=2**32-1)
def SetBit ( flag, bit, on ):
if on:
flag += ( 1<<bit )
else:
reset = 255 ^ ( 1<<bit )
flag = flag & reset
return flag
#
# $Id$
#
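# Minimal usage sketch (not part of the original file; assumes a searchd instance listening on
# localhost:9312 and an index called 'myindex'):
#   cl = SphinxClient()
#   cl.SetServer('localhost', 9312)
#   res = cl.Query('hello world', 'myindex')
#   if res:
#       print 'total found:', res['total_found']
#   else:
#       print 'query failed:', cl.GetLastError()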
| gpl-2.0 | -197,369,445,877,807,000 | 27.323648 | 228 | 0.632576 | false | 2.936898 | false | false | false |
nurnbeck/project-2-CMPUT-291 | ret_DATA.py | 1 | 1814 | import os
import time
import bsddb3 as bsddb
'''
Retrieve records with a given data value
    - Assumes that the database is closed before calling ret_DATA();
    - Writes (appends) the result to the file 'answers'.
For now I assume that indexfile = btree, further tests are necessary.
Tested under DB_SIZE = 10
'''
DB_FILE = "/tmp/yishuo_db/sample_db"
SDB_FILE = "/tmp/yishuo_db/IndexFile"
def ret_DATA(filetype):
if filetype == 'btree':
db = bsddb.btopen(DB_FILE, 'r')
elif filetype == 'hash':
db = bsddb.hashopen(DB_FILE, 'r')
elif filetype == 'indexfile':
db = bsddb.btopen(DB_FILE, 'r')
indexfile = bsddb.hashopen(SDB_FILE, 'r')
else:
print("Unknown type, function terminated\n")
return
# open answers for writing, appending to the end of the file
answers = open('answers', 'a')
result_lst = []
data = input("Enter the data you want to search > ")
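    # bsddb3 returns keys and values as bytes, so encode the query string before comparing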
data = data.encode(encoding = 'UTF-8')
start_time = time.time()
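    # linear scan: values are not indexed here, so compare every record's data against the query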
for key in db.keys():
if db[key] == data:
result_lst.append(key.decode(encoding = 'UTF-8'))
end_time = time.time()
elapse_time = (end_time - start_time) * 1000000
print("Result:")
data = data.decode(encoding = 'UTF-8')
if result_lst:
for key in result_lst:
print('Key:', key)
answers.write(key)
answers.write('\n')
print('Data:', data)
answers.write(data)
answers.write('\n')
answers.write('\n')
else:
print("Data not found")
print()
print(len(result_lst), "record(s) received")
print("Used", elapse_time, "micro seconds")
print()
answers.close()
db.close()
if filetype == 'indexfile':
indexfile.close()
return
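# Example usage (hypothetical; assumes the database files defined above already exist):
# ret_DATA('btree')      # search the B-tree database and append any matches to 'answers'
# ret_DATA('indexfile')  # same search, but also opens the secondary index file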
| mit | 2,081,410,483,051,653,400 | 24.914286 | 69 | 0.582139 | false | 3.529183 | false | false | false |
benjamindeleener/odoo | addons/l10n_ca/__openerp__.py | 1 | 1831 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Canada - Accounting',
'version': '1.0',
'author': 'Savoir-faire Linux',
'website': 'https://www.savoirfairelinux.com',
'category': 'Localization/Account Charts',
'description': """
This is the module to manage the Canadian accounting chart in Odoo.
===========================================================================================
Canadian accounting charts and localizations.
Fiscal positions
----------------
When considering taxes to be applied, it is the province where the delivery occurs that matters.
Therefore we decided to implement the most common case in the fiscal positions: delivery is the
responsibility of the vendor and done at the customer location.
Some examples:
1) You have a customer from another province and you deliver to his location.
On the customer, set the fiscal position to his province.
2) You have a customer from another province. However this customer comes to your location
with their truck to pick up products. On the customer, do not set any fiscal position.
3) An international vendor doesn't charge you any tax. Taxes are charged at customs
by the customs broker. On the vendor, set the fiscal position to International.
4) An international vendor charges you your provincial tax. They are registered with your
position.
""",
'depends': [
'account',
'base_iban',
'base_vat',
'l10n_multilang',
],
'data': [
'account_chart_template.xml',
'account_chart.xml',
'account_chart_template_after.xml',
'account_tax.xml',
'fiscal_templates.xml',
'account_chart_template.yml',
],
'installable': True,
'post_init_hook': 'load_translations',
}
| gpl-3.0 | 8,879,892,428,055,025,000 | 33.54717 | 96 | 0.656472 | false | 4.041943 | false | false | false |
AnotherBobSmith/CLUZ | cluz_dialog3.py | 1 | 15904 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
A QGIS plugin
CLUZ for QGIS
-------------------
begin : 2016-23-02
copyright : (C) 2016 by Bob Smith, DICE
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
import qgis
import os
import csv
import cluz_setup
import cluz_functions1
import cluz_functions3
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/forms")
from cluz_form_target import Ui_targetDialog
from cluz_form_abund_select import Ui_abundSelectDialog
from cluz_form_abund import Ui_abundDialog
from cluz_form_change import Ui_ChangeStatusDialog
from cluz_form_identify import Ui_identifyDialog
from cluz_form_met import Ui_metDialog
class targetDialog(QDialog, Ui_targetDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.clip = QApplication.clipboard()
targetDict = cluz_setup.makeTargetDict(setupObject)
if targetDict != "blank":
setupObject.targetDict = targetDict
self.loadTargetDictData(setupObject)
def loadTargetDictData(self, setupObject):
decPrec = setupObject.decimalPlaces
targetCSVFilePath = setupObject.targetPath
decPrecHeaderNameList = ["target", "spf", "conserved", "total"] # List of columns that will be changed to decimal precision
pcValueUpdate = False
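        # recompute each feature's percentage of target met and flag whether the target CSV needs rewriting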
with open(targetCSVFilePath, 'rb') as f:
targetReader = csv.reader(f)
targetHeaderList = targetReader.next()
lowerHeaderList = []
for aHeader in targetHeaderList:
lowerHeaderList.append(aHeader.lower())
self.targetTableWidget.clear()
self.targetTableWidget.setColumnCount(len(targetHeaderList))
insertRowNumber = 0
for aRow in targetReader:
pcValue = aRow[lowerHeaderList.index("pc_target")]
targetValue = float(aRow[lowerHeaderList.index("target")])
consValue = float(aRow[lowerHeaderList.index("conserved")])
if targetValue <= 0:
limboPCValue = "-1"
else:
limboPCValue = consValue / targetValue
limboPCValue *= 100
limboPCValue = cluz_setup.returnRoundedValue(setupObject, limboPCValue)
if float(limboPCValue) != float(pcValue):
pcValueUpdate = True
aRow[lowerHeaderList.index("pc_target")] = limboPCValue
addTargetTableRow(self, aRow, targetHeaderList, decPrecHeaderNameList, insertRowNumber, decPrec)
insertRowNumber += 1
self.targetTableWidget.setHorizontalHeaderLabels(targetHeaderList)
for aColValue in range(len(targetHeaderList)):
self.targetTableWidget.resizeColumnToContents(aColValue)
if pcValueUpdate == True:
cluz_setup.updateTargetCSVFromTargetDict(setupObject, setupObject.targetDict)
# http://stackoverflow.com/questions/24971305/copy-pyqt-table-selection-including-column-and-row-headers
def keyPressEvent(self, e):
if (e.modifiers() & Qt.ControlModifier):
selected = self.targetTableWidget.selectedRanges()
if e.key() == Qt.Key_C: #copy
s = ""
for r in xrange(selected[0].topRow(), selected[0].bottomRow() + 1):
for c in xrange(selected[0].leftColumn(), selected[0].rightColumn()+1):
try:
s += str(self.targetTableWidget.item(r, c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" #eliminate last '\t'
self.clip.setText(s)
def addTargetTableRow(self, aRow, targetHeaderList, decPrecHeaderNameList, insertRowNumber, decPrec):
self.targetTableWidget.insertRow(insertRowNumber)
for aColValue in range(len(targetHeaderList)):
headerName = targetHeaderList[aColValue].lower()
tableValue = aRow[aColValue]
if headerName in decPrecHeaderNameList:
tableValue = round(float(tableValue), decPrec)
tableValue = format(tableValue, "." + str(decPrec) + "f")
targTableItem = QTableWidgetItem(str(tableValue))
if headerName == "target":
targetValue = tableValue
elif headerName == "conserved":
conservedValue = tableValue
if headerName == "pc_target" and str(tableValue) == "-1":
targTableItem.setTextColor(QColor.fromRgb(128, 128, 128))
elif headerName == "pc_target" and float(tableValue) >= 0:
if float(conservedValue) < float(targetValue):
targTableItem.setTextColor(QColor.fromRgb(255, 0, 0))
else:
targTableItem.setTextColor(QColor.fromRgb(0, 102, 51))
self.targetTableWidget.setItem(insertRowNumber, aColValue, targTableItem)
class abundSelectDialog(QDialog, Ui_abundSelectDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
featStringDict = self.loadAbundSelectFeatureList(setupObject)
QObject.connect(self.okButton, SIGNAL("clicked()"), lambda: self.displayAbundValues(setupObject, featStringDict))
def loadAbundSelectFeatureList(self, setupObject):
featIDList = setupObject.targetDict.keys()
featIDList.sort()
featStringList = []
featStringDict = {}
for aFeat in featIDList:
aString = str(aFeat) + " - " + setupObject.targetDict[aFeat][0]
featStringList.append(aString)
featStringDict[aString] = aFeat
self.featListWidget.addItems(featStringList)
return featStringDict
def displayAbundValues(self, setupObject, featStringDict):
selectedFeatIDList = [featStringDict[item.text()] for item in self.featListWidget.selectedItems()]
if len(selectedFeatIDList) == 0:
selectedFeatIDList = setupObject.targetDict.keys()
self.close()
self.abundDialog = abundDialog(self, setupObject, selectedFeatIDList)
# show the dialog
self.abundDialog.show()
# Run the dialog event loop
result = self.abundDialog.exec_()
class abundDialog(QDialog, Ui_abundDialog):
def __init__(self, iface, setupObject, selectedFeatIDList):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.clip = QApplication.clipboard()
self.loadAbundDictData(setupObject, selectedFeatIDList)
def loadAbundDictData(self, setupObject, selectedFeatIDList):
decPrec = setupObject.decimalPlaces
abundPUKeyDict = setupObject.abundPUKeyDict
featSet = set(selectedFeatIDList)
abundHeaderList = ["PU_ID"]
for aFeatID in featSet:
abundHeaderList.append("F_" + str(aFeatID))
self.abundTableWidget.clear()
self.abundTableWidget.setColumnCount(len(abundHeaderList))
insertRowNumber = 0
for puID in abundPUKeyDict:
self.abundTableWidget.insertRow(insertRowNumber)
zeroValue = round(0.0, decPrec)
zeroValue = format(zeroValue, "." + str(decPrec) + "f")
blankString = str(zeroValue)
puStringList = [blankString] * len(featSet)
puAbundDict = abundPUKeyDict[puID]
for featID in puAbundDict:
if featID in featSet:
featAmount = puAbundDict[featID]
featAmount = round(float(featAmount), decPrec)
featAmount = format(featAmount, "." + str(decPrec) + "f")
featIndex = list(featSet).index(featID)
puStringList[featIndex] = str(featAmount)
puStringList.insert(0, str(puID))
for aColValue in range(len(puStringList)):
featValue = puStringList[aColValue]
abundTableItem = QTableWidgetItem(str(featValue))
self.abundTableWidget.setItem(insertRowNumber, aColValue, abundTableItem)
insertRowNumber += 1
self.abundTableWidget.setHorizontalHeaderLabels(abundHeaderList)
for aColValue in range(len(abundHeaderList)):
self.abundTableWidget.resizeColumnToContents(aColValue)
# http://stackoverflow.com/questions/24971305/copy-pyqt-table-selection-including-column-and-row-headers
def keyPressEvent(self, e):
if (e.modifiers() & Qt.ControlModifier):
selected = self.abundTableWidget.selectedRanges()
if e.key() == Qt.Key_C: #copy
s = ""
for r in xrange(selected[0].topRow(), selected[0].bottomRow() + 1):
for c in xrange(selected[0].leftColumn(), selected[0].rightColumn()+1):
try:
s += str(self.abundTableWidget.item(r, c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" #eliminate last '\t'
self.clip.setText(s)
class changeStatusDialog(QDialog, Ui_ChangeStatusDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self, None, Qt.WindowStaysOnTopHint)
self.iface = iface
self.setupUi(self)
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
self.undoButton.setEnabled(False)
QObject.connect(self.changeButton, SIGNAL("clicked()"), lambda: self.changeStatus(setupObject))
QObject.connect(self.undoButton, SIGNAL("clicked()"), lambda: self.undoStatusChange(setupObject))
QObject.connect(self.closeButton, SIGNAL("clicked()"), lambda: self.closeStatusDialog(setupObject))
def changeStatus(self, setupObject):
if self.availableButton.isChecked():
statusType = "Available"
elif self.earmarkedButton.isChecked():
statusType = "Earmarked"
elif self.conservedButton.isChecked():
statusType = "Conserved"
elif self.excludedButton.isChecked():
statusType = "Excluded"
changeLockedPUsBool = self.changeCheckBox.isChecked()
selectedPUIDStatusDict = cluz_functions3.changeStatusPuLayer(setupObject, statusType, changeLockedPUsBool)
changeAbundDict = cluz_functions3.calcChangeAbundDict(setupObject, selectedPUIDStatusDict, statusType)
targetDict = cluz_functions3.updateTargetDictWithChanges(setupObject, changeAbundDict)
setupObject.targetDict = targetDict
cluz_setup.updateTargetCSVFromTargetDict(setupObject, targetDict)
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
setupObject.selectedPUIDStatusDict = selectedPUIDStatusDict
self.undoButton.setEnabled(True)
def undoStatusChange(self, setupObject):
canvas = qgis.utils.iface.mapCanvas()
cluz_functions3.undoStatusChangeInPuLayer(setupObject)
newConTotDict = cluz_functions1.returnConTotDict(setupObject)
targetDict = cluz_functions1.updateConTotFieldsTargetDict(setupObject, newConTotDict)
cluz_setup.updateTargetCSVFromTargetDict(setupObject, targetDict)
setupObject.targetDict = targetDict
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
setupObject.selectedPUIDStatusDict = "blank"
self.undoButton.setEnabled(False)
canvas.refresh()
def closeStatusDialog(self, setupObject):
self.close()
class identifyDialog(QDialog, Ui_identifyDialog):
def __init__(self, iface, setupObject, point):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
selectedPUIDList = cluz_functions3.returnPointPUIDList(setupObject, point)
identDict, targetMetDict = cluz_functions3.makeIdentifyData(setupObject, selectedPUIDList)
titleString = cluz_functions3.setIdentifyDialogWindowTitle(selectedPUIDList, identDict)
if len(identDict.keys()) > 0:
self.identDict = identDict
self.targetMetDict = targetMetDict
self.showIdentifyData()
self.setWindowTitle(titleString)
self.setWindowTitle(titleString)
def showIdentifyData(self):
self.identifyTableWidget.clear()
self.identifyTableWidget.setColumnCount(7)
cluz_functions3.addIdenitfyDataToTableWidget(self.identifyTableWidget, self.targetMetDict, self.identDict)
headerList = ["ID ", "Name ", "Amount ", "As % of total ", "Target ", "As % of target ", "% of target currently met "]
self.identifyTableWidget.setHorizontalHeaderLabels(headerList)
for aColValue in range(len(headerList)):
self.identifyTableWidget.resizeColumnToContents(aColValue)
class metDialog(QDialog, Ui_metDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
outputPath = setupObject.outputPath
outputName = setupObject.outputName + "_mvbest.txt"
self.metTargetFile = outputPath + os.sep + outputName
self.iface = iface
self.setupUi(self)
self.metLoadTargetDictData()
self.setWindowTitle("Marxan Targets Met table for analysis " + setupObject.outputName)
def metLoadTargetDictData(self):
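        # Load the Marxan "targets met" (_mvbest.txt) output and fill the table
        # widget with one row per feature.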
targetMetDict = {}
with open(self.metTargetFile, 'rb') as f:
targetMetReader = csv.reader(f)
targetMetHeaderList = next(targetMetReader, None)
for row in targetMetReader:
puID = int(row.pop(0))
targetMetDict[puID] = row
targetIDList = targetMetDict.keys()
targetIDList.sort()
self.metTableWidget.clear()
self.metTableWidget.setColumnCount(len(targetMetHeaderList))
insertRowNumber = 0
for aFeat in targetIDList:
self.metTableWidget.insertRow(insertRowNumber)
aRowList = targetMetDict[aFeat]
aRowList.insert(0, aFeat)
for aColValue in range(len(targetMetHeaderList)):
featValue = aRowList[aColValue]
metTableItem = QTableWidgetItem(str(featValue))
self.metTableWidget.setItem(insertRowNumber,aColValue,metTableItem)
insertRowNumber += 1
self.metTableWidget.setHorizontalHeaderLabels(targetMetHeaderList)
for aColValue in range(len(targetMetHeaderList)):
self.metTableWidget.resizeColumnToContents(aColValue)
| gpl-2.0 | -8,681,222,330,805,281,000 | 42.453552 | 131 | 0.624245 | false | 4.084232 | false | false | false |
yakky/django-cms | cms/models/static_placeholder.py | 1 | 3681 | import uuid
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from six import text_type, python_2_unicode_compatible
from cms.models.fields import PlaceholderField
from cms.utils.copy_plugins import copy_plugins_to
def static_slotname(instance):
"""
Returns a string to be used as the slot
for the static placeholder field.
"""
return instance.code
@python_2_unicode_compatible
class StaticPlaceholder(models.Model):
CREATION_BY_TEMPLATE = 'template'
CREATION_BY_CODE = 'code'
CREATION_METHODS = (
(CREATION_BY_TEMPLATE, _('by template')),
(CREATION_BY_CODE, _('by code')),
)
name = models.CharField(
verbose_name=_(u'static placeholder name'), max_length=255, blank=True, default='',
help_text=_(u'Descriptive name to identify this static placeholder. Not displayed to users.'))
code = models.CharField(
verbose_name=_(u'placeholder code'), max_length=255, blank=True,
help_text=_(u'To render the static placeholder in templates.'))
draft = PlaceholderField(static_slotname, verbose_name=_(u'placeholder content'), related_name='static_draft')
public = PlaceholderField(static_slotname, editable=False, related_name='static_public')
dirty = models.BooleanField(default=False, editable=False)
creation_method = models.CharField(
verbose_name=_('creation_method'), choices=CREATION_METHODS,
default=CREATION_BY_CODE, max_length=20, blank=True,
)
site = models.ForeignKey(Site, on_delete=models.CASCADE, null=True, blank=True)
class Meta:
verbose_name = _(u'static placeholder')
verbose_name_plural = _(u'static placeholders')
app_label = 'cms'
unique_together = (('code', 'site'),)
def __str__(self):
return self.get_name()
def get_name(self):
return self.name or self.code or text_type(self.pk)
get_name.short_description = _(u'static placeholder name')
def clean(self):
# TODO: check for clashes if the random code is already taken
if not self.code:
self.code = u'static-%s' % uuid.uuid4()
if not self.site:
placeholders = StaticPlaceholder.objects.filter(code=self.code, site__isnull=True)
if self.pk:
placeholders = placeholders.exclude(pk=self.pk)
if placeholders.exists():
raise ValidationError(_("A static placeholder with the same site and code already exists"))
def publish(self, request, language, force=False):
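        # Replace the public placeholder content for this language with a copy of
        # the current draft plugins, clearing the cached public content first.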
if force or self.has_publish_permission(request):
self.public.clear(language=language)
self.public.clear_cache(language=language)
plugins = self.draft.get_plugins_list(language=language)
copy_plugins_to(plugins, self.public, no_signals=True)
self.dirty = False
self.save()
return True
return False
def has_change_permission(self, request):
if request.user.is_superuser:
return True
opts = self._meta
return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
def has_publish_permission(self, request):
if request.user.is_superuser:
return True
opts = self._meta
return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts)) and \
request.user.has_perm(opts.app_label + '.' + 'publish_page')
| bsd-3-clause | -8,891,000,634,964,670,000 | 39.01087 | 114 | 0.659332 | false | 4.018559 | false | false | false |
nickgentoo/LSTM-timepredictionPMdata | code/nick_evaluate_suffix_and_remaining_time_only_time_OHenc.py | 1 | 15048 | '''
This script takes as input the LSTM or RNN weights produced by train.py.
Change the path in line 178 of this script to point to the .h5 file
with the LSTM or RNN weights generated by train.py.
Author: Niek Tax
'''
from __future__ import division
from keras.models import load_model
import csv
import copy
import numpy as np
import distance
from itertools import izip
from jellyfish._jellyfish import damerau_levenshtein_distance
import unicodecsv
from sklearn import metrics
from math import sqrt
import time
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from collections import Counter
from keras.models import model_from_json
import sys
fileprefix=sys.argv[1]
eventlog = sys.argv[2]
csvfile = open('../data/%s' % eventlog, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
ascii_offset = 161
lastcase = ''
line = ''
firstLine = True
lines = []
timeseqs = []
timeseqs2 = []
timeseqs3 = []
timeseqs4 = []
y_times = []
times = []
times2 = []
times3 = []
times4 = []
# nick
attributes = []
attributes_dict = []
attributes_sizes = []
numlines = 0
casestarttime = None
lasteventtime = None
csvfile = open('../data/%s' % eventlog, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
ascii_offset = 161
y = []
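# Parse the event log: each row is (case id, activity id, timestamp, attributes...).
# For every case build a string of activity characters plus four per-event time
# features (time since last event, time since case start, time since midnight, weekday).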
for row in spamreader:
#print(row)
t = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
#test different format
#t = 0#time.strptime(row[2], "%Y/%m/%d %H:%M:%S")
if row[0]!=lastcase:
casestarttime = t
lasteventtime = t
lastcase = row[0]
if not firstLine:
#print (line)
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
#target
y_times.extend([times2[-1]-k for k in times2])
timeseqs3.append(times3)
timeseqs4.append(times4)
for i in xrange(len(attributes)):
#print(attributesvalues[i])
attributes[i].append(attributesvalues[i])
else:
            # first line of the log: create the per-attribute lists, dicts and size counters
for a in row[3:]:
attributes.append([])
attributes_dict.append({})
attributes_sizes.append(0)
#print(attributes)
n_events_in_trace=0
line = ''
times = []
times2 = []
times3 = []
times4 = []
attributesvalues = [ ]
numlines+=1
n_events_in_trace+=1
line+=unichr(int(row[1])+ascii_offset)
timesincelastevent = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(lasteventtime))
timesincecasestart = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(casestarttime))
midnight = datetime.fromtimestamp(time.mktime(t)).replace(hour=0, minute=0, second=0, microsecond=0)
timesincemidnight = datetime.fromtimestamp(time.mktime(t))-midnight
timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds
timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds
timediff3 = timesincemidnight.seconds
timediff4 = datetime.fromtimestamp(time.mktime(t)).weekday()
times.append(timediff)
times2.append(timediff2)
times3.append(timediff3)
times4.append(timediff4)
lasteventtime = t
firstLine = False
indexnick=0
for a in row[3:]:
if len(attributesvalues)<=indexnick:
attributesvalues.append([])
a=a.strip('"')
                # TODO: cast to integer if the value is an integer
if a!="":
try:
attr=float(a)
attributesvalues[indexnick].append(attr)
#print("float attr")
#print(a)
except:
if a not in attributes_dict[indexnick]:
attributes_dict[indexnick][a]=attributes_sizes[indexnick]+1
attributes_sizes[indexnick]=attributes_sizes[indexnick]+1
attributesvalues[indexnick].append(attributes_dict[indexnick][a])
else:
attributesvalues[indexnick].append(-1)
# if a in attributes_dict[indexnick]:
# attributesvalues.append(attributes_dict[indexnick][a])
# else:
# attributes_dict[indexnick][a]=attributes_sizes[indexnick]
# attributes_sizes[indexnick]+=1
# attributesvalues.append(attributes_dict[indexnick][a])
indexnick+=1
# add last case
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
timeseqs3.append(times3)
timeseqs4.append(times4)
y_times.extend([times2[-1] - k for k in times2])
for i in xrange(len(attributes)):
attributes[i].append(attributesvalues[i])
numlines+=1
divisor = np.mean([item for sublist in timeseqs for item in sublist])
print('divisor: {}'.format(divisor))
divisor2 = np.mean([item for sublist in timeseqs2 for item in sublist])
print('divisor2: {}'.format(divisor2))
step = 1
sentences = []
softness = 0
next_chars = []
lines = map(lambda x: x + '!', lines)
maxlen = max(map(lambda x: len(x), lines))
chars = map(lambda x: set(x), lines)
chars = list(set().union(*chars))
chars.sort()
target_chars = copy.copy(chars)
chars.remove('!')
lines = map(lambda x: x[:-2], lines)
print('total chars: {}, target chars: {}'.format(len(chars), len(target_chars)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
target_char_indices = dict((c, i) for i, c in enumerate(target_chars))
target_indices_char = dict((i, c) for i, c in enumerate(target_chars))
#print(indices_char)
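# Split the cases into three folds; the third fold is the held-out set evaluated below.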
elems_per_fold = int(round(numlines / 3))
fold1 = lines[:elems_per_fold]
fold1_t = timeseqs[:elems_per_fold]
fold1_t2 = timeseqs2[:elems_per_fold]
fold1_t3 = timeseqs3[:elems_per_fold]
fold1_t4 = timeseqs4[:elems_per_fold]
with open('output_files/folds/' + eventlog + 'fold1.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold1, fold1_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
fold2 = lines[elems_per_fold:2 * elems_per_fold]
fold2_t = timeseqs[elems_per_fold:2 * elems_per_fold]
fold2_t2 = timeseqs2[elems_per_fold:2 * elems_per_fold]
fold2_t3 = timeseqs3[elems_per_fold:2 * elems_per_fold]
fold2_t4 = timeseqs4[elems_per_fold:2 * elems_per_fold]
with open('output_files/folds/' + eventlog + 'fold2.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold2, fold2_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
fold3 = lines[2 * elems_per_fold:]
fold3_t = timeseqs[2 * elems_per_fold:]
fold3_t2 = timeseqs2[2 * elems_per_fold:]
fold3_t3 = timeseqs3[2 * elems_per_fold:]
fold3_t4 = timeseqs4[2 * elems_per_fold:]
fold3_a=[a[2*elems_per_fold:] for a in attributes]
with open('output_files/folds/' + eventlog + 'fold3.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold3, fold3_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
y_t_seq=[]
for line in fold1+fold2:
for i in range(0, len(line), 1):
if i == 0:
continue
y_t_seq.append(y_times[0:i])
divisory = np.mean([item for sublist in y_t_seq for item in sublist])
print('divisory: {}'.format(divisory))
lines = fold3
lines_t = fold3_t
lines_t2 = fold3_t2
lines_t3 = fold3_t3
lines_t4 = fold3_t4
attributes=fold3_a
# set parameters
predict_size = maxlen
# load json and create model
json_file = open('output_files/models/'+fileprefix+'_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("output_files/models/"+fileprefix+"_weights_best.h5")
print("Loaded model from disk")
y_t_seq=[]
# load model, set this to the model generated by train.py
#model = load_model('output_files/models/200_model_59-1.50.h5')
# define helper functions
def encode(ex, sentence, times,times2, times3,times4, sentences_attributes,maxlen=maxlen):
#num_features = len(chars)+5+len(sentences_attributes)
num_features = len(chars) + 5
for idx in xrange(len(attributes)):
num_features += attributes_sizes[idx] + 1
#print(num_features)
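    # Per-timestep feature layout: one-hot activity characters, five time features
    # (event index, time since last event, time since case start, time since
    # midnight, weekday), then one block per extra attribute (one-hot for
    # categorical values, a single column for numeric ones).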
X = np.zeros((1, maxlen, num_features), dtype=np.float32)
leftpad = maxlen-len(sentence)
times2 = np.cumsum(times)
#print "sentence",len(sentence)
for t, char in enumerate(sentence):
#print(t)
#midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
#timesincemidnight = times3[t]-midnight
multiset_abstraction = Counter(sentence[:t+1])
for c in chars:
if c==char:
X[0, t+leftpad, char_indices[c]] = 1
X[0, t+leftpad, len(chars)] = t+1
X[0, t+leftpad, len(chars)+1] = times[t]/divisor
X[0, t+leftpad, len(chars)+2] = times2[t]/divisor2
X[0, t+leftpad, len(chars)+3] = times3[t]/86400
X[0, t+leftpad, len(chars)+4] = times4[t]/7
# for i in xrange(len(sentences_attributes)):
# #print(str(i)+" "+str(t))
# #print(sentences_attributes[i][t])
# #nick check the zero, it is there because it was a list
# X[0, t + leftpad, len(chars) + 5 + i] = sentences_attributes[i][t]
startoh = 0
for j in xrange(len(attributes)):
# X[i, t + leftpad, len(chars) + 5+j]=sentences_attributes[j][i][t]
if attributes_sizes[j] > 0:
X[0, t + leftpad, len(chars) + 5 + startoh + sentences_attributes[j][t]] = 1
else:
X[0, t + leftpad, len(chars) + 5 + startoh] = sentences_attributes[j][t]
startoh += (attributes_sizes[j] + 1)
return X
# # define helper functions
# def encode(sentence, times, times3, sentences_attributes,maxlen=maxlen):
# num_features = len(chars)+5+len(sentences_attributes)
# X = np.zeros((1, maxlen, num_features), dtype=np.float32)
# leftpad = maxlen-len(sentence)
# times2 = np.cumsum(times)
# print "sentence",len(sentence)
# for t, char in enumerate(sentence):
# midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
# timesincemidnight = times3[t]-midnight
# multiset_abstraction = Counter(sentence[:t+1])
# for c in chars:
# if c==char:
# X[0, t+leftpad, char_indices[c]] = 1
# X[0, t+leftpad, len(chars)] = t+1
# X[0, t+leftpad, len(chars)+1] = times[t]/divisor
# X[0, t+leftpad, len(chars)+2] = times2[t]/divisor2
# X[0, t+leftpad, len(chars)+3] = timesincemidnight.seconds/86400
# X[0, t+leftpad, len(chars)+4] = times3[t].weekday()/7
# for i in xrange(len(sentences_attributes)):
# print(str(i)+" "+str(t))
# print(sentences_attributes[i][t])
# #nick check the zero, it is there because it was a list
# X[0, t + leftpad, len(chars) + 5+i]=sentences_attributes[i][t]
# return X,y
def getSymbol(predictions):
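    # Argmax decoding: return the activity symbol with the highest predicted probability.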
maxPrediction = 0
symbol = ''
    i = 0
for prediction in predictions:
if(prediction>=maxPrediction):
maxPrediction = prediction
symbol = target_indices_char[i]
i += 1
return symbol
one_ahead_gt = []
one_ahead_pred = []
two_ahead_gt = []
two_ahead_pred = []
three_ahead_gt = []
three_ahead_pred = []
y_t_seq=[]
# make predictions
with open('output_files/results/'+fileprefix+'_suffix_and_remaining_time_%s' % eventlog, 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(["Prefix length", "Groud truth", "Ground truth times", "Predicted times", "RMSE", "MAE", "Median AE"])
#considering also size 1 prefixes
#for prefix_size in range(1,maxlen):
#print(prefix_size)
#print(len(lines),len(attributes[0]))
    for ex, (line, times, times2, times3, times4) in enumerate(izip(lines, lines_t, lines_t2, lines_t3, lines_t4)):
        for prefix_size in range(1, len(line)):  # -1 added because I don't want a length-0 ground truth
#print(line,ex,len(line), len(attributes[0][ex]))
times.append(0)
cropped_line = ''.join(line[:prefix_size])
cropped_times = times[:prefix_size]
#print "times_len",len(cropped_times)
cropped_times2 = times2[:prefix_size]
cropped_times4 = times4[:prefix_size]
cropped_times3 = times3[:prefix_size]
cropped_attributes = [[] for i in xrange(len(attributes))]
for j in xrange(len(attributes)):
#print(attributes[j][ex])
cropped_attributes[j].extend(attributes[j][ex][0:prefix_size])
#print cropped_attributes
#y_t_seq.append(y_times[0:prefix_size])
#cropped_attributes= [a[:prefix_size] for a in attributes]
#print cropped_attribute
ground_truth = ''.join(line[prefix_size:prefix_size+predict_size])
ground_truth_t = times2[prefix_size-1] # era -1
#print(prefix_size,len(times2)-1)
case_end_time = times2[len(times2)-1]
ground_truth_t = case_end_time-ground_truth_t
predicted = ''
total_predicted_time = 0
#perform single prediction
enc = encode(ex,cropped_line, cropped_times,cropped_times2, cropped_times3,cropped_times4, cropped_attributes)
y = model.predict(enc, verbose=0) # make predictions
            # split predictions into separate activity and time predictions
#print y
y_t = y[0][0]
#prediction = getSymbol(y_char) # undo one-hot encoding
#cropped_line += prediction
if y_t<0:
y_t=0
cropped_times.append(y_t)
y_t = y_t * divisor
#cropped_times3.append(cropped_times3[-1] + timedelta(seconds=y_t))
total_predicted_time = total_predicted_time + y_t
output = []
if len(ground_truth)>0:
output.append(prefix_size)
output.append(unicode(ground_truth).encode("utf-8"))
output.append(ground_truth_t)
output.append(total_predicted_time)
output.append(metrics.mean_squared_error([ground_truth_t], [total_predicted_time]))
output.append(metrics.mean_absolute_error([ground_truth_t], [total_predicted_time]))
output.append(metrics.median_absolute_error([ground_truth_t], [total_predicted_time]))
spamwriter.writerow(output)
| gpl-3.0 | -5,484,811,627,186,544,000 | 37.192893 | 126 | 0.624003 | false | 3.246602 | false | false | false |
shoyer/xarray | xarray/tests/test_variable.py | 1 | 87655 | import warnings
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import pytz
from xarray import Coordinate, Dataset, IndexVariable, Variable, set_options
from xarray.core import dtypes, duck_array_ops, indexing
from xarray.core.common import full_like, ones_like, zeros_like
from xarray.core.indexing import (
BasicIndexer,
CopyOnWriteArray,
DaskIndexingAdapter,
LazilyOuterIndexedArray,
MemoryCachedArray,
NumpyIndexingAdapter,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
)
from xarray.core.pycompat import dask_array_type
from xarray.core.utils import NDArrayMixin
from xarray.core.variable import as_compatible_data, as_variable
from xarray.tests import requires_bottleneck
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
raises_regex,
requires_dask,
requires_sparse,
source_ndarray,
)
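# Pairs of xarray-style pad arguments and the equivalent numpy.pad width tuples
# for a 3-dimensional ("x", "y", "z") array.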
_PAD_XR_NP_ARGS = [
[{"x": (2, 1)}, ((2, 1), (0, 0), (0, 0))],
[{"x": 1}, ((1, 1), (0, 0), (0, 0))],
[{"y": (0, 3)}, ((0, 0), (0, 3), (0, 0))],
[{"x": (3, 1), "z": (2, 0)}, ((3, 1), (0, 0), (2, 0))],
[{"x": (3, 1), "z": 2}, ((3, 1), (0, 0), (2, 2))],
]
class VariableSubclassobjects:
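    # Shared test suite; concrete subclasses set ``cls`` (e.g. Variable) so the
    # same assertions run against each variable type.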
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(["time"], data, {"foo": "bar"})
assert v.dims == ("time",)
assert_array_equal(v.values, data)
assert v.dtype == float
assert v.shape == (10,)
assert v.size == 10
assert v.sizes == {"time": 10}
assert v.nbytes == 80
assert v.ndim == 1
assert len(v) == 10
assert v.attrs == {"foo": "bar"}
def test_attrs(self):
v = self.cls(["time"], 0.5 * np.arange(10))
assert v.attrs == {}
attrs = {"foo": "bar"}
v.attrs = attrs
assert v.attrs == attrs
assert isinstance(v.attrs, dict)
v.attrs["foo"] = "baz"
assert v.attrs["foo"] == "baz"
def test_getitem_dict(self):
v = self.cls(["x"], np.random.randn(5))
actual = v[{"x": 0}]
expected = v[0]
assert_identical(expected, actual)
def test_getitem_1d(self):
data = np.array([0, 1, 2])
v = self.cls(["x"], data)
v_new = v[dict(x=[0, 1])]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=slice(None))]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
v_new = v[dict(x=Variable("a", [0, 1]))]
assert v_new.dims == ("a",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=1)]
assert v_new.dims == ()
assert_array_equal(v_new, data[1])
# tuple argument
v_new = v[slice(None)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
def test_getitem_1d_fancy(self):
v = self.cls(["x"], [0, 1, 2])
# 1d-variable should be indexable by multi-dimensional Variable
ind = Variable(("a", "b"), [[0, 1], [0, 1]])
v_new = v[ind]
assert v_new.dims == ("a", "b")
expected = np.array(v._data)[([0, 1], [0, 1]), ...]
assert_array_equal(v_new, expected)
# boolean indexing
ind = Variable(("x",), [True, False, True])
v_new = v[ind]
assert_identical(v[[0, 2]], v_new)
v_new = v[[True, False, True]]
assert_identical(v[[0, 2]], v_new)
with raises_regex(IndexError, "Boolean indexer should"):
ind = Variable(("a",), [True, False, True])
v[ind]
def test_getitem_with_mask(self):
v = self.cls(["x"], [0, 1, 2])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([0, -1, 1]), self.cls(["x"], [0, np.nan, 1])
)
assert_identical(v._getitem_with_mask(slice(2)), self.cls(["x"], [0, 1]))
assert_identical(
v._getitem_with_mask([0, -1, 1], fill_value=-99),
self.cls(["x"], [0, -99, 1]),
)
def test_getitem_with_mask_size_zero(self):
v = self.cls(["x"], [])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([-1, -1, -1]),
self.cls(["x"], [np.nan, np.nan, np.nan]),
)
def test_getitem_with_mask_nd_indexer(self):
v = self.cls(["x"], [0, 1, 2])
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(v._getitem_with_mask(indexer, fill_value=-1), indexer)
def _assertIndexedLikeNDArray(self, variable, expected_value0, expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
assert variable[0].shape == ()
assert variable[0].ndim == 0
assert variable[0].size == 1
# test identity
assert variable.equals(variable.copy())
assert variable.identical(variable.copy())
# check value is equal for both ndarray and Variable
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
np.testing.assert_equal(variable.values[0], expected_value0)
np.testing.assert_equal(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
assert type(variable.values[0]) == type(expected_value0)
assert type(variable[0].values) == type(expected_value0)
elif expected_dtype is not False:
assert variable.values[0].dtype == expected_dtype
assert variable[0].values.dtype == expected_dtype
def test_index_0d_int(self):
for value, dtype in [(0, np.int_), (np.int32(0), np.int32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_), (np.float32(0.5), np.float32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
value = "foo"
dtype = np.dtype("U3")
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(["x"], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
x = self.cls(["x"], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(["x"], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
x = self.cls(["x"], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
def test_index_0d_not_a_time(self):
d = np.datetime64("NaT", "ns")
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper:
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return "{}(item={!r})".format(type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls("x", [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls("x", listarray)
assert_array_equal(x.data, listarray)
assert_array_equal(x[0].data, listarray.squeeze())
assert_array_equal(x.squeeze().data, listarray.squeeze())
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range("2011-09-01", periods=10)
for dates in [date_range, date_range.values, date_range.to_pydatetime()]:
expected = self.cls("t", dates)
for times in [
[expected[i] for i in range(10)],
[expected[i : (i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)],
]:
actual = Variable.concat(times, "t")
assert expected.dtype == actual.dtype
assert_array_equal(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls("time", pd.date_range("2000-01-01", periods=5))
expected = np.datetime64("2000-01-01", "ns")
assert x[0].values == expected
def test_datetime64_conversion(self):
times = pd.date_range("2000-01-01", periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("datetime64[s]"), False),
(times.to_pydatetime(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("datetime64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("timedelta64[s]"), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("timedelta64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls("x", data)
assert actual.dtype == data.dtype
def test_pandas_data(self):
v = self.cls(["x"], pd.Series([0, 1, 2], index=[3, 2, 1]))
assert_identical(v, v[[0, 1, 2]])
v = self.cls(["x"], pd.Index([0, 1, 2]))
assert v[0].values == v.values[0]
def test_pandas_period_index(self):
v = self.cls(["x"], pd.period_range(start="2000", periods=20, freq="B"))
v = v.load() # for dask-based Variable
assert v[0] == pd.Period("2000", freq="B")
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
        # do we really need `.to_base_variable()` here?
        # it is probably a breaking change that `+v` changes the type
v = self.cls(["x"], x)
base_v = v.to_base_variable()
# unary ops
assert_identical(base_v, +v)
assert_identical(base_v, abs(v))
assert_array_equal((-v).values, -x)
# binary ops with numbers
assert_identical(base_v, v + 0)
assert_identical(base_v, 0 + v)
assert_identical(base_v, v * 1)
# binary ops with numpy arrays
assert_array_equal((v * x).values, x ** 2)
assert_array_equal((x * v).values, x ** 2)
assert_array_equal(v - y, v - 1)
assert_array_equal(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(["x"], x, {"units": "meters"})
assert_identical(base_v, +v2)
# binary ops with all variables
assert_array_equal(v + v, 2 * v)
w = self.cls(["x"], y, {"foo": "bar"})
assert_identical(v + w, self.cls(["x"], x + y).to_base_variable())
assert_array_equal((v * w).values, x * y)
# something complicated
assert_array_equal((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
assert float == (+v).dtype
assert float == (+v).values.dtype
assert float == (0 + v).dtype
assert float == (0 + v).values.dtype
# check types of returned data
assert isinstance(+v, Variable)
assert not isinstance(+v, IndexVariable)
assert isinstance(0 + v, Variable)
assert not isinstance(0 + v, IndexVariable)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(["x"], x)
actual = v.sum()
expected = Variable((), 10)
assert_identical(expected, actual)
assert type(actual) is Variable
def test_array_interface(self):
x = np.arange(5)
v = self.cls(["x"], x)
assert_array_equal(np.asarray(v), x)
# test patched in methods
assert_array_equal(v.astype(float), x.astype(float))
        # note: this is likely a breaking change, since argsort changes the type
assert_identical(v.argsort(), v.to_base_variable())
assert_identical(v.clip(2, 3), self.cls("x", x.clip(2, 3)).to_base_variable())
# test ufuncs
assert_identical(np.sin(v), self.cls(["x"], np.sin(x)).to_base_variable())
assert isinstance(np.sin(v), Variable)
assert not isinstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [
range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range("2000-01-01", periods=3),
np.array(["a", "b", "c"], dtype=object),
]:
yield (self.cls("x", data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
assert_array_equal(v.values, np.asarray(data))
assert_array_equal(np.asarray(v), np.asarray(data))
assert v[0].values == np.asarray(data)[0]
assert np.asarray(v[0]) == np.asarray(data)[0]
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
assert v.equals(v2)
assert v.identical(v2)
assert v.no_conflicts(v2)
assert v[0].equals(v2[0])
assert v[0].identical(v2[0])
assert v[0].no_conflicts(v2[0])
assert v[:2].equals(v2[:2])
assert v[:2].identical(v2[:2])
assert v[:2].no_conflicts(v2[:2])
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = Variable("x", 3 * [False])
for v, _ in self.example_1d_objects():
actual = "z" == v
assert_identical(expected, actual)
actual = ~("z" != v)
assert_identical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls("x", range(3), {"foo": 1}, {"bar": 2})
for actual in [
expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.set_dims({"x": 3}),
expected.copy(deep=True),
expected.copy(deep=False),
]:
assert_identical(expected.to_base_variable(), actual.to_base_variable())
assert expected.encoding == actual.encoding
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(["a"], x)
w = self.cls(["a"], y)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat([v, w], "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
with raises_regex(ValueError, "Variable has dimensions"):
Variable.concat([v, Variable(["c"], y)], "b")
# test indexers
actual = Variable.concat(
[v, w], positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)], dim="a"
)
expected = Variable("a", np.array([x, y]).ravel(order="F"))
assert_identical(expected, actual)
# test concatenating along a dimension
v = Variable(["time", "x"], np.random.random((10, 8)))
assert_identical(v, Variable.concat([v[:5], v[5:]], "time"))
assert_identical(v, Variable.concat([v[:5], v[5:6], v[6:]], "time"))
assert_identical(v, Variable.concat([v[:1], v[1:]], "time"))
# test dimension order
assert_identical(v, Variable.concat([v[:, :5], v[:, 5:]], "x"))
with raises_regex(ValueError, "all input arrays must have"):
Variable.concat([v[:, 0], v[:, 1:]], "x")
def test_concat_attrs(self):
# always keep attrs from first variable
v = self.cls("a", np.arange(5), {"foo": "bar"})
w = self.cls("a", np.ones(5))
expected = self.cls(
"a", np.concatenate([np.arange(5), np.ones(5)])
).to_base_variable()
expected.attrs["foo"] = "bar"
assert_identical(expected, Variable.concat([v, w], "a"))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ["S", "U"]:
x = self.cls("animal", np.array(["horse"], dtype=kind))
y = self.cls("animal", np.array(["aardvark"], dtype=kind))
actual = Variable.concat([x, y], "animal")
expected = Variable("animal", np.array(["horse", "aardvark"], dtype=kind))
assert_equal(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls("x", ["0", "1", "2"])
b = self.cls("x", ["3", "4"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.arange(5).astype(str))
assert_identical(expected, actual)
assert actual.dtype.kind == expected.dtype.kind
def test_concat_mixed_dtypes(self):
a = self.cls("x", [0, 1])
b = self.cls("x", ["two"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.array([0, 1, "two"], dtype=object))
assert_identical(expected, actual)
assert actual.dtype == object
@pytest.mark.parametrize("deep", [True, False])
@pytest.mark.parametrize("astype", [float, int, str])
def test_copy(self, deep, astype):
v = self.cls("x", (0.5 * np.arange(10)).astype(astype), {"foo": "bar"})
w = v.copy(deep=deep)
assert type(v) is type(w)
assert_identical(v, w)
assert v.dtype == w.dtype
if self.cls is Variable:
if deep:
assert source_ndarray(v.values) is not source_ndarray(w.values)
else:
assert source_ndarray(v.values) is source_ndarray(w.values)
assert_identical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
)
v = self.cls("x", midx)
for deep in [True, False]:
w = v.copy(deep=deep)
assert isinstance(w._data, PandasIndexAdapter)
assert isinstance(w.to_index(), pd.MultiIndex)
assert_array_equal(v._data.array, w._data.array)
def test_copy_with_data(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = np.array([[2.5, 5.0], [7.1, 43]])
actual = orig.copy(data=new_data)
expected = orig.copy()
expected.data = new_data
assert_identical(expected, actual)
def test_copy_with_data_errors(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = [2.5, 5.0]
with raises_regex(ValueError, "must match shape of object"):
orig.copy(data=new_data)
def test_copy_index_with_data(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 10)
actual = orig.copy(data=new_data)
expected = IndexVariable("x", np.arange(5, 10))
assert_identical(expected, actual)
def test_copy_index_with_data_errors(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 20)
with raises_regex(ValueError, "must match shape of object"):
orig.copy(data=new_data)
with raises_regex(ValueError, "Cannot assign to the .data"):
orig.data = new_data
with raises_regex(ValueError, "Cannot assign to the .values"):
orig.values = new_data
def test_replace(self):
var = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
result = var._replace()
assert_identical(result, var)
new_data = np.arange(4).reshape(2, 2)
result = var._replace(data=new_data)
assert_array_equal(result.data, new_data)
def test_real_and_imag(self):
v = self.cls("x", np.arange(3) - 1j * np.arange(3), {"foo": "bar"})
expected_re = self.cls("x", np.arange(3), {"foo": "bar"})
assert_identical(v.real, expected_re)
expected_im = self.cls("x", -np.arange(3), {"foo": "bar"})
assert_identical(v.imag, expected_im)
expected_abs = self.cls("x", np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
assert_allclose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls("x", [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
assert_allclose(v.mean(), expected)
    def test_pandas_categorical_dtype(self):
data = pd.Categorical(np.arange(10, dtype="int64"))
v = self.cls("x", data)
print(v) # should not error
assert v.dtype == "int64"
def test_pandas_datetime64_with_tz(self):
data = pd.date_range(
start="2000-01-01",
tz=pytz.timezone("America/New_York"),
periods=10,
freq="1h",
)
v = self.cls("x", data)
print(v) # should not error
if "America/New_York" in str(data.dtype):
# pandas is new enough that it has datetime64 with timezone dtype
assert v.dtype == "object"
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list("abc"), [0, 1]])
v = self.cls("x", idx)
assert_identical(Variable((), ("a", 0)), v[0])
assert_identical(v, v[:])
def test_load(self):
array = self.cls("x", np.arange(5))
orig_data = array._data
copied = array.copy(deep=True)
if array.chunks is None:
array.load()
assert type(array._data) is type(orig_data)
assert type(copied._data) is type(orig_data)
assert_identical(array, copied)
def test_getitem_advanced(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
# orthogonal indexing
v_new = v[([0, 1], [1, 0])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]][:, [1, 0]])
v_new = v[[0, 1]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]])
# with mixed arguments
ind = Variable(["a"], [0, 1])
v_new = v[dict(x=[0, 1], y=ind)]
assert v_new.dims == ("x", "a")
assert_array_equal(v_new, v_data[[0, 1]][:, [0, 1]])
# boolean indexing
v_new = v[dict(x=[True, False], y=[False, True, False])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[0][1])
# with scalar variable
ind = Variable((), 2)
v_new = v[dict(y=ind)]
expected = v[dict(y=2)]
assert_array_equal(v_new, expected)
# with boolean variable with wrong shape
ind = np.array([True, False])
with raises_regex(IndexError, "Boolean array size 2 is "):
v[Variable(("a", "b"), [[0, 1]]), ind]
# boolean indexing with different dimension
ind = Variable(["a"], [True, False, False])
with raises_regex(IndexError, "Boolean indexer should be"):
v[dict(y=ind)]
def test_getitem_uint_1d(self):
# regression test for #1405
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[0])
def test_getitem_uint(self):
# regression test for #1405
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.uint64(0)]
assert_array_equal(v_new, v_data[0, :])
def test_getitem_0d_array(self):
# make sure 0d-np.array can be used as an indexer
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])[0]]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array(0)]
assert_array_equal(v_new, v_data[0])
v_new = v[Variable((), np.array(0))]
assert_array_equal(v_new, v_data[0])
def test_getitem_fancy(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
ind = Variable(["a", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
# It would be ok if indexed with the multi-dimensional array including
# the same name
ind = Variable(["x", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("x", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
ind = Variable(["a", "b"], [[0, 1, 2], [2, 1, 0]])
v_new = v[dict(y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, ([0, 1, 2], [2, 1, 0])])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=[1, 0], y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[[1, 0]][:, ind])
# along diagonal
ind = Variable(["a"], [0, 1])
v_new = v[ind, ind]
assert v_new.dims == ("a",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with integer
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=0, y=ind)]
assert v_new.dims == ("a", "b")
assert_array_equal(v_new[0], v_data[0][[0, 0]])
assert_array_equal(v_new[1], v_data[0][[1, 1]])
# with slice
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=slice(None), y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, [[0, 0], [1, 1]]])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], :])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None, 1))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], slice(None, 1)])
# slice matches explicit dimension
ind = Variable(["y"], [0, 1])
v_new = v[ind, :2]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with multiple slices
v = self.cls(["x", "y", "z"], [[[1, 2, 3], [4, 5, 6]]])
ind = Variable(["a", "b"], [[0]])
v_new = v[ind, :, :]
expected = Variable(["a", "b", "y", "z"], v.data[np.newaxis, ...])
assert_identical(v_new, expected)
v = Variable(["w", "x", "y", "z"], [[[[1, 2, 3], [4, 5, 6]]]])
ind = Variable(["y"], [0])
v_new = v[ind, :, 1:2, 2]
expected = Variable(["y", "x"], [[6]])
assert_identical(v_new, expected)
# slice and vector mixed indexing resulting in the same dimension
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1, 2])
v_new = v[:, ind]
expected = Variable(("x", "z"), np.zeros((3, 5)))
expected[0] = v.data[0, 0]
expected[1] = v.data[1, 1]
expected[2] = v.data[2, 2]
assert_identical(v_new, expected)
v_new = v[:, ind.data]
assert v_new.shape == (3, 3, 5)
def test_getitem_error(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
with raises_regex(IndexError, "labeled multi-"):
v[[[0, 1], [1, 2]]]
ind_x = Variable(["a"], [0, 1, 1])
ind_y = Variable(["a"], [0, 1])
with raises_regex(IndexError, "Dimensions of indexers "):
v[ind_x, ind_y]
ind = Variable(["a", "b"], [[True, False], [False, True]])
with raises_regex(IndexError, "2-dimensional boolean"):
v[dict(x=ind)]
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1])
with raises_regex(IndexError, "Dimensions of indexers mis"):
v[:, ind]
@pytest.mark.parametrize(
"mode",
[
"mean",
pytest.param(
"median",
marks=pytest.mark.xfail(reason="median is not implemented by Dask"),
),
pytest.param(
"reflect", marks=pytest.mark.xfail(reason="dask.array.pad bug")
),
"edge",
pytest.param(
"linear_ramp",
marks=pytest.mark.xfail(
reason="pint bug: https://github.com/hgrecco/pint/issues/1026"
),
),
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode=mode, **xr_arg)
expected = np.pad(data, np_arg, mode=mode)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(**xr_arg)
expected = np.pad(
np.array(v.data.astype(float)),
np_arg,
mode="constant",
constant_values=np.nan,
)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
# for the boolean array, we pad False
data = np.full_like(data, False, dtype=bool).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode="constant", constant_values=False, **xr_arg)
expected = np.pad(
np.array(v.data), np_arg, mode="constant", constant_values=False
)
assert_array_equal(actual, expected)
def test_rolling_window(self):
# Just a working test. See test_nputils for the algorithm validation
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for (d, w) in [("x", 3), ("y", 5)]:
v_rolling = v.rolling_window(d, w, d + "_window")
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
v_rolling = v.rolling_window(d, w, d + "_window", center=True)
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
# dask and numpy result should be the same
v_loaded = v.load().rolling_window(d, w, d + "_window", center=True)
assert_array_equal(v_rolling, v_loaded)
# numpy backend should not be over-written
if isinstance(v._data, np.ndarray):
with pytest.raises(ValueError):
v_loaded[0] = 1.0
class TestVariable(VariableSubclassobjects):
cls = staticmethod(Variable)
@pytest.fixture(autouse=True)
def setup(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(["time", "x"], self.d)
assert_array_equal(v.data, self.d)
assert_array_equal(v.values, self.d)
assert source_ndarray(v.values) is self.d
with pytest.raises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
assert source_ndarray(v.values) is d2
d3 = np.random.random((10, 3))
v.data = d3
assert source_ndarray(v.data) is d3
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
assert v.item() == 0
assert type(v.item()) is float
v = IndexVariable("x", np.arange(5))
assert 2 == v.searchsorted(2)
def test_datetime64_conversion_scalar(self):
expected = np.datetime64("2000-01-01", "ns")
for values in [
np.datetime64("2000-01-01"),
pd.Timestamp("2000-01-01T00"),
datetime(2000, 1, 1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("datetime64[ns]")
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, "ns")
for values in [
np.timedelta64(1, "D"),
pd.Timedelta("1 day"),
timedelta(days=1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("timedelta64[ns]")
def test_0d_str(self):
v = Variable([], "foo")
assert v.dtype == np.dtype("U3")
assert v.values == "foo"
v = Variable([], np.string_("foo"))
assert v.dtype == np.dtype("S3")
assert v.values == bytes("foo", "ascii")
def test_0d_datetime(self):
v = Variable([], pd.Timestamp("2000-01-01"))
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == np.datetime64("2000-01-01", "ns")
def test_0d_timedelta(self):
for td in [pd.to_timedelta("1s"), np.timedelta64(1, "s")]:
v = Variable([], td)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == np.timedelta64(10 ** 9, "ns")
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
v2 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
assert v1.equals(v2)
assert v1.identical(v2)
v3 = Variable(("dim1", "dim3"), data=d)
assert not v1.equals(v3)
v4 = Variable(("dim1", "dim2"), data=d)
assert v1.equals(v4)
assert not v1.identical(v4)
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
assert not v1.equals(v5)
assert not v1.equals(None)
assert not v1.equals(d)
assert not v1.identical(None)
assert not v1.identical(d)
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(("x"), [np.nan, np.nan])
assert v1.broadcast_equals(v2)
assert not v1.equals(v2)
assert not v1.identical(v2)
v3 = Variable(("x"), [np.nan])
assert v1.broadcast_equals(v3)
assert not v1.equals(v3)
assert not v1.identical(v3)
assert not v1.broadcast_equals(None)
v4 = Variable(("x"), [np.nan] * 3)
assert not v2.broadcast_equals(v4)
def test_no_conflicts(self):
v1 = Variable(("x"), [1, 2, np.nan, np.nan])
v2 = Variable(("x"), [np.nan, 2, 3, np.nan])
assert v1.no_conflicts(v2)
assert not v1.equals(v2)
assert not v1.broadcast_equals(v2)
assert not v1.identical(v2)
assert not v1.no_conflicts(None)
v3 = Variable(("y"), [np.nan, 2, 3, np.nan])
assert not v3.no_conflicts(v1)
d = np.array([1, 2, np.nan, np.nan])
assert not v1.no_conflicts(d)
assert not v2.no_conflicts(d)
v4 = Variable(("w", "x"), [d])
assert v1.no_conflicts(v4)
def test_as_variable(self):
data = np.arange(10)
expected = Variable("x", data)
expected_extra = Variable(
"x", data, attrs={"myattr": "val"}, encoding={"scale_factor": 1}
)
assert_identical(expected, as_variable(expected))
ds = Dataset({"x": expected})
var = as_variable(ds["x"]).to_base_variable()
assert_identical(expected, var)
assert not isinstance(ds["x"], Variable)
assert isinstance(as_variable(ds["x"]), Variable)
xarray_tuple = (
expected_extra.dims,
expected_extra.values,
expected_extra.attrs,
expected_extra.encoding,
)
assert_identical(expected_extra, as_variable(xarray_tuple))
with raises_regex(TypeError, "tuple of form"):
as_variable(tuple(data))
with raises_regex(ValueError, "tuple of form"): # GH1016
as_variable(("five", "six", "seven"))
with raises_regex(TypeError, "without an explicit list of dimensions"):
as_variable(data)
actual = as_variable(data, name="x")
assert_identical(expected.to_index_variable(), actual)
actual = as_variable(0)
expected = Variable([], 0)
assert_identical(expected, actual)
data = np.arange(9).reshape((3, 3))
expected = Variable(("x", "y"), data)
with raises_regex(ValueError, "without explicit dimension names"):
as_variable(data, name="x")
with raises_regex(ValueError, "has more than 1-dimension"):
as_variable(expected, name="x")
# test datetime, timedelta conversion
dt = np.array([datetime(1999, 1, 1) + timedelta(days=x) for x in range(10)])
assert as_variable(dt, "time").dtype.kind == "M"
td = np.array([timedelta(days=x) for x in range(10)])
assert as_variable(td, "time").dtype.kind == "m"
def test_repr(self):
v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
expected = dedent(
"""
<xarray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
"""
).strip()
assert expected == repr(v)
def test_repr_lazy_data(self):
v = Variable("x", LazilyOuterIndexedArray(np.arange(2e5)))
assert "200000 values with dtype" in repr(v)
assert isinstance(v._data, LazilyOuterIndexedArray)
def test_detect_indexer_type(self):
""" Tests indexer type was correctly detected. """
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
_, ind, _ = v._broadcast_indexes((0, 1))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, slice(0, 8, 2)))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, [0, 1]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], 1))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], [1, 2]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("y",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, vind))
assert type(ind) == indexing.VectorizedIndexer
vind = Variable(("a", "b"), [[0, 2], [1, 3]])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.VectorizedIndexer
def test_indexer_type(self):
# GH:issue:1688. Wrong indexer type induces NotImplementedError
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
def assert_indexer_type(key, object_type):
dims, index_tuple, new_order = v._broadcast_indexes(key)
assert isinstance(index_tuple, object_type)
# should return BasicIndexer
assert_indexer_type((0, 1), BasicIndexer)
assert_indexer_type((0, slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), (Variable([], 6))), BasicIndexer)
# should return OuterIndexer
assert_indexer_type(([0, 1], 1), OuterIndexer)
assert_indexer_type(([0, 1], [1, 2]), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), 1), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), slice(None, None)), OuterIndexer)
assert_indexer_type(
(Variable(("x"), [0, 1]), Variable(("y"), [0, 1])), OuterIndexer
)
# should return VectorizedIndexer
assert_indexer_type((Variable(("y"), [0, 1]), [0, 1]), VectorizedIndexer)
assert_indexer_type(
(Variable(("z"), [0, 1]), Variable(("z"), [0, 1])), VectorizedIndexer
)
assert_indexer_type(
(
Variable(("a", "b"), [[0, 1], [1, 2]]),
Variable(("a", "b"), [[0, 1], [1, 2]]),
),
VectorizedIndexer,
)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
# test slicing
assert_identical(v, v[:])
assert_identical(v, v[...])
assert_identical(Variable(["y"], data[0]), v[0])
assert_identical(Variable(["x"], data[:, 0]), v[:, 0])
assert_identical(Variable(["x", "y"], data[:3, :2]), v[:3, :2])
# test array indexing
x = Variable(["x"], np.arange(10))
y = Variable(["y"], np.arange(11))
assert_identical(v, v[x.values])
assert_identical(v, v[x])
assert_identical(v[:3], v[x < 3])
assert_identical(v[:, 3:], v[:, y >= 3])
assert_identical(v[:3, 3:], v[x < 3, y >= 3])
assert_identical(v[:3, :2], v[x[:3], y[:2]])
assert_identical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
assert_identical(Variable(["y"], data[n]), item)
with raises_regex(TypeError, "iteration over a 0-d"):
iter(Variable([], 0))
# test setting
v.values[:] = 0
assert np.all(v.values == 0)
# test orthogonal setting
v[range(10), range(11)] = 1
assert_array_equal(v.values, np.ones((10, 11)))
def test_getitem_basic(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
# int argument
v_new = v[0]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
# slice argument
v_new = v[:2]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[:2])
# list arguments
v_new = v[[0]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[0]])
v_new = v[[]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[]])
# dict arguments
v_new = v[dict(x=0)]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=slice(None))]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=1)]
assert v_new.dims == ()
assert_array_equal(v_new, v._data[0, 1])
v_new = v[dict(y=1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# tuple argument
v_new = v[(slice(None), 1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# test that we obtain a modifiable view when taking a 0d slice
v_new = v[0, 0]
v_new[...] += 99
assert_array_equal(v_new, v._data[0, 0])
def test_getitem_with_mask_2d_input(self):
v = Variable(("x", "y"), [[0, 1, 2], [3, 4, 5]])
assert_identical(
v._getitem_with_mask(([-1, 0], [1, -1])),
Variable(("x", "y"), [[np.nan, np.nan], [1, np.nan]]),
)
assert_identical(v._getitem_with_mask((slice(2), [0, 1, 2])), v)
def test_isel(self):
v = Variable(["time", "x"], self.d)
assert_identical(v.isel(time=slice(None)), v)
assert_identical(v.isel(time=0), v[0])
assert_identical(v.isel(time=slice(0, 3)), v[:3])
assert_identical(v.isel(x=0), v[:, 0])
assert_identical(v.isel(x=[0, 2]), v[:, [0, 2]])
assert_identical(v.isel(time=[]), v[[]])
with raises_regex(
ValueError,
r"dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0)
with pytest.warns(
UserWarning,
match=r"dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0, missing_dims="warn")
assert_identical(v, v.isel(not_a_dim=0, missing_dims="ignore"))
def test_index_0d_numpy_string(self):
# regression test to verify our work around for indexing 0d strings
v = Variable([], np.string_("asdf"))
assert_identical(v[()], v)
v = Variable([], np.unicode_("asdf"))
assert_identical(v[()], v)
def test_indexing_0d_unicode(self):
# regression test for GH568
actual = Variable(("x"), ["tmax"])[0][()]
expected = Variable((), "tmax")
assert_identical(actual, expected)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_shift(self, fill_value):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.shift(x=0))
assert v is not v.shift(x=0)
expected = Variable("x", [np.nan, np.nan, 1, 2, 3])
assert_identical(expected, v.shift(x=2))
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value_exp = np.nan
else:
fill_value_exp = fill_value
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4])
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
expected = Variable("x", [2, 3, 4, 5, fill_value_exp])
assert_identical(expected, v.shift(x=-1, fill_value=fill_value))
expected = Variable("x", [fill_value_exp] * 5)
assert_identical(expected, v.shift(x=5, fill_value=fill_value))
assert_identical(expected, v.shift(x=6, fill_value=fill_value))
with raises_regex(ValueError, "dimension"):
v.shift(z=0)
v = Variable("x", [1, 2, 3, 4, 5], {"foo": "bar"})
assert_identical(v, v.shift(x=0))
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4], {"foo": "bar"})
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
def test_shift2d(self):
v = Variable(("x", "y"), [[1, 2], [3, 4]])
expected = Variable(("x", "y"), [[np.nan, np.nan], [np.nan, 1]])
assert_identical(expected, v.shift(x=1, y=1))
def test_roll(self):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.roll(x=0))
assert v is not v.roll(x=0)
expected = Variable("x", [5, 1, 2, 3, 4])
assert_identical(expected, v.roll(x=1))
assert_identical(expected, v.roll(x=-4))
assert_identical(expected, v.roll(x=6))
expected = Variable("x", [4, 5, 1, 2, 3])
assert_identical(expected, v.roll(x=2))
assert_identical(expected, v.roll(x=-3))
with raises_regex(ValueError, "dimension"):
v.roll(z=0)
def test_roll_consistency(self):
v = Variable(("x", "y"), np.random.randn(5, 6))
for axis, dim in [(0, "x"), (1, "y")]:
for shift in [-3, 0, 1, 7, 11]:
expected = np.roll(v.values, shift, axis=axis)
actual = v.roll(**{dim: shift}).values
assert_array_equal(expected, actual)
def test_transpose(self):
v = Variable(["time", "x"], self.d)
v2 = Variable(["x", "time"], self.d.T)
assert_identical(v, v2.transpose())
assert_identical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(["a", "b", "c", "d"], x)
w2 = Variable(["d", "b", "c", "a"], np.einsum("abcd->dbca", x))
assert w2.shape == (5, 3, 4, 2)
assert_identical(w2, w.transpose("d", "b", "c", "a"))
assert_identical(w2, w.transpose("d", ..., "a"))
assert_identical(w2, w.transpose("d", "b", "c", ...))
assert_identical(w2, w.transpose(..., "b", "c", "a"))
assert_identical(w, w2.transpose("a", "b", "c", "d"))
w3 = Variable(["b", "c", "d", "a"], np.einsum("abcd->bcda", x))
assert_identical(w, w3.transpose("a", "b", "c", "d"))
def test_transpose_0d(self):
for value in [
3.5,
("a", 1),
np.datetime64("2000-01-01"),
np.timedelta64(1, "h"),
None,
object(),
]:
variable = Variable([], value)
actual = variable.transpose()
assert actual.identical(variable)
def test_squeeze(self):
v = Variable(["x", "y"], [[1]])
assert_identical(Variable([], 1), v.squeeze())
assert_identical(Variable(["y"], [1]), v.squeeze("x"))
assert_identical(Variable(["y"], [1]), v.squeeze(["x"]))
assert_identical(Variable(["x"], [1]), v.squeeze("y"))
assert_identical(Variable([], 1), v.squeeze(["x", "y"]))
v = Variable(["x", "y"], [[1, 2]])
assert_identical(Variable(["y"], [1, 2]), v.squeeze())
assert_identical(Variable(["y"], [1, 2]), v.squeeze("x"))
with raises_regex(ValueError, "cannot select a dimension"):
v.squeeze("y")
def test_get_axis_num(self):
v = Variable(["x", "y", "z"], np.random.randn(2, 3, 4))
assert v.get_axis_num("x") == 0
assert v.get_axis_num(["x"]) == (0,)
assert v.get_axis_num(["x", "y"]) == (0, 1)
assert v.get_axis_num(["z", "y", "x"]) == (2, 1, 0)
with raises_regex(ValueError, "not found in array dim"):
v.get_axis_num("foobar")
def test_set_dims(self):
v = Variable(["x"], [0, 1])
actual = v.set_dims(["x", "y"])
expected = Variable(["x", "y"], [[0], [1]])
assert_identical(actual, expected)
actual = v.set_dims(["y", "x"])
assert_identical(actual, expected.T)
actual = v.set_dims({"x": 2, "y": 2})
expected = Variable(["x", "y"], [[0, 0], [1, 1]])
assert_identical(actual, expected)
v = Variable(["foo"], [0, 1])
actual = v.set_dims("foo")
expected = v
assert_identical(actual, expected)
with raises_regex(ValueError, "must be a superset"):
v.set_dims(["z"])
def test_set_dims_object_dtype(self):
v = Variable([], ("a", 1))
actual = v.set_dims(("x",), (3,))
exp_values = np.empty((3,), dtype=object)
for i in range(3):
exp_values[i] = ("a", 1)
expected = Variable(["x"], exp_values)
assert actual.identical(expected)
def test_stack(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
actual = v.stack(z=("x", "y"))
expected = Variable("z", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=("x",))
expected = Variable(("y", "z"), v.data.T, v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=())
assert_identical(actual, v)
actual = v.stack(X=("x",), Y=("y",)).transpose("X", "Y")
expected = Variable(("X", "Y"), v.data, v.attrs)
assert_identical(actual, expected)
def test_stack_errors(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
with raises_regex(ValueError, "invalid existing dim"):
v.stack(z=("x1",))
with raises_regex(ValueError, "cannot create a new dim"):
v.stack(x=("x",))
def test_unstack(self):
v = Variable("z", [0, 1, 2, 3], {"foo": "bar"})
actual = v.unstack(z={"x": 2, "y": 2})
expected = Variable(("x", "y"), [[0, 1], [2, 3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4, "y": 1})
expected = Variable(("x", "y"), [[0], [1], [2], [3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4})
expected = Variable("x", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
def test_unstack_errors(self):
v = Variable("z", [0, 1, 2, 3])
with raises_regex(ValueError, "invalid existing dim"):
v.unstack(foo={"x": 4})
with raises_regex(ValueError, "cannot create a new dim"):
v.stack(z=("z",))
with raises_regex(ValueError, "the product of the new dim"):
v.unstack(z={"x": 5})
def test_unstack_2d(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.unstack(y={"z": 2})
expected = Variable(["x", "z"], v.data)
assert_identical(actual, expected)
actual = v.unstack(x={"z": 2})
expected = Variable(["y", "z"], v.data.T)
assert_identical(actual, expected)
def test_stack_unstack_consistency(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.stack(z=("x", "y")).unstack(z={"x": 2, "y": 2})
assert_identical(actual, v)
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(["a", "b"], x)
# 1d to 2d broadcasting
assert_identical(v * v, Variable(["a", "b"], np.einsum("ab,ab->ab", x, x)))
assert_identical(v * v[0], Variable(["a", "b"], np.einsum("ab,b->ab", x, x[0])))
assert_identical(v[0] * v, Variable(["b", "a"], np.einsum("b,ab->ba", x[0], x)))
assert_identical(
v[0] * v[:, 0], Variable(["b", "a"], np.einsum("b,a->ba", x[0], x[:, 0]))
)
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(["b", "c", "d"], y)
assert_identical(
v * w, Variable(["a", "b", "c", "d"], np.einsum("ab,bcd->abcd", x, y))
)
assert_identical(
w * v, Variable(["b", "c", "d", "a"], np.einsum("bcd,ab->bcda", y, x))
)
assert_identical(
v * w[0], Variable(["a", "b", "c", "d"], np.einsum("ab,cd->abcd", x, y[0]))
)
def test_broadcasting_failures(self):
a = Variable(["x"], np.arange(10))
b = Variable(["x"], np.arange(5))
c = Variable(["x", "x"], np.arange(100).reshape(10, 10))
with raises_regex(ValueError, "mismatched lengths"):
a + b
with raises_regex(ValueError, "duplicate dimensions"):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(["x"], x)
v2 = v
v2 += 1
assert v is v2
# since we provided an ndarray for data, it is also modified in-place
assert source_ndarray(v.values) is x
assert_array_equal(v.values, np.arange(5) + 1)
with raises_regex(ValueError, "dimensions cannot change"):
v += Variable("y", np.arange(5))
def test_reduce(self):
v = Variable(["x", "y"], self.d, {"ignored": "attributes"})
assert_identical(v.reduce(np.std, "x"), Variable(["y"], self.d.std(axis=0)))
assert_identical(v.reduce(np.std, axis=0), v.reduce(np.std, dim="x"))
assert_identical(
v.reduce(np.std, ["y", "x"]), Variable([], self.d.std(axis=(0, 1)))
)
assert_identical(v.reduce(np.std), Variable([], self.d.std()))
assert_identical(
v.reduce(np.mean, "x").reduce(np.std, "y"),
Variable([], self.d.mean(axis=0).std()),
)
assert_allclose(v.mean("x"), v.reduce(np.mean, "x"))
with raises_regex(ValueError, "cannot supply both"):
v.mean(dim="x", axis=0)
with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
v.mean(dim="x", allow_lazy=True)
with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
v.mean(dim="x", allow_lazy=False)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize(
"axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]])
)
def test_quantile(self, q, axis, dim, skipna):
v = Variable(["x", "y"], self.d)
actual = v.quantile(q, dim=dim, skipna=skipna)
_percentile_func = np.nanpercentile if skipna else np.percentile
expected = _percentile_func(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize("axis, dim", [[1, "y"], [[1], ["y"]]])
def test_quantile_dask(self, q, axis, dim):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
actual = v.quantile(q, dim=dim)
assert isinstance(actual.data, dask_array_type)
expected = np.nanpercentile(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
def test_quantile_chunked_dim_error(self):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
with raises_regex(ValueError, "dimension 'x'"):
v.quantile(0.5, dim="x")
@pytest.mark.parametrize("q", [-0.1, 1.1, [2], [0.25, 2]])
def test_quantile_out_of_bounds(self, q):
v = Variable(["x", "y"], self.d)
# escape special characters
with raises_regex(ValueError, r"Quantiles must be in the range \[0, 1\]"):
v.quantile(q, dim="x")
@requires_dask
@requires_bottleneck
def test_rank_dask_raises(self):
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0]).chunk(2)
with raises_regex(TypeError, "arrays stored as dask"):
v.rank("x")
@requires_bottleneck
def test_rank(self):
import bottleneck as bn
# floats
v = Variable(["x", "y"], [[3, 4, np.nan, 1]])
expect_0 = bn.nanrankdata(v.data, axis=0)
expect_1 = bn.nanrankdata(v.data, axis=1)
np.testing.assert_allclose(v.rank("x").values, expect_0)
np.testing.assert_allclose(v.rank("y").values, expect_1)
# int
v = Variable(["x"], [3, 2, 1])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# str
v = Variable(["x"], ["c", "b", "a"])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# pct
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0])
v_expect = Variable(["x"], [0.75, 0.25, np.nan, 0.5, 1.0])
assert_equal(v.rank("x", pct=True), v_expect)
# invalid dim
with raises_regex(ValueError, "not found"):
v.rank("y")
def test_big_endian_reduce(self):
# regression test for GH489
data = np.ones(5, dtype=">f4")
v = Variable(["x"], data)
expected = Variable([], 5)
assert_identical(expected, v.sum())
def test_reduce_funcs(self):
v = Variable("x", np.array([1, np.nan, 2, 3]))
assert_identical(v.mean(), Variable([], 2))
assert_identical(v.mean(skipna=True), Variable([], 2))
assert_identical(v.mean(skipna=False), Variable([], np.nan))
assert_identical(np.mean(v), Variable([], 2))
assert_identical(v.prod(), Variable([], 6))
assert_identical(v.cumsum(axis=0), Variable("x", np.array([1, 1, 3, 6])))
assert_identical(v.cumprod(axis=0), Variable("x", np.array([1, 1, 2, 6])))
assert_identical(v.var(), Variable([], 2.0 / 3))
assert_identical(v.median(), Variable([], 2))
v = Variable("x", [True, False, False])
assert_identical(v.any(), Variable([], True))
assert_identical(v.all(dim="x"), Variable([], False))
v = Variable("t", pd.date_range("2000-01-01", periods=3))
assert v.argmax(skipna=True) == 2
assert_identical(v.max(), Variable([], pd.Timestamp("2000-01-03")))
def test_reduce_keepdims(self):
v = Variable(["x", "y"], self.d)
assert_identical(
v.mean(keepdims=True), Variable(v.dims, np.mean(self.d, keepdims=True))
)
assert_identical(
v.mean(dim="x", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=0, keepdims=True)),
)
assert_identical(
v.mean(dim="y", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=1, keepdims=True)),
)
assert_identical(
v.mean(dim=["y", "x"], keepdims=True),
Variable(v.dims, np.mean(self.d, axis=(1, 0), keepdims=True)),
)
v = Variable([], 1.0)
assert_identical(
v.mean(keepdims=True), Variable([], np.mean(v.data, keepdims=True))
)
@requires_dask
def test_reduce_keepdims_dask(self):
import dask.array
v = Variable(["x", "y"], self.d).chunk()
actual = v.mean(keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, keepdims=True))
assert_identical(actual, expected)
actual = v.mean(dim="y", keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, axis=1, keepdims=True))
assert_identical(actual, expected)
def test_reduce_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
v = Variable(["x", "y"], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
assert len(vm.attrs) == 0
assert vm.attrs == {}
# Test kept attrs
vm = v.mean(keep_attrs=True)
assert len(vm.attrs) == len(_attrs)
assert vm.attrs == _attrs
def test_binary_ops_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
a = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
b = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
# Test dropped attrs
d = a - b # just one operation
assert d.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
d = a - b
assert d.attrs == _attrs
def test_count(self):
expected = Variable([], 3)
actual = Variable(["x"], [1, 2, 3, np.nan]).count()
assert_identical(expected, actual)
v = Variable(["x"], np.array(["1", "2", "3", np.nan], dtype=object))
actual = v.count()
assert_identical(expected, actual)
actual = Variable(["x"], [True, False, True]).count()
assert_identical(expected, actual)
assert actual.dtype == int
expected = Variable(["x"], [2, 3])
actual = Variable(["x", "y"], [[1, 0, np.nan], [1, 1, 1]]).count("y")
assert_identical(expected, actual)
def test_setitem(self):
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[0, 1] = 1
assert v[0, 1] == 1
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[0, 1])] = 1
assert_array_equal(v[[0, 1]], np.ones_like(v[[0, 1]]))
# boolean indexing
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False])] = 1
assert_array_equal(v[0], np.ones_like(v[0]))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False], y=[False, True, False])] = 1
assert v[0, 1] == 1
def test_setitem_fancy(self):
# assignment which should work as np.ndarray does
def assert_assigned_2d(array, key_x, key_y, values):
expected = array.copy()
expected[key_x, key_y] = values
v = Variable(["x", "y"], array)
v[dict(x=key_x, y=key_y)] = values
assert_array_equal(expected, v)
# 1d vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable((), 0),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=slice(None),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
# 2d-vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=[0],
)
assert_assigned_2d(
np.random.randn(5, 4),
key_x=Variable(["a", "b"], [[0, 1], [2, 3]]),
key_y=Variable(["a", "b"], [[1, 0], [3, 3]]),
values=[2, 3],
)
# vindex with slice
v = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
ind = Variable(["a"], [0, 1])
v[dict(x=ind, z=ind)] = 0
expected = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
expected[0, :, 0] = 0
expected[1, :, 1] = 0
assert_identical(expected, v)
# dimension broadcast
v = Variable(["x", "y"], np.ones((3, 2)))
ind = Variable(["a", "b"], [[0, 1]])
v[ind, :] = 0
expected = Variable(["x", "y"], [[0, 0], [0, 0], [1, 1]])
assert_identical(expected, v)
with raises_regex(ValueError, "shape mismatch"):
v[ind, ind] = np.zeros((1, 2, 1))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] = Variable(["a", "y"], np.ones((2, 3), dtype=int) * 10)
assert_array_equal(v[0], np.ones_like(v[0]) * 10)
assert_array_equal(v[1], np.ones_like(v[1]) * 10)
assert v.dims == ("x", "y") # dimension should not change
# increment
v = Variable(["x", "y"], np.arange(6).reshape(3, 2))
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[1, 2], [3, 4], [4, 5]])
assert_identical(v, expected)
ind = Variable(["a"], [0, 0])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[2, 3], [3, 4], [4, 5]])
assert_identical(v, expected)
def test_coarsen(self):
v = self.cls(["x"], [0, 1, 2, 3, 4])
actual = v.coarsen({"x": 2}, boundary="pad", func="mean")
expected = self.cls(["x"], [0.5, 2.5, 4])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func="mean", boundary="pad", side="right")
expected = self.cls(["x"], [0, 1.5, 3.5])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func=np.mean, side="right", boundary="trim")
expected = self.cls(["x"], [1.5, 3.5])
assert_identical(actual, expected)
        # smoke test: just check that these window/func/side/boundary combinations run
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for windows, func, side, boundary in [
({"x": 2}, np.mean, "left", "trim"),
({"x": 2}, np.median, {"x": "left"}, "pad"),
({"x": 2, "y": 3}, np.max, "left", {"x": "pad", "y": "trim"}),
]:
v.coarsen(windows, func, boundary, side)
def test_coarsen_2d(self):
        # 2d-mean should be the same as successive 1d-means, since every
        # coarsening block has the same number of elements
v = self.cls(["x", "y"], np.arange(6 * 12).reshape(6, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean")
expected = v.coarsen({"x": 3}, func="mean").coarsen({"y": 4}, func="mean")
assert_equal(actual, expected)
v = self.cls(["x", "y"], np.arange(7 * 12).reshape(7, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = v.coarsen({"x": 3}, func="mean", boundary="trim").coarsen(
{"y": 4}, func="mean", boundary="trim"
)
assert_equal(actual, expected)
# if there is nan, the two should be different
v = self.cls(["x", "y"], 1.0 * np.arange(6 * 12).reshape(6, 12))
v[2, 4] = np.nan
v[3, 5] = np.nan
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = (
v.coarsen({"x": 3}, func="sum", boundary="trim").coarsen(
{"y": 4}, func="sum", boundary="trim"
)
/ 12
)
assert not actual.equals(expected)
        # adjust for the nan count: the sum-based version divides every block by 12,
        # but the two blocks containing a nan only have 11 valid values
expected[0, 1] *= 12 / 11
expected[1, 1] *= 12 / 11
assert_allclose(actual, expected)
v = self.cls(("x", "y"), np.arange(4 * 4, dtype=np.float32).reshape(4, 4))
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
expected = self.cls(("x", "y"), 4 * np.ones((2, 2)))
assert_equal(actual, expected)
v[0, 0] = np.nan
v[-1, -1] = np.nan
expected[0, 0] = 3
expected[-1, -1] = 3
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=False)
expected = self.cls(("x", "y"), [[np.nan, 18], [42, np.nan]])
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=True)
expected = self.cls(("x", "y"), [[10, 18], [42, 35]])
assert_equal(actual, expected)
# perhaps @pytest.mark.parametrize("operation", [f for f in duck_array_ops])
def test_coarsen_keep_attrs(self, operation="mean"):
_attrs = {"units": "test", "long_name": "testing"}
test_func = getattr(duck_array_ops, operation, None)
# Test dropped attrs
with set_options(keep_attrs=False):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == _attrs
@requires_dask
class TestVariableWithDask(VariableSubclassobjects):
cls = staticmethod(lambda *args: Variable(*args).chunk())
@pytest.mark.xfail
def test_0d_object_array_with_list(self):
super().test_0d_object_array_with_list()
@pytest.mark.xfail
def test_array_interface(self):
# dask array does not have `argsort`
super().test_array_interface()
@pytest.mark.xfail
def test_copy_index(self):
super().test_copy_index()
@pytest.mark.xfail
def test_eq_all_dtypes(self):
super().test_eq_all_dtypes()
def test_getitem_fancy(self):
super().test_getitem_fancy()
def test_getitem_1d_fancy(self):
super().test_getitem_1d_fancy()
def test_getitem_with_mask_nd_indexer(self):
import dask.array as da
v = Variable(["x"], da.arange(3, chunks=3))
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(
v._getitem_with_mask(indexer, fill_value=-1),
self.cls(("x", "y"), [[0, -1], [-1, 2]]),
)
@requires_sparse
class TestVariableWithSparse:
# TODO inherit VariableSubclassobjects to cover more tests
def test_as_sparse(self):
data = np.arange(12).reshape(3, 4)
var = Variable(("x", "y"), data)._as_sparse(fill_value=-1)
actual = var._to_dense()
assert_identical(var, actual)
class TestIndexVariable(VariableSubclassobjects):
cls = staticmethod(IndexVariable)
def test_init(self):
with raises_regex(ValueError, "must be 1-dimensional"):
IndexVariable((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = IndexVariable(["time"], data, {"foo": "bar"})
assert pd.Index(data, name="time").identical(v.to_index())
def test_multiindex_default_level_names(self):
midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
v = IndexVariable(["x"], midx, {"foo": "bar"})
assert v.to_index().names == ("x_level_0", "x_level_1")
def test_data(self):
x = IndexVariable("x", np.arange(3.0))
assert isinstance(x._data, PandasIndexAdapter)
assert isinstance(x.data, np.ndarray)
assert float == x.dtype
assert_array_equal(np.arange(3), x)
assert float == x.values.dtype
with raises_regex(TypeError, "cannot be modified"):
x[:] = 0
def test_name(self):
coord = IndexVariable("x", [10.0])
assert coord.name == "x"
with pytest.raises(AttributeError):
coord.name = "y"
def test_level_names(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
assert x.level_names == midx.names
assert IndexVariable("y", [10.0]).level_names is None
def test_get_level_variable(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
level_1 = IndexVariable("x", midx.get_level_values("level_1"))
assert_identical(x.get_level_variable("level_1"), level_1)
with raises_regex(ValueError, "has no MultiIndex"):
IndexVariable("y", [10.0]).get_level_variable("level")
def test_concat_periods(self):
periods = pd.period_range("2000-01-01", periods=10)
coords = [IndexVariable("t", periods[:5]), IndexVariable("t", periods[5:])]
expected = IndexVariable("t", periods)
actual = IndexVariable.concat(coords, dim="t")
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
positions = [list(range(5)), list(range(5, 10))]
actual = IndexVariable.concat(coords, dim="t", positions=positions)
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
def test_concat_multiindex(self):
idx = pd.MultiIndex.from_product([[0, 1, 2], ["a", "b"]])
coords = [IndexVariable("x", idx[:2]), IndexVariable("x", idx[2:])]
expected = IndexVariable("x", idx)
actual = IndexVariable.concat(coords, dim="x")
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.MultiIndex)
def test_coordinate_alias(self):
with pytest.warns(Warning, match="deprecated"):
x = Coordinate("x", [1, 2, 3])
assert isinstance(x, IndexVariable)
def test_datetime64(self):
# GH:1932 Make sure indexing keeps precision
t = np.array([1518418799999986560, 1518418799999996560], dtype="datetime64[ns]")
v = IndexVariable("t", t)
assert v[0].data == t[0]
# These tests make use of multi-dimensional variables, which are not valid
# IndexVariable objects:
@pytest.mark.xfail
def test_getitem_error(self):
super().test_getitem_error()
@pytest.mark.xfail
def test_getitem_advanced(self):
super().test_getitem_advanced()
@pytest.mark.xfail
def test_getitem_fancy(self):
super().test_getitem_fancy()
@pytest.mark.xfail
def test_getitem_uint(self):
super().test_getitem_fancy()
@pytest.mark.xfail
@pytest.mark.parametrize(
"mode",
[
"mean",
"median",
"reflect",
"edge",
"linear_ramp",
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
super().test_pad(mode, xr_arg, np_arg)
@pytest.mark.xfail
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
super().test_pad_constant_values(xr_arg, np_arg)
@pytest.mark.xfail
def test_rolling_window(self):
super().test_rolling_window()
@pytest.mark.xfail
def test_coarsen_2d(self):
super().test_coarsen_2d()
class TestAsCompatibleData:
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, LazilyOuterIndexedArray)
for t in types:
for data in [
np.arange(3),
pd.date_range("2000-01-01", periods=3),
pd.date_range("2000-01-01", periods=3).values,
]:
x = t(data)
assert source_ndarray(x) is source_ndarray(as_compatible_data(x))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = as_compatible_data(input_array)
assert_array_equal(np.asarray(input_array), actual)
assert np.ndarray == type(actual)
assert np.asarray(input_array).dtype == actual.dtype
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(int) == actual.dtype
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
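        # masked entries are expected to come back as NaN, which forces a float dtype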
expected = np.arange(5.0)
expected[-1] = np.nan
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(float) == actual.dtype
def test_datetime(self):
expected = np.datetime64("2000-01-01")
actual = as_compatible_data(expected)
assert expected == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01", "ns")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
assert expected is source_ndarray(np.asarray(actual))
expected = np.datetime64("2000-01-01", "ns")
actual = as_compatible_data(datetime(2000, 1, 1))
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
def test_full_like(self):
# For more thorough tests, see test_variable.py
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
expect = orig.copy(deep=True)
expect.values = [[2.0, 2.0], [2.0, 2.0]]
assert_identical(expect, full_like(orig, 2))
# override dtype
expect.values = [[True, True], [True, True]]
assert expect.dtype == bool
assert_identical(expect, full_like(orig, True, dtype=bool))
# raise error on non-scalar fill_value
with raises_regex(ValueError, "must be scalar"):
full_like(orig, [1.0, 2.0])
@requires_dask
def test_full_like_dask(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
).chunk(((1, 1), (2,)))
def check(actual, expect_dtype, expect_values):
assert actual.dtype == expect_dtype
assert actual.shape == orig.shape
assert actual.dims == orig.dims
assert actual.attrs == orig.attrs
assert actual.chunks == orig.chunks
assert_array_equal(actual.values, expect_values)
check(full_like(orig, 2), orig.dtype, np.full_like(orig.values, 2))
# override dtype
check(
full_like(orig, True, dtype=bool),
bool,
np.full_like(orig.values, True, dtype=bool),
)
# Check that there's no array stored inside dask
# (e.g. we didn't create a numpy array and then we chunked it!)
dsk = full_like(orig, 1).data.dask
for v in dsk.values():
if isinstance(v, tuple):
for vi in v:
assert not isinstance(vi, np.ndarray)
else:
assert not isinstance(v, np.ndarray)
def test_zeros_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(zeros_like(orig), full_like(orig, 0))
assert_identical(zeros_like(orig, dtype=int), full_like(orig, 0, dtype=int))
def test_ones_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(ones_like(orig), full_like(orig, 1))
assert_identical(ones_like(orig, dtype=int), full_like(orig, 1, dtype=int))
def test_unsupported_type(self):
# Non indexable type
class CustomArray(NDArrayMixin):
def __init__(self, array):
self.array = array
class CustomIndexable(CustomArray, indexing.ExplicitlyIndexed):
pass
array = CustomArray(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, np.ndarray) # should not be CustomArray
array = CustomIndexable(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, CustomIndexable)
def test_raise_no_warning_for_nan_in_binary_ops():
with pytest.warns(None) as record:
Variable("x", [1, 2, np.NaN]) > 0
assert len(record) == 0
class TestBackendIndexing:
""" Make sure all the array wrappers can be indexed. """
@pytest.fixture(autouse=True)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def check_orthogonal_indexing(self, v):
assert np.allclose(v.isel(x=[8, 3], y=[2, 1]), self.d[[8, 3]][:, [2, 1]])
def check_vectorized_indexing(self, v):
ind_x = Variable("z", [0, 2])
ind_y = Variable("z", [2, 1])
assert np.allclose(v.isel(x=ind_x, y=ind_y), self.d[ind_x, ind_y])
def test_NumpyIndexingAdapter(self):
v = Variable(dims=("x", "y"), data=NumpyIndexingAdapter(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
        # NumpyIndexingAdapter cannot be doubly wrapped
with raises_regex(TypeError, "NumpyIndexingAdapter only wraps "):
v = Variable(
dims=("x", "y"), data=NumpyIndexingAdapter(NumpyIndexingAdapter(self.d))
)
def test_LazilyOuterIndexedArray(self):
v = Variable(dims=("x", "y"), data=LazilyOuterIndexedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"),
data=LazilyOuterIndexedArray(LazilyOuterIndexedArray(self.d)),
)
self.check_orthogonal_indexing(v)
# hierarchical wrapping
v = Variable(
dims=("x", "y"), data=LazilyOuterIndexedArray(NumpyIndexingAdapter(self.d))
)
self.check_orthogonal_indexing(v)
def test_CopyOnWriteArray(self):
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"), data=CopyOnWriteArray(LazilyOuterIndexedArray(self.d))
)
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
def test_MemoryCachedArray(self):
v = Variable(dims=("x", "y"), data=MemoryCachedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(MemoryCachedArray(self.d)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
@requires_dask
def test_DaskIndexingAdapter(self):
import dask.array as da
da = da.asarray(self.d)
v = Variable(dims=("x", "y"), data=DaskIndexingAdapter(da))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(DaskIndexingAdapter(da)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
| apache-2.0 | -3,690,393,615,737,424,000 | 36.1105 | 88 | 0.53123 | false | 3.330737 | true | false | false |
bountyful/bountyfulcoins | bountyfulcoinsapp/migrations/0001_initial.py | 1 | 9626 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Link'
db.create_table(u'bountyfulcoinsapp_link', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')
(unique=True, max_length=200)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Link'])
# Adding model 'Bounty'
db.create_table(u'bountyfulcoinsapp_bounty', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')
(max_length=200)),
('user', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['auth.User'])),
('link', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['bountyfulcoinsapp.Link'])),
('amount', self.gf('django.db.models.fields.DecimalField')
(default=0.0, max_digits=20, decimal_places=2)),
('currency', self.gf('django.db.models.fields.CharField')
(default='BTC', max_length=15)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Bounty'])
# Adding model 'Tag'
db.create_table(u'bountyfulcoinsapp_tag', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')
(unique=True, max_length=64)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Tag'])
# Adding M2M table for field bounties on 'Tag'
m2m_table_name = db.shorten_name(u'bountyfulcoinsapp_tag_bounties')
db.create_table(m2m_table_name, (
('id', models.AutoField(
verbose_name='ID', primary_key=True, auto_created=True)),
('tag', models.ForeignKey(
orm[u'bountyfulcoinsapp.tag'], null=False)),
('bounty', models.ForeignKey(
orm[u'bountyfulcoinsapp.bounty'], null=False))
))
db.create_unique(m2m_table_name, ['tag_id', 'bounty_id'])
# Adding model 'SharedBounty'
db.create_table(u'bountyfulcoinsapp_sharedbounty', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('bounty', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['bountyfulcoinsapp.Bounty'], unique=True)),
('date', self.gf('django.db.models.fields.DateTimeField')
(auto_now_add=True, blank=True)),
('votes', self.gf(
'django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['SharedBounty'])
# Adding M2M table for field users_voted on 'SharedBounty'
m2m_table_name = db.shorten_name(
u'bountyfulcoinsapp_sharedbounty_users_voted')
db.create_table(m2m_table_name, (
('id', models.AutoField(
verbose_name='ID', primary_key=True, auto_created=True)),
('sharedbounty', models.ForeignKey(
orm[u'bountyfulcoinsapp.sharedbounty'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['sharedbounty_id', 'user_id'])
def backwards(self, orm):
# Deleting model 'Link'
db.delete_table(u'bountyfulcoinsapp_link')
# Deleting model 'Bounty'
db.delete_table(u'bountyfulcoinsapp_bounty')
# Deleting model 'Tag'
db.delete_table(u'bountyfulcoinsapp_tag')
# Removing M2M table for field bounties on 'Tag'
db.delete_table(db.shorten_name(u'bountyfulcoinsapp_tag_bounties'))
# Deleting model 'SharedBounty'
db.delete_table(u'bountyfulcoinsapp_sharedbounty')
# Removing M2M table for field users_voted on 'SharedBounty'
db.delete_table(
db.shorten_name(u'bountyfulcoinsapp_sharedbounty_users_voted'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bountyfulcoinsapp.bounty': {
'Meta': {'object_name': 'Bounty'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '20', 'decimal_places': '2'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'BTC'", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bountyfulcoinsapp.Link']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'bountyfulcoinsapp.link': {
'Meta': {'object_name': 'Link'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
u'bountyfulcoinsapp.sharedbounty': {
'Meta': {'object_name': 'SharedBounty'},
'bounty': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bountyfulcoinsapp.Bounty']", 'unique': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'users_voted': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'bountyfulcoinsapp.tag': {
'Meta': {'object_name': 'Tag'},
'bounties': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bountyfulcoinsapp.Bounty']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bountyfulcoinsapp']
| mit | -6,591,234,244,855,706,000 | 55.623529 | 195 | 0.566487 | false | 3.435403 | false | false | false |
HengeSense/website | apps/news/widgets.py | 1 | 2559 | ############################################################################
# This file is part of the Maui Web site.
#
# Copyright (c) 2012 Pier Luigi Fiorini
# Copyright (c) 2009-2010 Krzysztof Grodzicki
#
# Author(s):
# Pier Luigi Fiorini <[email protected]>
#
# $BEGIN_LICENSE:AGPL3+$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $END_LICENSE$
############################################################################
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
class TinyMCEEditor(forms.Textarea):
class Media:
js = (
"js/jquery-1.8.1.min.js",
"js/jquery.tinymce.js",
)
def __init__(self, language=None):
self.language = language or settings.LANGUAGE_CODE[:2]
super(TinyMCEEditor, self).__init__()
def render(self, name, value, attrs=None):
rendered = super(TinyMCEEditor, self).render(name, value, attrs)
context = {
"name": name,
"lang": self.language[:2],
"language": self.language,
"STATIC_URL": settings.STATIC_URL,
}
return rendered + mark_safe(render_to_string(
"admin/news/widgets/tinymce.html", context))
class WYMEditor(forms.Textarea):
class Media:
js = (
"js/jquery-1.8.1.min.js",
"cms/wymeditor/jquery.wymeditor.pack.js",
)
def __init__(self, language=None, attrs=None):
self.language = language or settings.LANGUAGE_CODE[:2]
self.attrs = {"class": "wymeditor"}
if attrs:
self.attrs.update(attrs)
super(WYMEditor, self).__init__(attrs)
def render(self, name, value, attrs=None):
rendered = super(WYMEditor, self).render(name, value, attrs)
context = {
"name": name,
"lang": self.language[:2],
"language": self.language,
"STATIC_URL": settings.STATIC_URL,
"page_link_wymeditor": 0,
"filebrowser": 0,
}
return rendered + mark_safe(render_to_string(
"admin/news/widgets/wymeditor.html", context))
| agpl-3.0 | 3,561,484,379,450,850,000 | 30.207317 | 76 | 0.664322 | false | 3.222922 | false | false | false |
karimbahgat/Pure-Python-Greiner-Hormann-Polygon-Clipping | GreinerHorman_Algo/KimKim/puremidpoints_v16(k&k,tryfixcrosschange).py | 1 | 36981 | # -*- coding: UTF-8 -*-
# Efficient Clipping of Arbitrary Polygons
#
# Copyright (c) 2011, 2012 Helder Correia <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# FINAL BEST IDEA, IMPLEMENTED NOW BUT DOESN'T WORK BECAUSE THE INTERSECTION STAGE IS NOT FINDING ALL INTERSECTIONS
# USE PREV AND NEXT MIDPOINT LOCATIONS FOR DETERMINING THE ENTRY FLAG
# NORMAL RULES, EXCEPT IN INTERSECTION MODE TURN OFF INTERSECTION FLAGS FOR OUT-ON-ON AND ON-ON-OUT, BECAUSE THOSE ARE JUST TANGENT AND NOT RELATED TO INSIDES
# FINALLY, WHEN TRAVERSING, AFTER COMPLETING ONE POLY, SEARCH FOR THE NEXT INTERSECTION THAT IS UNCHECKED IN BOTH CURRENT AND NEIGHBOUR
"""
# Greiner-Hormann Polygon Clipping with support for degenerates
This is a fork aimed at improving Helder Correia's pure-Python Greiner-Hormann implementation for polygon clipping, partly for educational purposes and partly to provide portable pure-Python clipping.
Status: Incomplete/unstable.
Fork author: Karim Bahgat <[email protected]>
-----------------------------------------------------------
# Efficient Clipping of Arbitrary Polygons
Based on the paper "Efficient Clipping of Arbitrary Polygons" by Günther
Greiner (greiner[at]informatik.uni-erlangen.de) and Kai Hormann
(hormann[at]informatik.tu-clausthal.de), ACM Transactions on Graphics
1998;17(2):71-83.
Available at: http://www.inf.usi.ch/hormann/papers/Greiner.1998.ECO.pdf
You should have received the README file along with this program.
If not, see <https://github.com/helderco/polyclip>
"""
DEBUG = False
class Vertex(object):
"""Node in a circular doubly linked list.
This class is almost exactly as described in the paper by Günther/Greiner.
"""
def __init__(self, vertex, alpha=0.0, intersect=False, entry=None, checked=False, degen=False):
if isinstance(vertex, Vertex):
vertex = (vertex.x, vertex.y)
# checked = True
self.x, self.y = vertex # point coordinates of the vertex
self.next = None # reference to the next vertex of the polygon
self.prev = None # reference to the previous vertex of the polygon
self.neighbour = None # reference to the corresponding intersection vertex in the other polygon
self.entry = entry # True if intersection is an entry point, False if exit
self.alpha = alpha # intersection point's relative distance from previous vertex
self.intersect = intersect # True if vertex is an intersection
self.checked = checked # True if the vertex has been checked (last phase)
self.couple = None
self.cross_change = None
@property
def xy(self):
return self.x, self.y
def isInside(self, poly):
if testLocation(self, poly) in ("in","on"):
return True
else: return False
def setChecked(self):
self.checked = True
if self.neighbour and not self.neighbour.checked:
self.neighbour.setChecked()
def copy(self):
copy = Vertex(self) # point coordinates of the vertex
copy.next = self.next # reference to the next vertex of the polygon
copy.prev = self.prev # reference to the previous vertex of the polygon
copy.neighbour = self.neighbour # reference to the corresponding intersection vertex in the other polygon
copy.entry = self.entry # True if intersection is an entry point, False if exit
copy.alpha = self.alpha # intersection point's relative distance from previous vertex
copy.intersect = self.intersect # True if vertex is an intersection
copy.couple = self.couple
copy.cross_change = self.cross_change
copy.checked = self.checked
return copy
def __repr__(self):
"""String representation of the vertex for debugging purposes."""
return "(%.2f, %.2f) <-> %s(%.2f, %.2f)%s <-> (%.2f, %.2f) %s" % (
self.prev.x, self.prev.y,
'i' if self.intersect else ' ',
self.x, self.y,
('e' if self.entry else 'x') if self.intersect else ' ',
self.next.x, self.next.y,
' !' if self.intersect and not self.checked else ''
)
class Polygon(object):
"""Manages a circular doubly linked list of Vertex objects that represents a polygon."""
first = None
def add(self, vertex):
"""Add a vertex object to the polygon (vertex is added at the 'end' of the list")."""
if not self.first:
self.first = vertex
self.first.next = vertex
self.first.prev = vertex
else:
next = self.first
prev = next.prev
next.prev = vertex
vertex.next = next
vertex.prev = prev
prev.next = vertex
def replace(self, old, new):
        # when replacing an old normal vertex with a new intersection vertex at the same xy,
        # only change the attributes in place
old.intersect = new.intersect
old.x,old.y = new.x,new.y
old.neighbour = new.neighbour
old.neighbour.neighbour = old
old.entry = new.entry
old.alpha = new.alpha
## new.next = old.next
## new.prev = old.prev
## if old == self.first:
## #print "replaced first", self.first, new
## self.first = new
## old.prev.next = new
## old.next.prev = new
def insert(self, vertex, start, end):
"""Insert and sort a vertex between a specified pair of vertices.
This function inserts a vertex (most likely an intersection point)
between two other vertices (start and end). These other vertices
cannot be intersections (that is, they must be actual vertices of
the original polygon). If there are multiple intersection points
between the two vertices, then the new vertex is inserted based on
its alpha value.
"""
if vertex.xy == start.xy:
copy = vertex.copy()
self.replace(start, copy)
return # dont process further
elif vertex.xy == end.xy:
copy = vertex.copy()
self.replace(end, copy)
return # dont process further
# position based on alpha
curr = start
while curr != end and curr.alpha < vertex.alpha:
curr = curr.next
if vertex.xy == curr.prev.xy:
## if vertex.xy == curr.xy: self.replace(curr, vertex)
## elif vertex.xy == curr.prev.xy: self.replace(curr, vertex.prev)
vertex.neighbour.neighbour = curr.prev
return # dont do it if same as a previously inserted intersection
if vertex.xy == curr.xy:
## if vertex.xy == curr.xy: self.replace(curr, vertex)
## elif vertex.xy == curr.prev.xy: self.replace(curr, vertex.prev)
vertex.neighbour.neighbour = curr
return # dont do it if same as a previously inserted intersection
vertex.next = curr
vertex.prev = curr.prev
vertex.next.prev = vertex
vertex.prev.next = vertex
#print "inserted",vertex
def next(self, v):
"""Return the next non intersecting vertex after the one specified."""
c = v
while c.intersect:
c = c.next
return c
@property
def first_intersect(self):
"""Return the first unchecked intersection point in the polygon."""
for v in self.iter():
if v.intersect and not v.checked:
break
return v
@property
def points(self):
"""Return the polygon's points as a list of tuples (ordered coordinates pair)."""
p = []
for v in self.iter():
p.append((v.x, v.y))
return p
def unprocessed(self):
"""Check if any unchecked intersections remain in the polygon."""
for v in self.iter():
if v.intersect and not v.checked:
yield True
def union(self, clip):
return self.clip(clip, False, False)
def intersect(self, clip):
return self.clip(clip, True, True)
def difference(self, clip):
return self.clip(clip, False, True)
def clip(self, clip, s_entry, c_entry):
"""Clip this polygon using another one as a clipper.
This is where the algorithm is executed. It allows you to make
a UNION, INTERSECT or DIFFERENCE operation between two polygons.
Given two polygons A, B the following operations may be performed:
A|B ... A OR B (Union of A and B)
A&B ... A AND B (Intersection of A and B)
A\B ... A - B
B\A ... B - A
The entry records store the direction the algorithm should take when
it arrives at that entry point in an intersection. Depending on the
operation requested, the direction is set as follows for entry points
(f=forward, b=backward; exit points are always set to the opposite):
Entry
A B
-----
A|B b b
A&B f f
A\B b f
B\A f b
f = True, b = False when stored in the entry record
"""
# detect clip mode
unionmode = not s_entry and not c_entry
intersectionmode = s_entry and c_entry
differencemode = not s_entry and c_entry
# prep by removing repeat of startpoint at end
first = self.first
last = first.prev
if last.x == first.x and last.y == first.y:
first.prev = last.prev
last.prev.next = first
first = clip.first
last = first.prev
if last.x == first.x and last.y == first.y:
first.prev = last.prev
last.prev.next = first
# TODO: maybe also remove repeat points anywhere?
# ...
# phase one - find intersections
# ------------------------------
anyintersection = False
s_intsecs = []
c_intsecs = []
for s in self.iter(): # for each vertex Si of subject polygon do
for c in clip.iter(): # for each vertex Cj of clip polygon do
try:
#print "find isect %s - %s and %s - %s" %(s.xy, self.next(s.next).xy, c.xy, clip.next(c.next).xy )
i, alphaS, alphaC = intersect_or_on(s, self.next(s.next),
c, clip.next(c.next))
iS = Vertex(i, alphaS, intersect=True, entry=False)
iC = Vertex(i, alphaC, intersect=True, entry=False)
iS.neighbour = iC
iC.neighbour = iS
s_intsecs.append( (iS, alphaS, s, self.next(s.next)) )
c_intsecs.append( (iC, alphaC, c, clip.next(c.next)) )
anyintersection = True
except TypeError:
pass # this simply means intersect() returned None
# insert intersections into originals
for iS,a,s,s_next in reversed(s_intsecs):
if a == 0:
self.replace(s, iS)
elif a == 1:
self.replace(s_next, iS)
else:
self.insert(iS, s, s_next)
for iC,a,c,c_next in reversed(c_intsecs):
if a == 0:
self.replace(c, iC)
elif a == 1:
self.replace(c_next, iC)
else:
clip.insert(iC, c, c_next)
#print "testing if insert was done correctly"
for s in self.iter():
#print s
pass
#print "and"
for c in clip.iter():
#print c
pass
        # phase one and a half - no intersections between subject and clip, so return the trivial result directly
# --------------------
def specialcase_insidetest():
resultpolys = []
if unionmode: # union
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so just return subject shell
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so just return clip shell
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
else:
#clip polygon is entirely outside subject, so return both
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif intersectionmode: # intersection
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so the intersection is only the clip polygon
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so the intersection is only the subject polygon
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
else:
#clip polygon is entirely outside subject, so no intersection to return
pass
elif differencemode: # difference
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so the difference is subject with clip as a hole
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
hole = Polygon()
for c in clip.iter():
hole.add(Vertex(c))
polytuple = (clipped, [hole])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so there is no difference
pass
else:
#clip polygon is entirely outside subject, so difference is simply the subject
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
# no need to continue so just return result
return resultpolys
if not anyintersection:
return specialcase_insidetest()
# phase two - identify entry/exit points
# --------------------------------------
# From K&K
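        # (K&K = Kim & Kim's degeneracy-handling variant of Greiner-Hormann: entry/exit
        #  flags are derived from where the midpoints of the previous and next edges fall
        #  relative to the other polygon)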
def mark_flags(poly, c, c_entry):
"c and c_entry are not actually the clip, can be for both s and c, just too lazy to change."
#print "intersection"
#print "\t",c
# intersection is degenerate, is the start/endpoint of a line
# so maybe delete intersection flag based on prev/next locations
prevloc = testLocation(c.prev, poly)
nextloc = testLocation(c.next, poly)
if prevloc == "on" or nextloc == "on":
prevmid = Vertex(((c.x+c.prev.x)/2.0,(c.y+c.prev.y)/2.0))
prevloc = testLocation(prevmid, poly)
nextmid = Vertex(((c.x+c.next.x)/2.0,(c.y+c.next.y)/2.0))
nextloc = testLocation(nextmid, poly)
if prevloc == "in" or nextloc == "in":
poly.anyinside = True
#print "\t %s -> degenintsec -> %s" %(prevloc,nextloc)
if prevloc == "out":
if nextloc == "out":
#just touching
c.entry = "en/ex" if c_entry else "ex/en"
elif nextloc == "in":
c.entry = "en" if c_entry else "ex"
elif nextloc == "on":
c.entry = "en" if c_entry else "ex"
elif prevloc == "in":
#union and difference should never go inside the other polygon
#so this should only happen for intersectmode...
if nextloc == "in":
#just touching
c.entry = "ex/en" if c_entry else "en/ex"
elif nextloc == "out":
c.entry = "ex" if c_entry else "en"
elif nextloc == "on":
c.entry = "ex" if c_entry else "en"
elif prevloc == "on":
if nextloc == "on":
c.entry = None
elif nextloc == "out":
c.entry = "ex" if c_entry else "en"
elif nextloc == "in":
c.entry = "en" if c_entry else "ex"
self.anyinside = False
# set clip
prevsingle = None
for c in clip.iter():
if c.intersect:
mark_flags(self, c, c_entry)
# set couple
if c.entry in ("ex","en"):
if prevsingle and c.entry == prevsingle.entry:
c.couple = prevsingle
prevsingle.couple = c
prevsingle = c
# set crosschange
# some modifications based on implementation in qt clipper source code
#if c.entry == "en/ex" == c.neighbour.entry or c.entry == "ex/en" == c.neighbour.entry:
if False: #c.entry == "en/ex" or c.entry == "ex/en":
print "Maybe crosschange..."
# tri1
#a,b,c = c.neighbour.prev, c.prev, c.neighbour.next
a,b,c = c.neighbour.next, c.prev, c.neighbour.prev
dir1 = 0.5 * (a.x * (b.y-c.y) +
b.x * (c.y-a.y) +
c.x * (a.y-b.y))
# tri2
#a,b,c = c.neighbour.prev, c.prev, c.next
a,b,c = c.next, c.prev, c.neighbour.prev
dir2 = 0.5 * (a.x * (b.y-c.y) +
b.x * (c.y-a.y) +
c.x * (a.y-b.y))
print dir1,dir2
#if dir1 < 0 != dir2 < 0: # different orientation
if (dir1 * dir2) < 0: # different orientation means at least one negative, making the results less than 0
print "CROSSCHANGE!!!"
c.cross_change = True
c.neighbour.cross_change = True # not sure if should set neighbour too
# maybe early abort
if not self.anyinside and intersectionmode:
return []
# what about perfect overlap???
# ...
if False: #DEBUG:
print "view clip entries"
for c in clip.iter():
print c, c.entry
# find first isect where both neighbours have valid flag
for c in clip.iter():
if c.entry:
s = c.neighbour
mark_flags(clip, s, s_entry)
if s.entry:
first_c = c
first_s = s
# print 777,s.entry
break
else:
return specialcase_insidetest()
#raise Exception("weird special case, no neighbours that both have flag left")
# autoset subj, if neighbour of first is different, then set all as opposite
# TODO: how deal with s_entry in case of different modes...?
print "view first"
print first_c, first_c.entry
print first_s, first_s.entry
if first_c.entry != first_s.entry: # and s_entry: # this is the behaviour for standard intersect mode, otherwise flip, hence the s_entry
for c in clip.iter():
if c.entry:
if c.entry == "en": c.neighbour.entry = "ex"
elif c.entry == "ex": c.neighbour.entry = "en"
elif c.entry == "en/ex": c.neighbour.entry = "ex/en"
elif c.entry == "ex/en": c.neighbour.entry = "en/ex"
# else set all same
else:
for c in clip.iter():
if c.entry:
c.neighbour.entry = c.entry
# set couple for subj (not sure if needed)
prevsingle = None
for s in self.iter():
if s.entry:
if s.entry in ("ex","en"):
if prevsingle and s.entry == prevsingle.entry:
s.couple = prevsingle
prevsingle.couple = s
prevsingle = s
if False: #DEBUG:
print "view subj entries"
for s in self.iter():
print s, s.entry
# phase three - construct a list of clipped polygons
# --------------------------------------------------
######
# Defs
def next_unprocessed(vert):
origvert = vert
while vert:
if vert.entry and not (vert.checked or vert.neighbour.checked):
#print "vert, found next unproc", vert, vert.checked, vert.neighbour.checked
if vert.couple:
# rule 1
if vert.couple.entry and vert.entry:
# rule 2
if vert.couple.entry == "en" and vert.entry == "en":
return vert.couple
elif vert.couple.entry == "ex" and vert.entry == "ex":
return vert
# rule 3
else:
return vert
vert = vert.next
if vert == origvert:
# if returned to first, return None
return None
def DeleteFlag1(cur, stat):
if cur.entry == "en/ex":
cur.entry = None
if cur.cross_change:
if stat == "D3":
return "D3"
else:
return "D4"
if stat == "D3":
return "D4"
else:
return "D3"
if cur.entry == "ex/en":
if stat == "D3":
cur.entry = "en"
return "D2"
else:
cur.entry = "ex"
return "D1"
if cur.entry == "en":
cur.entry = None
return "D1"
if cur.entry == "ex":
cur.entry = None
return "D2"
def DeleteFlag2(cur, prev, stat):
if cur.entry == "en/ex":
if stat == "D1":
cur.entry = "ex"
else:
cur.entry = "en"
if cur.cross_change:
if stat == "D1":
return "D4"
else:
return "D3"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "ex/en":
if stat == "D1":
cur.entry = "en"
else:
cur.entry = "ex"
if cur.cross_change:
if stat == "D1":
return "D4"
else:
return "D3"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "en":
cur.entry = None
if stat == "D1" and cur.couple and prev.couple == cur:
return "D1"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "ex":
cur.entry = None
if stat != "D1" and cur.couple and prev.couple == cur:
return "D2"
else:
if stat == "D1":
return "D3"
else:
return "D4"
def proceed(cur, stat):
cur.checked = True
if stat == "D1":
clipped.add(Vertex(cur))
return cur.next
elif stat == "D2":
clipped.add(Vertex(cur))
return cur.prev
else:
return cur.neighbour
####
resultpolys = []
self.first.checked = True
cur = prev = start = next_unprocessed(self.first)
while cur:
# each new polygon
print "new poly"
stat = DeleteFlag1(cur, "D3")
if DEBUG: print "v", cur, cur.entry, stat
clipped = Polygon()
cur = proceed(cur, stat)
# collect vertexes
while cur != start:
if DEBUG: print "v", cur, cur.entry, stat
if cur.entry:
if stat == "D1" or stat == "D2":
stat = DeleteFlag2(cur, prev, stat)
else:
stat = DeleteFlag1(cur, stat)
prev = cur
cur = proceed(cur, stat)
# return to first vertex
clipped.add(Vertex(clipped.first))
print clipped
resultpolys.append((clipped,[]))
cur = prev = start = next_unprocessed(self.first)
# finally, sort into exteriors and holes
for pindex,(polyext,polyholes) in enumerate(resultpolys):
for otherext,otherholes in resultpolys:
if polyext == otherext:
continue # don't compare to self
if polyext.first.isInside(otherext):
otherholes.append(polyext) #poly is within other so make into a hole
del resultpolys[pindex] #and delete poly from being an independent poly
return resultpolys
def __repr__(self):
"""String representation of the polygon for debugging purposes."""
count, out = 1, "\n"
for s in self.iter():
out += "%02d: %s\n" % (count, str(s))
count += 1
return out
def iter(self):
"""Iterator generator for this doubly linked list."""
s = self.first
while True:
yield s
s = s.next
if s == self.first:
return
def intersect_or_on(s1, s2, c1, c2):
"""Same as intersect(), except returns
intersection even if degenerate.
"""
den = float( (c2.y - c1.y) * (s2.x - s1.x) - (c2.x - c1.x) * (s2.y - s1.y) )
if not den:
return None
us = ((c2.x - c1.x) * (s1.y - c1.y) - (c2.y - c1.y) * (s1.x - c1.x)) / den
uc = ((s2.x - s1.x) * (s1.y - c1.y) - (s2.y - s1.y) * (s1.x - c1.x)) / den
if (0 <= us <= 1) and (0 <= uc <= 1):
        #subj and clip line intersect each other somewhere in the middle
#this includes the possibility of degenerates (edge intersections)
x = s1.x + us * (s2.x - s1.x)
y = s1.y + us * (s2.y - s1.y)
return (x, y), us, uc
else:
return None
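# Worked example (illustrative only): for s1=(0,0), s2=(2,2), c1=(0,2), c2=(2,0)
# the denominator is (0-2)*(2-0) - (2-0)*(2-0) = -8, giving
# us = ((2)*(-2) - (-2)*(0)) / -8 = 0.5 and uc = ((2)*(-2) - (2)*(0)) / -8 = 0.5,
# so the two segments cross at (1.0, 1.0) with us = uc = 0.5.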
def testLocation(point, polygon):
"""
    Effective scanline test for the location of a point vis-a-vis a polygon.
    Returns either "in", "on", or "out".
    Based on algorithm 7 from:
    Kai Hormann and Alexander Agathos,
"The point in polygon problem for arbitrary polygons".
Computational Geometry: Theory and Applications,
Volume 20 Issue 3, November 2001
"""
# begin
if polygon.first.y == point.y and polygon.first.x == point.x:
return "on" # vertex
w =0
for v in polygon.iter():
if v.next.y == point.y:
if v.next.x == point.x:
return "on" # vertex
else:
if v.y == point.y and (v.next.x > point.x) == (v.x < point.x):
return "on" # edge
# if crossing horizontal line
if (v.y < point.y and v.next.y >= point.y)\
or (v.y >= point.y and v.next.y < point.y):
if v.x >= point.x:
if v.next.x > point.x:
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
else:
det = (v.x - point.x) * (v.next.y - point.y) \
- (v.next.x - point.x) * (v.y - point.y)
if det == 0: return "on" # edge
# if right crossing
if (det > 0 and v.next.y > v.y)\
or (det < 0 and v.next.y < v.y):
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
else:
if v.next.x > point.x:
det = (v.x - point.x) * (v.next.y - point.y) \
- (v.next.x - point.x) * (v.y - point.y)
if det == 0: return "on" # edge
# if right crossing
if (det > 0 and v.next.y > v.y)\
or (det < 0 and v.next.y < v.y):
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
if (w % 2) != 0:
return "in"
else:
return "out"
def clip_polygon(subject, clipper, operation = 'difference'):
"""
Higher level function for clipping two polygons (from a list of points).
Since input polygons are lists of points, output is also in list format.
    Each polygon in the result list is a tuple of: (polygon exterior, list of polygon holes)
"""
Subject = Polygon()
Clipper = Polygon()
for s in subject:
Subject.add(Vertex(s))
for c in clipper:
Clipper.add(Vertex(c))
clipped = Clipper.difference(Subject)\
if operation == 'reversed-diff'\
else Subject.__getattribute__(operation)(Clipper)
clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]
return clipped
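# Minimal usage sketch (hypothetical polygons; the test cases below exercise
# the same call across more scenarios):
#
#     subject = [(0,0),(6,0),(6,6),(0,6),(0,0)]
#     clipper = [(4,4),(10,4),(10,10),(4,10),(4,4)]
#     for exterior, holes in clip_polygon(subject, clipper, 'intersect'):
#         print exterior, holes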
if __name__ == "__main__":
"""
Test and visualize various polygon overlap scenarios.
Visualization requires the pure-Python PyDraw library from
https://github.com/karimbahgat/PyDraw
"""
subjpoly = [(0,0),(6,0),(6,6),(0,6),(0,0)]
# normal intersections
testpolys_normal = {"simple overlap":
[(4,4),(10,4),(10,10),(4,10),(4,4)],
"jigzaw overlap":
[(1,4),(3,8),(5,4),(5,10),(1,10),(1,4)],
## "smaller, outside":
## [(7,7),(7,9),(9,9),(9,7),(7,7)],
## "smaller, inside":
## [(2,2),(2,4),(4,4),(4,2),(2,2)],
## "larger, covering all":
## [(-1,-1),(-1,7),(7,7),(7,-1),(-1,-1)],
## "larger, outside":
## [(-10,-10),(-10,-70),(-70,-70),(-70,-10),(-10,-10)]
}
# degenerate intersections
testpolys_degens = {"degenerate, starts on edge intersection and goes inside":
[(0,5),(6,4),(10,4),(10,10),(4,10),(0,5)],
## "degenerate, starts on edge intersection and goes outside":
## [(5,6),(5.2,5.5),(5,5.4),(4.8,5.5)],
"degenerate, hesitating to enter and exit":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1,6),(1,5)],
"degenerate, also multiple degens along shared line":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1.3,6),(1.6,6),(1,6),(1,5)],
"degenerate, back and forth on-out along shared line":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1.5,5.7),(1,6),(0,6),(1,5)]
}
# nextto/almost copy special cases
testpolys_nextto_almostsame = {"degenerate, perfect overlap":
[(0,0),(6,0),(6,6),(0,6),(0,0)],
"degenerate, partial inside overlap":
[(1,0),(6,0),(6,6),(1,6),(1,0)],
"degenerate, right next to eachother":
[(0,6),(6,6),(6,10),(0,10),(0,6)],
"degenerate, partial right next to eachother":
[(2,6),(6,6),(6,10),(2,10),(2,6)]
}
#run operation
import os
import time
import pydraw
DEBUG = False
# test geo
## def test_draw(testname, subjpoly, clippoly, mode):
## t = time.time()
## #print testname, mode
## resultpolys = clip_polygon(subjpoly,clippoly,mode)
## print "finished:",len(resultpolys),time.time()-t
## print "start",str(resultpolys)[:100]
## print "end",str(resultpolys)[-100:]
## crs = pydraw.CoordinateSystem([0,80,45,50])
## img = pydraw.Image(300,300, crs=crs)
## img.drawpolygon(subjpoly, fillcolor=(222,0,0,111))
## img.drawpolygon(clippoly, fillcolor=(0,222,0,111))
## for ext,holes in resultpolys:
## img.drawpolygon(ext,holes)
## img.drawgridticks(10,10)
## img.save("test_output/"+testname+"-"+mode+".png")
##
## import pygeoj
## world = pygeoj.load("cshapes.geo.json")
## norw = next(cntr.geometry.coordinates[0][0] for cntr in world if cntr.properties["CNTRY_NAME"] == "Norway")
## swed = next(cntr.geometry.coordinates[0][0] for cntr in world if cntr.properties["CNTRY_NAME"] == "Sweden")
## test_draw("norway-sweden", norw, swed, "difference")
##
## breakonpurpose
# test basics
def test_draw(testname, subjpoly, clippoly, mode):
t = time.time()
#print testname, mode
resultpolys = clip_polygon(subjpoly,clippoly,mode)
#print "finished:",resultpolys,time.time()-t
crs = pydraw.CoordinateSystem([-1,-1,11,11])
img = pydraw.Image(300,300, crs=crs)
img.drawpolygon(subjpoly, fillcolor=(222,0,0))
img.drawpolygon(clippoly, fillcolor=(0,222,0))
for ext,holes in resultpolys:
img.drawpolygon(ext,holes)
img.drawgridticks(1,1)
img.save("test_output/"+testname+"-"+mode+".png")
if not os.path.lexists("test_output"): os.mkdir("test_output")
for testname,testclip in testpolys_normal.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
for testname,testclip in testpolys_degens.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
for testname,testclip in testpolys_nextto_almostsame.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
| gpl-3.0 | 691,236,750,043,469,000 | 37.399792 | 191 | 0.495308 | false | 4.0485 | true | false | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyKDE4/kdecore/KEncodingDetector.py | 1 | 2735 | # encoding: utf-8
# module PyKDE4.kdecore
# from /usr/lib/python2.7/dist-packages/PyKDE4/kdecore.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtNetwork as __PyQt4_QtNetwork
class KEncodingDetector(): # skipped bases: <type 'sip.wrapper'>
# no doc
def analyze(self, *args, **kwargs): # real signature unknown
pass
def autoDetectLanguage(self, *args, **kwargs): # real signature unknown
pass
def decode(self, *args, **kwargs): # real signature unknown
pass
def decodedInvalidCharacters(self, *args, **kwargs): # real signature unknown
pass
def decoder(self, *args, **kwargs): # real signature unknown
pass
def decodeWithBuffering(self, *args, **kwargs): # real signature unknown
pass
def encoding(self, *args, **kwargs): # real signature unknown
pass
def encodingChoiceSource(self, *args, **kwargs): # real signature unknown
pass
def errorsIfUtf8(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def hasAutoDetectionForScript(self, *args, **kwargs): # real signature unknown
pass
def nameForScript(self, *args, **kwargs): # real signature unknown
pass
def processNull(self, *args, **kwargs): # real signature unknown
pass
def resetDecoder(self, *args, **kwargs): # real signature unknown
pass
def scriptForName(self, *args, **kwargs): # real signature unknown
pass
def setAutoDetectLanguage(self, *args, **kwargs): # real signature unknown
pass
def setEncoding(self, *args, **kwargs): # real signature unknown
pass
def visuallyOrdered(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
Arabic = 2
AutoDetectedEncoding = 1
AutoDetectScript = None # (!) real value is ''
Baltic = 3
BOM = 2
CentralEuropean = 4
ChineseSimplified = 5
ChineseTraditional = 6
Cyrillic = 7
DefaultEncoding = 0
EncodingChoiceSource = None # (!) real value is ''
EncodingFromHTTPHeader = 5
EncodingFromMetaTag = 4
EncodingFromXMLHeader = 3
Greek = 8
Hebrew = 9
Japanese = 10
Korean = 11
None = 0
NorthernSaami = 12
SemiautomaticDetection = 1
SouthEasternEurope = 13
Thai = 14
Turkish = 15
Unicode = 16
UserChosenEncoding = 6
WesternEuropean = 17
| gpl-2.0 | 1,633,415,797,107,800,600 | 25.553398 | 101 | 0.643876 | false | 3.946609 | false | false | false |
mph-/lcapy | lcapy/nexpr.py | 1 | 7914 | """This module provides the DiscreteTimeDomainExpression class to
represent discrete-time expressions.
Copyright 2020--2021 Michael Hayes, UCECE
"""
from __future__ import division
from .domains import DiscreteTimeDomain
from .sequence import Sequence
from .functions import exp
from .sym import j, oo, pi, fsym, oo
from .dsym import nsym, ksym, zsym, dt
from .ztransform import ztransform
from .dft import DFT
from .seqexpr import SequenceExpression
from .nseq import DiscreteTimeDomainSequence, nseq
from sympy import Sum, summation, limit, DiracDelta
__all__ = ('nexpr', )
class DiscreteTimeDomainExpression(DiscreteTimeDomain, SequenceExpression):
"""Discrete-time expression or symbol."""
var = nsym
seqcls = DiscreteTimeDomainSequence
def __init__(self, val, **assumptions):
check = assumptions.pop('check', True)
if 'integer' not in assumptions:
assumptions['real'] = True
super(DiscreteTimeDomainExpression, self).__init__(val, **assumptions)
expr = self.expr
if check and expr.has(zsym) and not expr.has(Sum):
raise ValueError(
'n-domain expression %s cannot depend on z' % expr)
if check and expr.has(ksym) and not expr.has(Sum):
raise ValueError(
'n-domain expression %s cannot depend on k' % expr)
def _mul_compatible_domains(self, x):
if self.domain == x.domain:
return True
return x.is_constant_domain
def _div_compatible_domains(self, x):
if self.domain == x.domain:
return True
return x.is_constant_domain
def as_expr(self):
return DiscreteTimeDomainExpression(self)
def differentiate(self):
"""First order difference."""
result = (self.expr - self.subs(n - 1).expr) / dt
return self.__class__(result, **self.assumptions)
def integrate(self):
"""First order integration."""
from .sym import symsymbol
from .utils import factor_const
from .extrafunctions import UnitImpulse
from .functions import u
# TODO, get SymPy to optimize this case.
expr = self.expr
const, expr = factor_const(expr, nsym)
if expr.is_Function and expr.func == UnitImpulse:
return dt * u(expr.args[0]) * const
msym = symsymbol('m', integer=True)
result = dt * summation(self.subs(msym).expr, (msym, -oo, nsym))
return self.__class__(result, **self.assumptions)
def ztransform(self, evaluate=True, **assumptions):
"""Determine one-sided z-transform."""
assumptions = self.assumptions.merge_and_infer(self, **assumptions)
result = ztransform(self.expr, self.var, zsym, evaluate)
return self.change(result, domain='Z', **assumptions)
def ZT(self, **assumptions):
return self.ztransform(**assumptions)
def plot(self, ni=None, **kwargs):
"""Plot the sequence. If `ni` is not specified, it defaults to the
range (-20, 20). `ni` can be a vector of specified sequence
        indices, a tuple specifying the range, or a constant specifying
the maximum value with the minimum value set to 0.
kwargs include:
axes - the plot axes to use otherwise a new figure is created
xlabel - the x-axis label
ylabel - the y-axis label
xscale - the x-axis scaling, say for plotting as ms
yscale - the y-axis scaling, say for plotting mV
in addition to those supported by the matplotlib plot command.
The plot axes are returned.
"""
if ni is None:
ni = (-20, 20)
from .plot import plot_sequence
return plot_sequence(self, ni, **kwargs)
def initial_value(self):
"""Determine value at n = 0."""
return self.subs(0)
def final_value(self):
"""Determine value at n = oo."""
return self.__class__(limit(self.expr, self.var, oo))
def DFT(self, N=None, evaluate=True):
if N is None:
from .sym import symsymbol
N = symsymbol('N', integer=True, positive=True)
result = DFT(self.expr, nsym, ksym, N, evaluate=evaluate)
return self.change(result, domain='discrete fourier')
    def delay(self, m):
"""Delay signal by m samples."""
return self.subs(n - m)
def extent(self, n1=-100, n2=100):
"""Determine extent of the signal.
For example, nexpr([1, 1]).extent() = 2
nexpr([1, 0, 1]).extent() = 3
nexpr([0, 1, 0, 1]).extent() = 3
This performs a search between n=n1 and n=n2."""
return self.seq((n1, n2)).extent()
def discrete_time_fourier_transform(self, var=None,
images=oo, **assumptions):
"""Convert to Fourier domain using discrete time Fourier transform.
Use `images = 0` to avoid the infinite number of spectral images.
"""
return self.DTFT(var, images, **assumptions)
def DTFT(self, var=None, images=oo, **assumptions):
"""Convert to Fourier domain using discrete time Fourier transform.
By default this returns the DTFT in terms of `f`. Use
`.DTFT(w)` to get the angular frequency form, `.DTFT(F)` to
get the normalised frequency form, or `.DTFT(W)` to get the
normalised angular frequency form.
Use `images = 0` to avoid the infinite number of spectral images.
"""
from .extrafunctions import UnitStep
from .symbols import f, omega, Omega, F
from .fexpr import fexpr
from .dtft import DTFT
if var is None:
var = f
if id(var) not in (id(f), id(F), id(omega), id(Omega)):
            raise ValueError('DTFT requires var to be f, F, omega, or Omega, not %s' % var)
dtft = DTFT(self.expr, self.var, fsym, images=images)
result = fexpr(dtft)(var)
result = result.simplify_dirac_delta()
result = result.simplify_heaviside()
result = result.simplify_rect()
# There is a bug in SymPy when simplifying Sum('X(n - m)', (m, -oo, oo))
# result = result.simplify()
result = result.cancel_terms()
return result
def norm_angular_fourier(self, **assumptions):
from .normomegaexpr import Omega
return self.DTFT()(Omega)
def difference_equation(self, inputsym='x', outputsym='y', form='iir'):
"""Create difference equation from impulse response.
`form` can be 'fir' or 'iir' ('direct form I').
"""
H = self.ZT()
return H.difference_equation(inputsym, outputsym, form)
def remove_condition(self):
"""Remove the piecewise condition from the expression."""
if not self.is_conditional:
return self
expr = self.expr
expr = expr.args[0].args[0]
return self.__class__(expr)
def nexpr(arg, **assumptions):
"""Create nExpr object. If `arg` is nsym return n"""
from .expr import Expr
from .seq import seq
if arg is nsym:
return n
if isinstance(arg, Expr):
if assumptions == {}:
return arg
return arg.__class__(arg, **assumptions)
if isinstance(arg, str) and arg.startswith('{'):
return nseq(arg)
from numpy import ndarray
if isinstance(arg, (list, ndarray)):
return DiscreteTimeDomainSequence(arg, var=n).as_impulses()
return DiscreteTimeDomainExpression(arg, **assumptions)
from .expressionclasses import expressionclasses
expressionclasses.register('discrete time', DiscreteTimeDomainExpression)
n = DiscreteTimeDomainExpression('n', integer=True)
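# Rough usage sketch (output formatting depends on Lcapy and is not shown):
#
#     x = nexpr([1, 2, 3])   # a list becomes a sum of weighted unit impulses
#     X = x.ZT()             # one-sided z-transform
#     y = x.delay(2)         # shift the signal by two samples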
| lgpl-2.1 | 6,243,116,347,179,922,000 | 30.15748 | 92 | 0.604372 | false | 4.003035 | false | false | false |
idiap/rgbd | Processing/Processor.py | 1 | 3672 | """
Copyright (c) 2014 Idiap Research Institute, http://www.idiap.ch/
Written by Kenneth Funes <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
from rgbd.Streaming.RGBDStreamer import RGBDStreamer
from rgbd.Rendering.RGBDViewer import RGBDViewer
from PySide import QtCore, QtGui
import sys
class Processor(QtCore.QObject):
"""
Class to handle a stream of RGB-D data and to display it
"""
newFrameSignal = QtCore.Signal()
def __init__(self, rendering=True):
super(Processor, self).__init__()
self.connected=False
self.viewer=None
self.streamer=None
self.frame_callback=None
self.rendering = rendering
self.count = 0
self.app = None
def __del__(self):
self.stop()
def createGUI(self):
# Creates the widget where to visualize the RGB-D data
self.viewer = RGBDViewer(self.app, render3D=self.rendering, close_callback=self.stop)
self.viewer.show()
self.connect(self.viewer.pauseButton, QtCore.SIGNAL("clicked()"), self.pause)
self.newFrameSignal.connect(self.processFrame)
def pause(self):
"""
Toggle the pause status
"""
self.streamer.pause(not self.streamer.paused)
    def run(self, source=0, calibrationFile=None, frame_callback=None):
# Sets the function to be called each time a new frame data is available
self.frame_callback=frame_callback
# Creates the RGB-D data streaming class
self.streamer=RGBDStreamer(frame_callback=self.newFrameSignal.emit, connection_callback=self.connectionUpdate, calibrate = True) # frame_callback=self.newFrame
self.streamer.connect(source, calibrationFile)
# Now create the Qt Application (basically for the Qt events loop)
self.app = QtGui.QApplication(sys.argv)
# Creates the necessary GUI
self.createGUI()
# Puts the streamer to run freely
self.streamer.pause(False)
# Runs the Qt Loop
sys.exit(self.app.exec_())
def stop(self):
"""
Stops the process of data generation
"""
if self.streamer is not None:
self.streamer.pause(True)
self.streamer.disconnect()
self.streamer.kill()
self.streamer=None
def connectionUpdate(self, state=False):
self.connected = state
def processFrame(self):
"""
        This function is called within the Qt event loop, in response to the newFrameSignal activation
"""
if self.streamer is None:
return
data = self.streamer.popFrame()
if data is not None:
self.frame, self.frameIndex = data
self.newFrameAvailable = False
if self.frame is not None:
if self.frame_callback is not None:
self.frame_callback(self.frame, self.frameIndex)
self.viewer.setNewData(self.frame, self.frameIndex)
else:
self.frameMesh, self.state = None, None
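# Minimal usage sketch (assumes an RGB-D source is available at index 0; the
# call blocks inside the Qt event loop until the viewer is closed):
#
#     proc = Processor(rendering=True)
#     proc.run(source=0, calibrationFile=None, frame_callback=None)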
| lgpl-3.0 | 6,560,452,943,276,382,000 | 35 | 168 | 0.659858 | false | 4.107383 | false | false | false |
jhartnett/ipCounter | src/ipCounter.py | 1 | 1779 | #!/bin/python
#/**************************************************************************
#* File: ipCounter.py
#*
#* This is a basic program to count the total number of IPs
#* in a given range. Input is a txt formatted file similar
#* to the sample provided
#*
#* This updated version uses Python to make it more agnostic.
#* Author: Hon1nbo
#***************************************************************************/
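# Example input file (hypothetical; one entry per line, either CIDR notation,
# a dashed range, or a single address):
#   192.168.0.0/24
#   10.0.0.0-10.0.0.255
#   172.16.1.5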
import math
import sys
fileName = sys.argv[1]
ipCount = 0
ipList = ""
# Open the file & read contents in
with open(fileName) as ipListFile:
ipList = ipListFile.read()
tmpTuple = ('not','\n','null')
tmpTupleSmall = ('not',' ','null')
tmpCIDR = 0
tmpTuple = ipList.split("\n")
entriesCount = len(tmpTuple)
x = 0
while (entriesCount - x) != 0:
tmpTupleSmall = tmpTuple[x].partition("/")
if tmpTupleSmall[2] != "":
tmpCount = math.pow(2, (32-int(tmpTupleSmall[2])))
print(tmpTuple[x],": ",int(tmpCount))
ipCount += tmpCount
else:
tmpTupleSmall = tmpTuple[x].partition("-");
if tmpTupleSmall[1] == "-":
startIP = tmpTupleSmall[0].split(".") # start IP
endIP = tmpTupleSmall[2].split(".") # end IP
tmpCount = 0
for octet in range (0,4):
# Compare each octet one by one based on iteration
difference = int(endIP[octet]) - int(startIP[octet])
# account for the inclusion of smaller number
# ex) 192.168.1.0-192.168.1.255
if difference != 0:
difference += 1
# 256 addresses in each octet, raise to power to do maths
tmpCount += (difference * pow(256, (3 - octet)))
print(tmpTuple[x],": ",int(tmpCount))
ipCount += tmpCount
else:
print(tmpTuple[x],": 1")
ipCount += 1
x += 1
print('iterated through ',int(x),' entries')
print('Total IPs Counted: ',int(ipCount))
| apache-2.0 | 33,192,525,953,773,740 | 23.708333 | 77 | 0.588533 | false | 3.240437 | false | false | false |
dunkhong/grr | grr/test_lib/db_test_lib.py | 1 | 2046 | #!/usr/bin/env python
"""Test utilities for RELDB-related testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import functools
import sys
import mock
from grr_response_core.lib.util import compatibility
from grr_response_server import data_store
from grr_response_server.databases import db as abstract_db
from grr_response_server.databases import db_test_mixin
from grr_response_server.databases import mem
from grr_response_server.databases import mysql_test
def TestDatabases(mysql=True):
"""Decorator that creates additional RELDB-enabled test classes."""
def _TestDatabasesDecorator(cls):
"""Decorator that creates additional RELDB-enabled test classes."""
module = sys.modules[cls.__module__]
cls_name = compatibility.GetName(cls)
# Prevent MRO issues caused by inheriting the same Mixin multiple times.
base_classes = ()
if not issubclass(cls, db_test_mixin.GlobalDatabaseTestMixin):
base_classes += (db_test_mixin.GlobalDatabaseTestMixin,)
if mysql:
db_test_cls_name = "{}_MySQLEnabled".format(cls_name)
db_test_cls = compatibility.MakeType(
name=db_test_cls_name,
base_classes=base_classes +
(mysql_test.MySQLDatabaseProviderMixin, cls),
namespace={})
setattr(module, db_test_cls_name, db_test_cls)
return cls
return _TestDatabasesDecorator
def WithDatabase(func):
"""A decorator for database-dependent test methods.
This decorator is intended for tests that need to access database in their
code. It will also augment the test function signature so that the database
object is provided and can be manipulated.
Args:
func: A test method to be decorated.
Returns:
A database-aware function.
"""
@functools.wraps(func)
def Wrapper(*args, **kwargs):
db = abstract_db.DatabaseValidationWrapper(mem.InMemoryDB())
with mock.patch.object(data_store, "REL_DB", db):
func(*(args + (db,)), **kwargs)
return Wrapper
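# Minimal usage sketch (hypothetical test case, not part of this module):
#
#   class ExampleTest(absltest.TestCase):
#
#     @db_test_lib.WithDatabase
#     def testSomething(self, db):  # `db` is injected by the decorator.
#       ...  # exercise code that goes through data_store.REL_DB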
| apache-2.0 | 5,616,811,757,687,994,000 | 29.537313 | 77 | 0.721896 | false | 3.972816 | true | false | false |
fja05680/pinkfish | examples/310.cryptocurrencies/strategy.py | 1 | 6833 | """
The SMA-ROC-portfolio strategy.
This is the SMA-ROC strategy applied to a portfolio.
SMA-ROC is a rate-of-change calculation smoothed by
a moving average.
This module allows us to examine this strategy and try different
periods, stop loss percentages, margins, and whether to use a regime filter
or not. We split up the total capital between the symbols in the
portfolio and allocate based on either equal weight or volatility
parity weight (inverse volatility).
"""
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from talib.abstract import *
import pinkfish as pf
# A custom indicator to use in this strategy.
def SMA_ROC(ts, mom_lookback=1, sma_timeperiod=20, price='close'):
""" Returns a series which is an SMA with of a daily MOM. """
mom = pf.MOMENTUM(ts, lookback=mom_lookback, time_frame='daily', price=price)
sma_mom = SMA(mom, timeperiod=sma_timeperiod)
return sma_mom
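# Illustrative use (assumes `ts` is a daily timeseries with a 'close' column,
# e.g. as fetched by pf.Portfolio.fetch_timeseries; not executed here):
#
#     ts['sma_roc'] = SMA_ROC(ts, mom_lookback=1, sma_timeperiod=20)
#     long_signal = ts['sma_roc'] > 0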
default_options = {
'use_adj' : False,
'use_cache' : True,
'stock_market_calendar' : False,
'stop_loss_pct' : 1.0,
'margin' : 1,
'lookback' : 1,
'sma_timeperiod': 20,
'sma_pct_band': 0,
'use_regime_filter' : True,
'use_vola_weight' : False
}
class Strategy:
def __init__(self, symbols, capital, start, end, options=default_options):
self.symbols = symbols
self.capital = capital
self.start = start
self.end = end
self.options = options.copy()
self.ts = None
self.rlog = None
self.tlog = None
self.dbal = None
self.stats = None
def _algo(self):
pf.TradeLog.cash = self.capital
pf.TradeLog.margin = self.options['margin']
# Create a stop_loss dict for each symbol.
stop_loss = {symbol:0 for symbol in self.portfolio.symbols}
# stop loss pct should range between 0 and 1, user may have
# expressed this as a percentage 0-100
if self.options['stop_loss_pct'] > 1:
self.options['stop_loss_pct'] /= 100
upper_band = self.options['sma_pct_band']/1000
lower_band = -self.options['sma_pct_band']/1000
# Loop though timeseries.
for i, row in enumerate(self.ts.itertuples()):
date = row.Index.to_pydatetime()
end_flag = pf.is_last_row(self.ts, i)
# Get the prices for this row, put in dict p.
p = self.portfolio.get_prices(row,
fields=['close', 'regime', 'sma_roc', 'vola'])
# Sum the inverse volatility for each row.
inverse_vola_sum = 0
for symbol in self.portfolio.symbols:
inverse_vola_sum += 1 / p[symbol]['vola']
# Loop though each symbol in portfolio.
for symbol in self.portfolio.symbols:
# Use variables to make code cleaner.
close = p[symbol]['close']
regime = p[symbol]['regime']
sma_roc = p[symbol]['sma_roc']
inverse_vola = 1 / p[symbol]['vola']
# Sell Logic
# First we check if an existing position in symbol should be sold
                #  - sell if sma_roc < 0
# - sell if price closes below stop loss
# - sell if end of data by adjusted the percent to zero
if symbol in self.portfolio.positions:
if sma_roc < lower_band or close < stop_loss[symbol] or end_flag:
if close < stop_loss[symbol]: print('STOP LOSS!!!')
self.portfolio.adjust_percent(date, close, 0, symbol, row)
# Buy Logic
# First we check to see if there is an existing position, if so do nothing
# - Buy if (regime > 0 or not use_regime_filter) and sma_roc > 0
else:
if (regime > 0 or not self.options['use_regime_filter']) and sma_roc > upper_band:
# Use volatility weight.
if self.options['use_vola_weight']:
weight = inverse_vola / inverse_vola_sum
# Use equal weight.
else:
weight = 1 / len(self.portfolio.symbols)
self.portfolio.adjust_percent(date, close, weight, symbol, row)
# Set stop loss
stop_loss[symbol] = (1-self.options['stop_loss_pct'])*close
# record daily balance
self.portfolio.record_daily_balance(date, row)
def run(self):
self.portfolio = pf.Portfolio()
self.ts = self.portfolio.fetch_timeseries(self.symbols, self.start, self.end,
fields=['close'], use_cache=self.options['use_cache'],
use_adj=self.options['use_adj'],
dir_name='cryptocurrencies',
stock_market_calendar=self.options['stock_market_calendar'])
# Add technical indicator: 200 sma regime filter for each symbol.
def _crossover(ts, ta_param, input_column):
return pf.CROSSOVER(ts, timeperiod_fast=1, timeperiod_slow=200,
price=input_column, prevday=False)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_crossover, ta_param=None,
output_column_suffix='regime', input_column_suffix='close')
# Add technical indicator: volatility.
def _volatility(ts, ta_param, input_column):
return pf.VOLATILITY(ts, price=input_column)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_volatility, ta_param=None,
output_column_suffix='vola', input_column_suffix='close')
# Add techincal indicator: X day SMA_ROC.
def _sma_roc(ts, ta_param, input_column):
return SMA_ROC(ts, mom_lookback=self.options['lookback'],
sma_timeperiod=self.options['sma_timeperiod'],
price=input_column)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_sma_roc, ta_param=None,
output_column_suffix='sma_roc', input_column_suffix='close')
# Finalize timeseries.
self.ts, self.start = self.portfolio.finalize_timeseries(self.ts, self.start)
# Init trade log objects.
self.portfolio.init_trade_logs(self.ts)
self._algo()
self._get_logs()
self._get_stats()
def _get_logs(self):
self.rlog, self.tlog, self.dbal = self.portfolio.get_logs()
def _get_stats(self):
self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital)
| mit | 4,744,775,123,107,647,000 | 36.543956 | 102 | 0.56915 | false | 3.825868 | false | false | false |
tasoc/photometry | notes/halo_shift.py | 1 | 2629 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Rasmus Handberg <[email protected]>
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import sqlite3
import os.path
#------------------------------------------------------------------------------
def mag2flux(mag):
"""
Convert from magnitude to flux using scaling relation from
aperture photometry. This is an estimate.
Parameters:
mag (float): Magnitude in TESS band.
Returns:
float: Corresponding flux value
"""
return 10**(-0.4*(mag - 20.54))
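# For example, the relation above gives mag2flux(20.54) == 1.0 and
# mag2flux(10.54) == 1e4, i.e. 10 magnitudes brighter corresponds to a
# factor of 10**4 in flux.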
if __name__ == '__main__':
pass
folder = r'C:\Users\au195407\Documents\tess_data_local\S01_DR01-2114872'
conn = sqlite3.connect(os.path.join(folder, 'todo.sqlite'))
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("SELECT todolist.starid,tmag,onedge,edgeflux FROM todolist INNER JOIN diagnostics ON todolist.priority=diagnostics.priority;")
results = cursor.fetchall()
starid = np.array([row['starid'] for row in results], dtype='int64')
tmag = np.array([row['tmag'] for row in results])
OnEdge = np.array([np.NaN if row['onedge'] is None else row['onedge'] for row in results])
EdgeFlux = np.array([np.NaN if row['edgeflux'] is None else row['edgeflux'] for row in results])
cursor.close()
conn.close()
print(tmag)
print(OnEdge)
print(EdgeFlux)
tmag_limit = 3.0
flux_limit = 1e-3
indx = (OnEdge > 0)
indx_halo = (tmag <= tmag_limit) & (OnEdge > 0) & (EdgeFlux/mag2flux(tmag) > flux_limit)
indx_spec = (starid == 382420379)
print(starid[indx_halo])
fig = plt.figure()
ax = fig.add_subplot(111)
plt.scatter(tmag[indx], OnEdge[indx], alpha=0.5)
plt.scatter(tmag[indx_halo], OnEdge[indx_halo], marker='x', c='r')
plt.xlim(xmax=tmag_limit)
plt.ylim(ymin=0)
ax.set_xlabel('Tmag')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(tmag[indx], EdgeFlux[indx], alpha=0.5)
ax.set_xlim(xmax=5.0)
#ax.set_ylim(ymin=0.0)
ax.set_yscale('log')
ax.set_xlabel('Tmag')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.scatter(tmag[indx], EdgeFlux[indx]/mag2flux(tmag[indx]), alpha=0.5)
plt.scatter(tmag[indx_halo], EdgeFlux[indx_halo]/mag2flux(tmag[indx_halo]), alpha=0.3, marker='x', c='r')
plt.scatter(tmag[indx_spec], EdgeFlux[indx_spec]/mag2flux(tmag[indx_spec]), alpha=0.3, marker='o', c='g', lw=2)
plt.plot([2.0, 6.0], [1e-3, 2e-2], 'r--')
plt.axhline(flux_limit, c='r', ls='--')
plt.axvline(tmag_limit, c='r', ls='--')
#plt.xlim(xmax=tmag_limit)
ax.set_ylim(ymin=1e-5, ymax=1)
ax.set_yscale('log')
ax.set_ylabel('Edge Flux / Expected Total Flux')
ax.set_xlabel('Tmag')
plt.show()
| gpl-3.0 | -1,066,074,733,466,643,000 | 26.103093 | 142 | 0.657284 | false | 2.569892 | false | false | false |
CacaoMovil/guia-de-cacao-django | cacao_app/config/common.py | 1 | 11423 | # -*- coding: utf-8 -*-
"""
Django settings for cacao_app project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join, dirname, abspath
from configurations import Configuration, values
BASE_DIR = dirname(dirname(abspath(__file__)))
class Common(Configuration):
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# redirects app
'django.contrib.redirects',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'suitlocale',
'suit',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms',
'allauth',
# 'allauth.account',
'sorl.thumbnail',
'envelope',
'solo',
'django_perseus',
'rest_framework',
'ckeditor',
'widget_tweaks',
'wkhtmltopdf',
'taggit',
'google_cse',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'pdf_kit',
'cacao',
'configuracion',
'event',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
# 'djangosecure.middleware.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# redirect middleware
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# MIGRATIONS CONFIGURATION
MIGRATION_MODULES = {
'sites': 'contrib.sites.migrations'
}
# END MIGRATIONS CONFIGURATION
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
# END SECRET CONFIGURATION
# FIXTURE CONFIGURATION
# See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
# END FIXTURE CONFIGURATION
# EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
# https://docs.djangoproject.com/en/1.10/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = values.SecretValue()
# END EMAIL CONFIGURATION
# MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = values.SingleNestedTupleValue((
('Alice', 'alice@localhost'),
('Bob', 'bob@localhost'),
))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# END MANAGER CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/cacao_app')
# END DATABASE CONFIGURATION
# CACHING
# Do this here because thanks to django-pylibmc-sasl and pylibmc
# memcacheify (used on heroku) is painful to install on windows.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# END CACHING
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-NI'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# END GENERAL CONFIGURATION
# TEMPLATE CONFIGURATION
# See:
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
        # Your stuff: custom template context processors go here
'context.guia_items',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# URL Configuration
ROOT_URLCONF = 'urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# End URL Configuration
# AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# END AUTHENTICATION CONFIGURATION
# Custom user app defaults
# Select the correct user model
LOGIN_REDIRECT_URL = "/"
LOGIN_URL = "account_login"
# END Custom user app defaults
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
# END SLUGLIFIER
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Django REST Framework hide API docs
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
# END LOGGING CONFIGURATION
# Your common stuff: Below this line define 3rd party library settings
SUIT_CONFIG = {
'ADMIN_NAME': 'Cacao',
'SHOW_REQUIRED_ASTERISK': True,
'CONFIRM_UNSAVED_CHANGES': True,
'MENU': (
{'app': 'cacao', 'label': 'Guias de Cacao', 'icon': 'icon-leaf'},
{'app': 'configuracion', 'icon': 'icon-cog'},
{'app': 'event', 'icon': 'icon-chevron-right'},
{'label': 'Archivos estaticos', 'icon': 'icon-globe', 'models': (
{'label': 'Generar archivos estaticos',
'url': '/admin/static-generator/'},
)},
{'app': 'auth', 'label': 'Usuarios y Grupos', 'icon': 'icon-lock'},
{'app': 'sites', 'icon': 'icon-chevron-right'},
{'app': 'redirects', 'icon': 'icon-repeat'},
),
# misc
'LIST_PER_PAGE': 15,
'HEADER_DATE_FORMAT': 'l, j, F Y',
}
# CKEditor
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': [
['Undo', 'Redo',
'-', 'Format', 'Bold', 'Italic', 'Underline', 'NumberedList', 'BulletedList', 'Blockquote',
'-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock',
'-', 'Link', 'Unlink', 'Scayt',
'-', 'Cut', 'Copy', 'PasteText',
'-', 'Source', 'Image', 'Iframe',
],
],
'width': 'auto',
'allowedContent': True,
'removePlugins': 'stylesheetparser',
'extraAllowedContent': 'iframe[*]',
},
}
# FB App ID
FB_APP_ID = values.SecretValue()
# GA APP ID
GA_APP_ID = values.SecretValue()
CX_CODE = values.SecretValue()
# used for the views delete folders and open the guide folder
PROJECT_DIR = dirname(dirname(abspath(__file__)))
PERSEUS_BUILD_DIR = '/tmp/perseus/build'
PERSEUS_SOURCE_DIR = '/tmp/perseus/guia'
# config for create pdf's
PDF_KIT_MODEL = 'cacao.Content'
| bsd-3-clause | 4,947,143,118,909,856,000 | 31.177465 | 108 | 0.617526 | false | 3.762516 | true | false | false |
VladimirShe/in100gram | in100gram/in100gram/settings.py | 1 | 2137 | """
Django settings for in100gram project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '22q%is3ng$=h^cx8(8%&(9)@7e9yi(m^lk#w5a#j8ym5!-mlua'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'in100gram.urls'
WSGI_APPLICATION = 'in100gram.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'mysql.connector.django',
'NAME': 'in100gram',
'USER': 'in100gram',
'PASSWORD': 'in100gram',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 | -7,077,503,071,250,312,000 | 23.563218 | 71 | 0.709406 | false | 3.252664 | false | false | false |
mtdx/ml-algorithms | neural-networks/cnn.py | 1 | 2621 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
n_classes = 10
batch_size = 128
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
keep_rate = 0.8
keep_prob = tf.placeholder(tf.float32)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def maxpool2d(x):
# size of window movement of window
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def convolutional_neural_network(x):
weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
'W_fc': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
'out': tf.Variable(tf.random_normal([1024, n_classes]))}
biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
'b_conv2': tf.Variable(tf.random_normal([64])),
'b_fc': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))}
x = tf.reshape(x, shape=[-1, 28, 28, 1])
conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])
conv1 = maxpool2d(conv1)
conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])
conv2 = maxpool2d(conv2)
fc = tf.reshape(conv2, [-1, 7 * 7 * 64])
fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])
fc = tf.nn.dropout(fc, keep_rate)
output = tf.matmul(fc, weights['out']) + biases['out']
return output
def train_neural_network(x):
prediction = convolutional_neural_network(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
hm_epochs = 10
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(hm_epochs):
epoch_loss = 0
for _ in range(int(mnist.train.num_examples / batch_size)):
epoch_x, epoch_y = mnist.train.next_batch(batch_size)
_, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
train_neural_network(x)
| mit | -4,813,126,551,715,110,000 | 33.038961 | 87 | 0.595193 | false | 3.019585 | false | false | false |
m4sth0/sauventory | tests/test_ranktransform.py | 1 | 4574 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016
# Author(s):
# Thomas Leppelt <[email protected]>
# This file is part of sauventory.
# Spatial Autocorrelated Uncertainty of Inventories
# sauventory is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# sauventory is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# sauventory comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
# This is free software, and you are welcome to redistribute it
# under certain conditions; type `show c' for details.
"""
This module perform unittests for the rank transformation module.
Testing the module with example from Iman & Conover 1981:
https://www.uio.no/studier/emner/matnat/math/STK4400/v05/undervisningsmateriale
/A%20distribution-free%20approach%20to%20rank%20correlation.pdf
"""
import numpy as np
import unittest
from sauventory import ranktransform
class RankTransformTest(unittest.TestCase):
def setUp(self):
        # Matrix R with n independent sampled columns k, R = kxn.
self.r = np.array([[1.534, 1.534, -1.534, -1.534, .489, -.319],
[-.887, -.489, .887, -.887, -.157, .674],
[-.489, .674, -.489, 1.150, 1.534, -.489],
[.887, 0.000, -.674, .319, 0.000, -1.534],
[1.150, -.319, .489, .674, .157, 1.150],
[.157, -1.534, -.887, -.674, -.319, .157],
[-1.150, -.674, -.157, .157, -1.534, -.157],
[0.000, -.887, .157, -.319, -.674, .887],
[.319, -.157, .674, .887, .574, 1.534],
[-.319, .157, -.319, -1.150, 1.150, -.887],
[-1.534, .887, 1.150, 1.534, -.489, -1.150],
[-.157, -1.150, 1.534, -.157, -1.150, -.674],
[.489, .489, -1.150, .489, -.887, 0.000],
[.674, .319, .319, 0.000, .887, .319],
[-.674, 1.150, 0.000, -.489, .319, .489]])
# Example target correlation matrix.
self.c_star = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, .75, -.70],
[0, 0, 0, .75, 1, -.95],
[0, 0, 0, -.70, -.95, 1]])
        # Result sample columns rearranged to the given correlation matrix.
self.res = np.array([[1.534, 1.534, -1.534, -1.534, -0.887, 0.489],
[-0.887, -0.489, 0.887, -0.887, -0.674, 1.15],
[-0.489, 0.674, -0.489, 1.15, 1.534, -1.534],
[0.887, 0., -0.674, 0.319, 0.319, -0.887],
[1.15, -0.319, 0.489, 0.674, 0.574, -0.319],
[0.157, -1.534, -0.887, -0.674, -0.489, 0.674],
[-1.15, -0.674, -0.157, 0.157, -1.534, 0.887],
[0., -0.887, 0.157, -0.319, -0.319, 1.534],
[0.319, -0.157, 0.674, 0.887, 1.15, -0.674],
[-0.319, 0.157, -0.319, -1.15, 0.157, -0.157],
[-1.534, 0.887, 1.15, 1.534, 0.887, -1.15],
[-0.157, -1.15, 1.534, -0.157, -1.15, 0.319],
[0.489, 0.489, -1.15, 0.489, -0.157, 0.],
[0.674, 0.319, 0.319, 0., 0.489, -0.489],
[-0.674, 1.15, 0., -0.489, 0., 0.157]])
def tearDown(self):
pass
def test_conover(self):
r_cor = ranktransform.transform_by_corrmat(self.r, self.c_star)
compare = r_cor == self.res
self.assertEqual(compare.all(), True)
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(RankTransformTest))
return suite
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| gpl-3.0 | -1,744,700,304,624,888,300 | 42.980769 | 79 | 0.49366 | false | 3.165398 | true | false | false |
T2DREAM/t2dream-portal | src/encoded/upgrade/file.py | 1 | 22524 | from snovault import upgrade_step
from pyramid.traversal import find_root
from datetime import datetime, time
@upgrade_step('file', '', '2')
def file_0_2(value, system):
# http://redmine.encodedcc.org/issues/1295
# http://redmine.encodedcc.org/issues/1307
if 'status' in value:
value['status'] = value['status'].lower()
@upgrade_step('file', '2', '3')
def file_2_3(value, system):
# http://redmine.encodedcc.org/issues/1572
file_format = value.get('file_format')
file_name = value['download_path'].rsplit('/', 1)[-1]
file_ext = file_name[file_name.find('.'):]
# REJECTIONS
if file_ext in ['.gtf.bigBed', '.pdf', '.pdf.gz', '.gff.bigBed', '.spikeins']:
value['status'] = 'deleted'
    # Find the miscategorized bedMethyls
if file_ext == '.bed.bigBed' and 'MethylRrbs' in value.get('submitted_file_name'):
value['file_format'] = 'bedMethyl'
if file_ext == '.bed.gz' and 'MethylRrbs' in value.get('submitted_file_name'):
value['file_format'] = 'bed_bedMethyl'
unknownDict = {'.CEL.gz': 'CEL',
'.bb': 'bedMethyl',
'.bed': 'bed',
'.bed.gz': 'bed',
'.bed.bigBed': 'bigBed',
'.bigBed': 'bigBed',
'.bed9': 'bedMethyl',
'.bed9.gz': 'bed_bedMethyl',
'.bedCluster.bigBed': 'bigBed',
'.bedLogR.bigBed': 'bedLogR',
'.bedRnaElements.bigBed': 'bedRnaElements',
'.bedRrbs.bigBed': 'bedMethyl',
'.broadPeak.gz': 'bed_broadPeak',
'.bigBed': 'bigBed',
'.hic': 'hic',
'.bedgraph': 'bedgraph',
'.csfasta.gz': 'csfasta',
'.csqual.gz': 'csqual',
'.fasta.gz': 'fasta',
'.gff.bigBed': 'bigBed',
'.gff.gz': 'gtf',
'.gp.bigBed': 'bigBed',
'.matrix.gz': 'tsv',
'.matrix.tgz': 'tar',
'.narrowPeak': 'bed_narrowPeak',
'.narrowPeak.gz': 'bed_narrowPeak',
'.pdf': 'tsv', # These are going to be obsolete
'.pdf.gz': 'tsv', # These are going to be obsolete
'.peaks.gz': 'tsv',
'.peptideMapping.bigBed': 'bigBed',
'.shortFrags.bigBed': 'bigBed',
'.sorted.bigBed': 'bigBed',
'.tab.gz': 'tsv',
'.tgz': 'tar',
'.txt': 'tsv',
'.xlsx': 'tsv', # These need to be converted to tsv
}
if file_format in ['unknown', 'customTrack']:
value['file_format'] = unknownDict[file_ext]
# http://redmine.encodedcc.org/issues/1429
context = system['context']
root = find_root(context)
dataset = root.get_by_uuid(value['dataset']).upgrade_properties()
dataset_status = dataset.get('status')
status = value.get('status')
if status == 'current':
if dataset_status == 'released':
value['status'] = 'released'
else:
value['status'] = 'in progress'
if status == 'obsolete':
if dataset_status in ['released', 'revoked']:
value['status'] = 'revoked'
else:
value['status'] = 'deleted'
# http://redmine.encodedcc.org/issues/568
output_type_dict = {
'': 'raw data',
'Alignments': 'alignments',
'bigBed': 'sites',
'bigWig': 'sites',
'bedgraph': 'sites',
'hic': 'sites',
'Clusters': 'clusters',
'Contigs': 'contigs',
'FastqRd1': 'reads',
'FastqRd2': 'reads',
'forebrain_enhancers': 'enhancers_forebrain',
'heart_enhancers': 'enhancers_heart',
'GreenIdat': 'idat green file',
'hotspot_broad_peaks': 'hotspots',
'hotspot_narrow_peaks': 'hotspots',
'hotspot_signal': 'hotspots',
'Hotspots': 'hotspots',
'Interactions': 'interactions',
'MinusRawSignal': 'raw minus signal',
'PlusRawSignal': 'raw plus signal',
'macs2_dnase_peaks': 'peaks',
'macs2_dnase_signal': 'signal',
'MinusSignal': 'minus signal',
'minusSignal': 'minus signal',
'MultiMinus': 'multi-read minus signal',
'MultiPlus': 'multi-read plus signal',
'MultiSignal': 'multi-read signal',
'MultiUnstranded': 'multi-read signal',
'RawData2': 'reads',
'RedIdat': 'idat red file',
'peak': 'peaks',
'PeakCalls': 'peaks',
'Peaks': 'peaks',
'PlusSignal': 'plus signal',
'plusSignal': 'plus signal',
'predicted_enhancers_heart': 'enhancers_heart',
'RawSignal': 'raw signal',
'RawData': 'raw data',
'rcc': 'raw data',
'Read': 'reads',
'read': 'reads',
'read1': 'reads',
'rejected_reads': 'rejected reads',
'RepPeaks': 'peaks',
'RepSignal': 'signal',
'Signal': 'signal',
'SimpleSignal': 'signal',
'Sites': 'sites',
'Spikeins': 'spike-ins',
'Spikes': 'spike-ins',
'Splices': 'splice junctions',
'uniqueReads': 'unique signal',
'UniqueSignal': 'unique signal',
'uniqueSignal': 'unique signal',
'UniqueMinus': 'unique minus signal',
'uniqueMinusSignal': 'unique minus signal',
'UniquePlus': 'unique plus signal',
'uniquePlusSignal': 'unique plus signal',
'UniqueUnstranded': 'unique signal',
'UnstrandedSignal': 'signal',
'dataset_used': 'enhancers',
'TRAINING_DATA_MOUSE_VISTA': 'enhancers',
'method_description': 'enhancers',
'unknown': 'enhancers',
'Protocol': 'raw data',
}
current_output_type = value['output_type']
if current_output_type in output_type_dict:
value['output_type'] = output_type_dict[current_output_type]
# Help the raw data problem
if value['output_type'] == 'raw data' and value['file_format'] == 'fastq':
value['output_type'] = 'reads'
@upgrade_step('file', '3', '4')
def file_3_4(value, system):
# http://redmine.encodedcc.org/issues/1714
context = system['context']
root = find_root(context)
dataset = root.get_by_uuid(value['dataset']).upgrade_properties()
if 'download_path' in value:
value.pop('download_path')
value['lab'] = dataset['lab']
value['award'] = dataset['award']
# EDW User
if value.get('submitted_by') == '0e04cd39-006b-4b4a-afb3-b6d76c4182ff':
value['lab'] = 'fb0af3d0-3a4c-4e96-b67a-f273fe527b04'
value['award'] = '8bafd685-aa17-43fe-95aa-37bc1c90074a'
@upgrade_step('file', '4', '5')
def file_4_5(value, system):
# http://redmine.encodedcc.org/issues/2566
# http://redmine.encodedcc.org/issues/2565
    # we need to remember bedRnaElements,
bed_files = {
'bed_bedLogR': 'bedLogR',
'bed_bedMethyl': 'bedMethyl',
'bed_broadPeak': 'broadPeak',
'bed_gappedPeak': 'gappedPeak',
'bed_narrowPeak': 'narrowPeak',
'bed_bedRnaElements': 'bedRnaElements'
}
bigBed_files = [
'bedLogR',
'bedMethyl',
'broadPeak',
'narrowPeak',
'gappedPeak',
'bedRnaElements'
]
current = value['file_format']
if current in ['bed', 'bigBed']:
value['file_format_type'] = 'unknown'
# we do not know what those formats were, wranglers will need to investigate
elif current in bigBed_files:
value['file_format_type'] = current
value['file_format'] = 'bigBed'
elif current in bed_files:
value['file_format_type'] = bed_files[current]
value['file_format'] = 'bed'
elif current in ['gff']:
value['file_format_type'] = 'unknown'
# all gffs todate were in gff3, but we wouldn't know without wranglers checking
# classify the peptide stuff
if value['output_type'] in ['mPepMapGcFt', 'mPepMapGcUnFt']:
value['file_format_type'] = 'modPepMap'
elif value['output_type'] in ['pepMapGcFt', 'pepMapGcUnFt']:
value['file_format_type'] = 'pepMap'
# http://redmine.encodedcc.org/issues/2565
output_mapping = {
# Category: Raw data
'idat green file': 'idat green channel',
'idat red file': 'idat red channel',
'reads': 'reads',
'rejected reads': 'rejected reads',
'rcc': 'reporter code counts',
'CEL': 'intensity values',
'raw data': 'raw data',
'alignments': 'alignments',
'transcriptome alignments': 'transcriptome alignments',
'spike-ins': 'spike-in alignments',
'multi-read minus signal': 'minus strand signal of multi-mapped reads',
'multi-read plus signal': 'plus strand signal of multi-mapped reads',
'multi-read signal': 'signal of multi-mapped reads',
'multi-read normalized signal': 'normalized signal of multi-mapped reads',
'raw minus signal': 'raw minus strand signal',
'raw plus signal': 'raw plus strand signal',
'raw signal': 'raw signal',
'raw normalized signal': 'raw normalized signal',
'unique minus signal': 'minus strand signal of unique reads',
'unique plus signal': 'plus strand signal of unique reads',
'unique signal': 'signal of unique reads',
'signal': 'signal',
'minus signal': 'minus strand signal',
'plus signal': 'plus strand signal',
'Base_Overlap_Signal': 'base overlap signal',
'PctSignal': 'percentage normalized signal',
'SumSignal': 'summed densities signal',
'WaveSignal': 'wavelet-smoothed signal',
'signal p-value': 'signal p-value',
'fold change over control': 'fold change over control',
'gene read counts': 'gene read counts',
'enrichment': 'enrichment',
'exon quantifications': 'exon quantifications',
'ExonsDeNovo': 'exon quantifications',
'ExonsEnsV65IAcuff': 'exon quantifications',
'ExonsGencV10': 'exon quantifications',
'ExonsGencV3c': 'exon quantifications',
'ExonsGencV7': 'exon quantifications',
'GeneDeNovo': 'gene quantifications',
'GeneEnsV65IAcuff': 'gene quantifications',
'GeneGencV10': 'gene quantifications',
'GeneGencV3c': 'gene quantifications',
'GeneGencV7': 'gene quantifications',
'genome quantifications': 'gene quantifications',
'library_fraction': 'library fraction',
'transcript quantifications': 'transcript quantifications',
'TranscriptDeNovo': 'transcript quantifications',
'TranscriptEnsV65IAcuff': 'transcript quantifications',
'TranscriptGencV10': 'transcript quantifications',
'TranscriptGencV3c': 'transcript quantifications',
'TranscriptGencV7': 'transcript quantifications',
'mPepMapGcFt': 'filtered modified peptide quantification',
'mPepMapGcUnFt': 'unfiltered modified peptide quantification',
'pepMapGcFt': 'filtered peptide quantification',
'pepMapGcUnFt': 'unfiltered peptide quantification',
'clusters': 'clusters',
'CNV': 'copy number variation',
'contigs': 'contigs',
'enhancer validation': 'enhancer validation',
'FiltTransfrags': 'filtered transcribed fragments',
'hotspots': 'hotspots',
'Junctions': 'splice junctions',
'interactions': 'long range chromatin interactions',
'Matrix': 'long range chromatin interactions',
'PrimerPeaks': 'long range chromatin interactions',
'sites': 'methylation state at CpG',
'methyl CG': 'methylation state at CpG',
'methyl CHG': 'methylation state at CHG',
'methyl CHH': 'methylation state at CHH',
'peaks': 'peaks',
'replicated peaks': 'replicated peaks',
'RbpAssocRna': 'RNA-binding protein associated mRNAs',
'splice junctions': 'splice junctions',
'Transfrags': 'transcribed fragments',
'TssGencV3c': 'transcription start sites',
'TssGencV7': 'transcription start sites',
'Valleys': 'valleys',
'Alignability': 'sequence alignability',
'Excludable': 'blacklisted regions',
'Uniqueness': 'sequence uniqueness',
'genome index': 'genome index',
'genome reference': 'genome reference',
'Primer': 'primer sequence',
'spike-in sequence': 'spike-in sequence',
'reference': 'reference',
'enhancers': 'predicted enhancers',
'enhancers_forebrain': 'predicted forebrain enhancers',
'enhancers_heart': 'predicted heart enhancers',
'enhancers_wholebrain': 'predicted whole brain enhancers',
'TssHmm': 'predicted transcription start sites',
'UniformlyProcessedPeakCalls': 'optimal idr thresholded peaks',
'Validation': 'validation',
'HMM': 'HMM predicted chromatin state'
}
old_output_type = value['output_type']
# The peptide mapping files from UCSC all assumed V10 hg19
if old_output_type in ['mPepMapGcFt', 'mPepMapGcUnFt', 'pepMapGcFt', 'pepMapGcUnFt']:
value['genome_annotation'] = 'V10'
value['assembly'] = 'hg19'
elif old_output_type in ['ExonsEnsV65IAcuff', 'GeneEnsV65IAcuff', 'TranscriptEnsV65IAcuff']:
value['genome_annotation'] = 'ENSEMBL V65'
elif old_output_type in ['ExonsGencV3c', 'GeneGencV3c', 'TranscriptGencV3c', 'TssGencV3c']:
value['genome_annotation'] = 'V3c'
elif old_output_type in ['ExonsGencV7', 'GeneGenc7', 'TranscriptGencV7', 'TssGencV7']:
value['genome_annotation'] = 'V7'
elif old_output_type in ['ExonsGencV10', 'GeneGenc10', 'TranscriptGencV10', 'TssGencV10']:
value['genome_annotation'] = 'V10'
elif old_output_type in ['spike-ins'] and value['file_format'] == 'fasta':
old_output_type = 'spike-in sequence'
elif old_output_type in ['raw data'] and value['file_format'] in ['fastq', 'csfasta', 'csqual', 'fasta']:
old_output_type = 'reads'
elif old_output_type in ['raw data'] and value['file_format'] in ['CEL', 'tar']:
old_output_type = 'CEL'
elif old_output_type in ['raw data'] and value['file_format'] in ['rcc']:
old_output_type = 'rcc'
elif old_output_type in ['raw data'] and value['lab'] == '/labs/timothy-hubbard/':
old_output_type = 'reference'
elif old_output_type in ['raw data']:
if 'These are protocol documents' in value.get('notes', ''):
old_output_type = 'reference'
elif old_output_type == 'sites' and value['file_format'] == 'tsv':
old_output_type = 'interactions'
elif old_output_type in ['Validation'] and value['file_format'] == '2bit':
old_output_type = 'genome reference'
value['output_type'] = output_mapping[old_output_type]
# label the lost bedRnaElements files #2940
bedRnaElements_files = [
'transcript quantifications',
'gene quantifications',
'exon quantifications'
]
if (
value['output_type'] in bedRnaElements_files
and value['status'] in ['deleted', 'replaced']
and value['file_format'] == 'bigBed'
and value['file_format_type'] == 'unknown'
):
value['file_format_type'] = 'bedRnaElements'
# Get the replicate information
if value.get('file_format') in ['fastq', 'fasta', 'csfasta']:
context = system['context']
root = find_root(context)
if 'replicate' in value:
replicate = root.get_by_uuid(value['replicate']).upgrade_properties()
if 'read_length' not in value:
value['read_length'] = replicate.get('read_length')
if value['read_length'] is None:
del value['read_length']
run_type_dict = {
True: 'paired-ended',
False: 'single-ended',
None: 'unknown'
}
if 'run_type' not in value:
value['run_type'] = run_type_dict[replicate.get('paired_ended')]
if value.get('paired_end') in ['2']:
value['run_type'] = 'paired-ended'
# Backfill content_md5sum #2683
if 'content_md5sum' not in value:
md5sum_content_md5sum = system['registry'].get('backfill_2683', {})
if value['md5sum'] in md5sum_content_md5sum:
value['content_md5sum'] = md5sum_content_md5sum[value['md5sum']]
@upgrade_step('file', '5', '6')
def file_5_6(value, system):
# http://redmine.encodedcc.org/issues/3019
import re
if value.get('output_type') in [
'minus strand signal of multi-mapped reads',
'plus strand signal of multi-mapped reads',
'signal of multi-mapped reads',
'normalized signal of multi-mapped reads'
]:
value['output_type'] = re.sub('multi-mapped', 'all', value['output_type'])
@upgrade_step('file', '6', '7')
def file_6_7(value, system):
# http://redmine.encodedcc.org/issues/3063
if 'file_format_specifications' in value:
value['file_format_specifications'] = list(set(value['file_format_specifications']))
if 'controlled_by' in value:
value['controlled_by'] = list(set(value['controlled_by']))
if 'derived_from' in value:
value['derived_from'] = list(set(value['derived_from']))
if 'supercedes' in value:
        value['supercedes'] = list(set(value['supercedes']))
if 'aliases' in value:
value['aliases'] = list(set(value['aliases']))
@upgrade_step('file', '7', '8')
def file_7_8(value, system):
return
@upgrade_step('file', '8', '9')
def file_8_9(value, system):
# http://redmine.encodedcc.org/issues/4183
if (value['file_format'] == 'fastq') and ('assembly' in value):
value.pop('assembly')
# http://redmine.encodedcc.org/issues/1859
if 'supercedes' in value:
value['supersedes'] = value['supercedes']
value.pop('supercedes', None)
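# Helper for the date upgrade below.
# Example: set_to_midnight('2016-01-05') -> '2016-01-05T00:00:00.000000+00:00'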
def set_to_midnight(date_string):
release_date = datetime.strptime(date_string, '%Y-%m-%d')
min_pub_date_time = datetime.combine(release_date, time.min)
return '{:%Y-%m-%dT%H:%M:%S.%f+00:00}'.format(min_pub_date_time)
@upgrade_step('file', '9', '10')
def file_9_10(value, system):
# http://redmine.encodedcc.org/issues/5021
# http://redmine.encodedcc.org/issues/4929
# http://redmine.encodedcc.org/issues/4927
# http://redmine.encodedcc.org/issues/4903
# http://redmine.encodedcc.org/issues/4904
date_created = value.get('date_created')
if date_created.find('T') == -1:
value['date_created'] = set_to_midnight(date_created)
# http://redmine.encodedcc.org/issues/4748
aliases = []
if 'aliases' in value and value['aliases']:
aliases = value['aliases']
else:
return
aliases_to_remove = []
for i in range(0, len(aliases)):
new_alias = ''
if 'roadmap-epigenomics' in aliases[i]:
if '||' in aliases[i]:
scrub_parts = aliases[i].split('||')
date_split = scrub_parts[1].split(' ')
date = "-".join([date_split[1].strip(),
date_split[2].strip(),
date_split[5].strip()])
scrubbed_list = [scrub_parts[0].strip(), date.strip(), scrub_parts[2].strip()]
if len(scrub_parts) == 4:
scrubbed_list.append(scrub_parts[3].strip())
new_alias = '_'.join(scrubbed_list)
parts = aliases[i].split(':') if not new_alias else new_alias.split(':')
namespace = parts[0]
if namespace in ['ucsc_encode_db', 'UCSC_encode_db', 'versionof']:
# Remove the alias with the bad namespace
aliases_to_remove.append(aliases[i])
namespace = 'encode'
if namespace in ['CGC']:
namespace = namespace.lower()
rest = '_'.join(parts[1:]).strip()
        # Remove or substitute bad characters and collapse repeated whitespace
        import re
        if any(c in rest for c in '"#@!$^&|~;`/\\'):
            rest = re.sub(r'[\"#@!$^&|~;`\/\\]', '', rest)
        rest = ' '.join(rest.split())
        if '%' in rest:
            rest = re.sub(r'%', 'pct', rest)
        if '[' in rest or '{' in rest:
            rest = re.sub(r'[\[{]', '(', rest)
        if ']' in rest or '}' in rest:
            rest = re.sub(r'[\]}]', ')', rest)
new_alias = ':'.join([namespace, rest])
if new_alias not in aliases:
aliases[i] = new_alias
if aliases_to_remove and aliases:
for a in aliases_to_remove:
if a in aliases:
aliases.remove(a)
@upgrade_step('file', '10', '11')
def file_10_11(value, system):
# http://redmine.encodedcc.org/issues/5081
# http://redmine.encodedcc.org/issues/5049
# http://redmine.encodedcc.org/issues/4924
if not value.get('no_file_available'):
value['no_file_available'] = False
@upgrade_step('file', '11', '12')
def file_11_12(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-3347
return
| mit | -7,386,508,491,949,481,000 | 38.93617 | 109 | 0.544752 | false | 3.615409 | false | false | false |
tensorflow/ranking | tensorflow_ranking/python/keras/canned/dnn.py | 1 | 5354 | # Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""DNN Ranking network in Keras."""
import tensorflow.compat.v2 as tf
from tensorflow_ranking.python.keras import network as network_lib
class DNNRankingNetwork(network_lib.UnivariateRankingNetwork):
"""Deep Neural Network (DNN) scoring based univariate ranking network."""
def __init__(self,
context_feature_columns=None,
example_feature_columns=None,
hidden_layer_dims=None,
activation=None,
use_batch_norm=True,
batch_norm_moment=0.999,
dropout=0.5,
name='dnn_ranking_network',
**kwargs):
"""Initializes an instance of DNN ranking network.
This network consists of feedforward linear units passed through a
non-linear
activation. The hidden size of the linear units and the activation are
specified by the user.
Args:
context_feature_columns: A dict containing all the context feature columns
used by the network. Keys are feature names, and values are instances of
classes derived from `_FeatureColumn`.
example_feature_columns: A dict containing all the example feature columns
used by the network. Keys are feature names, and values are instances of
classes derived from `_FeatureColumn`.
hidden_layer_dims: Iterable of number hidden units per layer. All layers
are fully connected. Ex. `[64, 32]` means first layer has 64 nodes and
second one has 32.
activation: Activation function applied to each layer. If `None`, will use
an identity activation, which is default behavior in Keras activations.
use_batch_norm: Whether to use batch normalization after each hidden
layer.
batch_norm_moment: Momentum for the moving average in batch normalization.
dropout: When not `None`, the probability we will drop out a given
coordinate.
name: name of Keras network.
**kwargs: keyword arguments.
Raises:
`ValueError` if `example_feature_columns` or `hidden_layer_dims` is empty.
"""
if not example_feature_columns or not hidden_layer_dims:
raise ValueError('example_feature_columns or hidden_layer_dims must not '
'be empty.')
super(DNNRankingNetwork, self).__init__(
context_feature_columns=context_feature_columns,
example_feature_columns=example_feature_columns,
name=name,
**kwargs)
self._hidden_layer_dims = [int(d) for d in hidden_layer_dims]
self._activation = activation
self._use_batch_norm = use_batch_norm
self._batch_norm_moment = batch_norm_moment
self._dropout = dropout
layers = []
if self._use_batch_norm:
layers.append(
tf.keras.layers.BatchNormalization(momentum=self._batch_norm_moment))
for _, layer_width in enumerate(self._hidden_layer_dims):
layers.append(tf.keras.layers.Dense(units=layer_width))
if self._use_batch_norm:
layers.append(
tf.keras.layers.BatchNormalization(
momentum=self._batch_norm_moment))
layers.append(tf.keras.layers.Activation(activation=self._activation))
layers.append(tf.keras.layers.Dropout(rate=self._dropout))
self._scoring_layers = layers
self._output_score_layer = tf.keras.layers.Dense(units=1)
def score(self, context_features=None, example_features=None, training=True):
"""Univariate scoring of context and one example to generate a score.
Args:
context_features: (dict) context feature names to 2D tensors of shape
[batch_size, ...].
example_features: (dict) example feature names to 2D tensors of shape
[batch_size, ...].
training: (bool) whether in training or inference mode.
Returns:
(tf.Tensor) A score tensor of shape [batch_size, 1].
"""
context_input = [
tf.keras.layers.Flatten()(context_features[name])
for name in sorted(self.context_feature_columns)
]
example_input = [
tf.keras.layers.Flatten()(example_features[name])
for name in sorted(self.example_feature_columns)
]
inputs = tf.concat(context_input + example_input, 1)
outputs = inputs
for layer in self._scoring_layers:
outputs = layer(outputs, training=training)
return self._output_score_layer(outputs, training=training)
def get_config(self):
config = super(DNNRankingNetwork, self).get_config()
config.update({
'hidden_layer_dims': self._hidden_layer_dims,
'activation': self._activation,
'use_batch_norm': self._use_batch_norm,
'batch_norm_moment': self._batch_norm_moment,
'dropout': self._dropout,
})
return config
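# Illustrative usage sketch added for documentation; it is not part of the
# original module. The feature-column names ('query_length', 'document_length')
# are assumptions made purely for this demo -- any numeric columns work.
def _example_dnn_ranking_network():
  context_cols = {
      'query_length': tf.feature_column.numeric_column('query_length')
  }
  example_cols = {
      'document_length': tf.feature_column.numeric_column('document_length')
  }
  # Two hidden layers (16 and 8 units) with ReLU activations and dropout.
  return DNNRankingNetwork(
      context_feature_columns=context_cols,
      example_feature_columns=example_cols,
      hidden_layer_dims=[16, 8],
      activation=tf.nn.relu,
      dropout=0.3)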
| apache-2.0 | -1,788,232,522,490,547,200 | 38.080292 | 80 | 0.675196 | false | 4.150388 | false | false | false |
berjc/aus-senate-audit | aus_senate_audit/audits/bayesian_audit.py | 1 | 5902 | # -*- coding: utf-8 -*-
""" Implements the Bayesian Audit. """
from collections import Counter
from itertools import chain
from random import gammavariate
from random import seed as set_seed
from time import time
def get_new_ballot_weights(election, r):
""" Returns new ballot weights for the given election.
The new ballot weights are constructed using Gamma Variates to draw from a Dirichlet distribution over existing
ballots, based on existing ballot weights. The sum of the new ballot weights should be equal to :param:`r`
(approximately). Note that ballot weights are rounded down.
:param :class:`BaseSenateElection` election: The senate election to generate new ballot weights for.
:param int r: The sum of the new ballot weights.
:returns: The new ballot weights generated using Gamma Variates.
:rtype: dict
"""
new_ballot_weights = {}
total = 0
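    # Normalized Gamma(w, 1) draws are a sample from a Dirichlet distribution
    # with parameters w, which is how this function draws from the posterior
    # over ballot types.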
for ballot in election.get_ballots():
weight = election.get_ballot_weight(ballot)
new_ballot_weights[ballot] = gammavariate(weight, 1) if weight else 0
total += new_ballot_weights[ballot]
for ballot in election.get_ballots():
new_ballot_weights[ballot] = int(r * new_ballot_weights[ballot] / total)
return new_ballot_weights
def audit(election, seed, unpopular_freq_threshold, stage_counter=0, alpha=0.05, trials=100, quick=False):
""" Runs a Bayesian audit on the given senate election.
:param :class:`BaseSenateElection` election: The senate election to audit.
:param int seed: The seed for the random number generator.
:param float unpopular_freq_threshold: The upper bound on the frequency of trials a candidate is elected in order
for the candidate to be deemed unpopular.
:param int stage_counter: The current audit stage (default: 0).
:param float alpha: The error tolerance for the given audit (default: 0.05).
:param int trials: The number of trials performed per sample (default: 100).
:param bool quick: A boolean indicating whether the audit should run to completion (True) or only run one stage
(False) (default: False).
"""
print(
'Audit of {} election.\n'.format(election.get_type()),
' Election ID: {}\n'.format(election.get_election_id()),
' Candidates: {}\n'.format(election.get_candidates()),
' Number of ballots cast: {}\n'.format(election.get_num_cast_ballots()),
' Number of seats being contested: {}\n'.format(election.get_num_seats()),
' Number of trials per sample: {}\n'.format(trials),
' Random number seed: {}'.format(seed),
)
start_time = time()
set_seed(seed)
# Cast one "prior" ballot for each candidate to establish a Bayesian prior. The prior ballot is a length-one partial
# ballot with just a first choice vote for that candidate.
for cid in election.get_candidate_ids():
election.add_ballot((cid,), 1)
# Mapping from candidates to the set of ballots that elected them.
candidate_to_ballots_map = {}
candidate_outcomes = None
done = False
while True:
stage_counter += 1
election.draw_ballots() # Increase sample of cast ballots.
print(
'\nAudit stage number: {}\n'.format(stage_counter),
' Sample size (including prior ballots): {}\n'.format(election.get_num_ballots_drawn()),
)
# -- Run trials in a Bayesian manner --
# Each outcome is a tuple of candidates who have been elected in lexicographical order (NOT the order in which
# they were elected).
print(' Performing {} Bayesian trials (posterior-based election simulations) in this stage.'.format(trials))
outcomes = []
for _ in range(trials):
new_ballot_weights = get_new_ballot_weights(election, election.get_num_cast_ballots())
outcome = election.get_outcome(new_ballot_weights)
for cid in outcome:
if cid not in candidate_to_ballots_map:
candidate_to_ballots_map[cid] = new_ballot_weights
outcomes.append(outcome)
best, freq = Counter(outcomes).most_common(1)[0]
print(
' Most common outcome ({} seats):\n'.format(election.get_num_seats()),
' {}\n'.format(best),
' Frequency of most common outcome: {} / {}'.format(freq, trials),
)
candidate_outcomes = Counter(chain(*outcomes))
print(
' Fraction present in outcome by candidate:\n {}'.format(
', '.join([
'{}: {}'.format(str(cid), cid_freq / trials)
for cid, cid_freq in sorted(candidate_outcomes.items(), key=lambda x: (x[1], x[0]))
]),
),
)
if freq >= trials * (1 - alpha):
print(
'Stopping because audit confirmed outcome:\n',
' {}\n'.format(best),
'Total number of ballots examined: {}'.format(election.get_num_ballots_drawn()),
)
done = True
break
if election.get_num_ballots_drawn() >= election.get_num_cast_ballots():
print('Audit has looked at all ballots. Done.')
done = True
break
if not quick:
break
if candidate_outcomes is not None and done:
for cid, cid_freq in sorted(candidate_outcomes.items(), key=lambda x: (x[1], x[0])):
if cid_freq / trials < unpopular_freq_threshold:
print(
' One set of ballots that elected low frequency '
'candidate {} which occurred in {}% of outcomes\n'.format(str(cid), str(cid_freq)),
' {}'.format(candidate_to_ballots_map[cid]),
)
print('Elapsed time: {} seconds.'.format(time() - start_time))
return done
| apache-2.0 | 373,211,840,810,849,340 | 42.080292 | 120 | 0.619282 | false | 3.877792 | false | false | false |
zenofewords/zenofewords | tests/utils.py | 1 | 1705 | from copy import copy
from django.conf import settings
from django.urls import reverse
def get_permissions(response_mapping, custom_mapping):
"""
Build permission mappings.
:param response_mapping: usually a predefined permission template (FORBIDDEN, NOT_FOUND, etc.)
:type response_mapping: dict
:param custom_mapping: key/value pairs which need to be customised
:type custom_mapping: dict
:returns: a new response method and status code mapping
:rtype: dict
"""
response_mapping = copy(response_mapping)
response_mapping.update(custom_mapping)
return response_mapping
def assert_permissions(client_type, response_code_mapping, client_mapping, url_reverse):
"""
Test URL response depending on client type.
:param client_type: type of client (anonymous, user, admin, etc.)
:type client_type: string
:param response_code_mapping: request type with a matching response code
:type response_code_mapping: dict
:param client_mapping: a fixture that contains client types
:type client_mapping: dict
:param url_reverse: tuple of reverse strings for URLs which receive requests
:type url_reverse: tuple
"""
for method in response_code_mapping.keys():
for url in url_reverse:
response_code = getattr(
client_mapping[client_type], method
)(reverse(url), secure=not settings.DEBUG).status_code
assert response_code == response_code_mapping[method], print(
'client: {}, method: {}, received: {}, expected: {}'.format(
client_type, method, response_code, response_code_mapping[method]
)
)
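# Illustrative call (hypothetical values; FORBIDDEN would be a predefined
# response template and client_mapping a fixture, as described above):
#     assert_permissions(
#         'anonymous',
#         get_permissions(FORBIDDEN, {'post': 302}),
#         client_mapping,
#         ('home', 'contact'),
#     )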
| mit | -4,056,573,595,075,421,000 | 35.276596 | 98 | 0.674487 | false | 4.383033 | false | false | false |
lmazuel/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/application_health_state_filter.py | 1 | 6307 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationHealthStateFilter(Model):
    """Defines matching criteria to determine whether an application should be
included in the cluster health chunk.
One filter can match zero, one or multiple applications, depending on its
properties.
:param application_name_filter: The name of the application that matches
the filter, as a fabric uri. The filter is applied only to the specified
application, if it exists.
If the application doesn't exist, no application is returned in the
cluster health chunk based on this filter.
If the application exists, it is included in the cluster health chunk if
it respects the other filter properties.
If not specified, all applications are matched against the other filter
members, like health state filter.
:type application_name_filter: str
:param application_type_name_filter: The name of the application type that
matches the filter.
If specified, the filter is applied only to applications of the selected
application type, if any exists.
If no applications of the specified application type exists, no
application is returned in the cluster health chunk based on this filter.
Each application of the specified application type is included in the
cluster health chunk if it respects the other filter properties.
If not specified, all applications are matched against the other filter
members, like health state filter.
:type application_type_name_filter: str
:param health_state_filter: The filter for the health state of the
applications. It allows selecting applications if they match the desired
health states.
The possible values are integer value of one of the following health
states. Only applications that match the filter are returned. All
applications are used to evaluate the cluster aggregated health state.
If not specified, default value is None, unless the application name or
the application type name are specified. If the filter has default value
and application name is specified, the matching application is returned.
The state values are flag based enumeration, so the value could be a
combination of these values obtained using bitwise 'OR' operator.
For example, if the provided value is 6, it matches applications with
HealthState value of OK (2) and Warning (4).
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in order to
return no results on a given collection of states. The value is 1.
- Ok - Filter that matches input with HealthState value Ok. The value is
2.
- Warning - Filter that matches input with HealthState value Warning. The
value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The value is
65535.
. Default value: 0 .
:type health_state_filter: int
:param service_filters: Defines a list of filters that specify which
services to be included in the returned cluster health chunk as children
of the application. The services are returned only if the parent
application matches a filter.
If the list is empty, no services are returned. All the services are used
to evaluate the parent application aggregated health state, regardless of
the input filters.
The application filter may specify multiple service filters.
For example, it can specify a filter to return all services with health
state Error and another filter to always include a service identified by
its service name.
:type service_filters:
list[~azure.servicefabric.models.ServiceHealthStateFilter]
:param deployed_application_filters: Defines a list of filters that
specify which deployed applications to be included in the returned cluster
health chunk as children of the application. The deployed applications are
returned only if the parent application matches a filter.
If the list is empty, no deployed applications are returned. All the
deployed applications are used to evaluate the parent application
aggregated health state, regardless of the input filters.
The application filter may specify multiple deployed application filters.
For example, it can specify a filter to return all deployed applications
with health state Error and another filter to always include a deployed
application on a specified node.
:type deployed_application_filters:
list[~azure.servicefabric.models.DeployedApplicationHealthStateFilter]
"""
_attribute_map = {
'application_name_filter': {'key': 'ApplicationNameFilter', 'type': 'str'},
'application_type_name_filter': {'key': 'ApplicationTypeNameFilter', 'type': 'str'},
'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'},
'service_filters': {'key': 'ServiceFilters', 'type': '[ServiceHealthStateFilter]'},
'deployed_application_filters': {'key': 'DeployedApplicationFilters', 'type': '[DeployedApplicationHealthStateFilter]'},
}
def __init__(self, application_name_filter=None, application_type_name_filter=None, health_state_filter=0, service_filters=None, deployed_application_filters=None):
super(ApplicationHealthStateFilter, self).__init__()
self.application_name_filter = application_name_filter
self.application_type_name_filter = application_type_name_filter
self.health_state_filter = health_state_filter
self.service_filters = service_filters
self.deployed_application_filters = deployed_application_filters
| mit | -3,039,962,441,478,437,400 | 55.81982 | 168 | 0.721896 | false | 4.935055 | false | false | false |
guillaume-philippon/aquilon | lib/aquilon/worker/formats/chassis.py | 1 | 1715 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chassis formatter."""
from aquilon.aqdb.model import Chassis
from aquilon.worker.formats.formatters import ObjectFormatter
from aquilon.worker.formats.hardware_entity import HardwareEntityFormatter
class ChassisFormatter(HardwareEntityFormatter):
def format_raw(self, chassis, indent="", embedded=True,
indirect_attrs=True):
details = [super(ChassisFormatter, self).format_raw(chassis, indent)]
for slot in chassis.slots:
if slot.machine:
if slot.machine.primary_name:
hostname = slot.machine.primary_name
else:
hostname = "no hostname"
details.append(indent + " Slot #%d: %s (%s)" %
(slot.slot_number, slot.machine.label, hostname))
else:
details.append(indent + " Slot #%d: Empty" % slot.slot_number)
return "\n".join(details)
ObjectFormatter.handlers[Chassis] = ChassisFormatter()
| apache-2.0 | 9,082,157,566,535,196,000 | 38.883721 | 80 | 0.668222 | false | 3.979118 | false | false | false |
mlperf/training_results_v0.5 | v0.5.0/nvidia/submission/code/rnn_translator/pytorch/seq2seq/data/sampler.py | 1 | 5047 | import logging
import torch
from torch.utils.data.sampler import Sampler
from mlperf_compliance import mlperf_log
from seq2seq.utils import gnmt_print
from seq2seq.utils import get_world_size, get_rank
class BucketingSampler(Sampler):
"""
Distributed data sampler supporting bucketing by sequence length.
"""
def __init__(self, dataset, batch_size, seeds, bucketing=True,
world_size=None, rank=None):
"""
Constructor for the BucketingSampler.
:param dataset: dataset
:param batch_size: batch size
:param bucketing: if True enables bucketing by sequence length
:param world_size: number of processes participating in distributed
training
:param rank: rank of the current process within world_size
"""
if world_size is None:
world_size = get_world_size()
if rank is None:
rank = get_rank()
self.dataset = dataset
self.world_size = world_size
self.rank = rank
self.epoch = 0
self.bucketing = bucketing
self.seeds = seeds
self.batch_size = batch_size
self.global_batch_size = batch_size * world_size
self.data_len = len(self.dataset)
self.num_samples = self.data_len // self.global_batch_size \
* self.global_batch_size
def __iter__(self):
gnmt_print(key=mlperf_log.INPUT_ORDER)
# deterministically shuffle based on epoch
g = torch.Generator()
seed = self.seeds[self.epoch]
logging.info(f'Sampler for epoch {self.epoch} uses seed {seed}')
g.manual_seed(seed)
# generate permutation
indices = torch.randperm(self.data_len, generator=g)
# make indices evenly divisible by (batch_size * world_size)
indices = indices[:self.num_samples]
# splits the dataset into chunks of 'batches_in_shard' global batches
# each, sorts by (src + tgt) sequence length within each chunk,
# reshuffles all global batches
if self.bucketing:
batches_in_shard = 80
shard_size = self.global_batch_size * batches_in_shard
gnmt_print(key=mlperf_log.INPUT_SHARD, value=shard_size)
nshards = (self.num_samples + shard_size - 1) // shard_size
lengths = self.dataset.lengths[indices]
shards = [indices[i * shard_size:(i+1) * shard_size] for i in range(nshards)]
len_shards = [lengths[i * shard_size:(i+1) * shard_size] for i in range(nshards)]
indices = []
for len_shard in len_shards:
_, ind = len_shard.sort()
indices.append(ind)
output = tuple(shard[idx] for shard, idx in zip(shards, indices))
indices = torch.cat(output)
# global reshuffle
indices = indices.view(-1, self.global_batch_size)
order = torch.randperm(indices.shape[0], generator=g)
indices = indices[order, :]
indices = indices.view(-1)
assert len(indices) == self.num_samples
# build indices for each individual worker
# consecutive ranks are getting consecutive batches,
# default pytorch DistributedSampler assigns strided batches
# with offset = length / world_size
indices = indices.view(-1, self.batch_size)
indices = indices[self.rank::self.world_size].contiguous()
indices = indices.view(-1)
indices = indices.tolist()
assert len(indices) == self.num_samples // self.world_size
return iter(indices)
def __len__(self):
return self.num_samples // self.world_size
def set_epoch(self, epoch):
"""
Sets current epoch index. This value is used to seed RNGs in __iter__()
function.
:param epoch: index of current epoch
"""
self.epoch = epoch
class StaticDistributedSampler(Sampler):
def __init__(self, dataset, batch_size, pad, world_size=None, rank=None):
if world_size is None:
world_size = get_world_size()
if rank is None:
rank = get_rank()
self.world_size = world_size
global_batch_size = batch_size * world_size
data_len = len(dataset)
num_samples = (data_len + global_batch_size - 1) \
// global_batch_size * global_batch_size
self.num_samples = num_samples
indices = list(range(data_len))
if pad:
indices += [0] * (num_samples - len(indices))
else:
indices += [-1] * (num_samples - len(indices))
indices = torch.tensor(indices)
indices = indices.view(-1, batch_size)
indices = indices[rank::world_size].contiguous()
indices = indices.view(-1)
indices = indices[indices != -1]
indices = indices.tolist()
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
| apache-2.0 | -4,972,239,737,936,623,000 | 32.646667 | 93 | 0.597781 | false | 4.011924 | false | false | false |
smenon8/AlgDataStruct_practice | practice_problems/MorePractice.py | 1 | 2289 | # Divide and Conquer Algorithm for finding the maximum sub array sum
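# Overall running time is O(n log n): T(n) = 2*T(n/2) + O(n) for the crossing sum.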
def maxSubArraySum(arr,h,t):
if h == t:
return arr[h]
m = (h+t)//2
# 1. find max in left subarray
leftSum = maxSubArraySum(arr,h,m)
# 2. find max in right subarray
rightSum = maxSubArraySum(arr,m+1,t)
# 3. find max in mid-point crossing
midPointSum = midPointCrossSum(arr,h,m,t)
return max(leftSum,rightSum,midPointSum)
def midPointCrossSum(arr,h,m,t):
    # Scan left from the mid-point towards head, tracking the maximum prefix sum seen
sum = 0
leftSum = arr[m]
for i in range(m-1,h-1,-1):
sum += arr[i]
if sum > leftSum:
leftSum = sum
    # Scan right from mid+1 towards tail, tracking the maximum prefix sum seen
sum = 0
rightSum = arr[m+1]
for i in range(m+2,t+1):
sum += arr[i]
if sum > rightSum:
rightSum = sum
return leftSum+rightSum
arr = [-2,-5,6,-2,-3,1,5,-6]
print("Maximum Sub Array Sum")
print(maxSubArraySum(arr,0,len(arr)-1))
print()
# Similar problem: given a sum, find the pair of numbers which add up to the sum
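# Sorting costs O(n log n); the head/tail two-pointer scan afterwards is O(n).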
def twoSumProblemSort(arr,n):
arr.sort()
head = 0
tail = len(arr)-1
print(arr)
while head <= tail:
s = arr[head] + arr[tail]
if s == n:
return arr[head],arr[tail]
elif s < n:
head += 1
else:
tail -= 1
return False
arr = [6,8,2,3,10,11]
print("Two sum problem")
print(twoSumProblemSort(arr,10))
print()
'''
1. The result depends on the pivot element, i.e. the middle element.
2. If the middle element is smaller than its left neighbour, the search continues in the left half.
3. Otherwise, if it is smaller than its right neighbour, the search continues in the right half.
'''
def findPeakEle(arr,low,high,n):
mid = (low+high) // 2
# Handling the boundary cases
if mid == 0 or mid == n-1: # reached the first or the last element - boundary case
return arr[mid],mid
else:
if arr[mid] > arr[mid-1] and arr[mid] > arr[mid+1]: # definition of peak element
return arr[mid],mid
else:
if arr[mid] < arr[mid-1]: # peak element will lie to the left
return findPeakEle(arr,low,mid-1,n)
else:
if arr[mid] < arr[mid+1]: # peak element will lie to the right
return findPeakEle(arr,mid+1,high,n)
arr = [2,20,19,21,23,90,67]
n = len(arr)
print("Find peak element")
print(findPeakEle(arr,0,n-1,n))
print() | mit | 6,559,211,954,744,387,000 | 23.105263 | 118 | 0.671472 | false | 2.640138 | false | false | false |
argv-minus-one/obnam | obnamlib/encryption.py | 1 | 7876 | # Copyright 2011 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import subprocess
import tempfile
import tracing
import obnamlib
class EncryptionError(obnamlib.ObnamError):
pass
class GpgError(EncryptionError):
msg = 'gpg failed with exit code {returncode}:\n{stderr}'
def generate_symmetric_key(numbits, filename='/dev/random'):
'''Generate a random key of at least numbits for symmetric encryption.'''
tracing.trace('numbits=%d', numbits)
bytes = (numbits + 7) / 8
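    # Note: /dev/random may block until the kernel has gathered enough entropy;
    # a non-blocking alternative is to pass filename='/dev/urandom'.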
f = open(filename, 'rb')
key = f.read(bytes)
f.close()
return key.encode('hex')
class SymmetricKeyCache(object):
'''Cache symmetric keys in memory.'''
def __init__(self):
self.clear()
def get(self, repo, toplevel):
if repo in self.repos and toplevel in self.repos[repo]:
return self.repos[repo][toplevel]
return None
def put(self, repo, toplevel, key):
if repo not in self.repos:
self.repos[repo] = {}
self.repos[repo][toplevel] = key
def clear(self):
self.repos = {}
def _gpg_pipe(args, data, passphrase):
'''Pipe things through gpg.
With the right args, this can be either an encryption or a decryption
operation.
For safety, we give the passphrase to gpg via a file descriptor.
The argument list is modified to include the relevant options for that.
The data is fed to gpg via a temporary file, readable only by
the owner, to avoid congested pipes.
'''
# Open pipe for passphrase, and write it there. If passphrase is
# very long (more than 4 KiB by default), this might block. A better
# implementation would be to have a loop around select(2) to do pipe
# I/O when it can be done without blocking. Patches most welcome.
keypipe = os.pipe()
os.write(keypipe[1], passphrase + '\n')
os.close(keypipe[1])
# Actually run gpg.
argv = ['gpg', '--passphrase-fd', str(keypipe[0]), '-q', '--batch',
'--no-textmode'] + args
tracing.trace('argv=%s', repr(argv))
p = subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(data)
os.close(keypipe[0])
# Return output data, or deal with errors.
if p.returncode: # pragma: no cover
raise GpgError(returncode=p.returncode, stderr=err)
return out
def encrypt_symmetric(cleartext, key):
'''Encrypt data with symmetric encryption.'''
return _gpg_pipe(['-c'], cleartext, key)
def decrypt_symmetric(encrypted, key):
'''Decrypt encrypted data with symmetric encryption.'''
return _gpg_pipe(['-d'], encrypted, key)
def _gpg(args, stdin='', gpghome=None):
'''Run gpg and return its output.'''
env = dict()
env.update(os.environ)
if gpghome is not None:
env['GNUPGHOME'] = gpghome
tracing.trace('gpghome=%s' % gpghome)
argv = ['gpg', '-q', '--batch', '--no-textmode'] + args
tracing.trace('argv=%s', repr(argv))
p = subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, err = p.communicate(stdin)
# Return output data, or deal with errors.
if p.returncode: # pragma: no cover
raise GpgError(returncode=p.returncode, stderr=err)
return out
def get_public_key(keyid, gpghome=None):
'''Return the ASCII armored export form of a given public key.'''
return _gpg(['--export', '--armor', keyid], gpghome=gpghome)
def get_public_key_user_ids(keyid, gpghome=None): # pragma: no cover
    '''Return the user IDs associated with a given public key.'''
user_ids = []
output = _gpg(['--with-colons', '--list-keys', keyid], gpghome=gpghome)
for line in output.splitlines():
token = line.split(":")
if len(token) >= 10:
user_id = token[9].strip().replace(r'\x3a', ":")
if user_id:
user_ids.append(user_id)
return user_ids
class Keyring(object):
'''A simplistic representation of GnuPG keyrings.
Just enough functionality for obnam's purposes.
'''
_keyring_name = 'pubring.gpg'
def __init__(self, encoded=''):
self._encoded = encoded
self._gpghome = None
self._keyids = None
def _setup(self):
self._gpghome = tempfile.mkdtemp()
f = open(self._keyring, 'wb')
f.write(self._encoded)
f.close()
_gpg(['--import-ownertrust'], stdin='''\
# List of assigned trustvalues, created Sun 01 Dec 2013 19:13:26 GMT
# (Use "gpg --import-ownertrust" to restore them)
''', gpghome=self._gpghome)
def _cleanup(self):
shutil.rmtree(self._gpghome)
self._gpghome = None
@property
def _keyring(self):
return os.path.join(self._gpghome, self._keyring_name)
def _real_keyids(self):
output = self.gpg(False, ['--list-keys', '--with-colons'])
keyids = []
for line in output.splitlines():
fields = line.split(':')
if len(fields) >= 5 and fields[0] == 'pub':
keyids.append(fields[4])
return keyids
def keyids(self):
if self._keyids is None:
self._keyids = self._real_keyids()
return self._keyids
def __str__(self):
return self._encoded
def __contains__(self, keyid):
return keyid in self.keyids()
def _reread_keyring(self):
f = open(self._keyring, 'rb')
self._encoded = f.read()
f.close()
self._keyids = None
def add(self, key):
self.gpg(True, ['--import'], stdin=key)
def remove(self, keyid):
self.gpg(True, ['--delete-key', '--yes', keyid])
def gpg(self, reread, *args, **kwargs):
self._setup()
kwargs['gpghome'] = self._gpghome
try:
result = _gpg(*args, **kwargs)
except BaseException: # pragma: no cover
self._cleanup()
raise
else:
if reread:
self._reread_keyring()
self._cleanup()
return result
class SecretKeyring(Keyring):
'''Same as Keyring, but for secret keys.'''
_keyring_name = 'secring.gpg'
def _real_keyids(self):
output = self.gpg(False, ['--list-secret-keys', '--with-colons'])
keyids = []
for line in output.splitlines():
fields = line.split(':')
if len(fields) >= 5 and fields[0] == 'sec':
keyids.append(fields[4])
return keyids
def encrypt_with_keyring(cleartext, keyring):
'''Encrypt data with all keys in a keyring.'''
recipients = []
for keyid in keyring.keyids():
recipients += ['-r', keyid]
return keyring.gpg(False,
['-e',
'--trust-model', 'always',
'--no-encrypt-to',
'--no-default-recipient',
] + recipients,
stdin=cleartext)
def decrypt_with_secret_keys(encrypted, gpghome=None):
'''Decrypt data using secret keys GnuPG finds on its own.'''
return _gpg(['-d'], stdin=encrypted, gpghome=gpghome)
| gpl-3.0 | 1,874,813,231,981,704,400 | 27.536232 | 77 | 0.603225 | false | 3.841951 | false | false | false |
tuxar-uk/Merlyn | Merlyn.py | 1 | 6041 | """ Merlyn Speech Control for PC
We load in commands (& spells) generated by lmtool.py
and also language files generated by the Sphinx lmtool
http://www.speech.cs.cmu.edu/tools/lmtool-new.html
then open up a stream of words from the mic via LiveSpeech
and try to parse it into commands and possibly some parameters.
If succesful, hand off to the OS.
The parsing will need improving as the syntax evolves...
Copyright 2017 Alan Richmond @ AILinux.net
The MIT License https://opensource.org/licenses/MIT
"""
import os
from subprocess import call
from pocketsphinx import LiveSpeech, get_model_path
class Merlyn:
""" Merlyn Speech Control for PC"
"""
def __init__(self, num):
""" init with the number given by the lmtool
"""
self.num = str(num)
self.mer = os.path.expanduser("~/Merlyn")
cmds = os.path.join(self.mer, 'cmds/all.txt')
lang = os.path.join(self.mer, 'lang/')
self.lm = os.path.join(lang, self.num + '.lm')
self.dic = os.path.join(lang, self.num + '.dic')
# Read in and store commands
try:
lines = open(cmds)
except IOError:
sys.exit("Could not open file " + cmds)
count = 0
self.commands = {}
for line in lines:
line = line.strip()
if len(line) > 1 and line[0] != "#": # skip over empty lines & comments
(cmd, spell) = line.split(":",1)
self.commands[cmd.strip().lower()] = spell.strip()
count += 1
def parse_the(self, cmd):
""" Parse the text command supplied by the PocketSphinx listener.
"""
self.cmd = cmd
self.spell = None
self.params = []
# start with the whole phrase
while self.cmd not in self.commands: # if not recognised then
words = self.cmd.split() # split up phrase into words
if len(words) < 2: break
word = words[-1] # split off last word
del words[-1]
# This is probably temporary. I'm assuming only integer params for now...
if word == "to": # Sphinx thinks user said 'to'
word = "two" # but more likely they said 'two'
elif word == "for": # Sphinx thinks user said 'for'
word = "four" # you get the idea...
self.params.append(word) # save words not part of the command
self.cmd = ' '.join(words).strip() # re-join words for possible command
if self.cmd not in self.commands:
return None
self.params.reverse() # above loop picked off words from right
self.spell = self.commands[self.cmd] # this is the spell that Merlyn will utter
if self.params: # are there some params?
par = ' '.join(self.params).strip() # join them back into a string
try: # for now I'm assuming ints only
num = str(text2int(par))
except:
print("Not a good num:", par)
try:
self.spell = self.spell % num # substitute in the spell
            except:
                pass  # leave the spell unchanged if the substitution fails
return self.spell
def printcmd(self):
# print("<", self.cmd, self.params, ' {', self.spell, ' }')
print("<", self.cmd, self.params)
def parse_do(self, cmd):
""" Parse the command then do it.
"""
spell = self.parse_the(cmd)
if spell is None: return
self.printcmd()
try:
retcode = call(spell, shell=True) # here OS, do this!
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
def do_demo(self):
""" Run Merlyn's self-demo.
"""
demo = os.path.join(self.mer, 'demo/demo.sh &')
print(demo)
call(demo, shell=True) # here OS, do this!
def listen(self):
""" Top-level loop to get text from the user's microphone,
check for some special commands; if we're expecting a command then do it.
"""
print( "| Say 'Merlyn' to make him/her listen.\n\
| Merlyn will obey the next command. If that is 'keep listening' then\n\
| Merlyn will continue to obey commands until you say 'stop listening'.\n\
| Say 'help' to see this message again, and to get further help.")
listening = obey = first = False
# https://pypi.python.org/pypi/pocketsphinx
speech = LiveSpeech(hmm=os.path.join(get_model_path(), 'en-us'), lm=self.lm, dic=self.dic)
for spoken in speech: # get user's command
cmd = str(spoken).lower()
if cmd == 'merlyn': # need to hear my name before doing stuff
obey = True # flag to obey next command
first = True # obey flag will be toggled off after first use
elif cmd == 'keep listening': # or be told to keep listening for commands
listening = True
elif cmd == 'stop listening': # until told to stop
listening = False
obey = True # need to acknowledge the stop
elif cmd == 'exit': # we're done...
break
elif cmd == '': # somehow got an empty command
continue
if obey or listening: # attempt to recognise the command and params
self.parse_do(cmd)
if not first:
obey = False
first = False
# http://stackoverflow.com/questions/493173/is-there-a-way-to-convert-number-words-to-integers
def text2int(textnum, numwords={}):
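    # Converts an English number phrase to an integer, e.g. 'twenty three' -> 23.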
if not numwords:
units = [
"zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
"nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", "seventeen", "eighteen", "nineteen",
]
tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
scales = ["hundred", "thousand", "million", "billion", "trillion"]
numwords["and"] = (1, 0)
for idx, word in enumerate(units): numwords[word] = (1, idx)
for idx, word in enumerate(tens): numwords[word] = (1, idx * 10)
for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0)
current = result = 0
for word in textnum.split():
if word == 'full': word = 'four' # kludge
if word == 'q': word = 'two' # kludge
if word not in numwords:
raise Exception("Illegal word: " + word)
scale, increment = numwords[word]
current = current * scale + increment
if scale > 100:
result += current
current = 0
return result + current
| mit | -4,345,104,676,021,986,300 | 30.463542 | 99 | 0.636484 | false | 3.061835 | false | false | false |
DLR-SC/DataFinder | contrib/script_examples/command_line/items.py | 1 | 4139 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Demonstrates different items API functions.
"""
from StringIO import StringIO
import sys
from datafinder.script_api.repository import connectRepository, \
getWorkingRepository, setWorkingRepository
from datafinder.script_api.item import item_support
__version__ = "$Revision-Id:$"
def unmanagedRepository(basePath):
""" Demonstrates the script API using the local file system as unmanaged repository. """
print "Connecting repository file:///..."
repository = connectRepository("file:///")
setWorkingRepository(repository)
assert repository == getWorkingRepository()
print "\nChecking base path and creating children..."
print item_support.itemDescription(basePath)
item_support.refresh(basePath)
print item_support.getChildren(basePath)
collectionPath = basePath + "/collection"
item_support.createCollection(collectionPath)
print item_support.itemDescription(collectionPath)
leafPath = basePath + "/leaf"
item_support.createLeaf(leafPath)
item_support.storeData(leafPath, StringIO("some data..."))
print item_support.itemDescription(leafPath)
print "Put in the following data:"
fileObject = item_support.retrieveData(leafPath)
print fileObject.read()
fileObject.close()
linkPath = basePath + "/link.lnk"
item_support.createLink(linkPath, collectionPath)
print item_support.itemDescription(linkPath)
print item_support.getChildren(basePath)
print "\nCopy and move some things..."
copyLeafPath = collectionPath + "/leaf_copy"
item_support.copy(leafPath, copyLeafPath)
print item_support.getChildren(collectionPath)
item_support.move(copyLeafPath, collectionPath + "/leaf")
print item_support.getChildren(collectionPath)
print "\nArchiving everything..."
item_support.createArchive(basePath, collectionPath)
print "\nWalking the base path..."
print item_support.walk(basePath)
print "\nCleaning up..."
for path in [collectionPath, leafPath, linkPath]:
item_support.delete(path)
print item_support.walk(basePath)
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Call: items.py basePath"
else:
basePath_ = unicode(sys.argv[1])
unmanagedRepository(basePath_)
| bsd-3-clause | 7,515,495,566,930,876,000 | 35.627273 | 92 | 0.70476 | false | 4.176589 | false | false | false |
soybean217/lora-python | UServer/http_api_no_auth/api/api_gateway.py | 1 | 5817 | import json
from http_api_no_auth.api import api, root
from http_api_no_auth.api.decorators import gateway_belong_to_user
from userver.object.gateway import Gateway, Location
from utils.errors import KeyDuplicateError, PatchError
from .forms.form_gateway import AddGatewayForm, PatchGateway
from ..http_auth import auth
from flask import request, Response
from .forms import get_formdata_from_json_or_form
from userver.object.statistician_gateway import Statistician
from utils.log import logger
import time
@api.route(root + 'gateways', methods=['GET', 'POST'])
@auth.auth_required
def gateways(user):
if request.method == 'GET':
logger.info('TIMESTAMP \'gateways\' HTTP[GET]:%s' % time.time())
gateways_list = []
logger.info('TIMESTAMP \'gateways\' QueryBegin:%s' % time.time())
gateways = Gateway.query.filter_by(user_id=user.id)
logger.info('TIMESTAMP \'gateways\' QueryOver:%s' % time.time())
for gateway in gateways:
dict = gateway.obj_to_dict()
gateways_list.append(dict)
logger.info('TIMESTAMP \'gateways\' obj_to_dict_Over:%s' % time.time())
respond_data = json.dumps(gateways_list)
logger.info('TIMESTAMP \'gateways\' obj_to_dict_Over:%s' % time.time())
logger.info('TIMESTAMP \'gateways\' SendRespond:%s' % time.time())
return Response(status=200, response=respond_data)
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
add_gateway = AddGatewayForm(formdata)
if add_gateway.validate():
try:
gateway = import_gateway(user, add_gateway)
gateway.save()
new_gateway = Gateway.query.get(gateway.id)
return Response(status=201, response=json.dumps(new_gateway.obj_to_dict()))
except KeyDuplicateError as error:
errors = {'mac_addr': str(error)}
return Response(status=406, response=json.dumps({"errors": errors}))
except AssertionError as error:
return Response(status=406, response=json.dumps({"errors": {"other": str(error)}}))
else:
errors = {}
for key, value in add_gateway.errors.items():
errors[key] = value[0]
return Response(status=406, response=json.dumps({"errors": errors}))
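# Example request body for POST /gateways (illustrative sketch only: the field
# names follow AddGatewayForm / import_gateway below, the values are made up):
# {
#     "mac_addr": "b827ebfffe7b80ca",
#     "name": "rooftop-gw",
#     "platform": "rpi",
#     "model": "imst_rpi",
#     "freq_plan": "EU863-870",
#     "longitude": 13.40,
#     "latitude": 52.52,
#     "altitude": 30
# }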
@api.route(root + 'gateways/<gateway_id>/statistician/hourly', methods=['GET', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway_statistician_hourly(user, gateway):
"""
    :param gateway: gateway whose traffic is counted (injected by @gateway_belong_to_user)
    :return: hourly uplink/downlink statistics
"""
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/hourly\' HTTP[GET]:%s' % time.time())
statistician = Statistician(gateway.id)
hourly = statistician.count_in_hour()
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/hourly\' SendRespond:%s' % time.time())
return json.dumps(hourly), 200
@api.route(root + 'gateways/<gateway_id>/statistician/daily', methods=['GET', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway_statistician_daily(user, gateway):
"""
    :param gateway: gateway whose traffic is counted (injected by @gateway_belong_to_user)
    :return: daily uplink/downlink statistics
"""
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/daily\' HTTP[GET]:%s' % time.time())
statistician = Statistician(gateway.id)
daily = statistician.count_in_daily()
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/daily\' SendRespond:%s' % time.time())
return json.dumps(daily), 200
@api.route(root + 'gateways/<gateway_id>', methods=['GET', 'DELETE', 'PATCH', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway(user, gateway):
if request.method == 'GET':
return Response(status=200, response=json.dumps(gateway.obj_to_dict()))
elif request.method == 'PATCH':
try:
formdata = get_formdata_from_json_or_form(request)
PatchGateway.patch(gateway, formdata)
return json.dumps(gateway.obj_to_dict()), 200
except (AssertionError, PatchError, ValueError) as error:
return json.dumps({'errors': str(error)}), 406
elif request.method == 'DELETE':
gateway.delete()
return json.dumps({'success': True}), 200
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
if formdata and formdata.get('cmd') is not None:
if formdata['cmd'] == 'restart':
gateway.send_restart_request()
return '', 204
else:
return 'Unknown cmd %s ' % formdata['cmd'], 406
else:
return '', 406
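# Illustrative gateway command call (not part of the original file): a POST to
# /gateways/<gateway_id> with the JSON body {"cmd": "restart"} triggers
# gateway.send_restart_request() above; any other cmd value is rejected with 406.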
# def import_gateway(user, add_gateway):
# mac_addr = add_gateway['mac_addr'].data
# name = add_gateway['name'].data
# platform = add_gateway['platform'].data
# freq_plan = add_gateway['freq_plan'].data
# location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
# if platform == Platform.rpi:
# model = add_gateway['model'].data
# return RaspBerryPiGateway(user.id, mac_addr, name, model, freq_plan=freq_plan, location=location)
# elif platform == Platform.ll:
# return LinkLabsGateway(user.id, mac_addr, name, freq_plan=freq_plan, location=location)
def import_gateway(user, add_gateway):
mac_addr = add_gateway['mac_addr'].data
name = add_gateway['name'].data
platform = add_gateway['platform'].data
freq_plan = add_gateway['freq_plan'].data
model = add_gateway['model'].data
location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
return Gateway(user.id, mac_addr, name, platform, model, freq_plan=freq_plan, location=location) | mit | 255,426,881,030,014,140 | 43.10687 | 116 | 0.646702 | false | 3.57709 | false | false | false |
valeriansaliou/django-gitlab-logging | gitlab_logging/handlers.py | 1 | 2294 | import logging
class GitlabIssuesHandler(logging.Handler):
"""
Handles logs as issues with GitLab API
"""
def __init__(self):
logging.Handler.__init__(self)
def __open_issue(self, title, content, trace_raw):
"""
Open an issue on GitLab with given content
"""
from tasks import task_log_gitlab_issue_open
task_log_gitlab_issue_open.delay(title, content, trace_raw)
def __reopen_issue(self, issue_id):
"""
Re-open a given issue on GitLab
"""
from tasks import task_log_gitlab_issue_reopen
task_log_gitlab_issue_reopen.delay(issue_id)
def emit(self, record):
"""
Fired when an error is emitted
"""
from django.conf import settings
from django.views.debug import get_exception_reporter_filter
from helpers import GitlabIssuesHelper
try:
has_repr, request_repr = True, '\n{0}'.format(
get_exception_reporter_filter(record.request).get_request_repr(record.request)
)
except Exception:
has_repr, request_repr = False, ':warning: Request data unavailable.'
# Generate issue title
title = '[{level}@{environment}] {message}'.format(
level=record.levelname,
message=record.getMessage(),
environment=getattr(settings, 'ENVIRONMENT', 'default'),
)
# Generate issue content
trace_raw = self.format(record)
contents = {
'head': '#### :zap: Note: this issue has been automatically opened.',
'trace': '```python\n%s\n```' % trace_raw,
'repr': '```\n%s\n```' % request_repr if has_repr\
else ('*%s*' % request_repr),
}
issue_exists, issue_id = GitlabIssuesHelper.check_issue(settings.GITLAB_PROJECT_ID, trace_raw)
if not issue_exists:
content = '{head}\n\n---\n\n{trace}\n\n---\n\n{repr}'.format(
head=contents['head'],
trace=contents['trace'],
repr=contents['repr'],
)
self.__open_issue(title, content, trace_raw)
elif issue_id:
self.__reopen_issue(issue_id)
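# Sketch of hooking this handler into a Django project's LOGGING settings
# (assumes the package is importable as gitlab_logging and that
# GITLAB_PROJECT_ID is configured; logger names and levels are only an example):
#
# LOGGING = {
#     'version': 1,
#     'handlers': {
#         'gitlab': {
#             'level': 'ERROR',
#             'class': 'gitlab_logging.handlers.GitlabIssuesHandler',
#         },
#     },
#     'loggers': {
#         'django.request': {
#             'handlers': ['gitlab'],
#             'level': 'ERROR',
#             'propagate': True,
#         },
#     },
# }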
| mit | 2,486,562,706,917,107,700 | 30.424658 | 102 | 0.551003 | false | 4.103757 | false | false | false |
cpieloth/GPGPU-on-Hadoop | hadoop_ocl_link_test/runTime/runtime.py | 1 | 1118 | #!/usr/bin/env python
# Python 3
import shlex
import subprocess
import re
import sys
import time
# read command line arguments
if len(sys.argv) < 4:  # script name plus three required arguments
    print('Usage: runtime.py <program> <outputfile> <value name>')
sys.exit(1)
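# Example invocation (illustrative names only):
#   ./runtime.py ./cl_bench results.dat runtime_ocl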
# Variables
PRG_NAME = sys.argv[1]
DATA_NAME = sys.argv[2]
VAL_NAME = sys.argv[3]
RUNS = 5 # TODO to set
SLEEP = 2
# Print information
print('Program:', PRG_NAME)
print('Run size:', RUNS)
print('Outputfile: ', DATA_NAME, sep='', end='\n')
# Open file
file = open(DATA_NAME, 'a')
# Run tests
print('Start:')
regEx = re.compile('.*time=(.*);.*')
# prepare command to start
command = PRG_NAME # TODO to set
print(' command:', command, end=' ')
args = shlex.split(command)
avgTime = 0
for run in range(0, RUNS):
p = subprocess.Popen(args, stdout=subprocess.PIPE)
p.wait()
    t = regEx.match(p.stdout.read().decode('utf-8', 'ignore'))  # decode the bytes output before matching
avgTime = avgTime + float(t.group(1))
print('.', end='')
time.sleep(SLEEP)
avgTime = avgTime/RUNS
print('done! Average time:', avgTime)
file.write(VAL_NAME + "\t" + str(avgTime) + '\n') # TODO to set
# Close file
file.close()
| apache-2.0 | -4,649,629,103,920,189,000 | 19.5 | 63 | 0.63059 | false | 2.674641 | false | false | false |