repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
zx8/youtube-dl | youtube_dl/extractor/dump.py | 120 | 1036 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class DumpIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?dump\.com/(?P<id>[a-zA-Z0-9]+)/'
_TEST = {
'url': 'http://www.dump.com/oneus/',
'md5': 'ad71704d1e67dfd9e81e3e8b42d69d99',
'info_dict': {
'id': 'oneus',
'ext': 'flv',
'title': "He's one of us.",
'thumbnail': 're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r's1.addVariable\("file",\s*"([^"]+)"', webpage, 'video URL')
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
}
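# --- Illustrative check (not part of the original extractor): how _VALID_URL
# yields the video id; the URL is an assumption used only for demonstration.
if __name__ == '__main__':
    _m = re.match(DumpIE._VALID_URL, 'http://www.dump.com/oneus/')
    assert _m is not None and _m.group('id') == 'oneus'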
| unlicense |
songfj/calibre | src/calibre/ebooks/markdown/extensions/toc.py | 46 | 8336 | """
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree
from .headerid import slugify, unique, itertext
import re
def order_toc_list(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
def build_correct(remaining_list, prev_elements=[{'level': 1000}]):
if not remaining_list:
return [], []
current = remaining_list.pop(0)
if not 'children' in current.keys():
current['children'] = []
if not prev_elements:
# This happens for instance with [8, 1, 1], ie. when some
# header level is outside a scope. We treat it as a
# top-level
next_elements, children = build_correct(remaining_list, [current])
current['children'].append(children)
return [current] + next_elements, []
prev_element = prev_elements.pop()
children = []
next_elements = []
# Is current part of the child list or next list?
if current['level'] > prev_element['level']:
#print "%d is a child of %d" % (current['level'], prev_element['level'])
prev_elements.append(prev_element)
prev_elements.append(current)
prev_element['children'].append(current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children += children2
next_elements += next_elements2
else:
#print "%d is ancestor of %d" % (current['level'], prev_element['level'])
if not prev_elements:
#print "No previous elements, so appending to the next set"
next_elements.append(current)
prev_elements = [current]
next_elements2, children2 = build_correct(remaining_list, prev_elements)
current['children'].extend(children2)
else:
#print "Previous elements, comparing to those first"
remaining_list.insert(0, current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children.extend(children2)
next_elements += next_elements2
return next_elements, children
ordered_list, __ = build_correct(toc_list)
return ordered_list
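# --- Illustrative check (not part of the original extension): the nesting
# behaviour described in the docstring above, on a minimal input.
if __name__ == '__main__':
    _flat = [{'level': 1}, {'level': 2}, {'level': 1}]
    _nested = order_toc_list(_flat)
    # the level-2 entry becomes a child of the first level-1 entry, while the
    # second level-1 entry stays a top-level sibling
    assert len(_nested) == 2 and len(_nested[0]['children']) == 1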
class TocTreeprocessor(Treeprocessor):
# Iterator wrapper to get parent and child all at once
def iterparent(self, root):
for parent in root.getiterator():
for child in parent:
yield parent, child
def add_anchor(self, c, elem_id): #@ReservedAssignment
if self.use_anchors:
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = "toclink"
c.text = ""
for elem in c.getchildren():
anchor.append(elem)
c.remove(elem)
c.append(anchor)
def build_toc_etree(self, div, toc_list):
# Add title to the div
if self.config["title"]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"]
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
return build_etree_ul(toc_list, div)
def run(self, doc):
div = etree.Element("div")
div.attrib["class"] = "toc"
header_rgx = re.compile("[Hh][123456]")
self.use_anchors = self.config["anchorlink"] in [1, '1', True, 'True', 'true']
# Get a list of id attributes
used_ids = set()
for c in doc.getiterator():
if "id" in c.attrib:
used_ids.add(c.attrib["id"])
toc_list = []
marker_found = False
for (p, c) in self.iterparent(doc):
text = ''.join(itertext(c)).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
# We do not allow the marker inside a header as that
# would cause an endless loop of placing a new TOC
# inside previously generated TOC.
if c.text and c.text.strip() == self.config["marker"] and \
not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
marker_found = True
if header_rgx.match(c.tag):
# Do not override pre-existing ids
if not "id" in c.attrib:
elem_id = unique(self.config["slugify"](text, '-'), used_ids)
c.attrib["id"] = elem_id
else:
elem_id = c.attrib["id"]
tag_level = int(c.tag[-1])
toc_list.append({'level': tag_level,
'id': elem_id,
'name': text})
self.add_anchor(c, elem_id)
toc_list_nested = order_toc_list(toc_list)
self.build_toc_etree(div, toc_list_nested)
prettify = self.markdown.treeprocessors.get('prettify')
if prettify: prettify.run(div)
if not marker_found:
# serialize and attach to markdown instance.
toc = self.markdown.serializer(div)
for pp in self.markdown.postprocessors.values():
toc = pp.run(toc)
self.markdown.toc = toc
class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, configs=[]):
self.config = { "marker" : ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify" : [slugify,
"Function to generate anchors based on header text-"
"Defaults to the headerid ext's slugify function."],
"title" : [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink" : [0,
"1 if header should be a self link"
"Defaults to 0"]}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
tocext = self.TreeProcessorClass(md)
tocext.config = self.getConfigs()
# Headerid ext is set to '>prettify'. With this set to '_end',
# it should always come after headerid ext (and honor ids assigned
# by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.add("toc", tocext, "_end")
def makeExtension(configs={}):
return TocExtension(configs=configs)
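# --- Illustrative sketch (not part of the original file): typical use of this
# extension through the markdown package it is bundled with; the import path
# and the sample text are assumptions.
if __name__ == '__main__':
    from calibre.ebooks.markdown import markdown
    _html = markdown('[TOC]\n\n# Heading\n\n## Subheading\n',
                     extensions=[makeExtension()])
    # the '[TOC]' paragraph is replaced by a <div class="toc"> holding a nested <ul>
    print(_html)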
| gpl-3.0 |
40423107/2017springcd_hw | theme/pelican-bootstrap3_local/static/glow/primitive.py | 161 | 4838 | from javascript import JSConstructor, JSObject
from .vector import vec
class primitive:
def __init__(self, prim, **kwargs):
for _key in kwargs.keys():
if isinstance(kwargs[_key], vec):
kwargs[_key]=kwargs[_key]._vec
self._prim=prim(kwargs)
def rotate(self, **kwargs):
if 'axis' in kwargs:
#for now lets assume axis is a vector
kwargs['axis']=kwargs['axis']._vec
self._prim.rotate(kwargs)
@property
def pos(self):
_v=vec()
_v._set_vec(self._prim.pos)
return _v
@pos.setter
def pos(self, value):
if isinstance(value, vec):
self._prim.pos=value._vec
else:
print("Error! pos must be a vector")
@property
def color(self):
_v=vec()
_v._set_vec(self._prim.color)
return _v
@color.setter
def color(self, value):
if isinstance(value, vec):
self._prim.color=value._vec
else:
print("Error! color must be a vec")
@property
def axis(self):
_v=vec()
_v._set_vec(self._prim.axis)
return _v
@axis.setter
def axis(self, value):
if isinstance(value, vec):
self._prim.axis=value._vec
else:
print("Error! axis must be a vec")
@property
def size(self):
return self._prim.size
@size.setter
def size(self, value):
self._prim.size=value
@property
def up(self):
_v=vec()
_v._set_vec(self._prim.up)
return _v
@up.setter
def up(self, value):
if isinstance(value, vec):
self._prim.up=value._vec
else:
print("Error! up must be a vec")
@property
def opacity(self):
return self._prim.opacity
@opacity.setter
def opacity(self, value):
self._prim.opacity=value
@property
def shininess(self):
return self._prim.shininess
@shininess.setter
def shininess(self, value):
self._prim.shininess=value
@property
def emissive(self):
return self._prim.emissive
@emissive.setter
def emissive(self, value):
self._prim.emissive=value
@property
def texture(self):
return self._prim.texture
@texture.setter
def texture(self, **kwargs):
self._prim.texture=kwargs
@property
def visible(self):
return self._prim.visible
@visible.setter
def visible(self, flag):
assert isinstance(flag, bool)
self._prim.visible=flag
class arrow(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.arrow), **kwargs)
class box(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.box), **kwargs)
class cone(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.cone), **kwargs)
class curve(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.curve), **kwargs)
def push(self, v):
if isinstance(v, vec):
self._prim.push(v._vec)
elif isinstance(v, dict):
for _key in v.keys():
if isinstance(_key, vec):
v[_key]=v[_key]._vec
self._prim.push(v)
def append(self, v):
self.push(v)
class cylinder(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.cylinder), **kwargs)
class helix(cylinder):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.helix), **kwargs)
class pyramid(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.pyramid), **kwargs)
#class ring(curve):
class sphere(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.sphere), **kwargs)
#triangle
#class triangle:
# def __init__(self, **kwargs):
# self._tri = JSConstructor(glowscript.triangle)(kwargs)
#vertex
#class vertex:
# def __init__(self, **kwargs):
# self._ver = JSConstructor(glowscript.vertex)(kwargs)
#quad
#compound
#class compound(box):
# def __init__(self, **kwargs):
# box.__init__(self, kwargs)
# I'm not sure if the declarations below are correct. Will fix later.
class distinct_light:
def __init__(self, **kwargs):
self._dl=JSConstructor(glowscript.distant_light)(kwargs)
class local_light:
def __init__(self, **kwargs):
self._ll=JSConstructor(glowscript.local_light)(kwargs)
class draw:
def __init__(self, **kwargs):
self._draw=JSConstructor(glowscript.draw)(kwargs)
class label:
def __init__(self, **kwargs):
self._label=JSConstructor(glowscript.label)(kwargs)
def attach_trail(object, **kwargs):
if isinstance(object, primitive):
JSObject(glowscript.attach_trail)(object._prim, kwargs)
else:
JSObject(glowscript.attach_trail)(object, kwargs)
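# --- Illustrative sketch (not part of the original module): intended use of the
# wrappers above inside a Brython/GlowScript page; it assumes the glowscript JS
# objects are present, so it is shown as comments rather than runnable code.
# b = box(pos=vec(0, 1, 0), color=vec(1, 0, 0))
# b.rotate(angle=0.1, axis=vec(0, 0, 1))
# attach_trail(b, color=vec(0, 1, 0))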
| agpl-3.0 |
Emergya/icm-openedx-educamadrid-platform-basic | cms/djangoapps/models/settings/course_grading.py | 143 | 8907 | from datetime import timedelta
from xmodule.modulestore.django import modulestore
class CourseGradingModel(object):
"""
Basically a DAO and Model combo for CRUD operations pertaining to grading policy.
"""
# Within this class, allow access to protected members of client classes.
# This comes up when accessing kvs data and caches during kvs saves and modulestore writes.
def __init__(self, course_descriptor):
self.graders = [
CourseGradingModel.jsonize_grader(i, grader) for i, grader in enumerate(course_descriptor.raw_grader)
] # weights transformed to ints [0..100]
self.grade_cutoffs = course_descriptor.grade_cutoffs
self.grace_period = CourseGradingModel.convert_set_grace_period(course_descriptor)
self.minimum_grade_credit = course_descriptor.minimum_grade_credit
@classmethod
def fetch(cls, course_key):
"""
Fetch the course grading policy for the given course from persistence and return a CourseGradingModel.
"""
descriptor = modulestore().get_course(course_key)
model = cls(descriptor)
return model
@staticmethod
def fetch_grader(course_key, index):
"""
Fetch the course's nth grader
Returns an empty dict if there's no such grader.
"""
descriptor = modulestore().get_course(course_key)
index = int(index)
if len(descriptor.raw_grader) > index:
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
# return empty model
else:
return {"id": index,
"type": "",
"min_count": 0,
"drop_count": 0,
"short_label": None,
"weight": 0
}
@staticmethod
def update_from_json(course_key, jsondict, user):
"""
Decode the json into CourseGradingModel and save any changes. Returns the modified model.
Probably not the usual path for updates as it's too coarse grained.
"""
descriptor = modulestore().get_course(course_key)
graders_parsed = [CourseGradingModel.parse_grader(jsonele) for jsonele in jsondict['graders']]
descriptor.raw_grader = graders_parsed
descriptor.grade_cutoffs = jsondict['grade_cutoffs']
modulestore().update_item(descriptor, user.id)
CourseGradingModel.update_grace_period_from_json(course_key, jsondict['grace_period'], user)
CourseGradingModel.update_minimum_grade_credit_from_json(course_key, jsondict['minimum_grade_credit'], user)
return CourseGradingModel.fetch(course_key)
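# --- Illustrative sketch (not part of the original class): the shape of the
# `jsondict` payload consumed by update_from_json above; the concrete values
# are assumptions, only the keys are taken from the code.
# {
#     "graders": [{"type": "Homework", "min_count": 12, "drop_count": 2,
#                  "short_label": "HW", "weight": 15}],
#     "grade_cutoffs": {"Pass": 0.5},
#     "grace_period": {"hours": 0, "minutes": 30},
#     "minimum_grade_credit": 0.8
# }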
@staticmethod
def update_grader_from_json(course_key, grader, user):
"""
Create or update the grader of the given type (string key) for the given course. Returns the modified
grader which is a full model on the client but not on the server (just a dict)
"""
descriptor = modulestore().get_course(course_key)
# parse removes the id; so, grab it before parse
index = int(grader.get('id', len(descriptor.raw_grader)))
grader = CourseGradingModel.parse_grader(grader)
if index < len(descriptor.raw_grader):
descriptor.raw_grader[index] = grader
else:
descriptor.raw_grader.append(grader)
modulestore().update_item(descriptor, user.id)
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
@staticmethod
def update_cutoffs_from_json(course_key, cutoffs, user):
"""
Create or update the grade cutoffs for the given course. Returns sent in cutoffs (ie., no extra
db fetch).
"""
descriptor = modulestore().get_course(course_key)
descriptor.grade_cutoffs = cutoffs
modulestore().update_item(descriptor, user.id)
return cutoffs
@staticmethod
def update_grace_period_from_json(course_key, graceperiodjson, user):
"""
Update the course's default grace period. Incoming dict is {hours: h, minutes: m} possibly as a
grace_period entry in an enclosing dict. It is also safe to call this method with a value of
None for graceperiodjson.
"""
descriptor = modulestore().get_course(course_key)
# Before a graceperiod has ever been created, it will be None (once it has been
# created, it cannot be set back to None).
if graceperiodjson is not None:
if 'grace_period' in graceperiodjson:
graceperiodjson = graceperiodjson['grace_period']
grace_timedelta = timedelta(**graceperiodjson)
descriptor.graceperiod = grace_timedelta
modulestore().update_item(descriptor, user.id)
@staticmethod
def update_minimum_grade_credit_from_json(course_key, minimum_grade_credit, user):
"""Update the course's default minimum grade requirement for credit.
Args:
course_key(CourseKey): The course identifier
minimum_grade_credit (float): Minimum grade value
user(User): The user object
"""
descriptor = modulestore().get_course(course_key)
# 'minimum_grade_credit' cannot be set to None
if minimum_grade_credit is not None:
minimum_grade_credit = minimum_grade_credit
descriptor.minimum_grade_credit = minimum_grade_credit
modulestore().update_item(descriptor, user.id)
@staticmethod
def delete_grader(course_key, index, user):
"""
Delete the grader of the given type from the given course.
"""
descriptor = modulestore().get_course(course_key)
index = int(index)
if index < len(descriptor.raw_grader):
del descriptor.raw_grader[index]
# force propagation to definition
descriptor.raw_grader = descriptor.raw_grader
modulestore().update_item(descriptor, user.id)
@staticmethod
def delete_grace_period(course_key, user):
"""
Delete the course's grace period.
"""
descriptor = modulestore().get_course(course_key)
del descriptor.graceperiod
modulestore().update_item(descriptor, user.id)
@staticmethod
def get_section_grader_type(location):
descriptor = modulestore().get_item(location)
return {
"graderType": descriptor.format if descriptor.format is not None else 'notgraded',
"location": unicode(location),
}
@staticmethod
def update_section_grader_type(descriptor, grader_type, user):
if grader_type is not None and grader_type != u'notgraded':
descriptor.format = grader_type
descriptor.graded = True
else:
del descriptor.format
del descriptor.graded
modulestore().update_item(descriptor, user.id)
return {'graderType': grader_type}
@staticmethod
def convert_set_grace_period(descriptor):
# 5 hours 59 minutes 59 seconds => converted to iso format
rawgrace = descriptor.graceperiod
if rawgrace:
hours_from_days = rawgrace.days * 24
seconds = rawgrace.seconds
hours_from_seconds = int(seconds / 3600)
hours = hours_from_days + hours_from_seconds
seconds -= hours_from_seconds * 3600
minutes = int(seconds / 60)
seconds -= minutes * 60
graceperiod = {'hours': 0, 'minutes': 0, 'seconds': 0}
if hours > 0:
graceperiod['hours'] = hours
if minutes > 0:
graceperiod['minutes'] = minutes
if seconds > 0:
graceperiod['seconds'] = seconds
return graceperiod
else:
return None
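# --- Illustrative note (not part of the original class): with the logic above,
# a graceperiod of timedelta(days=1, seconds=7199) (i.e. 25 h 59 min 59 s)
# is returned as {'hours': 25, 'minutes': 59, 'seconds': 59}.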
@staticmethod
def parse_grader(json_grader):
# manual to clear out cruft
result = {"type": json_grader["type"],
"min_count": int(json_grader.get('min_count', 0)),
"drop_count": int(json_grader.get('drop_count', 0)),
"short_label": json_grader.get('short_label', None),
"weight": float(json_grader.get('weight', 0)) / 100.0
}
return result
@staticmethod
def jsonize_grader(i, grader):
# Warning: converting weight to integer might give unwanted results due
# to the reason how floating point arithmetic works
# e.g, "0.29 * 100 = 28.999999999999996"
return {
"id": i,
"type": grader["type"],
"min_count": grader.get('min_count', 0),
"drop_count": grader.get('drop_count', 0),
"short_label": grader.get('short_label', ""),
"weight": grader.get('weight', 0) * 100,
}
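# --- Illustrative note (not part of the original class): the rounding issue
# mentioned in the comment above, and one way a caller could guard against it:
# >>> 0.29 * 100
# 28.999999999999996
# >>> int(round(0.29 * 100))
# 29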
| agpl-3.0 |
HBEE/odoo-addons | stock_picking_locations/__openerp__.py | 3 | 1744 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
{
'name': 'Stock Picking Locations',
'version': '8.0.1.0.0',
'category': 'Warehouse Management',
'sequence': 14,
'summary': '',
'description': """
Stock Picking Locations
=======================
Add Location and Destination Location to stock picking. When stock moves are
created they are taken by default.
Add a button to stock picking to update the stock move Location and Destination
Location.
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'stock',
],
'data': [
'stock_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
quantumlib/OpenFermion | src/openfermion/testing/lih_integration_test.py | 1 | 6590 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests many modules to compute energy of LiH."""
import os
import unittest
import numpy
from openfermion.config import DATA_DIRECTORY
from openfermion.chem import MolecularData
from openfermion.transforms.opconversions import (get_fermion_operator,
normal_ordered, jordan_wigner,
reverse_jordan_wigner)
from openfermion.transforms.repconversions import freeze_orbitals
from openfermion.measurements import get_interaction_rdm
from openfermion.linalg import get_sparse_operator, get_ground_state
from openfermion.linalg.sparse_tools import (expectation, jw_hartree_fock_state,
get_density_matrix)
from openfermion.utils.operator_utils import count_qubits
class LiHIntegrationTest(unittest.TestCase):
def setUp(self):
# Set up molecule.
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))]
basis = 'sto-3g'
multiplicity = 1
filename = os.path.join(DATA_DIRECTORY, 'H1-Li1_sto-3g_singlet_1.45')
self.molecule = MolecularData(geometry,
basis,
multiplicity,
filename=filename)
self.molecule.load()
# Get molecular Hamiltonian
self.molecular_hamiltonian = self.molecule.get_molecular_hamiltonian()
self.molecular_hamiltonian_no_core = (
self.molecule.get_molecular_hamiltonian(
occupied_indices=[0],
active_indices=range(1, self.molecule.n_orbitals)))
# Get FCI RDM.
self.fci_rdm = self.molecule.get_molecular_rdm(use_fci=1)
# Get explicit coefficients.
self.nuclear_repulsion = self.molecular_hamiltonian.constant
self.one_body = self.molecular_hamiltonian.one_body_tensor
self.two_body = self.molecular_hamiltonian.two_body_tensor
# Get fermion Hamiltonian.
self.fermion_hamiltonian = normal_ordered(
get_fermion_operator(self.molecular_hamiltonian))
# Get qubit Hamiltonian.
self.qubit_hamiltonian = jordan_wigner(self.fermion_hamiltonian)
# Get explicit coefficients.
self.nuclear_repulsion = self.molecular_hamiltonian.constant
self.one_body = self.molecular_hamiltonian.one_body_tensor
self.two_body = self.molecular_hamiltonian.two_body_tensor
# Get matrix form.
self.hamiltonian_matrix = get_sparse_operator(
self.molecular_hamiltonian)
self.hamiltonian_matrix_no_core = get_sparse_operator(
self.molecular_hamiltonian_no_core)
def test_all(self):
# Test reverse Jordan-Wigner.
fermion_hamiltonian = reverse_jordan_wigner(self.qubit_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test mapping to interaction operator.
fermion_hamiltonian = get_fermion_operator(self.molecular_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test RDM energy.
fci_rdm_energy = self.nuclear_repulsion
fci_rdm_energy += numpy.sum(self.fci_rdm.one_body_tensor *
self.one_body)
fci_rdm_energy += numpy.sum(self.fci_rdm.two_body_tensor *
self.two_body)
self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
# Confirm expectation on qubit Hamiltonian using reverse JW matches.
qubit_rdm = self.fci_rdm.get_qubit_expectations(self.qubit_hamiltonian)
qubit_energy = 0.0
for term, coefficient in qubit_rdm.terms.items():
qubit_energy += coefficient * self.qubit_hamiltonian.terms[term]
self.assertAlmostEqual(qubit_energy, self.molecule.fci_energy)
# Confirm fermionic RDMs can be built from measured qubit RDMs.
new_fermi_rdm = get_interaction_rdm(qubit_rdm)
new_fermi_rdm.expectation(self.molecular_hamiltonian)
self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
# Test sparse matrices.
energy, wavefunction = get_ground_state(self.hamiltonian_matrix)
self.assertAlmostEqual(energy, self.molecule.fci_energy)
expected_energy = expectation(self.hamiltonian_matrix, wavefunction)
self.assertAlmostEqual(expected_energy, energy)
# Make sure you can reproduce Hartree-Fock energy.
hf_state = jw_hartree_fock_state(self.molecule.n_electrons,
count_qubits(self.qubit_hamiltonian))
hf_density = get_density_matrix([hf_state], [1.])
expected_hf_density_energy = expectation(self.hamiltonian_matrix,
hf_density)
expected_hf_energy = expectation(self.hamiltonian_matrix, hf_state)
self.assertAlmostEqual(expected_hf_energy, self.molecule.hf_energy)
self.assertAlmostEqual(expected_hf_density_energy,
self.molecule.hf_energy)
# Check that frozen core result matches frozen core FCI from psi4.
# Record frozen core result from external calculation.
self.frozen_core_fci_energy = -7.8807607374168
no_core_fci_energy = numpy.linalg.eigh(
self.hamiltonian_matrix_no_core.toarray())[0][0]
self.assertAlmostEqual(no_core_fci_energy, self.frozen_core_fci_energy)
# Check that the freeze_orbitals function has the same effect as the
# as the occupied_indices option of get_molecular_hamiltonian.
frozen_hamiltonian = freeze_orbitals(
get_fermion_operator(self.molecular_hamiltonian), [0, 1])
self.assertTrue(frozen_hamiltonian == get_fermion_operator(
self.molecular_hamiltonian_no_core))
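# --- Illustrative sketch (not part of the original test): the same transform
# chain used above, applied to a small hand-built operator so it runs without
# the LiH data files; the hopping term is an assumption for demonstration.
if __name__ == '__main__':
    from openfermion.ops import FermionOperator
    hopping = FermionOperator('1^ 0', 1.0) + FermionOperator('0^ 1', 1.0)
    qubit_op = jordan_wigner(normal_ordered(hopping))
    sparse = get_sparse_operator(qubit_op)
    print(get_ground_state(sparse)[0])  # lowest eigenvalue of the toy operator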
| apache-2.0 |
kpayson64/grpc | examples/python/route_guide/route_guide_pb2_grpc.py | 55 | 4244 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import route_guide_pb2 as route__guide__pb2
class RouteGuideStub(object):
"""Interface exported by the server.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetFeature = channel.unary_unary(
'/routeguide.RouteGuide/GetFeature',
request_serializer=route__guide__pb2.Point.SerializeToString,
response_deserializer=route__guide__pb2.Feature.FromString,
)
self.ListFeatures = channel.unary_stream(
'/routeguide.RouteGuide/ListFeatures',
request_serializer=route__guide__pb2.Rectangle.SerializeToString,
response_deserializer=route__guide__pb2.Feature.FromString,
)
self.RecordRoute = channel.stream_unary(
'/routeguide.RouteGuide/RecordRoute',
request_serializer=route__guide__pb2.Point.SerializeToString,
response_deserializer=route__guide__pb2.RouteSummary.FromString,
)
self.RouteChat = channel.stream_stream(
'/routeguide.RouteGuide/RouteChat',
request_serializer=route__guide__pb2.RouteNote.SerializeToString,
response_deserializer=route__guide__pb2.RouteNote.FromString,
)
class RouteGuideServicer(object):
"""Interface exported by the server.
"""
def GetFeature(self, request, context):
"""A simple RPC.
Obtains the feature at a given position.
A feature with an empty name is returned if there's no feature at the given
position.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListFeatures(self, request, context):
"""A server-to-client streaming RPC.
Obtains the Features available within the given Rectangle. Results are
streamed rather than returned at once (e.g. in a response message with a
repeated field), as the rectangle may cover a large area and contain a
huge number of features.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RecordRoute(self, request_iterator, context):
"""A client-to-server streaming RPC.
Accepts a stream of Points on a route being traversed, returning a
RouteSummary when traversal is completed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RouteChat(self, request_iterator, context):
"""A Bidirectional streaming RPC.
Accepts a stream of RouteNotes sent while a route is being traversed,
while receiving other RouteNotes (e.g. from other users).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RouteGuideServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetFeature': grpc.unary_unary_rpc_method_handler(
servicer.GetFeature,
request_deserializer=route__guide__pb2.Point.FromString,
response_serializer=route__guide__pb2.Feature.SerializeToString,
),
'ListFeatures': grpc.unary_stream_rpc_method_handler(
servicer.ListFeatures,
request_deserializer=route__guide__pb2.Rectangle.FromString,
response_serializer=route__guide__pb2.Feature.SerializeToString,
),
'RecordRoute': grpc.stream_unary_rpc_method_handler(
servicer.RecordRoute,
request_deserializer=route__guide__pb2.Point.FromString,
response_serializer=route__guide__pb2.RouteSummary.SerializeToString,
),
'RouteChat': grpc.stream_stream_rpc_method_handler(
servicer.RouteChat,
request_deserializer=route__guide__pb2.RouteNote.FromString,
response_serializer=route__guide__pb2.RouteNote.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'routeguide.RouteGuide', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
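# --- Illustrative sketch (not part of the generated file): minimal client-side
# use of the stub above; the host, port and coordinates are assumptions.
if __name__ == '__main__':
    channel = grpc.insecure_channel('localhost:50051')
    stub = RouteGuideStub(channel)
    feature = stub.GetFeature(
        route__guide__pb2.Point(latitude=409146138, longitude=-746188906))
    print(feature.name)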
| apache-2.0 |
MSusik/invenio | invenio/modules/formatter/format_elements/bfe_date_rec.py | 39 | 1053 | ## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints date of the entry of the record in database
"""
__revision__ = "$Id$"
def format_element(bfo):
"""
Date of the entry of the record in the database
@see: date.py
"""
date = bfo.field('909C1c')
return date
| gpl-2.0 |
lmazuel/azure-sdk-for-python | azure-batch/azure/batch/models/pool_add_parameter.py | 1 | 12361 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolAddParameter(Model):
"""A pool in the Azure Batch service to add.
:param id: A string that uniquely identifies the pool within the account.
The ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. The
ID is case-preserving and case-insensitive (that is, you may not have two
pool IDs within an account that differ only by case).
:type id: str
:param display_name: The display name for the pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param vm_size: The size of virtual machines in the pool. All virtual
machines in a pool are the same size. For information about available
sizes of virtual machines for Cloud Services pools (pools created with
cloudServiceConfiguration), see Sizes for Cloud Services
(http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/).
Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and
A2V2. For information about available VM sizes for pools using images from
the Virtual Machines Marketplace (pools created with
virtualMachineConfiguration) see Sizes for Virtual Machines (Linux)
(https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/)
or Sizes for Virtual Machines (Windows)
(https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/).
Batch supports all Azure VM sizes except STANDARD_A0 and those with
premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the pool. This property and virtualMachineConfiguration are mutually
exclusive and one of the properties must be specified. This property
cannot be specified if the Batch account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration:
~azure.batch.models.CloudServiceConfiguration
:param virtual_machine_configuration: The virtual machine configuration
for the pool. This property and cloudServiceConfiguration are mutually
exclusive and one of the properties must be specified.
:type virtual_machine_configuration:
~azure.batch.models.VirtualMachineConfiguration
:param resize_timeout: The timeout for allocation of compute nodes to the
pool. This timeout applies only to manual scaling; it has no effect when
enableAutoScale is set to true. The default value is 15 minutes. The
minimum value is 5 minutes. If you specify a value less than 5 minutes,
the Batch service returns an error; if you are calling the REST API
directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param target_dedicated_nodes: The desired number of dedicated compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to true. If enableAutoScale is set to false, then you must set
either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
compute nodes in the pool. This property must not be specified if
enableAutoScale is set to true. If enableAutoScale is set to false, then
you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the pool size should automatically
adjust over time. If false, at least one of targetDedicateNodes and
targetLowPriorityNodes must be specified. If true, the autoScaleFormula
property is required and the pool automatically resizes according to the
formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to false. It is required if enableAutoScale is set to true. The
formula is checked for validity before the pool is created. If the formula
is not valid, the Batch service rejects the request with detailed error
information. For more information about specifying this formula, see
'Automatically scale compute nodes in an Azure Batch pool'
(https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum value are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service returns an error; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type auto_scale_evaluation_interval: timedelta
:param enable_inter_node_communication: Whether the pool permits direct
communication between nodes. Enabling inter-node communication limits the
maximum size of the pool due to deployment restrictions on the nodes of
the pool. This may result in the pool not reaching its desired size. The
default value is false.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the pool.
:type network_configuration: ~azure.batch.models.NetworkConfiguration
:param start_task: A task specified to run on each compute node as it
joins the pool. The task runs when the node is added to the pool or when
the node is restarted.
:type start_task: ~azure.batch.models.StartTask
:param certificate_references: The list of certificates to be installed on
each compute node in the pool. For Windows compute nodes, the Batch
service installs the certificates to the specified certificate store and
location. For Linux compute nodes, the certificates are stored in a
directory inside the task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this
location. For certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and certificates are placed in that directory.
:type certificate_references:
list[~azure.batch.models.CertificateReference]
:param application_package_references: The list of application packages to
be installed on each compute node in the pool.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param application_licenses: The list of application licenses the Batch
service will make available on each compute node in the pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
pool creation will fail.
:type application_licenses: list[str]
:param max_tasks_per_node: The maximum number of tasks that can run
concurrently on a single compute node in the pool. The default value is 1.
The maximum value of this setting depends on the size of the compute nodes
in the pool (the vmSize setting).
:type max_tasks_per_node: int
:param task_scheduling_policy: How tasks are distributed across compute
nodes in a pool.
:type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
:param user_accounts: The list of user accounts to be created on each node
in the pool.
:type user_accounts: list[~azure.batch.models.UserAccount]
:param metadata: A list of name-value pairs associated with the pool as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_validation = {
'id': {'required': True},
'vm_size': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, id, vm_size, display_name=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes=None, target_low_priority_nodes=None, enable_auto_scale=None, auto_scale_formula=None, auto_scale_evaluation_interval=None, enable_inter_node_communication=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node=None, task_scheduling_policy=None, user_accounts=None, metadata=None):
super(PoolAddParameter, self).__init__()
self.id = id
self.display_name = display_name
self.vm_size = vm_size
self.cloud_service_configuration = cloud_service_configuration
self.virtual_machine_configuration = virtual_machine_configuration
self.resize_timeout = resize_timeout
self.target_dedicated_nodes = target_dedicated_nodes
self.target_low_priority_nodes = target_low_priority_nodes
self.enable_auto_scale = enable_auto_scale
self.auto_scale_formula = auto_scale_formula
self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
self.enable_inter_node_communication = enable_inter_node_communication
self.network_configuration = network_configuration
self.start_task = start_task
self.certificate_references = certificate_references
self.application_package_references = application_package_references
self.application_licenses = application_licenses
self.max_tasks_per_node = max_tasks_per_node
self.task_scheduling_policy = task_scheduling_policy
self.user_accounts = user_accounts
self.metadata = metadata
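# --- Illustrative sketch (not part of the generated model): constructing the
# model with its two required fields plus some optional ones; the values are
# assumptions for demonstration.
# pool = PoolAddParameter(
#     id='mypool',
#     vm_size='STANDARD_D2_V2',
#     target_dedicated_nodes=2,
#     max_tasks_per_node=1)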
| mit |
UBERMALLOW/kernel_lge_hammerhead | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
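# --- Illustrative note (not part of the original script): what parseLine
# returns for one function-tracer line; the sample line is an assumption that
# follows the "<time>: <callee> <-<caller>" layout the regex expects.
# parseLine("bash-1234 [000] 6553.655723: update_curr <-task_tick_fair")
# # -> ('6553.655723', 'update_curr', 'task_tick_fair')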
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
achang97/YouTunes | lib/python2.7/site-packages/pyasn1_modules/rfc2560.py | 5 | 8307 | #
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
# OCSP request/response syntax
#
# Derived from a minimal OCSP library (RFC2560) code written by
# Bud P. Bruegger <[email protected]>
# Copyright: Ancitel, S.p.a, Rome, Italy
# License: BSD
#
#
# current limitations:
# * request and response works only for a single certificate
# * only some values are parsed out of the response
# * the request doesn't set a nonce or a signature
# * there is no signature validation of the response
# * dates are left as strings in GeneralizedTime format -- datetime.datetime
# would be nicer
#
from pyasn1.type import tag, namedtype, namedval, univ, useful
from pyasn1_modules import rfc2459
# Start of OCSP module definitions
# This should be in directory Authentication Framework (X.509) module
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8),
('privilegeWithdrawn', 9),
('aACompromise', 10)
)
# end of directory Authentication Framework (X.509) module
# This should be in PKIX Certificate Extensions module
class GeneralName(univ.OctetString):
pass
# end of PKIX Certificate Extensions module
id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
class AcceptableResponses(univ.SequenceOf):
componentType = univ.ObjectIdentifier()
class ArchiveCutoff(useful.GeneralizedTime):
pass
class UnknownInfo(univ.Null):
pass
class RevokedInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class CertID(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('issuerNameHash', univ.OctetString()),
namedtype.NamedType('issuerKeyHash', univ.OctetString()),
namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
)
class CertStatus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('good',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('revoked',
RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('unknown',
UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class SingleResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certID', CertID()),
namedtype.NamedType('certStatus', CertStatus()),
namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class KeyHash(univ.OctetString):
pass
class ResponderID(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('byName',
rfc2459.Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('byKey',
KeyHash().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class Version(univ.Integer):
namedValues = namedval.NamedValues(('v1', 0))
class ResponseData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('responderID', ResponderID()),
namedtype.NamedType('producedAt', useful.GeneralizedTime()),
namedtype.NamedType('responses', univ.SequenceOf(componentType=SingleResponse())),
namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class BasicOCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsResponseData', ResponseData()),
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class ResponseBytes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseType', univ.ObjectIdentifier()),
namedtype.NamedType('response', univ.OctetString())
)
class OCSPResponseStatus(univ.Enumerated):
namedValues = namedval.NamedValues(
('successful', 0),
('malformedRequest', 1),
('internalError', 2),
('tryLater', 3),
('undefinedStatus', 4), # should never occur
('sigRequired', 5),
('unauthorized', 6)
)
class OCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseStatus', OCSPResponseStatus()),
namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Request(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('reqCert', CertID()),
namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Signature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class TBSRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('requestList', univ.SequenceOf(componentType=Request())),
namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class OCSPRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsRequest', TBSRequest()),
namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
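# --- Illustrative sketch (not part of the original module): decoding a
# DER-encoded request against these specs; `der_bytes` is an assumed
# placeholder for real OCSP request bytes.
# from pyasn1.codec.der import decoder
# ocsp_request, _rest = decoder.decode(der_bytes, asn1Spec=OCSPRequest())
# print(ocsp_request['tbsRequest']['requestList'][0]['reqCert']['serialNumber'])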
| mit |
cosmicAsymmetry/zulip | zerver/tests/test_tutorial.py | 32 | 2671 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Dict
from zerver.lib.test_helpers import (
get_user_profile_by_email,
most_recent_message,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import (
UserProfile,
)
import ujson
def fix_params(raw_params):
# type: (Dict[str, Any]) -> Dict[str, str]
# A few of our legacy endpoints need their
# individual parameters serialized as JSON.
return {k: ujson.dumps(v) for k, v in raw_params.items()}
class TutorialTests(ZulipTestCase):
def test_send_message(self):
# type: () -> None
email = '[email protected]'
user = get_user_profile_by_email(email)
self.login(email)
welcome_bot = get_user_profile_by_email("[email protected]")
raw_params = dict(
type='stream',
recipient='Denmark',
topic='welcome',
content='hello'
)
params = fix_params(raw_params)
result = self.client_post("/json/tutorial_send_message", params)
self.assert_json_success(result)
message = most_recent_message(user)
self.assertEqual(message.content, 'hello')
self.assertEqual(message.sender, welcome_bot)
# now test some error cases
result = self.client_post("/json/tutorial_send_message", {})
self.assert_json_error(result, "Missing 'type' argument")
result = self.client_post("/json/tutorial_send_message", raw_params)
self.assert_json_error(result, 'argument "type" is not valid json.')
raw_params = dict(
type='INVALID',
recipient='Denmark',
topic='welcome',
content='hello'
)
params = fix_params(raw_params)
result = self.client_post("/json/tutorial_send_message", params)
self.assert_json_error(result, 'Bad data passed in to tutorial_send_message')
def test_tutorial_status(self):
# type: () -> None
email = '[email protected]'
self.login(email)
cases = [
('started', UserProfile.TUTORIAL_STARTED),
('finished', UserProfile.TUTORIAL_FINISHED),
]
for incoming_status, expected_db_status in cases:
raw_params = dict(status=incoming_status)
params = fix_params(raw_params)
result = self.client_post('/json/tutorial_status', params)
self.assert_json_success(result)
user = get_user_profile_by_email(email)
self.assertEqual(user.tutorial_status, expected_db_status)
| apache-2.0 |
cmelange/ansible | lib/ansible/plugins/action/script.py | 22 | 3738 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if self._play_context.check_mode:
result['skipped'] = True
result['msg'] = 'check mode not supported for this module'
return result
if not tmp:
tmp = self._make_tmp_path()
creates = self._task.args.get('creates')
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
if self._remote_file_exists(creates):
self._remove_tmp_path(tmp)
return dict(skipped=True, msg=("skipped, since %s exists" % creates))
removes = self._task.args.get('removes')
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not self._remote_file_exists(removes):
self._remove_tmp_path(tmp)
return dict(skipped=True, msg=("skipped, since %s does not exist" % removes))
# the script name is the first item in the raw params, so we split it
# out now so we know the file name we need to transfer to the remote,
# and everything else is an argument to the script which we need later
# to append to the remote command
parts = self._task.args.get('_raw_params', '').strip().split()
source = parts[0]
args = ' '.join(parts[1:])
try:
source = self._loader.get_real_file(self._find_needle('files', source))
except AnsibleError as e:
return dict(failed=True, msg=to_native(e))
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source))
self._transfer_file(source, tmp_src)
# set file permissions, more permissive when the copy is done as a different user
self._fixup_perms2((tmp, tmp_src), execute=True)
# add preparation steps to one ssh roundtrip executing the script
env_string = self._compute_environment_string()
script_cmd = ' '.join([env_string, tmp_src, args])
script_cmd = self._connection._shell.wrap_for_exec(script_cmd)
result.update(self._low_level_execute_command(cmd=script_cmd, sudoable=True))
# clean up after
self._remove_tmp_path(tmp)
result['changed'] = True
return result
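# Illustrative playbook usage (not part of this module; the script path and
# marker file below are hypothetical). The creates=/removes= arguments map to
# the idempotence checks implemented above:
#
#   - name: run an install script only once
#     script: files/install.sh --prefix=/opt/tool creates=/opt/tool/.installed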
| gpl-3.0 |
nmercier/linux-cross-gcc | linux/lib/python2.7/dist-packages/numpy/lib/tests/test_nanfunctions.py | 21 | 28230 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
assert_raises, assert_array_equal
)
# Test data
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
np.array([0.1042, -0.5954]),
np.array([0.1610, 0.1859, 0.3146])]
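def _example_nan_reductions():
    # Illustrative sketch (not part of the upstream test suite): the nan-aware
    # reductions exercised below simply skip NaN entries, e.g.
    row = np.array([1.0, np.nan, 3.0])
    assert np.nanmean(row) == 2.0
    assert np.nansum(row) == 4.0
    assert np.nanmax(row) == 3.0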
class TestNanFunctions_MinMax(TestCase):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalars
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
# check that rows of nan are dealt with for subclasses (#4628)
mat[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
and not np.isnan(res[2, 0]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat)
assert_(np.isscalar(res))
                assert_(not np.isnan(res))
assert_(len(w) == 0)
class TestNanFunctions_ArgminArgmax(TestCase):
nanfuncs = [np.nanargmin, np.nanargmax]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_result_values(self):
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
for row in _ndat:
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
ind = f(row)
val = row[ind]
# comparing with NaN is tricky as the result
# is always false except for NaN != NaN
assert_(not np.isnan(val))
assert_(not fcmp(val, row).any())
assert_(not np.equal(val, row[:ind]).any())
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
assert_raises(ValueError, f, mat, axis=axis)
assert_raises(ValueError, f, np.nan)
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
assert_raises(ValueError, f, mat, axis=axis)
for axis in [1]:
res = f(mat, axis=axis)
assert_equal(res, np.zeros(0))
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_IntTypes(TestCase):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
mat = np.array([127, 39, 93, 87, 46])
def integer_arrays(self):
for dtype in self.int_types:
yield self.mat.astype(dtype)
def test_nanmin(self):
tgt = np.min(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmin(mat), tgt)
def test_nanmax(self):
tgt = np.max(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmax(mat), tgt)
def test_nanargmin(self):
tgt = np.argmin(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmin(mat), tgt)
def test_nanargmax(self):
tgt = np.argmax(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmax(mat), tgt)
def test_nansum(self):
tgt = np.sum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nansum(mat), tgt)
def test_nanprod(self):
tgt = np.prod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanprod(mat), tgt)
def test_nanmean(self):
tgt = np.mean(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmean(mat), tgt)
def test_nanvar(self):
tgt = np.var(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat), tgt)
tgt = np.var(mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat, ddof=1), tgt)
def test_nanstd(self):
tgt = np.std(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat), tgt)
tgt = np.std(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat, ddof=1), tgt)
class SharedNanFunctionsTestsMixin(object):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_char(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
tgt = rf(mat, dtype=c, axis=1).dtype.type
res = nf(mat, dtype=c, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=c, axis=None).dtype.type
res = nf(mat, dtype=c, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
def test_allnans(self):
# Check for FutureWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = np.nansum([np.nan]*3, axis=None)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check scalar
res = np.nansum(np.nan)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check there is no warning for not all-nan
np.nansum([0]*3, axis=None)
assert_(len(w) == 0, 'unwanted warning raised')
def test_empty(self):
for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
mat = np.zeros((0, 3))
tgt = [tgt_value]*3
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = []
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = tgt_value
res = f(mat, axis=None)
assert_equal(res, tgt)
class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
def test_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
def test_ddof(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in [0, 1]:
tgt = [rf(d, ddof=ddof) for d in _rdat]
res = nf(_ndat, axis=1, ddof=ddof)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
dsize = [len(d) for d in _rdat]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in range(5):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tgt = [ddof >= d for d in dsize]
res = nf(_ndat, axis=1, ddof=ddof)
assert_equal(np.isnan(res), tgt)
if any(tgt):
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
else:
assert_(len(w) == 0)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 2)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
class TestNanFunctions_Median(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanmedian(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', RuntimeWarning)
res = np.nanmedian(d, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanmedian(d, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.median(mat, axis=1)
res = np.nanmedian(nan_mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.median(mat, axis=None)
res = np.nanmedian(nan_mat, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_small_large(self):
# test the small and large code paths, current cutoff 400 elements
for s in [5, 20, 51, 200, 1000]:
d = np.random.randn(4, s)
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
nonan = np.compress(~np.isnan(x), x)
tgt.append(np.median(nonan, overwrite_input=True))
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
def test_result_values(self):
tgt = [np.median(d) for d in _rdat]
res = np.nanmedian(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
warnings.simplefilter('ignore', FutureWarning)
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
if axis is None:
assert_(len(w) == 1)
else:
assert_(len(w) == 3)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(np.nanmedian(np.nan)))
if axis is None:
assert_(len(w) == 2)
else:
assert_(len(w) == 4)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanmedian(0.) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.nanmedian, d, axis=-5)
assert_raises(IndexError, np.nanmedian, d, axis=(0, -5))
assert_raises(IndexError, np.nanmedian, d, axis=4)
assert_raises(IndexError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
def test_float_special(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore', RuntimeWarning)
a = np.array([[np.inf, np.nan], [np.nan, np.nan]])
assert_equal(np.nanmedian(a, axis=0), [np.inf, np.nan])
assert_equal(np.nanmedian(a, axis=1), [np.inf, np.nan])
assert_equal(np.nanmedian(a), np.inf)
# minimum fill value check
a = np.array([[np.nan, np.nan, np.inf], [np.nan, np.nan, np.inf]])
assert_equal(np.nanmedian(a, axis=1), np.inf)
# no mask path
a = np.array([[np.inf, np.inf], [np.inf, np.inf]])
assert_equal(np.nanmedian(a, axis=1), np.inf)
class TestNanFunctions_Percentile(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanpercentile(ndat, 30)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.percentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
res = np.nanpercentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', RuntimeWarning)
res = np.nanpercentile(d, 90, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.percentile(mat, 42, axis=1)
res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.percentile(mat, 42, axis=None)
res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_result_values(self):
tgt = [np.percentile(d, 28) for d in _rdat]
res = np.nanpercentile(_ndat, 28, axis=1)
assert_almost_equal(res, tgt)
# Transpose the array to fit the output convention of numpy.percentile
tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
res = np.nanpercentile(_ndat, (28, 98), axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all())
if axis is None:
assert_(len(w) == 1)
else:
assert_(len(w) == 3)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(np.nanpercentile(np.nan, 60)))
if axis is None:
assert_(len(w) == 2)
else:
assert_(len(w) == 4)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanpercentile(0., 100) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=-5)
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, -5))
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=4)
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4))
assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
def test_multiple_percentiles(self):
perc = [50, 100]
mat = np.ones((4, 3))
nan_mat = np.nan * mat
# For checking consistency in higher dimensional case
large_mat = np.ones((3, 4, 5))
large_mat[:, 0:2:4, :] = 0
large_mat[:, :, 3:] *= 2
for axis in [None, 0, 1]:
for keepdim in [False, True]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val.shape, val.shape)
val = np.percentile(large_mat, perc, axis=axis,
keepdims=keepdim)
nan_val = np.nanpercentile(large_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val, val)
megamat = np.ones((3, 4, 5, 6))
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
epssy/hue | desktop/core/ext-py/python-openid-2.2.5/openid/test/cryptutil.py | 87 | 2961 | import sys
import random
import os.path
from openid import cryptutil
# Most of the purpose of this test is to make sure that cryptutil can
# find a good source of randomness on this machine.
def test_cryptrand():
# It's possible, but HIGHLY unlikely that a correct implementation
# will fail by returning the same number twice
s = cryptutil.getBytes(32)
t = cryptutil.getBytes(32)
assert len(s) == 32
assert len(t) == 32
assert s != t
a = cryptutil.randrange(2L ** 128)
b = cryptutil.randrange(2L ** 128)
assert type(a) is long
assert type(b) is long
assert b != a
# Make sure that we can generate random numbers that are larger
# than platform int size
cryptutil.randrange(long(sys.maxint) + 1L)
def test_reversed():
if hasattr(cryptutil, 'reversed'):
cases = [
('', ''),
('a', 'a'),
('ab', 'ba'),
('abc', 'cba'),
('abcdefg', 'gfedcba'),
([], []),
([1], [1]),
([1,2], [2,1]),
([1,2,3], [3,2,1]),
(range(1000), range(999, -1, -1)),
]
for case, expected in cases:
expected = list(expected)
actual = list(cryptutil.reversed(case))
assert actual == expected, (case, expected, actual)
twice = list(cryptutil.reversed(actual))
assert twice == list(case), (actual, case, twice)
def test_binaryLongConvert():
MAX = sys.maxint
for iteration in xrange(500):
n = 0L
for i in range(10):
n += long(random.randrange(MAX))
s = cryptutil.longToBinary(n)
assert type(s) is str
n_prime = cryptutil.binaryToLong(s)
assert n == n_prime, (n, n_prime)
cases = [
('\x00', 0L),
('\x01', 1L),
('\x7F', 127L),
('\x00\xFF', 255L),
('\x00\x80', 128L),
('\x00\x81', 129L),
('\x00\x80\x00', 32768L),
('OpenID is cool', 1611215304203901150134421257416556L)
]
for s, n in cases:
n_prime = cryptutil.binaryToLong(s)
s_prime = cryptutil.longToBinary(n)
assert n == n_prime, (s, n, n_prime)
assert s == s_prime, (n, s, s_prime)
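def _example_roundtrip():
    # Illustrative only, reusing a fixed case from test_binaryLongConvert
    # above: longToBinary and binaryToLong are inverses of each other.
    assert cryptutil.longToBinary(255L) == '\x00\xFF'
    assert cryptutil.binaryToLong('\x00\xFF') == 255L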
def test_longToBase64():
f = file(os.path.join(os.path.dirname(__file__), 'n2b64'))
try:
for line in f:
parts = line.strip().split(' ')
assert parts[0] == cryptutil.longToBase64(long(parts[1]))
finally:
f.close()
def test_base64ToLong():
f = file(os.path.join(os.path.dirname(__file__), 'n2b64'))
try:
for line in f:
parts = line.strip().split(' ')
assert long(parts[1]) == cryptutil.base64ToLong(parts[0])
finally:
f.close()
def test():
test_reversed()
test_binaryLongConvert()
test_cryptrand()
test_longToBase64()
test_base64ToLong()
if __name__ == '__main__':
test()
| apache-2.0 |
kingvuplus/gui_test3 | lib/python/Plugins/Extensions/DVDBurn/ProjectSettings.py | 34 | 10923 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.FileList import FileList
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS, SCOPE_FONTS, SCOPE_HDD
from Components.config import config, getConfigListEntry
from Components.ConfigList import ConfigListScreen
class FileBrowser(Screen, HelpableScreen):
def __init__(self, session, scope, configRef):
Screen.__init__(self, session)
# for the skin: first try FileBrowser_DVDBurn, then FileBrowser, this allows individual skinning
self.skinName = ["FileBrowser_DVDBurn", "FileBrowser" ]
HelpableScreen.__init__(self)
self.scope = scope
pattern = ""
self.configRef = configRef
currDir = "/"
if self.scope == "project":
currDir = self.getDir()
pattern = "(?i)^.*\.(ddvdp\.xml)"
elif self.scope == "menutemplate":
currDir = self.getDir()
pattern = "(?i)^.*\.(ddvdm\.xml)"
if self.scope == "menubg":
currDir = self.getDir(configRef.value)
pattern = "(?i)^.*\.(jpeg|jpg|jpe|png|bmp)"
elif self.scope == "menuaudio":
currDir = self.getDir(configRef.value)
pattern = "(?i)^.*\.(mp2|m2a|ac3)"
elif self.scope == "vmgm":
currDir = self.getDir(configRef.value)
pattern = "(?i)^.*\.(mpg|mpeg)"
elif self.scope == "font_face":
currDir = self.getDir(configRef.value, resolveFilename(SCOPE_FONTS))
pattern = "(?i)^.*\.(ttf)"
elif self.scope == "isopath":
currDir = configRef.value
elif self.scope == "image":
currDir = resolveFilename(SCOPE_HDD)
pattern = "(?i)^.*\.(iso)"
self.filelist = FileList(currDir, matchingPattern=pattern)
self["filelist"] = self.filelist
self["FilelistActions"] = ActionMap(["SetupActions"],
{
"save": self.ok,
"ok": self.ok,
"cancel": self.exit
})
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("DVD file browser"))
def getDir(self, currentVal=None, defaultDir=None):
if currentVal:
return (currentVal.rstrip("/").rsplit("/",1))[0]
return defaultDir or (resolveFilename(SCOPE_PLUGINS)+"Extensions/DVDBurn/")
def ok(self):
if self.filelist.canDescent():
self.filelist.descent()
if self.scope == "image":
path = self["filelist"].getCurrentDirectory() or ""
if fileExists(path+"VIDEO_TS"):
self.close(path,self.scope,self.configRef)
else:
ret = self["filelist"].getCurrentDirectory() + '/' + self["filelist"].getFilename()
self.close(ret,self.scope,self.configRef)
def exit(self):
if self.scope == "isopath":
self.close(self["filelist"].getCurrentDirectory(),self.scope,self.configRef)
self.close(None,False,None)
class ProjectSettings(Screen,ConfigListScreen):
skin = """
<screen name="ProjectSettings" position="center,center" size="560,440" title="Collection settings" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="config" position="5,50" size="550,276" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,350" zPosition="1" size="560,2" />
<widget source="info" render="Label" position="10,360" size="550,80" font="Regular;18" halign="center" valign="center" />
</screen>"""
def __init__(self, session, project = None):
Screen.__init__(self, session)
self.project = project
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText(_("Load"))
if config.usage.setup_level.index >= 2: # expert+
self["key_blue"] = StaticText(_("Save"))
else:
self["key_blue"] = StaticText()
if config.usage.setup_level.index >= 2: # expert+
infotext = _("Available format variables") + ":\n$i=" + _("Track") + ", $t=" + _("Title") + ", $d=" + _("Description") + ", $l=" + _("length") + ", $c=" + _("chapters") + ",\n" + _("Record") + " $T=" + _("Begin time") + ", $Y=" + _("Year") + ", $M=" + _("month") + ", $D=" + _("day") + ",\n$A=" + _("audio tracks") + ", $C=" + _("Channel") + ", $f=" + _("filename")
else:
infotext = ""
self["info"] = StaticText(infotext)
self.keydict = {}
self.settings = project.settings
ConfigListScreen.__init__(self, [])
self.initConfigList()
self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.exit,
"red": self.cancel,
"blue": self.saveProject,
"yellow": self.loadProject,
"cancel": self.cancel,
"ok": self.ok,
}, -2)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Collection settings"))
def changedConfigList(self):
key = self.keydict[self["config"].getCurrent()[1]]
if key == "authormode" or key == "output":
self.initConfigList()
def initConfigList(self):
authormode = self.settings.authormode.value
output = self.settings.output.value
self.list = []
self.list.append(getConfigListEntry(_("Collection name"), self.settings.name))
self.list.append(getConfigListEntry(_("Authoring mode"), self.settings.authormode))
self.list.append(getConfigListEntry(_("Output"), self.settings.output))
if output == "iso":
self.list.append(getConfigListEntry(_("ISO path"), self.settings.isopath))
if authormode.startswith("menu"):
self.list.append(getConfigListEntry(_("Menu")+' '+_("template file"), self.settings.menutemplate))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(getConfigListEntry(_("Menu")+' '+_("Title"), self.project.menutemplate.settings.titleformat))
self.list.append(getConfigListEntry(_("Menu")+' '+_("Subtitles"), self.project.menutemplate.settings.subtitleformat))
self.list.append(getConfigListEntry(_("Menu")+' '+_("background image"), self.project.menutemplate.settings.menubg))
self.list.append(getConfigListEntry(_("Menu")+' '+_("Language selection"), self.project.menutemplate.settings.menulang))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("headline")+' '+_("color"), self.settings.color_headline))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("text")+' '+_("color"), self.settings.color_button))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("highlighted button")+' '+_("color"), self.settings.color_highlight))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("font face"), self.settings.font_face))
#self.list.append(getConfigListEntry(_("Font size")+' ('+_("headline")+', '+_("Title")+', '+_("Subtitles")+')', self.settings.font_size))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("spaces (top, between rows, left)"), self.settings.space))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("Audio"), self.settings.menuaudio))
if config.usage.setup_level.index >= 2: # expert+
if authormode != "data_ts":
self.list.append(getConfigListEntry(_("Titleset mode"), self.settings.titlesetmode))
if self.settings.titlesetmode.value == "single" or authormode == "just_linked":
self.list.append(getConfigListEntry(_("VMGM (intro trailer)"), self.settings.vmgm))
else:
self.list.append(getConfigListEntry(_("DVD data format"), self.settings.dataformat))
self["config"].setList(self.list)
self.keydict = {}
for key, val in self.settings.dict().iteritems():
self.keydict[val] = key
for key, val in self.project.menutemplate.settings.dict().iteritems():
self.keydict[val] = key
def keyLeft(self):
ConfigListScreen.keyLeft(self)
key = self.keydict[self["config"].getCurrent()[1]]
if key == "authormode" or key == "output" or key=="titlesetmode":
self.initConfigList()
def keyRight(self):
ConfigListScreen.keyRight(self)
key = self.keydict[self["config"].getCurrent()[1]]
if key == "authormode" or key == "output" or key=="titlesetmode":
self.initConfigList()
def exit(self):
self.applySettings()
self.close(True)
def applySettings(self):
for x in self["config"].list:
x[1].save()
def ok(self):
key = self.keydict[self["config"].getCurrent()[1]]
from DVDProject import ConfigFilename
if type(self["config"].getCurrent()[1]) == ConfigFilename:
self.session.openWithCallback(self.FileBrowserClosed, FileBrowser, key, self["config"].getCurrent()[1])
def cancel(self):
self.close(False)
def loadProject(self):
self.session.openWithCallback(self.FileBrowserClosed, FileBrowser, "project", self.settings)
def saveProject(self):
if config.usage.setup_level.index >= 2: # expert+
self.applySettings()
ret = self.project.saveProject(resolveFilename(SCOPE_PLUGINS)+"Extensions/DVDBurn/")
		if ret: # treat a non-empty return value from saveProject as success
text = _("Save")+' '+_('OK')+':\n'+ret
self.session.open(MessageBox,text,type = MessageBox.TYPE_INFO)
else:
text = _("Save")+' '+_('Error')
self.session.open(MessageBox,text,type = MessageBox.TYPE_ERROR)
def FileBrowserClosed(self, path, scope, configRef):
if scope == "menutemplate":
if self.project.menutemplate.loadTemplate(path):
print "[ProjectSettings] menu template loaded"
configRef.setValue(path)
self.initConfigList()
else:
self.session.open(MessageBox,self.project.error,MessageBox.TYPE_ERROR)
elif scope == "project":
self.path = path
print "len(self.titles)", len(self.project.titles)
if len(self.project.titles):
self.session.openWithCallback(self.askLoadCB, MessageBox,text = _("Your current collection will get lost!") + "\n" + _("Do you want to restore your settings?"), type = MessageBox.TYPE_YESNO)
else:
self.askLoadCB(True)
elif scope:
configRef.setValue(path)
self.initConfigList()
def askLoadCB(self, answer):
if answer is not None and answer:
if self.project.loadProject(self.path):
self.initConfigList()
else:
self.session.open(MessageBox,self.project.error,MessageBox.TYPE_ERROR)
| gpl-2.0 |
alexlo03/ansible | contrib/inventory/vagrant.py | 28 | 4076 | #!/usr/bin/env python
"""
Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
returns it under the host group 'vagrant'
Example Vagrant configuration using this script:
config.vm.provision :ansible do |ansible|
ansible.playbook = "./provision/your_playbook.yml"
ansible.inventory_file = "./provision/inventory/vagrant.py"
ansible.verbose = true
end
"""
# Copyright (C) 2013 Mark Mandel <[email protected]>
# 2015 Igor Khomyakov <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the spacewalk.py inventory script for giving me the basic structure
# of this.
#
import sys
import os.path
import subprocess
import re
from paramiko import SSHConfig
from optparse import OptionParser
from collections import defaultdict
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import StringIO
_group = 'vagrant' # a default group
_ssh_to_ansible = [('user', 'ansible_ssh_user'),
('hostname', 'ansible_ssh_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_ssh_port')]
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
# get all the ssh configs for all boxes in an array of dictionaries.
def get_ssh_config():
return dict((k, get_a_ssh_config(k)) for k in list_running_boxes())
# list all the running boxes
def list_running_boxes():
output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n')
boxes = []
for line in output:
matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
if matcher:
boxes.append(matcher.group(1))
return boxes
# get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict')
config = SSHConfig()
config.parse(StringIO(output))
host_config = config.lookup(box_name)
# man 5 ssh_config:
# > It is possible to have multiple identity files ...
# > all these identities will be tried in sequence.
for id in host_config['identityfile']:
if os.path.isfile(id):
host_config['identityfile'] = id
return dict((v, host_config[k]) for k, v in _ssh_to_ansible)
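# Illustrative result of get_a_ssh_config() for a hypothetical box "web"
# (values depend entirely on the local Vagrant setup):
#
#   {'ansible_ssh_user': 'vagrant',
#    'ansible_ssh_host': '127.0.0.1',
#    'ansible_ssh_port': '2222',
#    'ansible_ssh_private_key_file': '.vagrant/machines/web/virtualbox/private_key'}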
# List out servers that vagrant has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
meta = defaultdict(dict)
for host in ssh_config:
meta['hostvars'][host] = ssh_config[host]
print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
print(json.dumps(get_a_ssh_config(options.host)))
sys.exit(0)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(0)
| gpl-3.0 |
mrgloom/menpofit | menpofit/_version.py | 5 | 15768 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "menpofit-"
cfg.versionfile_source = "menpofit/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
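def _example_render_pep440():
    # Illustrative sketch (not part of versioneer; tag and hash are made up):
    # a clean tagged checkout keeps just the tag, while extra commits and
    # local changes add the "+DISTANCE.gHEX.dirty" local version identifier.
    clean = {"closest-tag": "0.3.0", "distance": 0, "dirty": False,
             "short": "abc1234"}
    ahead = {"closest-tag": "0.3.0", "distance": 2, "dirty": True,
             "short": "abc1234"}
    assert render_pep440(clean) == "0.3.0"
    assert render_pep440(ahead) == "0.3.0+2.gabc1234.dirty"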
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| bsd-3-clause |
xtao/openduckbill | src/openduckbilld.py | 3 | 1582 | #!/usr/bin/python2.4
# Copyright 2008 Google Inc.
# Author : Anoop Chandran <[email protected]>
#
# openduckbill is a simple backup application. It offers support for
# transferring data to a local backup directory or an NFS share. It also provides
# file system monitoring of directories marked for backup. Please read
# the README file for more details.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""The wrapper which starts the application.
openduckbilld is a wrapper around the rest of the application code. The function
StartOpenDuckbill calls the code which reads the config file and does the rest
of the initialisation.
"""
import daemon
def StartOpenDuckbill():
"""Starts the process of setting up environment and initialisation."""
main_config = 'config.yaml'
dbinit = daemon.OpenDuckbillMain(main_config)
if dbinit.MainInitialize():
dbinit.BackupInitialize()
if __name__ == '__main__':
StartOpenDuckbill()
| gpl-2.0 |
ZenHarbinger/snapcraft | integration_tests/test_tar_plugin.py | 2 | 2083 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
DirExists,
FileExists
)
import integration_tests
class TarPluginTestCase(integration_tests.TestCase):
def test_stage_nil_plugin(self):
self.run_snapcraft('stage', 'tar')
expected_files = [
'flat',
os.path.join('flatdir', 'flat2'),
'onedeep',
os.path.join('onedeepdir', 'onedeep2'),
'oneflat',
'top-simple',
'notop',
'parent',
'slash',
'readonly_file',
os.path.join('destdir1', 'destdir2', 'top-simple')
]
for expected_file in expected_files:
self.assertThat(
os.path.join(self.stage_dir, expected_file),
FileExists())
expected_dirs = [
'dir-simple',
'notopdir',
'destdir1',
os.path.join('destdir1', 'destdir2')
]
for expected_dir in expected_dirs:
self.assertThat(
os.path.join(self.stage_dir, expected_dir),
DirExists())
binary_output = self.get_output_ignoring_non_zero_exit(
os.path.join(self.stage_dir, 'bin', 'test'))
self.assertEqual('tarproject\n', binary_output)
# Regression test for
# https://bugs.launchpad.net/snapcraft/+bug/1500728
self.run_snapcraft('pull')
| gpl-3.0 |
taylorhxu/pybrain | examples/supervised/backprop/datasets/parity.py | 30 | 1587 | #!/usr/bin/env python
__author__ = 'Tom Schaul ([email protected])'
from pybrain.datasets import SequentialDataSet
class ParityDataSet(SequentialDataSet):
""" Determine whether the bitstring up to the current point conains a pair number of 1s or not."""
def __init__(self):
SequentialDataSet.__init__(self, 1,1)
self.newSequence()
self.addSample([-1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.newSequence()
self.addSample([1], [1])
self.addSample([1], [-1])
self.newSequence()
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.newSequence()
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([1], [1])
self.addSample([-1], [1])
self.addSample([-1], [1])
self.addSample([-1], [1])
self.addSample([-1], [1])
self.addSample([-1], [1])
self.newSequence()
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([1], [1])
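# Illustrative usage sketch, not part of the original example: feed the dataset
# to a recurrent network. The network and trainer choices below (a small LSTM
# hidden layer trained with RProp-) are assumptions, not taken from this file.
if __name__ == '__main__':
    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.structure import LSTMLayer
    from pybrain.supervised.trainers import RPropMinusTrainer
    ds = ParityDataSet()
    # one input unit, two LSTM cells, one output unit
    net = buildNetwork(1, 2, 1, hiddenclass=LSTMLayer, recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    for _ in range(100):  # a few epochs, just to exercise the dataset
        trainer.train()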
| bsd-3-clause |
sysalexis/kbengine | kbe/res/scripts/common/Lib/test/test_asynchat.py | 72 | 11161 | # test asynchat
from test import support
# If this fails, the test will be skipped.
thread = support.import_module('_thread')
import asynchat
import asyncore
import errno
import socket
import sys
import time
import unittest
import unittest.mock
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
SERVER_QUIT = b'QUIT\n'
TIMEOUT = 3.0
if threading:
class echo_server(threading.Thread):
# parameter to determine the number of bytes passed back to the
# client each send
chunk_size = 1
def __init__(self, event):
threading.Thread.__init__(self)
self.event = event
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.sock)
# This will be set if the client wants us to wait before echoing
# data back.
self.start_resend_event = None
def run(self):
self.sock.listen(1)
self.event.set()
conn, client = self.sock.accept()
self.buffer = b""
# collect data until quit message is seen
while SERVER_QUIT not in self.buffer:
data = conn.recv(1)
if not data:
break
self.buffer = self.buffer + data
# remove the SERVER_QUIT message
self.buffer = self.buffer.replace(SERVER_QUIT, b'')
if self.start_resend_event:
self.start_resend_event.wait()
# re-send entire set of collected data
try:
# this may fail on some tests, such as test_close_when_done,
# since the client closes the channel when it's done sending
while self.buffer:
n = conn.send(self.buffer[:self.chunk_size])
time.sleep(0.001)
self.buffer = self.buffer[n:]
except:
pass
conn.close()
self.sock.close()
class echo_client(asynchat.async_chat):
def __init__(self, terminator, server_port):
asynchat.async_chat.__init__(self)
self.contents = []
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((HOST, server_port))
self.set_terminator(terminator)
self.buffer = b""
def handle_connect(self):
pass
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
def collect_incoming_data(self, data):
self.buffer += data
def found_terminator(self):
self.contents.append(self.buffer)
self.buffer = b""
def start_echo_server():
event = threading.Event()
s = echo_server(event)
s.start()
event.wait()
event.clear()
time.sleep(0.01) # Give server time to start accepting.
return s, event
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestAsynchat(unittest.TestCase):
usepoll = False
def setUp(self):
self._threads = support.threading_setup()
def tearDown(self):
support.threading_cleanup(*self._threads)
def line_terminator_check(self, term, server_chunk):
event = threading.Event()
s = echo_server(event)
s.chunk_size = server_chunk
s.start()
event.wait()
event.clear()
time.sleep(0.01) # Give server time to start accepting.
c = echo_client(term, s.port)
c.push(b"hello ")
c.push(b"world" + term)
c.push(b"I'm not dead yet!" + term)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])
# the line terminator tests below check receiving variously-sized
# chunks back from the server in order to exercise all branches of
# async_chat.handle_read
def test_line_terminator1(self):
# test one-character terminator
for l in (1, 2, 3):
self.line_terminator_check(b'\n', l)
def test_line_terminator2(self):
# test two-character terminator
for l in (1, 2, 3):
self.line_terminator_check(b'\r\n', l)
def test_line_terminator3(self):
# test three-character terminator
for l in (1, 2, 3):
self.line_terminator_check(b'qqq', l)
def numeric_terminator_check(self, termlen):
# Try reading a fixed number of bytes
s, event = start_echo_server()
c = echo_client(termlen, s.port)
data = b"hello world, I'm not dead yet!\n"
c.push(data)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [data[:termlen]])
def test_numeric_terminator1(self):
# check that ints & longs both work (since type is
# explicitly checked in async_chat.handle_read)
self.numeric_terminator_check(1)
def test_numeric_terminator2(self):
self.numeric_terminator_check(6)
def test_none_terminator(self):
# Try reading a fixed number of bytes
s, event = start_echo_server()
c = echo_client(None, s.port)
data = b"hello world, I'm not dead yet!\n"
c.push(data)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [])
self.assertEqual(c.buffer, data)
def test_simple_producer(self):
s, event = start_echo_server()
c = echo_client(b'\n', s.port)
data = b"hello world\nI'm not dead yet!\n"
p = asynchat.simple_producer(data+SERVER_QUIT, buffer_size=8)
c.push_with_producer(p)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])
def test_string_producer(self):
s, event = start_echo_server()
c = echo_client(b'\n', s.port)
data = b"hello world\nI'm not dead yet!\n"
c.push_with_producer(data+SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])
def test_empty_line(self):
# checks that empty lines are handled correctly
s, event = start_echo_server()
c = echo_client(b'\n', s.port)
c.push(b"hello world\n\nI'm not dead yet!\n")
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents,
[b"hello world", b"", b"I'm not dead yet!"])
def test_close_when_done(self):
s, event = start_echo_server()
s.start_resend_event = threading.Event()
c = echo_client(b'\n', s.port)
c.push(b"hello world\nI'm not dead yet!\n")
c.push(SERVER_QUIT)
c.close_when_done()
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
# Only allow the server to start echoing data back to the client after
# the client has closed its connection. This prevents a race condition
# where the server echoes all of its data before we can check that it
# got any down below.
s.start_resend_event.set()
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [])
# the server might have been able to send a byte or two back, but this
# at least checks that it received something and didn't just fail
# (which could still result in the client not having received anything)
self.assertGreater(len(s.buffer), 0)
def test_push(self):
# Issue #12523: push() should raise a TypeError if it doesn't get
# a bytes string
s, event = start_echo_server()
c = echo_client(b'\n', s.port)
data = b'bytes\n'
c.push(data)
c.push(bytearray(data))
c.push(memoryview(data))
self.assertRaises(TypeError, c.push, 10)
self.assertRaises(TypeError, c.push, 'unicode')
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
self.assertEqual(c.contents, [b'bytes', b'bytes', b'bytes'])
class TestAsynchat_WithPoll(TestAsynchat):
usepoll = True
class TestAsynchatMocked(unittest.TestCase):
def test_blockingioerror(self):
# Issue #16133: handle_read() must ignore BlockingIOError
sock = unittest.mock.Mock()
sock.recv.side_effect = BlockingIOError(errno.EAGAIN)
dispatcher = asynchat.async_chat()
dispatcher.set_socket(sock)
self.addCleanup(dispatcher.del_channel)
with unittest.mock.patch.object(dispatcher, 'handle_error') as error:
dispatcher.handle_read()
self.assertFalse(error.called)
class TestHelperFunctions(unittest.TestCase):
def test_find_prefix_at_end(self):
self.assertEqual(asynchat.find_prefix_at_end("qwerty\r", "\r\n"), 1)
self.assertEqual(asynchat.find_prefix_at_end("qwertydkjf", "\r\n"), 0)
class TestFifo(unittest.TestCase):
def test_basic(self):
f = asynchat.fifo()
f.push(7)
f.push(b'a')
self.assertEqual(len(f), 2)
self.assertEqual(f.first(), 7)
self.assertEqual(f.pop(), (1, 7))
self.assertEqual(len(f), 1)
self.assertEqual(f.first(), b'a')
self.assertEqual(f.is_empty(), False)
self.assertEqual(f.pop(), (1, b'a'))
self.assertEqual(len(f), 0)
self.assertEqual(f.is_empty(), True)
self.assertEqual(f.pop(), (0, None))
def test_given_list(self):
f = asynchat.fifo([b'x', 17, 3])
self.assertEqual(len(f), 3)
self.assertEqual(f.pop(), (1, b'x'))
self.assertEqual(f.pop(), (1, 17))
self.assertEqual(f.pop(), (1, 3))
self.assertEqual(f.pop(), (0, None))
class TestNotConnected(unittest.TestCase):
def test_disallow_negative_terminator(self):
# Issue #11259
client = asynchat.async_chat()
self.assertRaises(ValueError, client.set_terminator, -1)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
jicruz/heroku-bot | lib/pip/_vendor/distlib/wheel.py | 412 | 39115 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
def _derive_abi():
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version < (1, 1):
fn = 'METADATA'
else:
fn = METADATA_FILENAME
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % fn)
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
end = m.end()
shebang, data_after_shebang = data[:end], data[end:]
# Preserve any arguments after the interpreter
if b'pythonw' in shebang.lower():
shebang_python = SHEBANG_PYTHONW
else:
shebang_python = SHEBANG_PYTHON
m = SHEBANG_DETAIL_RE.match(shebang)
if m:
args = b' ' + m.groups()[-1]
else:
args = b''
shebang = shebang_python + args
data = shebang + data_after_shebang
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = SHEBANG_PYTHON + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
records = list(records) # make a copy for sorting
p = to_posix(os.path.relpath(record_path, base))
records.append((p, '', ''))
records.sort()
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or to raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written.
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
                    # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
# The signature file won't be in RECORD,
                # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
        where the contents of the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
v = NormalizedVersion(version)
i = version.find('-')
if i < 0:
updated = '%s+1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s+%s' % (version[:i],
'.'.join(str(i) for i in parts))
except UnsupportedVersionError:
logger.debug('Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = not path.endswith(METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' % dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
m = re.match('(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
| gpl-3.0 |
nguyenppt/support-tools | googlecode-issues-exporter/github_issue_converter_test.py | 2 | 21195 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the GitHub Services."""
# pylint: disable=missing-docstring,protected-access
import collections
import httplib
import json
import unittest
import urlparse
import github_issue_converter
import issues
from issues_test import DEFAULT_USERNAME
from issues_test import SINGLE_COMMENT
from issues_test import SINGLE_ISSUE
from issues_test import COMMENT_ONE
from issues_test import COMMENT_TWO
from issues_test import COMMENT_THREE
from issues_test import COMMENTS_DATA
from issues_test import NO_ISSUE_DATA
from issues_test import USER_MAP
from issues_test import REPO
# The GitHub username.
GITHUB_USERNAME = DEFAULT_USERNAME
# The GitHub repo name.
GITHUB_REPO = REPO
# The GitHub oauth token.
GITHUB_TOKEN = "oauth_token"
# The URL used for calls to GitHub.
GITHUB_API_URL = "https://api.github.com"
class FakeGitHubService(github_issue_converter.GitHubService):
"""A fake of the GitHubService.
  This also allows for queueing of responses and their content into a response
  queue. For example, if you wanted a successful response and then a failure,
  you would call AddSuccessfulResponse and then AddFailureResponse. When a call
  to _PerformHttpRequest is then made, the successful response is returned; the
  next call returns the failed response.
  If no responses are in the queue, a successful response with no content is
  returned.
Attributes:
github_owner_username: The username of the owner of the repository.
github_repo_name: The GitHub repository name.
github_oauth_token: The oauth token to use for the requests.
"""
# pylint: disable=super-init-not-called
def __init__(self, github_owner_username, github_repo_name,
github_oauth_token):
"""Initialize the FakeGitHubService.
Args:
github_owner_username: The username of the owner of the repository.
github_repo_name: The GitHub repository name.
github_oauth_token: The oauth token to use for the requests.
"""
self.github_owner_username = github_owner_username
self.github_repo_name = github_repo_name
self._github_oauth_token = github_oauth_token
self._action_queue = collections.deque([])
def AddSuccessfulResponse(self, content=None):
"""Adds a succesfull response with no content to the reponse queue."""
self.AddResponse(content=content)
def AddFailureResponse(self):
"""Adds a failed response with no content to the reponse queue."""
self.AddResponse(httplib.BAD_REQUEST)
def AddResponse(self, response=httplib.OK, content=None):
status = {"status": response}
full_response = {}
full_response["status"] = status
full_response["content"] = content if content else {}
self._action_queue.append(full_response)
def _PerformHttpRequest(self, method, url, body="{}", params=None):
if not self._action_queue:
return {"status": httplib.OK}, {}
full_response = self._action_queue.popleft()
return (full_response["status"], full_response["content"])
def PerformGetRequest(self, url, params=None):
"""Makes a fake GET request.
Args:
url: The URL to make the call to.
params: A dictionary of parameters to be used in the http call.
Returns:
A tuple of a fake response and fake content.
"""
return self._PerformHttpRequest("GET", url, params=params)
def PerformPostRequest(self, url, body):
"""Makes a POST request.
Args:
url: The URL to make the call to.
body: The body of the request.
Returns:
A tuple of a fake response and content
"""
return self._PerformHttpRequest("POST", url, body=body)
def PerformPatchRequest(self, url, body):
"""Makes a PATCH request.
Args:
url: The URL to make the call to.
body: The body of the request.
Returns:
A tuple of a fake response and content
"""
return self._PerformHttpRequest("PATCH", url, body=body)
class Http2Mock(object):
"""Mock httplib2.Http object. Only mocks out the request function.
This mock keeps track of the last url, method and body called.
Attributes:
response_success: Fake successful HTTP response.
response_failure: Fake failure HTTP response.
response: The response of the next HTTP request.
content: The content of the next HTTP request.
last_url: The last URL that an HTTP request was made to.
    last_method: The HTTP method of the last request that was made.
    last_body: The body of the last request that was made.
"""
response_success = {"status": httplib.OK}
response_failure = {"status": httplib.BAD_REQUEST}
def __init__(self):
"""Initialize the Http2Mock."""
self.response = self.response_success
self.content = {}
self.last_url = None
self.last_method = None
self.last_body = None
def request(self, url, method, headers=None, body=None):
"""Makes a fake HTTP request.
Args:
url: The url to make the call to.
method: The type of call. POST, GET, etc.
      body: The body of the request.
Returns:
A tuple of a response and its content.
"""
self.last_url = url
self.last_method = method
self.last_body = body
return (self.response, json.dumps(self.content))
class TestGitHubService(unittest.TestCase):
"""Tests for the GitHubService."""
def setUp(self):
self.http_mock = Http2Mock()
self.github_service = github_issue_converter.GitHubService(
GITHUB_USERNAME, GITHUB_REPO, GITHUB_TOKEN,
rate_limit=False,
http_instance=self.http_mock)
def testSuccessfulRequestSuccess(self):
success = github_issue_converter._CheckSuccessful(
self.http_mock.response_success)
self.assertTrue(success)
def testSuccessfulRequestFailure(self):
failure = github_issue_converter._CheckSuccessful(
self.http_mock.response_failure)
self.assertFalse(failure)
def testGetRemainingRequestsRequestsLeft(self):
self.http_mock.content = {"rate": {"remaining": "500"}}
requests = self.github_service._GetRemainingRequests()
self.assertEqual(requests, 500)
def testGetRemainingRequestsNoRequestsLeft(self):
self.http_mock.content = {"rate": {"remaining": "0"}}
requests = self.github_service._GetRemainingRequests()
self.assertEqual(requests, 0)
def testGetRemainingRequestsBadResponse(self):
self.http_mock.content = {"bad": "content"}
requests = self.github_service._GetRemainingRequests()
self.assertEqual(requests, 0)
def testRequestLimitReachedLimitReached(self):
self.http_mock.content = {"rate": {"remaining": "0"}}
limit_reached = self.github_service._RequestLimitReached()
self.assertTrue(limit_reached)
def testRequestLimitReachedLimitNotReached(self):
self.http_mock.content = {"rate": {"remaining": "500"}}
limit_reached = self.github_service._RequestLimitReached()
self.assertFalse(limit_reached)
def testHttpRequest(self):
response, content = self.github_service._PerformHttpRequest("GET", "/test")
self.assertEqual(response, self.http_mock.response_success)
self.assertEqual(content, {})
self.assertEqual(self.http_mock.last_method, "GET")
uri = ("%s/test?access_token=%s" % (GITHUB_API_URL, GITHUB_TOKEN))
self.assertEqual(self.http_mock.last_url, uri)
def testHttpRequestParams(self):
params = {"one": 1, "two": 2}
response, content = self.github_service._PerformHttpRequest("POST",
"/test",
params=params)
self.assertEqual(response, self.http_mock.response_success)
self.assertEqual(content, {})
self.assertEqual(self.http_mock.last_method, "POST")
uri = ("%s/test?access_token=%s&one=1&two=2" %
(GITHUB_API_URL, GITHUB_TOKEN))
# pylint: disable=unpacking-non-sequence
(expected_scheme, expected_domain, expected_path, expected_params,
expected_query, expected_fragment) = urlparse.urlparse(uri)
expected_query_list = expected_query.split("&")
# pylint: disable=unpacking-non-sequence
(actual_scheme, actual_domain, actual_path, actual_params, actual_query,
actual_fragment) = urlparse.urlparse(self.http_mock.last_url)
actual_query_list = actual_query.split("&")
self.assertEqual(expected_scheme, actual_scheme)
self.assertEqual(expected_domain, actual_domain)
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_params, actual_params)
self.assertEqual(expected_fragment, actual_fragment)
self.assertItemsEqual(expected_query_list, actual_query_list)
def testGetRequest(self):
self.github_service.PerformGetRequest("/test")
self.assertEqual(self.http_mock.last_method, "GET")
def testPostRequest(self):
self.github_service.PerformPostRequest("/test", "")
self.assertEqual(self.http_mock.last_method, "POST")
def testPatchRequest(self):
self.github_service.PerformPatchRequest("/test", "")
self.assertEqual(self.http_mock.last_method, "PATCH")
class TestUserService(unittest.TestCase):
"""Tests for the UserService."""
def setUp(self):
self.github_service = FakeGitHubService(GITHUB_USERNAME,
GITHUB_REPO,
GITHUB_TOKEN)
self.github_user_service = github_issue_converter.UserService(
self.github_service)
def testIsUserTrue(self):
is_user = self.github_user_service.IsUser("username123")
self.assertTrue(is_user)
def testIsUserFalse(self):
self.github_service.AddFailureResponse()
is_user = self.github_user_service.IsUser("username321")
self.assertFalse(is_user)
class TestIssueService(unittest.TestCase):
"""Tests for the IssueService."""
def setUp(self):
self.http_mock = Http2Mock()
self.github_service = github_issue_converter.GitHubService(
GITHUB_USERNAME, GITHUB_REPO, GITHUB_TOKEN,
rate_limit=False,
http_instance=self.http_mock)
self.github_issue_service = github_issue_converter.IssueService(
self.github_service, comment_delay=0)
def testCreateIssue(self):
issue_body = {
"body": (
"```\none\n```\n\nOriginal issue reported on code.google.com by `a_uthor` on last year\n"
"- **Labels added**: added-label\n"
"- **Labels removed**: removed-label\n"),
"assignee": "default_username",
"labels": ["awesome", "great"],
"title": "issue_title",
}
self.http_mock.content = {"number": 1}
issue_number = self.github_issue_service.CreateIssue(SINGLE_ISSUE)
self.assertEqual(self.http_mock.last_method, "POST")
uri = ("%s/repos/%s/%s/issues?access_token=%s" %
(GITHUB_API_URL, GITHUB_USERNAME, GITHUB_REPO, GITHUB_TOKEN))
self.assertEqual(self.http_mock.last_url, uri)
self.assertEqual(self.http_mock.last_body, json.dumps(issue_body))
self.assertEqual(1, issue_number)
def testCloseIssue(self):
self.github_issue_service.CloseIssue(123)
self.assertEqual(self.http_mock.last_method, "PATCH")
uri = ("%s/repos/%s/%s/issues/%d?access_token=%s" %
(GITHUB_API_URL, GITHUB_USERNAME, GITHUB_REPO, 123, GITHUB_TOKEN))
self.assertEqual(self.http_mock.last_url, uri)
self.assertEqual(self.http_mock.last_body,
json.dumps({"state": "closed"}))
def testCreateComment(self):
comment_body = (
"```\none\n```\n\nOriginal issue reported on code.google.com "
"by `a_uthor` on last year\n"
"- **Labels added**: added-label\n"
"- **Labels removed**: removed-label\n")
self.github_issue_service.CreateComment(1, SINGLE_COMMENT)
self.assertEqual(self.http_mock.last_method, "POST")
uri = ("%s/repos/%s/%s/issues/%d/comments?access_token=%s" %
(GITHUB_API_URL, GITHUB_USERNAME, GITHUB_REPO, 1, GITHUB_TOKEN))
self.assertEqual(self.http_mock.last_url, uri)
self.assertEqual(self.http_mock.last_body,
json.dumps({"body": comment_body}))
def testGetIssueNumber(self):
issue = {"number": 1347}
issue_number = self.github_issue_service._GetIssueNumber(issue)
self.assertEqual(1347, issue_number)
def testGetIssues(self):
fake_github_service = FakeGitHubService(GITHUB_USERNAME,
GITHUB_REPO,
GITHUB_TOKEN)
github_issue_service = github_issue_converter.IssueService(
fake_github_service, comment_delay=0)
fake_github_service.AddFailureResponse()
with self.assertRaises(IOError):
github_issue_service.GetIssues()
class TestIssueExporter(unittest.TestCase):
"""Tests for the IssueService."""
def setUp(self):
self.github_service = FakeGitHubService(GITHUB_USERNAME,
GITHUB_REPO,
GITHUB_TOKEN)
self.github_user_service = github_issue_converter.UserService(
self.github_service)
self.github_issue_service = github_issue_converter.IssueService(
self.github_service, comment_delay=0)
self.issue_exporter = issues.IssueExporter(
self.github_issue_service, self.github_user_service,
NO_ISSUE_DATA, GITHUB_REPO, USER_MAP)
self.issue_exporter.Init()
self.TEST_ISSUE_DATA = [
{
"id": "1",
"number": "1",
"title": "Title1",
"state": "open",
"comments": {
"items": [COMMENT_ONE, COMMENT_TWO, COMMENT_THREE],
},
"labels": ["Type-Issue", "Priority-High"],
"owner": {"kind": "projecthosting#issuePerson",
"name": "User1"
},
},
{
"id": "2",
"number": "2",
"title": "Title2",
"state": "closed",
"owner": {"kind": "projecthosting#issuePerson",
"name": "User2"
},
"labels": [],
"comments": {
"items": [COMMENT_ONE],
},
},
{
"id": "3",
"number": "3",
"title": "Title3",
"state": "closed",
"comments": {
"items": [COMMENT_ONE, COMMENT_TWO],
},
"labels": ["Type-Defect"],
"owner": {"kind": "projecthosting#issuePerson",
"name": "User3"
}
}]
def testGetAllPreviousIssues(self):
self.assertEqual(0, len(self.issue_exporter._previously_created_issues))
content = [{"number": 1, "title": "issue_title", "comments": 2}]
self.github_service.AddResponse(content=content)
self.issue_exporter._GetAllPreviousIssues()
self.assertEqual(1, len(self.issue_exporter._previously_created_issues))
self.assertTrue("issue_title" in self.issue_exporter._previously_created_issues)
previous_issue = self.issue_exporter._previously_created_issues["issue_title"]
self.assertEqual(1, previous_issue["id"])
self.assertEqual("issue_title", previous_issue["title"])
self.assertEqual(2, previous_issue["comment_count"])
def testCreateIssue(self):
content = {"number": 1234}
self.github_service.AddResponse(content=content)
issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
self.assertEqual(1234, issue_number)
def testCreateIssueFailedOpenRequest(self):
self.github_service.AddFailureResponse()
with self.assertRaises(issues.ServiceError):
self.issue_exporter._CreateIssue(SINGLE_ISSUE)
def testCreateIssueFailedCloseRequest(self):
content = {"number": 1234}
self.github_service.AddResponse(content=content)
self.github_service.AddFailureResponse()
issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
self.assertEqual(1234, issue_number)
def testCreateComments(self):
self.assertEqual(0, self.issue_exporter._comment_number)
self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)
self.assertEqual(4, self.issue_exporter._comment_number)
def testCreateCommentsFailure(self):
self.github_service.AddFailureResponse()
self.assertEqual(0, self.issue_exporter._comment_number)
with self.assertRaises(issues.ServiceError):
self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)
def testStart(self):
self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
# Note: Some responses are from CreateIssues, others are from CreateComment.
self.github_service.AddResponse(content={"number": 1})
self.github_service.AddResponse(content={"number": 10})
self.github_service.AddResponse(content={"number": 11})
self.github_service.AddResponse(content={"number": 2})
self.github_service.AddResponse(content={"number": 20})
self.github_service.AddResponse(content={"number": 3})
self.github_service.AddResponse(content={"number": 30})
self.issue_exporter.Start()
self.assertEqual(3, self.issue_exporter._issue_total)
self.assertEqual(3, self.issue_exporter._issue_number)
# Comment counts are per issue and should match the numbers from the last
# issue created, minus one for the first comment, which is really
# the issue description.
self.assertEqual(1, self.issue_exporter._comment_number)
self.assertEqual(1, self.issue_exporter._comment_total)
def testStart_SkipDeletedComments(self):
comment = {
"content": "one",
"id": 1,
"published": "last year",
"author": {"name": "[email protected]"},
"updates": {
"labels": ["added-label", "-removed-label"],
},
}
self.issue_exporter._issue_json_data = [
{
"id": "1",
"number": "1",
"title": "Title1",
"state": "open",
"comments": {
"items": [
COMMENT_ONE,
comment,
COMMENT_TWO,
comment],
},
"labels": ["Type-Issue", "Priority-High"],
"owner": {"kind": "projecthosting#issuePerson",
"name": "User1"
},
}]
# Verify the comment data from the test issue references all comments.
self.github_service.AddResponse(content={"number": 1})
self.issue_exporter.Start()
# Remember, the first comment is for the issue.
self.assertEqual(3, self.issue_exporter._comment_number)
self.assertEqual(3, self.issue_exporter._comment_total)
# Set the deletedBy information for the comment object, now they
# should be ignored by the export.
comment["deletedBy"] = {}
self.github_service.AddResponse(content={"number": 1})
self.issue_exporter._previously_created_issues = {}
self.issue_exporter.Start()
self.assertEqual(1, self.issue_exporter._comment_number)
self.assertEqual(1, self.issue_exporter._comment_total)
def testStart_SkipAlreadyCreatedIssues(self):
self.issue_exporter._previously_created_issues["Title1"] = {
"id": 1,
"title": "Title1",
"comment_count": 3
}
self.issue_exporter._previously_created_issues["Title2"] = {
"id": 2,
"title": "Title2",
"comment_count": 1
}
self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
self.github_service.AddResponse(content={"number": 3})
self.issue_exporter.Start()
self.assertEqual(2, self.issue_exporter._skipped_issues)
self.assertEqual(3, self.issue_exporter._issue_total)
self.assertEqual(3, self.issue_exporter._issue_number)
def testStart_ReAddMissedComments(self):
self.issue_exporter._previously_created_issues["Title1"] = {
"id": 1,
"title": "Title1",
"comment_count": 1 # Missing 2 comments.
}
self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
# First requests to re-add comments, then create issues.
self.github_service.AddResponse(content={"number": 11})
self.github_service.AddResponse(content={"number": 12})
self.github_service.AddResponse(content={"number": 2})
self.github_service.AddResponse(content={"number": 3})
self.issue_exporter.Start()
self.assertEqual(1, self.issue_exporter._skipped_issues)
self.assertEqual(3, self.issue_exporter._issue_total)
self.assertEqual(3, self.issue_exporter._issue_number)
if __name__ == "__main__":
unittest.main(buffer=True)
| apache-2.0 |
heke123/chromium-crosswalk | net/data/verify_certificate_chain_unittest/generate-expired-target-notBefore.py | 5 | 1182 | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 1 intermediary, where the target is expired (violates
validity.notBefore). Verification is expected to fail."""
import common
# Self-signed root certificate (part of trust store).
root = common.create_self_signed_root_certificate('Root')
root.set_validity_range(common.JANUARY_1_2015_UTC, common.JANUARY_1_2016_UTC)
# Intermediary certificate.
intermediary = common.create_intermediary_certificate('Intermediary', root)
intermediary.set_validity_range(common.JANUARY_1_2015_UTC,
common.JANUARY_1_2016_UTC)
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediary)
target.set_validity_range(common.MARCH_2_2015_UTC, common.JANUARY_1_2016_UTC)
chain = [target, intermediary]
trusted = [root]
# Both the root and intermediary are valid at this time, however the
# target is not.
time = common.MARCH_1_2015_UTC
verify_result = False
common.write_test_file(__doc__, chain, trusted, time, verify_result)
| bsd-3-clause |
qlands/onadata | onadata/apps/logger/migrations/0007_auto__add_field_xform_has_start_time.py | 13 | 6918 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'XForm.has_start_time'
db.add_column('odk_logger_xform', 'has_start_time', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'XForm.has_start_time'
db.delete_column('odk_logger_xform', 'has_start_time')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['logger']
| bsd-2-clause |
cstipkovic/spidermonkey-research | python/macholib/macholib/itergraphreport.py | 16 | 1929 | """
Utilities for creating dot output from a MachOGraph
XXX: need to rewrite this based on altgraph.Dot
"""
from collections import deque
try:
from itertools import imap
except ImportError:
imap = map
__all__ = ['itergraphreport']
def itergraphreport(nodes, describe_edge, name='G'):
edges = deque()
nodetoident = {}
mainedges = set()
def nodevisitor(node, data, outgoing, incoming):
return {'label': str(node)}
def edgevisitor(edge, data, head, tail):
return {}
yield 'digraph %s {\n' % (name,)
attr = dict(rankdir='LR', concentrate='true')
cpatt = '%s="%s"'
for item in attr.iteritems():
yield '\t%s;\n' % (cpatt % item,)
# find all packages (subgraphs)
for (node, data, outgoing, incoming) in nodes:
nodetoident[node] = getattr(data, 'identifier', node)
# create sets for subgraph, write out descriptions
for (node, data, outgoing, incoming) in nodes:
# update edges
for edge in imap(describe_edge, outgoing):
edges.append(edge)
# describe node
yield '\t"%s" [%s];\n' % (
node,
','.join([
(cpatt % item) for item in
nodevisitor(node, data, outgoing, incoming).iteritems()
]),
)
graph = []
while edges:
edge, data, head, tail = edges.popleft()
if data in ('run_file', 'load_dylib'):
graph.append((edge, data, head, tail))
def do_graph(edges, tabs):
edgestr = tabs + '"%s" -> "%s" [%s];\n'
# describe edge
for (edge, data, head, tail) in edges:
attribs = edgevisitor(edge, data, head, tail)
yield edgestr % (
head,
tail,
','.join([(cpatt % item) for item in attribs.iteritems()]),
)
for s in do_graph(graph, '\t'):
yield s
yield '}\n'
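# Illustrative usage sketch: the node tuples, edge tuples and output file name
# below are hypothetical, shaped to match what nodevisitor/edgevisitor above
# expect ((node, data, outgoing, incoming) and (edge, data, head, tail)).
def _example_write_dot():
    nodes = [
        ('a.dylib', None, [('e1', 'load_dylib', 'a.dylib', 'b.dylib')], []),
        ('b.dylib', None, [], []),
    ]
    describe_edge = lambda e: e  # outgoing entries are already 4-tuples here
    with open('machograph.dot', 'w') as fp:
        for chunk in itergraphreport(nodes, describe_edge, name='machograph'):
            fp.write(chunk)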
| mpl-2.0 |
anoopcs9/samba | python/samba/tests/net_join.py | 6 | 2340 | # Unix SMB/CIFS implementation.
#
# Copyright (C) Catalyst.Net Ltd. 2017
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Confirm that net.join_member works
"""
import samba.tests
import os
from samba.net import Net, LIBNET_JOIN_AUTOMATIC
from samba.credentials import DONT_USE_KERBEROS
from samba import NTSTATUSError, ntstatus
import ctypes
class NetJoinTests(samba.tests.TestCaseInTempDir):
def setUp(self):
super(NetJoinTests, self).setUp()
self.domain = os.environ["DOMAIN"]
self.server = os.environ["SERVER"]
self.lp = self.get_loadparm()
self.lp.set("private dir", self.tempdir)
self.lp.set("lock dir", self.tempdir)
self.lp.set("state directory", self.tempdir)
def tearDown(self):
super(NetJoinTests, self).tearDown()
def test_net_join(self):
netbios_name = "NetJoinTest"
machinepass = "abcdefghij"
creds = self.insta_creds(template=self.get_credentials(),
kerberos_state=DONT_USE_KERBEROS)
net = Net(creds, self.lp, server=self.server)
# NOTE WELL: We must not run more than one successful
# net.join_member per file (process), as the shared
# secrets.ldb handle will be kept between runs.
try:
(join_password, sid, domain_name) = net.join_member(
self.domain, netbios_name, LIBNET_JOIN_AUTOMATIC,
machinepass=machinepass)
except NTSTATUSError as e:
code = ctypes.c_uint32(e[0]).value
if code == ntstatus.NT_STATUS_CONNECTION_DISCONNECTED:
self.fail("Connection failure")
raise
os.unlink(os.path.join(self.tempdir, "secrets.ldb"))
pass
| gpl-3.0 |
plusk01/roboclaw-v5 | examples/roboclaw_speeddistance.py | 2 | 2263 | #***Before using this example the motor/controller combination must be
#***tuned and the settings saved to the Roboclaw using IonMotion.
#***The Min and Max Positions must be at least 0 and 50000
import time
import roboclaw
def displayspeed():
enc1 = roboclaw.ReadEncM1(address)
enc2 = roboclaw.ReadEncM2(address)
speed1 = roboclaw.ReadSpeedM1(address)
speed2 = roboclaw.ReadSpeedM2(address)
print("Encoder1:"),
if(enc1[0]==1):
print enc1[1],
print format(enc1[2],'02x'),
else:
print "failed",
print "Encoder2:",
if(enc2[0]==1):
print enc2[1],
print format(enc2[2],'02x'),
else:
print "failed " ,
print "Speed1:",
if(speed1[0]):
print speed1[1],
else:
print "failed",
print("Speed2:"),
if(speed2[0]):
print speed2[1]
else:
print "failed "
#Windows comport name
roboclaw.Open("COM3",38400)
#Linux comport name
#roboclaw.Open("/dev/ttyACM0",115200)
address = 0x80
version = roboclaw.ReadVersion(address)
if version[0]==False:
print "GETVERSION Failed"
else:
print repr(version[1])
while(1):
roboclaw.SpeedDistanceM1(address,12000,48000,1)
roboclaw.SpeedDistanceM2(address,-12000,48000,1)
buffers = (0,0,0)
while(buffers[1]!=0x80 and buffers[2]!=0x80): #Loop until distance command has completed
displayspeed();
buffers = roboclaw.ReadBuffers(address);
time.sleep(2)
roboclaw.SpeedDistanceM1(address,-12000,48000,1)
roboclaw.SpeedDistanceM2(address,12000,48000,1)
buffers = (0,0,0)
while(buffers[1]!=0x80 and buffers[2]!=0x80): #Loop until distance command has completed
displayspeed()
buffers = roboclaw.ReadBuffers(address)
time.sleep(2); #When no second command is given the motors will automatically slow down to 0 which takes 1 second
roboclaw.SpeedDistanceM1(address,12000,48000,1)
roboclaw.SpeedDistanceM2(address,-12000,48000,1)
roboclaw.SpeedDistanceM1(address,-12000,48000,0)
roboclaw.SpeedDistanceM2(address,12000,48000,0)
roboclaw.SpeedDistanceM1(address,0,48000,0)
roboclaw.SpeedDistanceM2(address,0,48000,0)
buffers = (0,0,0)
while(buffers[1]!=0x80 and buffers[2]!=0x80): #Loop until distance command has completed
displayspeed()
buffers = roboclaw.ReadBuffers(address)
time.sleep(1)
| mit |
s142857/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_stream.py | 446 | 6264 | from __future__ import absolute_import, division, unicode_literals
from . import support # flake8: noqa
import unittest
import codecs
from io import BytesIO
from six.moves import http_client
from html5lib.inputstream import (BufferedStream, HTMLInputStream,
HTMLUnicodeInputStream, HTMLBinaryInputStream)
class BufferedStreamTest(unittest.TestCase):
def test_basic(self):
s = b"abc"
fp = BufferedStream(BytesIO(s))
read = fp.read(10)
assert read == s
def test_read_length(self):
fp = BufferedStream(BytesIO(b"abcdef"))
read1 = fp.read(1)
assert read1 == b"a"
read2 = fp.read(2)
assert read2 == b"bc"
read3 = fp.read(3)
assert read3 == b"def"
read4 = fp.read(4)
assert read4 == b""
def test_tell(self):
fp = BufferedStream(BytesIO(b"abcdef"))
read1 = fp.read(1)
assert fp.tell() == 1
read2 = fp.read(2)
assert fp.tell() == 3
read3 = fp.read(3)
assert fp.tell() == 6
read4 = fp.read(4)
assert fp.tell() == 6
def test_seek(self):
fp = BufferedStream(BytesIO(b"abcdef"))
read1 = fp.read(1)
assert read1 == b"a"
fp.seek(0)
read2 = fp.read(1)
assert read2 == b"a"
read3 = fp.read(2)
assert read3 == b"bc"
fp.seek(2)
read4 = fp.read(2)
assert read4 == b"cd"
fp.seek(4)
read5 = fp.read(2)
assert read5 == b"ef"
def test_seek_tell(self):
fp = BufferedStream(BytesIO(b"abcdef"))
read1 = fp.read(1)
assert fp.tell() == 1
fp.seek(0)
read2 = fp.read(1)
assert fp.tell() == 1
read3 = fp.read(2)
assert fp.tell() == 3
fp.seek(2)
read4 = fp.read(2)
assert fp.tell() == 4
fp.seek(4)
read5 = fp.read(2)
assert fp.tell() == 6
class HTMLUnicodeInputStreamShortChunk(HTMLUnicodeInputStream):
_defaultChunkSize = 2
class HTMLBinaryInputStreamShortChunk(HTMLBinaryInputStream):
_defaultChunkSize = 2
class HTMLInputStreamTest(unittest.TestCase):
def test_char_ascii(self):
stream = HTMLInputStream(b"'", encoding='ascii')
self.assertEqual(stream.charEncoding[0], 'ascii')
self.assertEqual(stream.char(), "'")
def test_char_utf8(self):
stream = HTMLInputStream('\u2018'.encode('utf-8'), encoding='utf-8')
self.assertEqual(stream.charEncoding[0], 'utf-8')
self.assertEqual(stream.char(), '\u2018')
def test_char_win1252(self):
stream = HTMLInputStream("\xa9\xf1\u2019".encode('windows-1252'))
self.assertEqual(stream.charEncoding[0], 'windows-1252')
self.assertEqual(stream.char(), "\xa9")
self.assertEqual(stream.char(), "\xf1")
self.assertEqual(stream.char(), "\u2019")
def test_bom(self):
stream = HTMLInputStream(codecs.BOM_UTF8 + b"'")
self.assertEqual(stream.charEncoding[0], 'utf-8')
self.assertEqual(stream.char(), "'")
def test_utf_16(self):
stream = HTMLInputStream((' ' * 1025).encode('utf-16'))
self.assertTrue(stream.charEncoding[0] in ['utf-16-le', 'utf-16-be'], stream.charEncoding)
self.assertEqual(len(stream.charsUntil(' ', True)), 1025)
def test_newlines(self):
stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\r\nccc\rddddxe")
self.assertEqual(stream.position(), (1, 0))
self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
self.assertEqual(stream.position(), (3, 0))
self.assertEqual(stream.charsUntil('x'), "ccc\ndddd")
self.assertEqual(stream.position(), (4, 4))
self.assertEqual(stream.charsUntil('e'), "x")
self.assertEqual(stream.position(), (4, 5))
def test_newlines2(self):
size = HTMLUnicodeInputStream._defaultChunkSize
stream = HTMLInputStream("\r" * size + "\n")
self.assertEqual(stream.charsUntil('x'), "\n" * size)
def test_position(self):
stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\nccc\nddde\nf\ngh")
self.assertEqual(stream.position(), (1, 0))
self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
self.assertEqual(stream.position(), (3, 0))
stream.unget("\n")
self.assertEqual(stream.position(), (2, 2))
self.assertEqual(stream.charsUntil('c'), "\n")
self.assertEqual(stream.position(), (3, 0))
stream.unget("\n")
self.assertEqual(stream.position(), (2, 2))
self.assertEqual(stream.char(), "\n")
self.assertEqual(stream.position(), (3, 0))
self.assertEqual(stream.charsUntil('e'), "ccc\nddd")
self.assertEqual(stream.position(), (4, 3))
self.assertEqual(stream.charsUntil('h'), "e\nf\ng")
self.assertEqual(stream.position(), (6, 1))
def test_position2(self):
stream = HTMLUnicodeInputStreamShortChunk("abc\nd")
self.assertEqual(stream.position(), (1, 0))
self.assertEqual(stream.char(), "a")
self.assertEqual(stream.position(), (1, 1))
self.assertEqual(stream.char(), "b")
self.assertEqual(stream.position(), (1, 2))
self.assertEqual(stream.char(), "c")
self.assertEqual(stream.position(), (1, 3))
self.assertEqual(stream.char(), "\n")
self.assertEqual(stream.position(), (2, 0))
self.assertEqual(stream.char(), "d")
self.assertEqual(stream.position(), (2, 1))
def test_python_issue_20007(self):
"""
Make sure we have a work-around for Python bug #20007
http://bugs.python.org/issue20007
"""
class FakeSocket(object):
def makefile(self, _mode, _bufsize=None):
return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")
source = http_client.HTTPResponse(FakeSocket())
source.begin()
stream = HTMLInputStream(source)
self.assertEqual(stream.charsUntil(" "), "Text")
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == '__main__':
main()
| mpl-2.0 |
Lilykos/invenio | invenio/legacy/pdfchecker/__init__.py | 13 | 1091 | #!@PYTHON@
# -*- mode: python; coding: utf-8; -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Arxiv Pdf Checker Task.
It periodically checks that arXiv papers are not missing PDFs.
"""
from invenio.base.factory import with_app_context
@with_app_context()
def main():
from .arxiv import main as arxiv_main
return arxiv_main()
| gpl-2.0 |
keyurpatel076/MissionPlannerGit | Lib/site-packages/numpy/core/_internal.py | 53 | 18020 | #A place for code to be called from C-code
# that implements more complicated stuff.
import re
import sys
import warnings
from numpy.compat import asbytes, bytes
if (sys.byteorder == 'little'):
_nbo = asbytes('<')
else:
_nbo = asbytes('>')
def _makenames_list(adict):
from numpy.core import dtype
allfields = []
fnames = adict.keys()
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2,3]:
raise ValueError("entry not a 2- or 3- tuple")
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0])
if (format.itemsize == 0):
raise ValueError("all itemsizes must be fixed.")
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
from numpy.core import dtype
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names" : names,
"formats" : formats,
"offsets" : offsets,
"titles" : titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.dtinfo is None:
return descriptor.str
else:
# TODO: This used to put a copy of the metadata
# in the tuple. Now we put in the dtinfo tuple.
# I have no idea if this is right.
new = descriptor.dtinfo
return (descriptor.str, new)
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('','|V%d' % num))
offset += num
if len(field) > 3:
name = (field[2],field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
from multiarray import ndarray
return ndarray.__new__(subtype, shape, dtype)
# format_re and _split were taken from numarray by J. Todd Miller
def _split(input):
"""Split the input formats string into field formats without splitting
the tuple used to specify multi-dimensional arrays."""
newlist = []
hold = asbytes('')
listinput = input.split(asbytes(','))
for element in listinput:
if hold != asbytes(''):
item = hold + asbytes(',') + element
else:
item = element
left = item.count(asbytes('('))
right = item.count(asbytes(')'))
        # if the parentheses are not balanced, hold the string
if left > right :
hold = item
# when balanced, append to the output list and reset the hold
elif left == right:
newlist.append(item.strip())
hold = asbytes('')
        # too many closing parentheses is unacceptable
else:
raise SyntaxError(item)
# if there is string left over in hold
if hold != asbytes(''):
raise SyntaxError(hold)
return newlist
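# Illustrative behaviour, shown as comments so nothing runs at import time
# (the input below is made up): parenthesized shapes stay attached to their
# format instead of being split on the embedded commas, e.g.
#   _split(asbytes('f8,(2,3)f4,i4')) -> [b'f8', b'(2,3)f4', b'i4']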
format_datetime = re.compile(r"""
(?P<typecode>M8|m8|datetime64|timedelta64)
([[]
((?P<num>\d+)?
(?P<baseunit>Y|M|W|B|D|h|m|s|ms|us|ns|ps|fs|as)
(/(?P<den>\d+))?
[]])
(//(?P<events>\d+))?)?""", re.X)
# Return (baseunit, num, den, events), datetime
# from date-time string
def _datetimestring(astr):
res = format_datetime.match(astr)
if res is None:
raise ValueError("Incorrect date-time string.")
typecode = res.group('typecode')
datetime = (typecode == asbytes('M8') or typecode == asbytes('datetime64'))
defaults = [asbytes('us'), 1, 1, 1]
names = ['baseunit', 'num', 'den', 'events']
func = [bytes, int, int, int]
dt_tuple = []
for i, name in enumerate(names):
value = res.group(name)
if value:
dt_tuple.append(func[i](value))
else:
dt_tuple.append(defaults[i])
return tuple(dt_tuple), datetime
format_re = re.compile(asbytes(r'(?P<order1>[<>|=]?)(?P<repeats> *[(]?[ ,0-9]*[)]? *)(?P<order2>[<>|=]?)(?P<dtype>[A-Za-z0-9.]*)'))
# astr is a string (perhaps comma separated)
_convorder = {asbytes('='): _nbo}
def _commastring(astr):
res = _split(astr)
if (len(res)) < 1:
raise ValueError("unrecognized formant")
result = []
for k,item in enumerate(res):
# convert item
try:
(order1, repeats, order2, dtype) = format_re.match(item).groups()
except (TypeError, AttributeError):
raise ValueError('format %s is not recognized' % item)
if order2 == asbytes(''):
order = order1
elif order1 == asbytes(''):
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
                raise ValueError('inconsistent byte-order specification %s and %s' % (order1, order2))
order = order1
if order in [asbytes('|'), asbytes('='), _nbo]:
order = asbytes('')
dtype = order + dtype
if (repeats == asbytes('')):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
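# Illustrative behaviour on a little-endian build, shown as comments so nothing
# runs at import time (the format string is made up): each comma-separated item
# becomes either a plain dtype string or a (dtype, shape) tuple, e.g.
#   _commastring(asbytes('>i4, (2,3)f8')) -> [b'>i4', (b'f8', (2, 3))]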
def _getintp_ctype():
from numpy.core import dtype
val = _getintp_ctype.cache
if val is not None:
return val
char = dtype('p').char
import ctypes
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
try:
import ctypes
self._ctypes = ctypes
except ImportError:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
def get_strides(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError("unknown field name: %s" % (name,))
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
# Given an array with fields and a sequence of field names
# construct a new array with just those fields copied over
def _index_fields(ary, fields):
from multiarray import empty, dtype
dt = ary.dtype
new_dtype = [(name, dt[name]) for name in dt.names if name in fields]
future_dtype = [(name, dt[name]) for name in fields if name in dt.names]
if not new_dtype == future_dtype:
depdoc = "Out of order field selection on recarrays currently returns \
fields in order. This behavior is deprecated in numpy 1.5 and will change in \
2.0. See ticket #1431."
warnings.warn(depdoc, DeprecationWarning)
if ary.flags.f_contiguous:
order = 'F'
else:
order = 'C'
newarray = empty(ary.shape, dtype=new_dtype, order=order)
for name in fields:
newarray[name] = ary[name]
return newarray
# Given a string containing a PEP 3118 format specifier,
# construct a Numpy dtype
_pep3118_native_map = {
'?': '?',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
from numpy.core import dtype
fields = {}
offset = 0
explicit_name = False
this_explicit_name = False
common_alignment = 1
is_padding = False
last_offset = 0
dummy_name_index = [0]
def next_dummy_name():
dummy_name_index[0] += 1
def get_dummy_name():
while True:
name = 'f%d' % dummy_name_index[0]
if name not in fields:
return name
next_dummy_name()
# Parse spec
while spec:
value = None
# End of structure, bail out to upper level
if spec[0] == '}':
spec = spec[1:]
break
# Sub-arrays (1)
shape = None
if spec[0] == '(':
j = spec.index(')')
shape = tuple(map(int, spec[1:j].split(',')))
spec = spec[j+1:]
# Byte order
if spec[0] in ('@', '=', '<', '>', '^', '!'):
byteorder = spec[0]
if byteorder == '!':
byteorder = '>'
spec = spec[1:]
# Byte order characters also control native vs. standard type sizes
if byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize = 1
if spec[0].isdigit():
j = 1
for j in xrange(1, len(spec)):
if not spec[j].isdigit():
break
itemsize = int(spec[:j])
spec = spec[j:]
# Data types
is_padding = False
if spec[:2] == 'T{':
value, spec, align, next_byteorder = _dtype_from_pep3118(
spec[2:], byteorder=byteorder, is_subdtype=True)
elif spec[0] in type_map_chars:
next_byteorder = byteorder
if spec[0] == 'Z':
j = 2
else:
j = 1
typechar = spec[:j]
spec = spec[j:]
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % spec)
#
# Native alignment may require padding
#
# Here we assume that the presence of a '@' character implicitly implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = (align*common_alignment
/ _gcd(align, common_alignment))
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
this_explicit_name = False
if spec and spec.startswith(':'):
i = spec[1:].index(':') + 1
name = spec[1:i]
spec = spec[i+1:]
explicit_name = True
this_explicit_name = True
else:
name = get_dummy_name()
if not is_padding or this_explicit_name:
if name in fields:
raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
% name)
fields[name] = (value, offset)
last_offset = offset
if not this_explicit_name:
next_dummy_name()
byteorder = next_byteorder
offset += value.itemsize
offset += extra_offset
# Check if this was a simple 1-item type
if len(fields.keys()) == 1 and not explicit_name and fields['f0'][1] == 0 \
and not is_subdtype:
ret = fields['f0'][0]
else:
ret = dtype(fields)
# Trailing padding must be explicitly added
padding = offset - ret.itemsize
if byteorder == '@':
padding += (-offset) % common_alignment
if is_padding and not this_explicit_name:
ret = _add_trailing_padding(ret, padding)
# Finished
if is_subdtype:
return ret, spec, common_alignment, byteorder
else:
return ret
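# Illustrative behaviour, shown as comments so nothing runs at import time
# (the specifiers are made-up examples using standard PEP 3118 sizes):
#   _dtype_from_pep3118('<d')  -> dtype('float64')
#   _dtype_from_pep3118('<ii') -> dtype([('f0', '<i4'), ('f1', '<i4')])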
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
from numpy.core import dtype
if value.fields is None:
vfields = {'f0': (value, 0)}
else:
vfields = dict(value.fields)
if value.names and value.names[-1] == '' and \
value[''].char == 'V':
# A trailing padding field is already present
vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding),
vfields[''][1])
value = dtype(vfields)
else:
# Get a free name for the padding field
j = 0
while True:
name = 'pad%d' % j
if name not in vfields:
vfields[name] = ('V%d' % padding, value.itemsize)
break
j += 1
value = dtype(vfields)
if '' not in vfields:
# Strip out the name of the padding field
names = list(value.names)
names[-1] = ''
value.names = tuple(names)
return value
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a%b
return a
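# Illustrative values for the two helpers above (comments only):
#   _prod((2, 3, 4)) -> 24
#   _gcd(12, 8)      -> 4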
| gpl-3.0 |
neuroidss/nupic.research | projects/sequence_classification/run_encoder_with_union.py | 9 | 8995 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run the sequence classification experiment with
Input -> RDSE encoder -> Union model
and search for the optimal union window.
Run the script "run_encoder_only.py" first to obtain the
optimal encoder resolution.
"""
import pickle
import time
import matplotlib.pyplot as plt
import multiprocessing
from util_functions import *
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
plt.ion()
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams.update({'figure.autolayout': True})
def unionForOneSequence(activeColumns, unionLength=1):
activeColumnTrace = []
unionStepInBatch = 0
unionBatchIdx = 0
unionCols = set()
for t in range(len(activeColumns)):
unionCols = unionCols.union(activeColumns[t])
unionStepInBatch += 1
if unionStepInBatch == unionLength:
activeColumnTrace.append(unionCols)
unionStepInBatch = 0
unionBatchIdx += 1
unionCols = set()
if unionStepInBatch > 0:
activeColumnTrace.append(unionCols)
return activeColumnTrace
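# Worked example (made-up input): with activeColumns = [{1}, {2}, {3}] and
# unionLength=2 the trace becomes [{1, 2}, {3}] -- complete windows are
# unioned and the trailing partial window is kept as its own entry.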
def runUnionStep(activeColumns, unionLength=1):
"""
  Pool the active columns of every sequence over non-overlapping windows of
  unionLength time steps.
  :param activeColumns: list of sequences, each a list of sets of active
                        column indices (one set per time step)
  :param unionLength: number of consecutive time steps to union together
  :return: list of unioned active-column traces, one per sequence
"""
numSequence = len(activeColumns)
activeColumnUnionTrace = []
for i in range(numSequence):
activeColumnTrace = unionForOneSequence(activeColumns[i], unionLength)
activeColumnUnionTrace.append(activeColumnTrace)
# print "{} out of {} done ".format(i, numSequence)
return activeColumnUnionTrace
def runEncoderOverDataset(encoder, dataset):
activeColumnsData = []
for i in range(dataset.shape[0]):
activeColumnsTrace = []
for element in dataset[i, :]:
encoderOutput = encoder.encode(element)
activeColumns = set(np.where(encoderOutput > 0)[0])
activeColumnsTrace.append(activeColumns)
activeColumnsData.append(activeColumnsTrace)
return activeColumnsData
def calcualteEncoderModelWorker(taskQueue, resultQueue, *args):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
nBuckets = nextTask["nBuckets"]
accuracyColumnOnly = calculateEncoderModelAccuracy(nBuckets, *args)
resultQueue.put({nBuckets: accuracyColumnOnly})
print "Column Only model, Resolution: {} Accuracy: {}".format(
nBuckets, accuracyColumnOnly)
return
def calculateEncoderModelAccuracy(nBuckets, numCols, w, trainData, trainLabel):
maxValue = np.max(trainData)
minValue = np.min(trainData)
resolution = (maxValue - minValue) / nBuckets
encoder = RandomDistributedScalarEncoder(resolution, w=w, n=numCols)
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnsTrain)
meanAccuracy, outcomeColumn = calculateAccuracy(distMatColumnTrain,
trainLabel, trainLabel)
accuracyColumnOnly = np.mean(outcomeColumn)
return accuracyColumnOnly
def runDataSet(dataName, datasetName):
if not os.path.exists('results'):
os.makedirs('results')
trainData, trainLabel, testData, testLabel = loadDataset(dataName,
datasetName)
numTest = len(testLabel)
numTrain = len(trainLabel)
sequenceLength = len(trainData[0])
classList = np.unique(trainLabel).tolist()
numClass = len(classList)
print "Processing {}".format(dataName)
print "Train Sample # {}, Test Sample # {}".format(numTrain, numTest)
print "Sequence Length {} Class # {}".format(sequenceLength, len(classList))
if (max(numTrain, numTest) * sequenceLength < 600 * 600):
print "skip this small dataset for now"
return
try:
unionLengthList = [1, 5, 10, 15, 20]
for unionLength in unionLengthList:
expResultTM = pickle.load(
open('results/modelPerformance/{}_columnOnly_union_{}'.format(
dataName, unionLength), 'r'))
return
except:
print "run data set: ", dataName
EuclideanDistanceMat = calculateEuclideanDistanceMat(testData, trainData)
outcomeEuclidean = calculateEuclideanModelAccuracy(trainData, trainLabel,
testData, testLabel)
accuracyEuclideanDist = np.mean(outcomeEuclidean)
print
print "Euclidean model accuracy: {}".format(accuracyEuclideanDist)
print
# # Use SDR overlap instead of Euclidean distance
print "Running Encoder model"
maxValue = np.max(trainData)
minValue = np.min(trainData)
numCols = 2048
w = 41
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
nBucketList = searchResolution['nBucketList']
accuracyVsResolution = searchResolution['accuracyVsResolution']
optNumBucket = nBucketList[smoothArgMax(np.array(accuracyVsResolution))]
optimalResolution = (maxValue - minValue) / optNumBucket
except:
return
print "optimal bucket # {}".format((maxValue - minValue) / optimalResolution)
encoder = RandomDistributedScalarEncoder(optimalResolution, w=w, n=numCols)
print "encoding train data ..."
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
print "encoding test data ..."
activeColumnsTest = runEncoderOverDataset(encoder, testData)
print "calculate column distance matrix ..."
# run encoder -> union model, search for the optimal union window
unionLengthList = [1, 5, 10, 15, 20]
for unionLength in unionLengthList:
activeColumnUnionTrain = runUnionStep(activeColumnsTrain, unionLength)
activeColumnUnionTest = runUnionStep(activeColumnsTest, unionLength)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnUnionTrain)
distMatColumnTest = calculateDistanceMat(activeColumnUnionTest,
activeColumnUnionTrain)
    trainAccuracyColumnOnly, outcomeColumn = calculateAccuracy(
        distMatColumnTrain, trainLabel, trainLabel)
    testAccuracyColumnOnly, outcomeColumn = calculateAccuracy(
        distMatColumnTest, trainLabel, testLabel)
expResults = {'distMatColumnTrain': distMatColumnTrain,
'distMatColumnTest': distMatColumnTest,
'trainAccuracyColumnOnly': trainAccuracyColumnOnly,
'testAccuracyColumnOnly': testAccuracyColumnOnly}
if not os.path.exists('results/distanceMat'):
os.makedirs('results/distanceMat')
outputFile = open('results/distanceMat/{}_columnOnly_union_{}'.format(
dataName, unionLength), 'w')
pickle.dump(expResults, outputFile)
outputFile.close()
print '--> wrote results to "results/distanceMat"'
def runDataSetWorker(taskQueue, datasetName):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
dataName = nextTask["dataName"]
runDataSet(dataName, datasetName)
return
if __name__ == "__main__":
datasetName = "SyntheticData"
dataSetList = listDataSets(datasetName)
datasetName = 'UCR_TS_Archive_2015'
dataSetList = listDataSets(datasetName)
# dataSetList = ["synthetic_control"]
numCPU = multiprocessing.cpu_count()
numWorker = 2
# Establish communication queues
taskQueue = multiprocessing.JoinableQueue()
for dataName in dataSetList:
taskQueue.put({"dataName": dataName,
"datasetName": datasetName})
for _ in range(numWorker):
taskQueue.put(None)
jobs = []
for i in range(numWorker):
print "Start process ", i
p = multiprocessing.Process(target=runDataSetWorker,
args=(taskQueue, datasetName))
jobs.append(p)
p.daemon = True
p.start()
while not taskQueue.empty():
time.sleep(5)
| agpl-3.0 |
liushuaikobe/evermd | lib/thrift/transport/TTwisted.py | 74 | 6547 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from zope.interface import implements, Interface, Attribute
from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from twisted.web import server, resource, http
from thrift.transport import TTransport
from cStringIO import StringIO
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = StringIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = StringIO()
self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
for k,v in self.client._reqs.iteritems():
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message='Connection closed')
v.errback(tex)
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
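# Hypothetical usage sketch (comments only): "MyService" stands in for a
# Thrift-generated service module; one common way to drive this protocol from
# Twisted is via ClientCreator:
#
#   from twisted.internet import reactor
#   from twisted.internet.protocol import ClientCreator
#   from thrift.protocol import TBinaryProtocol
#
#   pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#   d = ClientCreator(reactor, ThriftClientProtocol,
#                     MyService.Client, pfactory).connectTCP('localhost', 9090)
#   d.addCallback(lambda proto: proto.client)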
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
d = self.factory.processor.process(iprot, oprot)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class ThriftServerFactory(ServerFactory):
implements(IThriftServerFactory)
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
class ThriftClientFactory(ClientFactory):
implements(IThriftClientFactory)
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
class ThriftResource(resource.Resource):
allowedMethods = ('POST',)
def __init__(self, processor, inputProtocolFactory,
outputProtocolFactory=None):
resource.Resource.__init__(self)
self.inputProtocolFactory = inputProtocolFactory
if outputProtocolFactory is None:
self.outputProtocolFactory = inputProtocolFactory
else:
self.outputProtocolFactory = outputProtocolFactory
self.processor = processor
def getChild(self, path, request):
return self
def _cbProcess(self, _, request, tmo):
msg = tmo.getvalue()
request.setResponseCode(http.OK)
request.setHeader("content-type", "application/x-thrift")
request.write(msg)
request.finish()
def render_POST(self, request):
request.content.seek(0, 0)
data = request.content.read()
tmi = TTransport.TMemoryBuffer(data)
tmo = TTransport.TMemoryBuffer()
iprot = self.inputProtocolFactory.getProtocol(tmi)
oprot = self.outputProtocolFactory.getProtocol(tmo)
d = self.processor.process(iprot, oprot)
d.addCallback(self._cbProcess, request, tmo)
return server.NOT_DONE_YET
| gpl-2.0 |
azazel75/LasaurApp | docs/conf.py | 1 | 9717 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# LasaurApp documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 10 02:42:34 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.aafig'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'LasaurApp'
copyright = '2016, Stefan Hechenberger'
author = 'Stefan Hechenberger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '14.11'
# The full version, including alpha/beta/rc tags.
release = '14.11b'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'LasaurApp v14.11b'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'LasaurAppdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LasaurApp.tex', 'LasaurApp Documentation',
'Stefan Hechenberger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lasaurapp', 'LasaurApp Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LasaurApp', 'LasaurApp Documentation',
author, 'LasaurApp', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| gpl-3.0 |
b-cannon/my_djae | djangae/db/utils.py | 1 | 10507 | #STANDARD LIB
from datetime import datetime
from decimal import Decimal
from itertools import chain
import warnings
#LIBRARIES
from django.conf import settings
from django.db import models
from django.db.backends.util import format_number
from django.db import IntegrityError
from django.utils import timezone
from google.appengine.api import datastore
from google.appengine.api.datastore import Key, Query
#DJANGAE
from djangae.utils import memoized
from djangae.indexing import special_indexes_for_column, REQUIRES_SPECIAL_INDEXES
from djangae.db.backends.appengine.dbapi import CouldBeSupportedError
def make_timezone_naive(value):
if value is None:
return None
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("Djangae backend does not support timezone-aware datetimes when USE_TZ is False.")
return value
@memoized
def get_model_from_db_table(db_table):
for model in models.get_models(include_auto_created=True, only_installed=False):
if model._meta.db_table == db_table:
return model
def decimal_to_string(value, max_digits=16, decimal_places=0):
"""
Converts decimal to a unicode string for storage / lookup by nonrel
databases that don't support decimals natively.
This is an extension to `django.db.backends.util.format_number`
that preserves order -- if one decimal is less than another, their
string representations should compare the same (as strings).
TODO: Can't this be done using string.format()?
Not in Python 2.5, str.format is backported to 2.6 only.
"""
# Handle sign separately.
if value.is_signed():
sign = u'-'
value = abs(value)
else:
sign = u''
# Let Django quantize and cast to a string.
value = format_number(value, max_digits, decimal_places)
# Pad with zeroes to a constant width.
n = value.find('.')
if n < 0:
n = len(value)
if n < max_digits - decimal_places:
value = u'0' * (max_digits - decimal_places - n) + value
return sign + value
def normalise_field_value(value):
""" Converts a field value to a common type/format to make comparable to another. """
if isinstance(value, datetime):
return make_timezone_naive(value)
elif isinstance(value, Decimal):
return decimal_to_string(value)
return value
def get_datastore_kind(model):
return get_top_concrete_parent(model)._meta.db_table
def get_prepared_db_value(connection, instance, field, raw=False):
value = getattr(instance, field.attname) if raw else field.pre_save(instance, instance._state.adding)
if hasattr(value, "prepare_database_save"):
value = value.prepare_database_save(field)
else:
value = field.get_db_prep_save(
value,
connection=connection
)
value = connection.ops.value_for_db(value, field)
return value
def get_concrete_parents(model, ignore_leaf=False):
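    # Walks the model's MRO and keeps only concrete (non-abstract, non-proxy)
    # model classes; the last element is therefore the top-most concrete parent.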
ret = [x for x in model.mro() if hasattr(x, "_meta") and not x._meta.abstract and not x._meta.proxy]
if ignore_leaf:
ret = [ x for x in ret if x != model ]
return ret
@memoized
def get_top_concrete_parent(model):
return get_concrete_parents(model)[-1]
def get_concrete_fields(model, ignore_leaf=False):
"""
Returns all the concrete fields for the model, including those
from parent models
"""
concrete_classes = get_concrete_parents(model, ignore_leaf)
fields = []
for klass in concrete_classes:
fields.extend(klass._meta.fields)
return fields
@memoized
def get_concrete_db_tables(model):
return [ x._meta.db_table for x in get_concrete_parents(model) ]
@memoized
def has_concrete_parents(model):
return get_concrete_parents(model) != [model]
def django_instance_to_entity(connection, model, fields, raw, instance):
# uses_inheritance = False
inheritance_root = get_top_concrete_parent(model)
db_table = get_datastore_kind(inheritance_root)
def value_from_instance(_instance, _field):
value = get_prepared_db_value(connection, _instance, _field, raw)
if (not _field.null and not _field.primary_key) and value is None:
raise IntegrityError("You can't set %s (a non-nullable "
"field) to None!" % _field.name)
is_primary_key = False
if _field.primary_key and _field.model == inheritance_root:
is_primary_key = True
return value, is_primary_key
field_values = {}
primary_key = None
for field in fields:
value, is_primary_key = value_from_instance(instance, field)
if is_primary_key:
primary_key = value
else:
field_values[field.column] = value
# Add special indexed fields
for index in special_indexes_for_column(model, field.column):
indexer = REQUIRES_SPECIAL_INDEXES[index]
values = indexer.prep_value_for_database(value)
if values is None:
continue
if not hasattr(values, "__iter__"):
values = [ values ]
for v in values:
column = indexer.indexed_column_name(field.column, v)
if column in field_values:
if not isinstance(field_values[column], list):
field_values[column] = [ field_values[column], v ]
else:
field_values[column].append(v)
else:
field_values[column] = v
kwargs = {}
if primary_key:
if isinstance(primary_key, (int, long)):
kwargs["id"] = primary_key
elif isinstance(primary_key, basestring):
if len(primary_key) > 500:
warnings.warn("Truncating primary key that is over 500 characters. "
"THIS IS AN ERROR IN YOUR PROGRAM.",
RuntimeWarning)
primary_key = primary_key[:500]
kwargs["name"] = primary_key
else:
raise ValueError("Invalid primary key value")
entity = datastore.Entity(db_table, **kwargs)
entity.update(field_values)
classes = get_concrete_db_tables(model)
if len(classes) > 1:
entity["class"] = classes
return entity
def get_datastore_key(model, pk):
""" Return a datastore.Key for the given model and primary key.
"""
kind = get_top_concrete_parent(model)._meta.db_table
return Key.from_path(kind, model._meta.pk.get_prep_value(pk))
class MockInstance(object):
"""
This creates a mock instance for use when passing a datastore entity
into get_prepared_db_value. This is used when performing updates to prevent a complete
conversion to a Django instance before writing back the entity
"""
def __init__(self, **kwargs):
is_adding = kwargs.pop('_is_adding', False)
class State:
adding = is_adding
self.fields = {}
for field_name, value in kwargs.items():
self.fields[field_name] = value
self._state = State()
def __getattr__(self, attr):
if attr in self.fields:
return self.fields[attr]
raise AttributeError(attr)
def key_exists(key):
qry = Query(keys_only=True)
qry.Ancestor(key)
return qry.Count(limit=1) > 0
def django_ordering_comparison(ordering, lhs, rhs):
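    # Compares two datastore entities against a Django-style ordering spec
    # (a list of (property, direction) pairs, with "__key__" meaning the entity
    # key) and returns -1, 0 or 1 like a cmp() function, so results merged in
    # Python keep the requested sort order.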
if not ordering:
return -1 # Really doesn't matter
ASCENDING = 1
DESCENDING = 2
for order, direction in ordering:
lhs_value = lhs.key() if order == "__key__" else lhs[order]
rhs_value = rhs.key() if order == "__key__" else rhs[order]
if direction == ASCENDING and lhs_value != rhs_value:
return -1 if lhs_value < rhs_value else 1
elif direction == DESCENDING and lhs_value != rhs_value:
return 1 if lhs_value < rhs_value else -1
return 0
def entity_matches_query(entity, query):
"""
Return True if the entity would potentially be returned by the datastore
query
"""
OPERATORS = {
"=": lambda x, y: x == y,
"<": lambda x, y: x < y,
">": lambda x, y: x > y,
"<=": lambda x, y: x <= y,
">=": lambda x, y: x >= y
}
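    # Walkthrough with assumed values: a filter key "age >=" stored with value
    # 18 becomes the comparison ("age", ">=", "age >=") below; query.get("age >=")
    # yields 18 and entity.get("age") yields e.g. 21, so OPERATORS[">="](21, 18)
    # is True and the filter matches.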
queries = [query]
if isinstance(query, datastore.MultiQuery):
raise CouldBeSupportedError("We just need to separate the multiquery "
"into 'queries' then everything should work")
for query in queries:
comparisons = chain(
[("kind", "=", "_Query__kind") ],
[tuple(x.split(" ") + [ x ]) for x in query.keys()]
)
for ent_attr, op, query_attr in comparisons:
if ent_attr == "__key__":
continue
op = OPERATORS[op] # We want this to throw if there's some op we don't know about
if ent_attr == "kind":
ent_attr = entity.kind()
else:
ent_attr = entity.get(ent_attr)
if callable(ent_attr):
# entity.kind() is a callable, so we need this to save special casing it in a more
# ugly way
ent_attr = ent_attr()
if not isinstance(query_attr, (list, tuple)):
query_attrs = [query_attr]
else:
# The query value can be a list of ANDed values
query_attrs = query_attr
query_attrs = [ getattr(query, x) if x == "_Query__kind" else query.get(x) for x in query_attrs ]
if not isinstance(ent_attr, (list, tuple)):
ent_attr = [ ent_attr ]
matches = False
for query_attr in query_attrs: # [22, 23]
#If any of the values don't match then this query doesn't match
if not any([op(attr, query_attr) for attr in ent_attr]):
matches = False
break
else:
# One of the ent_attrs matches the query_attrs
matches = True
if not matches:
# One of the AND values didn't match
break
else:
# If we got through the loop without breaking, then the entity matches
return True
return False
| bsd-3-clause |
abramhindle/marsyas-fork | scripts/sfplugin-test-wrapper.py | 5 | 1546 | #!/usr/bin/env python
import os
import sys
import subprocess
import shutil
import commands
def runCommand(workingDir, bextractLocation, sfpluginLocation, extraBextractCmdArgs,
mfFileLocation, outputMplFile, testFile, goodFilename):
os.chdir(workingDir)
bextractCmd = ("%s %s -t %s -p %s") % (bextractLocation, extraBextractCmdArgs, mfFileLocation, outputMplFile)
a = commands.getoutput(bextractCmd)
sfpluginCmd = ("%s -pl %s %s") % (sfpluginLocation, outputMplFile, testFile)
a = commands.getoutput(sfpluginCmd)
returnCode = compareSfpluginOutput(a, goodFilename)
return returnCode
def compareSfpluginOutput(oneData, twoFilename):
oneLines = oneData.split("\n")
twoLines = open(twoFilename).readlines()
for a, b in zip(oneLines, twoLines):
if a.rstrip() != b.rstrip():
return False
return True
if __name__ == "__main__":
try:
workingDir = sys.argv[1]
bextractLocation = sys.argv[2]
sfpluginLocation = sys.argv[3]
extraBextractCmdArgs = sys.argv[4]
mfFileLocation = sys.argv[5]
outputMplFile = sys.argv[6]
testFile = sys.argv[7]
goodFilename = sys.argv[8]
except:
print "Syntax: cmake-test-wrapper.py WORKING_DIR ENV_VARS CMD ARG1 ARG2 ... ARGn "
sys.exit(0)
returnCode = runCommand(workingDir, bextractLocation, sfpluginLocation, extraBextractCmdArgs,
mfFileLocation, outputMplFile, testFile, goodFilename)
sys.exit(returnCode)
| gpl-2.0 |
stefan-andritoiu/upm | examples/python/oled_ssd1308.py | 6 | 5876 | #!/usr/bin/env python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Load lcd display module
from __future__ import print_function
import time, signal, sys
from upm import pyupm_lcd as upmLCD
def main():
myLCD = upmLCD.SSD1308(0, 0x3C);
logoArr = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 192, 192, 192, 224,
224, 224, 224, 240, 240, 248, 248, 120, 120, 120, 120, 60, 60, 60, 60, 60,
62, 30, 30, 30, 30, 30, 30, 30, 31, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 31, 31, 31, 31, 31,
30, 62, 62, 62, 62, 126, 126, 124, 124, 252, 252, 248, 248, 240, 240, 240,
224, 224, 224, 192, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128,
128, 0, 56, 56, 28, 30, 14, 15, 15, 7, 7, 7, 7, 3, 3, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
192, 192, 192, 192, 192, 192, 192, 192, 0, 0, 0, 0, 192, 193, 195, 195,
195, 7, 15, 15, 63, 127, 255, 255, 255, 254, 252, 252, 240, 192, 0, 0, 0,
0, 0, 0, 0, 0, 128, 192, 192, 240, 248, 124, 124, 60, 0, 0, 0, 0, 159, 159,
159, 159, 159, 159, 159, 159, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0,
0, 0, 0, 0, 254, 254, 254, 254, 254, 254, 254, 254, 128, 128, 128, 128,
128, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 192, 192, 192, 192, 192, 192, 128,
128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255,
0, 0, 0, 0, 3, 7, 3, 3, 3, 0, 0, 0, 0, 0, 1, 1, 255, 255, 255, 255, 255,
255, 255, 0, 0, 224, 248, 252, 252, 255, 127, 15, 15, 3, 1, 0, 0, 0, 0, 0,
0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255,
255, 255, 255, 255, 255, 15, 15, 15, 15, 15, 15, 255, 255, 255, 255, 255,
255, 255, 252, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 15, 15,
15, 15, 15, 224, 224, 252, 254, 255, 255, 255, 255, 159, 159, 143, 143,
135, 135, 143, 159, 255, 255, 255, 255, 255, 255, 252, 248, 0, 0, 0, 255,
255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128,
224, 248, 248, 255, 255, 255, 255, 255, 127, 15, 255, 255, 255, 255, 255,
255, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255,
255, 255, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0,
0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255,
255, 255, 255, 255, 255, 192, 192, 192, 192, 192, 31, 31, 255, 255, 255,
255, 255, 255, 231, 231, 199, 199, 199, 199, 199, 199, 199, 199, 231, 231,
231, 231, 199, 135, 0, 0, 0, 63, 255, 255, 255, 255, 255, 255, 255, 0, 0,
0, 0, 224, 240, 248, 248, 252, 254, 255, 255, 255, 127, 63, 63, 31, 15, 7,
7, 1, 0, 0, 63, 63, 255, 255, 255, 255, 255, 240, 192, 192, 128, 0, 0, 0,
0, 0, 0, 0, 0, 1, 3, 3, 7, 7, 7, 7, 7, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 7,
0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, 3, 3, 7, 7, 7,
7, 7, 7, 7, 7, 7, 0, 0, 0, 1, 3, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 7, 7, 7,
7, 7, 3, 3, 3, 1, 0, 0, 0, 0, 1, 3, 3, 7, 135, 135, 135, 192, 192, 0, 0, 7,
7, 3, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 7, 15, 15,
31, 127, 127, 127, 255, 255, 252, 252, 252, 248, 240, 240, 240, 224, 224,
224, 192, 192, 192, 192, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 192, 192, 192, 192, 192,
224, 224, 224, 224, 240, 240, 240, 240, 248, 248, 248, 248, 252, 252, 252,
254, 254, 255, 255, 255, 255, 255, 255, 127, 127, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
3, 3, 3, 7, 7, 7, 15, 15, 31, 31, 31, 63, 63, 63, 63, 63, 127, 127, 127,
127, 127, 255, 255, 255, 255, 254, 254, 254, 254, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
255, 255, 255, 255, 255, 255, 255, 127, 127, 127, 127, 127, 127, 127, 127,
63, 63, 63, 63, 63, 31, 31, 31, 31, 31, 15, 15, 15, 15, 7, 7, 7, 7, 3, 3,
3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0]
intelLogo = upmLCD.uint8Array(len(logoArr))
for x in range(len(logoArr)):
intelLogo.__setitem__(x, logoArr[x])
myLCD.clear()
myLCD.draw(intelLogo, 1024)
del intelLogo
del myLCD
print("Exiting")
if __name__ == '__main__':
main()
| mit |
wubr2000/googleads-python-lib | examples/adxbuyer/v201502/basic_operations/remove_placement.py | 4 | 2203 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deletes an ad group criterion using the 'REMOVE' operator.
To get ad group criteria, run get_placements.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
CRITERION_ID = 'INSERT_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
# Initialize appropriate service.
ad_group_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201502')
# Construct operations and delete ad group criteria.
operations = [
{
'operator': 'REMOVE',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Placement',
'id': criterion_id
}
}
}
]
result = ad_group_criterion_service.mutate(operations)
# Display results.
for criterion in result['value']:
print ('Ad group criterion with ad group id \'%s\', criterion id \'%s\', '
'and type \'%s\' was deleted.'
% (criterion['adGroupId'], criterion['criterion']['id'],
criterion['criterion']['Criterion.Type']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID, CRITERION_ID)
| apache-2.0 |
hrjn/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
paulrouget/servo | tests/wpt/web-platform-tests/webdriver/tests/close_window/user_prompts.py | 22 | 4182 | # META: timeout=long
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
def close(session):
return session.transport.send(
"DELETE", "session/{session_id}/window".format(**vars(session)))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, create_window):
def check_user_prompt_closed_without_exception(dialog_type, retval):
original_handle = session.window_handle
new_handle = create_window()
session.window_handle = new_handle
create_dialog(dialog_type, text=dialog_type)
response = close(session)
assert_success(response)
        # Asserting that the dialog was handled requires a valid top-level
        # browsing context, so we must switch to the original window.
session.window_handle = original_handle
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert new_handle not in session.handles
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, create_window):
def check_user_prompt_closed_with_exception(dialog_type, retval):
new_handle = create_window()
session.window_handle = new_handle
create_dialog(dialog_type, text=dialog_type)
response = close(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert new_handle in session.handles
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, create_window):
def check_user_prompt_not_closed_but_exception(dialog_type):
new_handle = create_window()
session.window_handle = new_handle
create_dialog(dialog_type, text=dialog_type)
response = close(session)
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
assert new_handle in session.handles
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_accept(check_user_prompt_closed_without_exception, dialog_type):
# retval not testable for confirm and prompt because window is gone
check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
# retval not testable for confirm and prompt because window is gone
check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| mpl-2.0 |
PsychoTV/PsychoTeam.repository | plugin.video.PsychoTV/resources/lib/sources/wsonline_tv.py | 6 | 4721 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://watchseries-online.ch'
self.search_link = 'index'
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
tvshowtitle = cleantitle.tv(tvshowtitle)
query = urlparse.urljoin(self.base_link, self.search_link)
result = client.source(query)
result = re.compile('(<li>.+?</li>)').findall(result)
result = [re.compile('href="(.+?)">(.+?)<').findall(i) for i in result]
            result = [i[0] for i in result if len(i) > 0]
result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
result = [i[0] for i in result][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
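        # Tries three URL layouts in turn: /episode/<show>-sXXeYY, then
        # /<year>/<month>/<show>-sXXeYY, then /<year>/<month>/<show>-<season>x<episode>,
        # giving up after a third 404.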
try:
if url == None: return
year, month = re.compile('(\d{4})-(\d{2})').findall(date)[-1]
if int(year) <= 2008: raise Exception()
cat = urlparse.urljoin(self.base_link, url)
cat = cat.split('category/', 1)[-1].rsplit('/')[0]
url = urlparse.urljoin(self.base_link, '/episode/%s-s%02de%02d' % (cat, int(season), int(episode)))
result = client.source(url, output='response', error=True)
if '404' in result[0]:
url = urlparse.urljoin(self.base_link, '/%s/%s/%s-s%02de%02d' % (year, month, cat, int(season), int(episode)))
result = client.source(url, output='response', error=True)
if '404' in result[0]:
url = urlparse.urljoin(self.base_link, '/%s/%s/%s-%01dx%01d' % (year, month, cat, int(season), int(episode)))
result = client.source(url, output='response', error=True)
if '404' in result[0]: raise Exception()
try: url = re.compile('//.+?(/.+)').findall(url)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
result = client.source(url)
links = client.parseDOM(result, 'td', attrs = {'class': 'even tdhost'})
links += client.parseDOM(result, 'td', attrs = {'class': 'odd tdhost'})
for i in links:
try:
host = client.parseDOM(i, 'a')[0]
host = host.split('<', 1)[0]
host = host.rsplit('.', 1)[0].split('.', 1)[-1]
host = host.strip().lower()
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
url = client.parseDOM(i, 'a', ret='href')[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': 'SD', 'provider': 'WSOnline', 'url': url})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
result = client.request(url)
try: url = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'wsoButton'})[0]
except: pass
url = resolvers.request(url)
return url
except:
return
| gpl-2.0 |
sam-tsai/django-old | tests/regressiontests/utils/datastructures.py | 27 | 1212 | """
>>> from django.utils.datastructures import SortedDict
>>> d = SortedDict()
>>> d[7] = 'seven'
>>> d[1] = 'one'
>>> d[9] = 'nine'
>>> d.keys()
[7, 1, 9]
>>> d.values()
['seven', 'one', 'nine']
>>> d.items()
[(7, 'seven'), (1, 'one'), (9, 'nine')]
# Overwriting an item keeps its place.
>>> d[1] = 'ONE'
>>> d.values()
['seven', 'ONE', 'nine']
# New items go to the end.
>>> d[0] = 'nil'
>>> d.keys()
[7, 1, 9, 0]
# Deleting an item, then inserting the same key again will place it at the end.
>>> del d[7]
>>> d.keys()
[1, 9, 0]
>>> d[7] = 'lucky number 7'
>>> d.keys()
[1, 9, 0, 7]
# Changing the keys won't do anything, it's only a copy of the keys dict.
>>> k = d.keys()
>>> k.remove(9)
>>> d.keys()
[1, 9, 0, 7]
# Initialising a SortedDict with two keys will just take the first one. A real
# dict will actually take the second value so we will too, but we'll keep the
# ordering from the first key found.
>>> tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
>>> d = SortedDict(tuples)
>>> d.keys()
[2, 1]
>>> real_dict = dict(tuples)
>>> sorted(real_dict.values())
['one', 'second-two']
>>> d.values() # Here the order of SortedDict values *is* what we are testing
['second-two', 'one']
"""
| bsd-3-clause |
fldc/CouchPotatoServer | couchpotato/core/plugins/category/main.py | 61 | 3963 | import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from .index import CategoryIndex, CategoryMediaIndex
log = CPLog(__name__)
class CategoryPlugin(Plugin):
_database = {
'category': CategoryIndex,
'category_media': CategoryMediaIndex,
}
def __init__(self):
addApiView('category.save', self.save)
addApiView('category.save_order', self.saveOrder)
addApiView('category.delete', self.delete)
addApiView('category.list', self.allView, docs = {
'desc': 'List all available categories',
'return': {'type': 'object', 'example': """{
'success': True,
'categories': array, categories
}"""}
})
addEvent('category.all', self.all)
def allView(self, **kwargs):
return {
'success': True,
'categories': self.all()
}
def all(self):
db = get_db()
categories = db.all('category', with_doc = True)
return [x['doc'] for x in categories]
def save(self, **kwargs):
try:
db = get_db()
category = {
'_t': 'category',
'order': kwargs.get('order', 999),
'label': toUnicode(kwargs.get('label', '')),
'ignored': toUnicode(kwargs.get('ignored', '')),
'preferred': toUnicode(kwargs.get('preferred', '')),
'required': toUnicode(kwargs.get('required', '')),
'destination': toUnicode(kwargs.get('destination', '')),
}
try:
c = db.get('id', kwargs.get('id'))
category['order'] = c.get('order', category['order'])
c.update(category)
db.update(c)
except:
c = db.insert(category)
c.update(category)
return {
'success': True,
'category': c
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False,
'category': None
}
def saveOrder(self, **kwargs):
try:
db = get_db()
order = 0
for category_id in kwargs.get('ids', []):
c = db.get('id', category_id)
c['order'] = order
db.update(c)
order += 1
return {
'success': True
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False
}
def delete(self, id = None, **kwargs):
try:
db = get_db()
success = False
message = ''
try:
c = db.get('id', id)
db.delete(c)
# Force defaults on all empty category movies
self.removeFromMovie(id)
success = True
except:
message = log.error('Failed deleting category: %s', traceback.format_exc())
return {
'success': success,
'message': message
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False
}
def removeFromMovie(self, category_id):
try:
db = get_db()
movies = [x['doc'] for x in db.get_many('category_media', category_id, with_doc = True)]
if len(movies) > 0:
for movie in movies:
movie['category_id'] = None
db.update(movie)
except:
log.error('Failed: %s', traceback.format_exc())
| gpl-3.0 |
aviweit/libcloud | libcloud/test/test_utils.py | 42 | 13018 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import socket
import codecs
import unittest
import warnings
import os.path
from itertools import chain
# In Python > 2.7 DeprecationWarnings are disabled by default
warnings.simplefilter('default')
import libcloud.utils.files
from libcloud.utils.misc import get_driver, set_driver
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import b
from libcloud.utils.py3 import bchr
from libcloud.utils.py3 import hexadigits
from libcloud.utils.py3 import urlquote
from libcloud.compute.types import Provider
from libcloud.compute.providers import DRIVERS
from libcloud.utils.misc import get_secure_random_string
from libcloud.utils.networking import is_public_subnet
from libcloud.utils.networking import is_private_subnet
from libcloud.utils.networking import is_valid_ip_address
from libcloud.utils.networking import join_ipv4_segments
from libcloud.utils.networking import increment_ipv4_segments
from libcloud.storage.drivers.dummy import DummyIterator
WARNINGS_BUFFER = []
if PY3:
from io import FileIO as file
def show_warning(msg, cat, fname, lno, line=None):
WARNINGS_BUFFER.append((msg, cat, fname, lno))
original_func = warnings.showwarning
class TestUtils(unittest.TestCase):
def setUp(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
def tearDown(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
warnings.showwarning = original_func
def test_guess_file_mime_type(self):
file_path = os.path.abspath(__file__)
mimetype, encoding = libcloud.utils.files.guess_file_mime_type(
file_path=file_path)
self.assertTrue(mimetype.find('python') != -1)
def test_get_driver(self):
driver = get_driver(drivers=DRIVERS, provider=Provider.DUMMY)
self.assertTrue(driver is not None)
try:
driver = get_driver(drivers=DRIVERS, provider='fooba')
except AttributeError:
pass
else:
self.fail('Invalid provider, but an exception was not thrown')
def test_set_driver(self):
# Set an existing driver
try:
driver = set_driver(DRIVERS, Provider.DUMMY,
'libcloud.storage.drivers.dummy',
'DummyStorageDriver')
except AttributeError:
pass
# Register a new driver
driver = set_driver(DRIVERS, 'testingset',
'libcloud.storage.drivers.dummy',
'DummyStorageDriver')
self.assertTrue(driver is not None)
# Register it again
try:
set_driver(DRIVERS, 'testingset',
'libcloud.storage.drivers.dummy',
'DummyStorageDriver')
except AttributeError:
pass
# Register an invalid module
try:
set_driver(DRIVERS, 'testingnew',
'libcloud.storage.drivers.dummy1',
'DummyStorageDriver')
except ImportError:
pass
# Register an invalid class
try:
set_driver(DRIVERS, 'testingnew',
'libcloud.storage.drivers.dummy',
'DummyStorageDriver1')
except AttributeError:
pass
def test_deprecated_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_DEPRECATION_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_DEPRECATION_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_in_development_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_read_in_chunks_iterator_no_data(self):
iterator = DummyIterator()
generator1 = libcloud.utils.files.read_in_chunks(iterator=iterator,
yield_empty=False)
generator2 = libcloud.utils.files.read_in_chunks(iterator=iterator,
yield_empty=True)
# yield_empty=False
count = 0
for data in generator1:
count += 1
self.assertEqual(data, b(''))
self.assertEqual(count, 0)
# yield_empty=True
count = 0
for data in generator2:
count += 1
self.assertEqual(data, b(''))
self.assertEqual(count, 1)
def test_read_in_chunks_iterator(self):
def iterator():
for x in range(0, 1000):
yield 'aa'
for result in libcloud.utils.files.read_in_chunks(iterator(),
chunk_size=10,
fill_size=False):
self.assertEqual(result, b('aa'))
for result in libcloud.utils.files.read_in_chunks(iterator(), chunk_size=10,
fill_size=True):
self.assertEqual(result, b('aaaaaaaaaa'))
def test_read_in_chunks_filelike(self):
class FakeFile(file):
def __init__(self):
self.remaining = 500
def read(self, size):
self.remaining -= 1
if self.remaining == 0:
return ''
return 'b' * (size + 1)
for index, result in enumerate(libcloud.utils.files.read_in_chunks(
FakeFile(), chunk_size=10,
fill_size=False)):
self.assertEqual(result, b('b' * 11))
self.assertEqual(index, 498)
for index, result in enumerate(libcloud.utils.files.read_in_chunks(
FakeFile(), chunk_size=10,
fill_size=True)):
if index != 548:
self.assertEqual(result, b('b' * 10))
else:
self.assertEqual(result, b('b' * 9))
self.assertEqual(index, 548)
def test_exhaust_iterator(self):
def iterator_func():
for x in range(0, 1000):
yield 'aa'
data = b('aa' * 1000)
iterator = libcloud.utils.files.read_in_chunks(iterator=iterator_func())
result = libcloud.utils.files.exhaust_iterator(iterator=iterator)
self.assertEqual(result, data)
result = libcloud.utils.files.exhaust_iterator(iterator=iterator_func())
self.assertEqual(result, data)
data = '12345678990'
iterator = StringIO(data)
result = libcloud.utils.files.exhaust_iterator(iterator=iterator)
self.assertEqual(result, b(data))
def test_exhaust_iterator_empty_iterator(self):
data = ''
iterator = StringIO(data)
result = libcloud.utils.files.exhaust_iterator(iterator=iterator)
self.assertEqual(result, b(data))
def test_unicode_urlquote(self):
# Regression tests for LIBCLOUD-429
if PY3:
# Note: this is a unicode literal
val = '\xe9'
else:
val = codecs.unicode_escape_decode('\xe9')[0]
uri = urlquote(val)
self.assertEqual(b(uri), b('%C3%A9'))
# Unicode without unicode characters
uri = urlquote('~abc')
self.assertEqual(b(uri), b('%7Eabc'))
# Already-encoded bytestring without unicode characters
uri = urlquote(b('~abc'))
self.assertEqual(b(uri), b('%7Eabc'))
def test_get_secure_random_string(self):
for i in range(1, 500):
value = get_secure_random_string(size=i)
self.assertEqual(len(value), i)
def test_hexadigits(self):
self.assertEqual(hexadigits(b('')), [])
self.assertEqual(hexadigits(b('a')), ['61'])
self.assertEqual(hexadigits(b('AZaz09-')),
['41', '5a', '61', '7a', '30', '39', '2d'])
def test_bchr(self):
if PY3:
self.assertEqual(bchr(0), b'\x00')
self.assertEqual(bchr(97), b'a')
else:
self.assertEqual(bchr(0), '\x00')
self.assertEqual(bchr(97), 'a')
class NetworkingUtilsTestCase(unittest.TestCase):
def test_is_public_and_is_private_subnet(self):
public_ips = [
'213.151.0.8',
'86.87.86.1',
'8.8.8.8',
'8.8.4.4'
]
private_ips = [
'192.168.1.100',
'10.0.0.1',
'172.16.0.0'
]
for address in public_ips:
is_public = is_public_subnet(ip=address)
is_private = is_private_subnet(ip=address)
self.assertTrue(is_public)
self.assertFalse(is_private)
for address in private_ips:
is_public = is_public_subnet(ip=address)
is_private = is_private_subnet(ip=address)
self.assertFalse(is_public)
self.assertTrue(is_private)
def test_is_valid_ip_address(self):
valid_ipv4_addresses = [
'192.168.1.100',
'10.0.0.1',
'213.151.0.8',
'77.77.77.77'
]
invalid_ipv4_addresses = [
'10.1',
'256.256.256.256',
'0.567.567.567',
'192.168.0.257'
]
valid_ipv6_addresses = [
'fe80::200:5aee:feaa:20a2',
'2607:f0d0:1002:51::4',
'2607:f0d0:1002:0051:0000:0000:0000:0004',
'::1'
]
invalid_ipv6_addresses = [
'2607:f0d',
'2607:f0d0:0004',
]
for address in valid_ipv4_addresses:
status = is_valid_ip_address(address=address,
family=socket.AF_INET)
self.assertTrue(status)
for address in valid_ipv6_addresses:
status = is_valid_ip_address(address=address,
family=socket.AF_INET6)
self.assertTrue(status)
for address in chain(invalid_ipv4_addresses, invalid_ipv6_addresses):
status = is_valid_ip_address(address=address,
family=socket.AF_INET)
self.assertFalse(status)
for address in chain(invalid_ipv4_addresses, invalid_ipv6_addresses):
status = is_valid_ip_address(address=address,
family=socket.AF_INET6)
self.assertFalse(status)
def test_join_ipv4_segments(self):
values = [
(('127', '0', '0', '1'), '127.0.0.1'),
(('255', '255', '255', '0'), '255.255.255.0'),
]
for segments, joined_ip in values:
result = join_ipv4_segments(segments=segments)
self.assertEqual(result, joined_ip)
def test_increment_ipv4_segments(self):
values = [
(('127', '0', '0', '1'), '127.0.0.2'),
(('255', '255', '255', '0'), '255.255.255.1'),
(('254', '255', '255', '255'), '255.0.0.0'),
(('100', '1', '0', '255'), '100.1.1.0'),
]
for segments, incremented_ip in values:
result = increment_ipv4_segments(segments=segments)
result = join_ipv4_segments(segments=result)
self.assertEqual(result, incremented_ip)
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
django-nonrel/djangoappengine | docs/conf.py | 3 | 7964 | # -*- coding: utf-8 -*-
#
# Django App Engine documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 20 20:01:39 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django App Engine'
copyright = u'2011, AllButtonsPressed, Potato London, Wilfred Hughes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6.0'
# The full version, including alpha/beta/rc tags.
release = '1.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoAppEnginedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoAppEngine.tex', u'Django App Engine Documentation',
u'AllButtonsPressed, Potato London, Wilfred Hughes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoappengine', u'Django App Engine Documentation',
[u'AllButtonsPressed, Potato London, Wilfred Hughes'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoAppEngine', u'Django App Engine Documentation',
u'AllButtonsPressed, Potato London, Wilfred Hughes', 'DjangoAppEngine',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause |
funbaker/astropy | astropy/io/misc/asdf/__init__.py | 2 | 1456 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
The **asdf** subpackage contains code that is used to serialize astropy types
so that they can be represented and stored using the Advanced Scientific Data
Format (ASDF). This subpackage defines classes, referred to as **tags**, that
implement the logic for serialization and deserialization.
ASDF makes use of abstract data type definitions called **schemas**. The tags
provided here are simply specific implementations of particular schemas.
Currently astropy only implements tags for a subset of schemas that are defined
externally by the ASDF Standard. However, it is likely that astropy will
eventually define schemas of its own.
Astropy currently has no ability to read or write ASDF files itself. In order
to process ASDF files it is necessary to make use of the standalone **asdf**
package. Users should never need to refer to tag implementations directly.
Their presence should be entirely transparent when processing ASDF files.
If both **asdf** and **astropy** are installed, no further configuration is
required in order to process ASDF files. The **asdf** package has been designed
to automatically detect the presence of the tags defined by **astropy**.
Documentation on the ASDF Standard can be found `here
<https://asdf-standard.readthedocs.io>`__. Documentation on the ASDF Python
module can be found `here <https://asdf.readthedocs.io>`__.
"""
| bsd-3-clause |
ingkebil/GMDBot | page_exists.py | 1 | 1236 | #!/usr/bin/env python
"""Check if a '(data page)' for a biomolecule exists"""
__author__ = "Kenny Billiau"
__copyright__ = "2014, GMD"
__license__ = "GPL v2"
__version__ = "0.1"
import sys
import urllib2
import re
import downloadinchi as inchi
import openpyxl as px
import urllib
def get_molecules_from_xlsx(fn):
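    # Reads the 'Wikipedia' worksheet and returns the column-A molecule names
    # for rows whose column E is filled in (neither '#N/A' nor empty).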
workbook = px.load_workbook(fn)
page = workbook.get_sheet_by_name(name='Wikipedia')
res = []
for row in page.range('A7:E208'):
if row[4].value not in ('#N/A', None):
res.append(row[0].value)
return res
def main(argv):
links = []
if len(argv) == 0:
lines = inchi.get_page('https://en.wikipedia.org/wiki/List_of_biomolecules')
links = inchi.get_molecule_links(lines)
else:
links = get_molecules_from_xlsx(argv[0])
pageid_re = re.compile('pageid')
for title in links:
print(title + ' '),
url = urllib2.urlopen("https://en.wikipedia.org/w/api.php?action=query&format=yaml&titles=%s" % urllib.quote(title + '_(data_page)'))
lines = url.read()
if len(pageid_re.findall(lines)) > 0:
print 'found'
else:
print 'NOT FOUND'
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-2.0 |
ldgarcia/django-allauth | allauth/socialaccount/providers/vk/provider.py | 65 | 1616 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
from allauth.socialaccount import app_settings
class VKAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('link')
def get_avatar_url(self):
ret = None
photo_big_url = self.account.extra_data.get('photo_big')
photo_medium_url = self.account.extra_data.get('photo_medium')
if photo_big_url:
return photo_big_url
elif photo_medium_url:
return photo_medium_url
else:
return ret
def to_str(self):
first_name = self.account.extra_data.get('first_name', '')
last_name = self.account.extra_data.get('last_name', '')
name = ' '.join([first_name, last_name]).strip()
return name or super(VKAccount, self).to_str()
class VKProvider(OAuth2Provider):
id = 'vk'
name = 'VK'
package = 'allauth.socialaccount.providers.vk'
account_class = VKAccount
def get_default_scope(self):
scope = []
if app_settings.QUERY_EMAIL:
scope.append('email')
return scope
def extract_uid(self, data):
return str(data['uid'])
def extract_common_fields(self, data):
return dict(email=data.get('email'),
last_name=data.get('last_name'),
username=data.get('screen_name'),
first_name=data.get('first_name'))
providers.registry.register(VKProvider)
| mit |
adviti/melange | thirdparty/google_appengine/lib/django_0_96/django/utils/feedgenerator.py | 32 | 11349 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> feed = feedgenerator.Rss201rev2Feed(
... title=u"Poynter E-Media Tidbits",
... link=u"http://www.poynter.org/column.asp?id=31",
... description=u"A group weblog by the sharpest minds in online media/journalism/publishing.",
... language=u"en",
... )
>>> feed.add_item(title="Hello", link=u"http://www.holovaty.com/test/", description="Testing.")
>>> fp = open('test.rss', 'w')
>>> feed.write(fp, 'utf-8')
>>> fp.close()
For definitions of the different versions of RSS, see:
http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from django.utils.xmlutils import SimplerXMLGenerator
import datetime, re, time
import email.Utils
def rfc2822_date(date):
return email.Utils.formatdate(time.mktime(date.timetuple()))
def rfc3339_date(date):
return date.strftime('%Y-%m-%dT%H:%M:%SZ')
def get_tag_uri(url, date):
"Creates a TagURI. See http://diveintomark.org/archives/2004/05/28/howto-atom-id"
tag = re.sub('^http://', '', url)
if date is not None:
tag = re.sub('/', ',%s:/' % date.strftime('%Y-%m-%d'), tag, 1)
tag = re.sub('#', '/', tag)
return 'tag:' + tag
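# Illustrative result (example values, not from the original module):
# get_tag_uri('http://example.com/post/1', datetime.date(2009, 4, 1))
# returns 'tag:example.com,2009-04-01:/post/1'.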
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None):
self.feed = {
'title': title,
'link': link,
'description': description,
'language': language,
'author_email': author_email,
'author_name': author_name,
'author_link': author_link,
'subtitle': subtitle,
'categories': categories or (),
'feed_url': feed_url,
'feed_copyright': feed_copyright,
}
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=(), item_copyright=None):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
self.items.append({
'title': title,
'link': link,
'description': description,
'author_email': author_email,
'author_name': author_name,
'author_link': author_link,
'pubdate': pubdate,
'comments': comments,
'unique_id': unique_id,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': item_copyright,
})
def num_items(self):
return len(self.items)
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
from StringIO import StringIO
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate. If none of them have a pubdate,
this returns the current date/time.
"""
updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.url, self.length, self.mime_type = url, length, mime_type
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u"rss", {u"version": self._version})
handler.startElement(u"channel", {})
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", self.feed['link'])
handler.addQuickElement(u"description", self.feed['description'])
if self.feed['language'] is not None:
handler.addQuickElement(u"language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"copyright", self.feed['feed_copyright'])
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement(u"rss")
def endChannelElement(self, handler):
handler.endElement(u"channel")
class RssUserland091Feed(RssFeed):
_version = u"0.91"
def write_items(self, handler):
for item in self.items:
handler.startElement(u"item", {})
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
handler.endElement(u"item")
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = u"2.0"
def write_items(self, handler):
for item in self.items:
handler.startElement(u"item", {})
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement(u"author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement(u"author", item["author_email"])
if item['pubdate'] is not None:
handler.addQuickElement(u"pubDate", rfc2822_date(item['pubdate']).decode('ascii'))
if item['comments'] is not None:
handler.addQuickElement(u"comments", item['comments'])
if item['unique_id'] is not None:
handler.addQuickElement(u"guid", item['unique_id'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"enclosure", '',
{u"url": item['enclosure'].url, u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", cat)
handler.endElement(u"item")
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml'
ns = u"http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
if self.feed['language'] is not None:
handler.startElement(u"feed", {u"xmlns": self.ns, u"xml:lang": self.feed['language']})
else:
handler.startElement(u"feed", {u"xmlns": self.ns})
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", "", {u"rel": u"alternate", u"href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement(u"link", "", {u"rel": u"self", u"href": self.feed['feed_url']})
handler.addQuickElement(u"id", self.feed['link'])
handler.addQuickElement(u"updated", rfc3339_date(self.latest_post_date()).decode('ascii'))
if self.feed['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement(u"email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement(u"uri", self.feed['author_link'])
handler.endElement(u"author")
if self.feed['subtitle'] is not None:
handler.addQuickElement(u"subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", "", {u"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"rights", self.feed['feed_copyright'])
self.write_items(handler)
handler.endElement(u"feed")
def write_items(self, handler):
for item in self.items:
handler.startElement(u"entry", {})
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", u"", {u"href": item['link'], u"rel": u"alternate"})
if item['pubdate'] is not None:
handler.addQuickElement(u"updated", rfc3339_date(item['pubdate']).decode('ascii'))
# Author information.
if item['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement(u"email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement(u"uri", item['author_link'])
handler.endElement(u"author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement(u"id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement(u"summary", item['description'], {u"type": u"html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"link", '',
{u"rel": u"enclosure",
u"href": item['enclosure'].url,
u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", u"", {u"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement(u"rights", item['item_copyright'])
handler.endElement(u"entry")
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| apache-2.0 |
chidea/GoPythonDLLWrapper | bin/lib/wsgiref/util.py | 119 | 5634 | """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def __next__(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
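# Illustrative sketch only -- a hypothetical helper, not part of the stdlib
# module: FileWrapper simply re-reads the underlying file-like object in
# fixed-size chunks until it is exhausted.
def _example_file_wrapper():
    from io import BytesIO
    chunks = list(FileWrapper(BytesIO(b"abcdef"), blksize=4))
    # chunks == [b"abcd", b"ef"]
    return chunks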
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
def application_uri(environ):
"""Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
url = environ['wsgi.url_scheme']+'://'
from urllib.parse import quote
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME') or '/', encoding='latin1')
return url
def request_uri(environ, include_query=True):
"""Return the full request URI, optionally including the query string"""
url = application_uri(environ)
from urllib.parse import quote
path_info = quote(environ.get('PATH_INFO',''), safe='/;=,', encoding='latin1')
if not environ.get('SCRIPT_NAME'):
url += path_info[1:]
else:
url += path_info
if include_query and environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url
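# Illustrative sketch only -- a hypothetical helper, not part of the stdlib
# module: a minimal environ built by hand, and the URIs the two functions
# above reconstruct from it.
def _example_uris():
    environ = {
        'wsgi.url_scheme': 'http',
        'HTTP_HOST': 'example.com',
        'SCRIPT_NAME': '/app',
        'PATH_INFO': '/item',
        'QUERY_STRING': 'id=3',
    }
    # application_uri(environ) == 'http://example.com/app'
    # request_uri(environ)     == 'http://example.com/app/item?id=3'
    return application_uri(environ), request_uri(environ)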
def shift_path_info(environ):
"""Shift a name from PATH_INFO to SCRIPT_NAME, returning it
If there are no remaining path segments in PATH_INFO, return None.
Note: 'environ' is modified in-place; use a copy if you need to keep
the original PATH_INFO or SCRIPT_NAME.
Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
'/' to SCRIPT_NAME, even though empty path segments are normally ignored,
and SCRIPT_NAME doesn't normally end in a '/'. This is intentional
behavior, to ensure that an application can tell the difference between
'/x' and '/x/' when traversing to objects.
"""
path_info = environ.get('PATH_INFO','')
if not path_info:
return None
path_parts = path_info.split('/')
path_parts[1:-1] = [p for p in path_parts[1:-1] if p and p != '.']
name = path_parts[1]
del path_parts[1]
script_name = environ.get('SCRIPT_NAME','')
script_name = posixpath.normpath(script_name+'/'+name)
if script_name.endswith('/'):
script_name = script_name[:-1]
if not name and not script_name.endswith('/'):
script_name += '/'
environ['SCRIPT_NAME'] = script_name
environ['PATH_INFO'] = '/'.join(path_parts)
# Special case: '/.' on PATH_INFO doesn't get stripped,
# because we don't strip the last element of PATH_INFO
# if there's only one path part left. Instead of fixing this
# above, we fix it here so that PATH_INFO gets normalized to
# an empty string in the environ.
if name=='.':
name = None
return name
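# Illustrative sketch only -- a hypothetical helper, not part of the stdlib
# module: shift_path_info mutates the environ in place, moving one path
# segment per call from PATH_INFO to SCRIPT_NAME.
def _example_shift():
    environ = {'SCRIPT_NAME': '', 'PATH_INFO': '/a/b'}
    name = shift_path_info(environ)
    # name == 'a'; environ == {'SCRIPT_NAME': '/a', 'PATH_INFO': '/b'}
    return name, environ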
def setup_testing_defaults(environ):
"""Update 'environ' with trivial defaults for testing purposes
This adds various parameters required for WSGI, including HTTP_HOST,
SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
and all of the wsgi.* variables. It only supplies default values,
and does not replace any existing settings for these variables.
This routine is intended to make it easier for unit tests of WSGI
servers and applications to set up dummy environments. It should *not*
be used by actual WSGI servers or applications, since the data is fake!
"""
environ.setdefault('SERVER_NAME','127.0.0.1')
environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')
environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
environ.setdefault('REQUEST_METHOD','GET')
if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
environ.setdefault('SCRIPT_NAME','')
environ.setdefault('PATH_INFO','/')
environ.setdefault('wsgi.version', (1,0))
environ.setdefault('wsgi.run_once', 0)
environ.setdefault('wsgi.multithread', 0)
environ.setdefault('wsgi.multiprocess', 0)
from io import StringIO, BytesIO
environ.setdefault('wsgi.input', BytesIO())
environ.setdefault('wsgi.errors', StringIO())
environ.setdefault('wsgi.url_scheme',guess_scheme(environ))
if environ['wsgi.url_scheme']=='http':
environ.setdefault('SERVER_PORT', '80')
elif environ['wsgi.url_scheme']=='https':
environ.setdefault('SERVER_PORT', '443')
_hoppish = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}.__contains__
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return _hoppish(header_name.lower())
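# Illustrative sketch only -- a hypothetical helper, not part of the stdlib
# module: filtering hop-by-hop headers out of a (name, value) list before
# forwarding it downstream.
def _example_end_to_end_headers(headers):
    return [(name, value) for (name, value) in headers
            if not is_hop_by_hop(name)]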
| mit |
apigee/edx-platform | common/djangoapps/student/migrations/0031_drop_student_anonymoususerid_temp_archive.py | 5 | 15979 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
db.execute("DROP TABLE student_anonymoususerid_temp_archive")
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.testcenterregistration': {
'Meta': {'object_name': 'TestCenterRegistration'},
'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'student.testcenteruser': {
'Meta': {'object_name': 'TestCenterUser'},
'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}),
'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
symmetrical = True
| agpl-3.0 |
codrut3/tensorflow | tensorflow/compiler/tests/reduce_ops_test.py | 19 | 5418 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reduction operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class ReduceOpsTest(XLATestCase):
def _testReduction(self, tf_reduce_fn, np_reduce_fn, dtype, test_inputs,
rtol=1e-4, atol=1e-4):
"""Tests that the output of 'tf_reduce_fn' matches numpy's output."""
for test_input in test_inputs:
with self.test_session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtype)
index = array_ops.placeholder(dtypes.int32)
out = tf_reduce_fn(a, index)
result = sess.run(out, {a: test_input, index: [0]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=0),
rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [1]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=1),
rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [-1]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=1),
rtol=rtol, atol=atol)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [-33]})
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [2]})
FLOAT_DATA = [
np.zeros(shape=(2, 0)),
np.zeros(shape=(0, 30)),
np.arange(1, 7).reshape(2, 3),
np.arange(-10, -4).reshape(2, 3),
np.arange(-4, 2).reshape(2, 3),
]
COMPLEX_DATA = [
np.zeros(shape=(2, 0)).astype(np.complex64),
np.zeros(shape=(0, 30)).astype(np.complex64),
np.arange(1, 13, dtype=np.float32).view(np.complex64).reshape(2, 3),
np.arange(-14, -2, dtype=np.float32).view(np.complex64).reshape(2, 3),
np.arange(-4, 8, dtype=np.float32).view(np.complex64).reshape(2, 3),
]
NONEMPTY_FLOAT_DATA = [x for x in FLOAT_DATA if np.size(x) > 0]
NONEMPTY_COMPLEX_DATA = [x for x in COMPLEX_DATA if np.size(x) > 0]
BOOL_DATA = [
np.array([], dtype=np.bool).reshape(2, 0),
np.array([], dtype=np.bool).reshape(0, 3),
np.array([[False, True, False], [True, True, False]]),
]
def testReduceSumF32(self):
self._testReduction(math_ops.reduce_sum, np.sum, np.float32,
self.FLOAT_DATA)
def testReduceSumC64(self):
self._testReduction(math_ops.reduce_sum, np.sum, np.complex64,
self.COMPLEX_DATA)
def testReduceProdF32(self):
self._testReduction(math_ops.reduce_prod, np.prod, np.float32,
self.FLOAT_DATA)
def testReduceProdC64(self):
self._testReduction(math_ops.reduce_prod, np.prod, np.complex64,
self.COMPLEX_DATA)
def testReduceMin(self):
def reference_min(inp, axis):
"""Wrapper around np.amin that returns +infinity for an empty input."""
if inp.shape[axis] == 0:
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('inf'))
return np.amin(inp, axis)
self._testReduction(math_ops.reduce_min, reference_min, np.float32,
self.FLOAT_DATA)
def testReduceMax(self):
def reference_max(inp, axis):
"""Wrapper around np.amax that returns -infinity for an empty input."""
if inp.shape[axis] == 0:
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('-inf'))
return np.amax(inp, axis)
self._testReduction(math_ops.reduce_max, reference_max, np.float32,
self.FLOAT_DATA)
def testReduceMeanF32(self):
# TODO(phawkins): mean on XLA currently returns 0 instead of NaN when
# reducing across zero inputs.
self._testReduction(math_ops.reduce_mean, np.mean, np.float32,
self.NONEMPTY_FLOAT_DATA)
def testReduceMeanC64(self):
self._testReduction(math_ops.reduce_mean, np.mean, np.complex64,
self.NONEMPTY_COMPLEX_DATA)
def testReduceAll(self):
self._testReduction(math_ops.reduce_all, np.all, np.bool, self.BOOL_DATA)
def testReduceAny(self):
self._testReduction(math_ops.reduce_any, np.any, np.bool, self.BOOL_DATA)
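# Illustrative sketch only -- a hypothetical helper, not part of the original
# test: the NumPy reference behaviour the assertions above compare against,
# with no XLA/session machinery involved.
def _numpy_reference_example():
    data = np.arange(1, 7).reshape(2, 3)   # [[1, 2, 3], [4, 5, 6]]
    return np.sum(data, axis=0), np.amin(data, axis=1)   # [5 7 9], [1 4]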
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
johndpope/tensorflow | tensorflow/compiler/tests/clustering_test.py | 123 | 3878 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the behavior of the auto-compilation pass."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
class ClusteringTest(XLATestCase):
def testAdd(self):
val1 = np.array([4, 3, 2, 1], dtype=np.float32)
val2 = np.array([5, 6, 7, 8], dtype=np.float32)
expected = val1 + val2
with self.test_session():
with self.test_scope():
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
output = math_ops.add(input1, input2)
result = output.eval()
self.assertAllClose(result, expected, rtol=1e-3)
def testAddFromCpuMultiple(self):
val1 = np.array([4, 3, 2, 1]).astype(np.float32)
val2 = np.array([5, 6, 7, 8]).astype(np.float32)
expected = val1 + val2
with self.test_session():
with ops.device(CPU_DEVICE):
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
with self.test_scope():
output = math_ops.add(input1, input2)
for _ in xrange(10):
result = output.eval()
self.assertAllClose(result, expected, rtol=1e-3)
def testDeadlock(self):
# Builds a graph of the form:
# x -> y
# | \
# z -> w
# where x and z are placed on the CPU and y and w are placed on the XLA
# device. If y and w are clustered for compilation, then the graph will
# deadlock since the clustered graph will contain a self-loop.
with self.test_session() as sess:
with ops.device(CPU_DEVICE):
x = array_ops.placeholder(dtypes.float32, [2])
with self.test_scope():
y = x * 2
with ops.device(CPU_DEVICE):
z = y * y
with self.test_scope():
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
def testHostMemory(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.int32)
with self.test_scope():
y = x + 1
with ops.device(CPU_DEVICE):
# Place a computation on the CPU, so y and w cannot be merged into the
# same JIT compilation.
z = y * 2
with self.test_scope():
# Argument 'y' is a non-constant output of a previous cluster. Make sure
# it is properly copied to host memory so it can be used as a
# compile-time constant input for this cluster.
w = array_ops.reshape(z, y)
result = sess.run(w, {x: [1, 0]})
expected = np.array([[4], [2]], dtype=np.int32)
self.assertAllClose(expected, result, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
s1lvester/heuteinmannheim | heuteinma.py | 1 | 6121 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import facebook
import websites
import feeds
#import beachstatus
from event import EventVault
import logging
import datetime
import time
import locale
locale.setlocale(locale.LC_TIME, '') # locale for date, time and the infamous German "Umlaute"
LOG_FILENAME = os.path.join(os.path.dirname(__file__), 'log.log')
logging.basicConfig(filename=LOG_FILENAME, level=logging.ERROR)
class HeuteInMannheim:
def __init__(self):
super(HeuteInMannheim, self).__init__()
self.vault = EventVault() # Initialize main Storage Object
# Initialize Scrapers
self.facebook_scraper = facebook.FacebookScraper(self.vault)
self.website_scraper = websites.WebsiteScraper(self.vault)
self.feed_scraper = feeds.FeedScraper(self.vault)
self.events = self.vault.get_events_for_date(datetime.date.today())
#self.events = self.vault.get_all_events() # Only for testing/debugging
#self.beach_status = beachstatus.BeachStatus()
#self.beach_status = self.beach_status.get_status()
self.state_output = self.make_html()
self.write_html() # Make initial index.html
logging.info("Total amount of Events: " + str(len(self.vault.get_all_events())))
def make_html(self):
"""Generate HTML output from collected events"""
output = """<!DOCTYPE html>
<html>
<head>
<title>Heute in Mannheim</title>
<link href="style.css" media="all" rel="stylesheet" type="text/css">
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="description" content="Heute in Mannheim ist eine simple Website, die dir Events in Mannheim anzeigt. Unabhängig, werbefrei, unkommerziell, free as in freedom and free as in beer.">
<meta name="apple-mobile-web-app-capable" content="yes">
</head>
<body>
<table>\n"""
if not self.events: # Guess we're staying home tonight...
output += """<tr><td><p><span class=\"title\">Heute keine
Events.<br> Guess we're staying home tonight...
:-(</span></p></td></tr>\n"""
else:
eo = 0 # Even/Odd table-rows
for event in self.events:
if eo == 0:
output += " <tr class=\"even\">"
eo = 1
else:
output += " <tr class=\"odd\">"
eo = 0
# Facebook Icon by http://shimmi1.deviantart.com/ to warn Users from evil Facebook links
if event.get("event_url").find("facebook") > -1:
output_fb = "<img src=\"img/fb_ico.png\" alt=\"Achtung: Facebook Link!\">"
else:
output_fb = ""
output += """
<td><p><span class=\"title\"><a href=\"{}\">{} {}</a></span></p>
<span class=\"location\"><a href=\"{}\">{}</a></span><br>
<span class=\"adresse\">{} {} | {} {}</span></td>
<td><span class=\"zeit\">{}</span><br>
</tr>\n""".format(event.get("event_url"),
event.get("title"),
output_fb,
event.get("url"),
event.get("name"),
event.get("strasse"),
event.get("hausnr"),
event.get("plz"),
event.get("ort"),
event.get("uhrzeit"))
# output += """
# </table>
# <hr>
# <p><b>Status der Mannheimer Strände:</b></p>
# <table>"""
# for beach in self.beach_status:
# hours = ""
# if beach["status"] == "open":
# hours = str("<b>" + beach["hours_open"] + " - " + beach["hours_closed"] + "</b><br>")
# output += """
# <tr class=\"beach\">
# <td class=\"{}\">
# <span class=\"adresse"><a href=\"{}\">{}: {}</a></span><br>
# {}
# {} {} | {} {}
# </td>
# </tr>""".format(beach["status"],
# beach["event_obj"].get("url"),
# beach["event_obj"].get("name"),
# beach["status"],
# hours,
# beach["event_obj"].get("strasse"),
# beach["event_obj"].get("hausnr"),
# beach["event_obj"].get("plz"),
# beach["event_obj"].get("ort"))
output += """
</table>
<hr>
<p>Last update: {}</p>
<p><b><a href=\"imprint.html\">Contact, Impressum und Datenschutz</a></b></p>
<p class=\"footer\">Heute in Mannheim ist eine automatisch generierte
Website und wurde nach bestem Wissen und Gewissen erstellt. Die
Einträge wurden nicht redaktionell bearbeitet und ich übernehme
keinerlei Haftung für die Inhalte hinter den links</p>
<p class=\"footer\"><a href=\"https://github.com/s1lvester/heuteinmannheim\">Fork me on GitHub</a><br>Danke an die Jungs von <a href=\"http://heuteinstuttgart.de/\">heuteinstuttgart.de</a></p>
</body>
</html>""".format(time.strftime("%d.%m.%Y %H:%M", time.localtime()))
return output.encode("utf-8")
def write_html(self):
"""Write the index.html file. Requires self.state_output to be set"""
f = open(os.path.join(os.path.dirname(__file__), "static/index.html"),
"wb")
f.write(self.state_output)
f.close()
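# Illustrative sketch only -- a hypothetical helper, not part of the original
# script: the page can be regenerated from an existing HeuteInMannheim
# instance without re-running the scrapers, using only attributes and methods
# defined above.
def regenerate_page(app):
    app.events = app.vault.get_events_for_date(datetime.date.today())
    app.state_output = app.make_html()
    app.write_html()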
# Gooo !!!!11einself
main_obj = HeuteInMannheim()
| mit |
fhools/doom3.gpl | neo/sys/gllog/logfunc.py | 62 | 2211 | #!/usr/bin/env python
# generate logging code
# this requires an analysis of the parameters for verbose and do actual call
import sys, string, re
from read import read_gl
def do_logfunc(f_in, f_out):
(gl, wgl, glX) = read_gl(f_in)
for l in (gl, glX):
for t in l:
# process ret type to strip trailing spaces
t[0] = string.strip(t[0])
f_out.write('static %s APIENTRY log%s(%s) {\n' % ( t[0], t[2], t[3] ))
# work on parameters
base_params = string.split(t[3], ',')
#f_out.write('// %s\n' % repr(base_params))
# init format string and parameter list
params = []
format = t[1][1:] + t[2]
# a general help list
types = []
names = []
for i in base_params:
regex = re.compile('([a-zA-Z0-9]*)$')
name = regex.search(i).group(1)
type = string.strip(i[0:len(i)-len(name)])
# catch type with no name
if (len(type) == 0):
type = name
name = ''
#f_out.write('// type: "%s" name: "%s"\n' % (type, name))
types.append(type)
names.append(name)
# verbose the types
if (type == 'GLenum'):
format += ' %s'
params.append( 'EnumString(' + name + ')' )
elif (type == 'GLfloat' or type == 'GLclampf' or type == 'GLdouble'):
format += ' %g'
params.append( name )
elif (type == 'GLint' or type == 'GLuint' or type == 'GLsizei' or type == 'GLbyte' or type == 'GLshort'
or type == 'GLubyte' or type == 'GLushort'):
format += ' %d'
params.append( name )
elif (type == 'GLboolean'):
format += ' %s'
params.append( name + ' ? "Y" : "N"' )
elif (type == 'void'):
pass
else:
f_out.write('// unknown type: "%s" name: "%s"\n' % (type, name))
format += ' \'' + type + ' ' + name + '\''
f_out.write('\tfprintf( tr.logFile, "' + format + '\\n"')
for par in params:
f_out.write(', ' + par)
f_out.write(' );\n')
if (t[0] != 'void'):
f_out.write('\treturn dll%s(' % t[2])
else:
f_out.write('\tdll%s(' % t[2])
started = 0
for i in names:
if (started):
f_out.write(', ')
else:
started = 1
f_out.write(i)
f_out.write(');\n')
f_out.write('}\n\n')
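# Illustrative sketch only -- a hypothetical convenience wrapper, not part of
# the original tool: run the generator over named files instead of the
# stdin/stdout redirection used in the __main__ block below.
def do_logfunc_files(in_path, out_path):
	f_in = open(in_path, 'r')
	f_out = open(out_path, 'w')
	try:
		do_logfunc(f_in, f_out)
	finally:
		f_in.close()
		f_out.close()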
if __name__ == '__main__':
do_logfunc(sys.stdin, sys.stdout)
| gpl-3.0 |
ghtmtt/QGIS | tests/src/python/test_qgsrange.py | 23 | 23316 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRange
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '11.04.2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.testing import unittest
from qgis.core import (QgsIntRange,
QgsDoubleRange,
QgsDateRange)
from qgis.PyQt.QtCore import QDate
class TestQgsIntRange(unittest.TestCase):
def testGetters(self):
range = QgsIntRange(1, 11)
self.assertEqual(range.lower(), 1)
self.assertEqual(range.upper(), 11)
self.assertTrue(range.includeLower())
self.assertTrue(range.includeUpper())
range = QgsIntRange(-1, 3, False, False)
self.assertEqual(range.lower(), -1)
self.assertEqual(range.upper(), 3)
self.assertFalse(range.includeLower())
self.assertFalse(range.includeUpper())
def testIsInfinite(self):
range = QgsIntRange()
self.assertTrue(range.isInfinite())
range2 = QgsIntRange(range.lower(), 5)
self.assertFalse(range2.isInfinite())
range2 = QgsIntRange(5, range.upper())
self.assertFalse(range2.isInfinite())
def testEquality(self):
self.assertEqual(QgsIntRange(1, 10), QgsIntRange(1, 10))
self.assertNotEqual(QgsIntRange(1, 10), QgsIntRange(1, 11))
self.assertNotEqual(QgsIntRange(1, 10), QgsIntRange(2, 10))
self.assertNotEqual(QgsIntRange(1, 10, False), QgsIntRange(1, 10))
self.assertNotEqual(QgsIntRange(1, 10, True, False), QgsIntRange(1, 10))
def testIsEmpty(self):
range = QgsIntRange(1, 1)
# should not be empty because 1 is included
self.assertFalse(range.isEmpty())
range = QgsIntRange(1, 1, False, False)
# should be empty because 1 is NOT included
self.assertTrue(range.isEmpty())
# invalid range is empty
range = QgsIntRange(1, -1)
self.assertTrue(range.isEmpty())
def testIsSingleton(self):
range = QgsIntRange(1, 1)
self.assertTrue(range.isSingleton())
range = QgsIntRange(1, 10)
self.assertFalse(range.isSingleton())
range = QgsIntRange(1, 1, False, False)
# should not be singleton because 1 is NOT included
self.assertFalse(range.isSingleton())
# invalid range is not singleton
range = QgsIntRange(1, -1)
self.assertFalse(range.isSingleton())
def testContains(self):
# includes both ends
range = QgsIntRange(0, 10)
self.assertTrue(range.contains(QgsIntRange(1, 9)))
self.assertTrue(range.contains(QgsIntRange(1, 10)))
self.assertTrue(range.contains(QgsIntRange(0, 9)))
self.assertTrue(range.contains(QgsIntRange(0, 10)))
self.assertFalse(range.contains(QgsIntRange(-1, 9)))
self.assertFalse(range.contains(QgsIntRange(1, 11)))
# does not include left end
range = QgsIntRange(0, 10, False, True)
self.assertTrue(range.contains(QgsIntRange(1, 9)))
self.assertTrue(range.contains(QgsIntRange(1, 10)))
self.assertFalse(range.contains(QgsIntRange(0, 9)))
self.assertFalse(range.contains(QgsIntRange(0, 10)))
self.assertFalse(range.contains(QgsIntRange(-1, 9)))
self.assertFalse(range.contains(QgsIntRange(1, 11)))
# does not include right end
range = QgsIntRange(0, 10, True, False)
self.assertTrue(range.contains(QgsIntRange(1, 9)))
self.assertFalse(range.contains(QgsIntRange(1, 10)))
self.assertTrue(range.contains(QgsIntRange(0, 9)))
self.assertFalse(range.contains(QgsIntRange(0, 10)))
self.assertFalse(range.contains(QgsIntRange(-1, 9)))
self.assertFalse(range.contains(QgsIntRange(1, 11)))
def testContainsElement(self):
# includes both ends
range = QgsIntRange(0, 10)
self.assertTrue(range.contains(0))
self.assertTrue(range.contains(5))
self.assertTrue(range.contains(10))
self.assertFalse(range.contains(-1))
self.assertFalse(range.contains(11))
# includes left end
range = QgsIntRange(0, 10, True, False)
self.assertTrue(range.contains(0))
self.assertTrue(range.contains(5))
self.assertFalse(range.contains(10))
self.assertFalse(range.contains(-1))
self.assertFalse(range.contains(11))
# includes right end
range = QgsIntRange(0, 10, False, True)
self.assertFalse(range.contains(0))
self.assertTrue(range.contains(5))
self.assertTrue(range.contains(10))
self.assertFalse(range.contains(-1))
self.assertFalse(range.contains(11))
# includes neither end
range = QgsIntRange(0, 10, False, False)
self.assertFalse(range.contains(0))
self.assertTrue(range.contains(5))
self.assertFalse(range.contains(10))
self.assertFalse(range.contains(-1))
self.assertFalse(range.contains(11))
def testOverlaps(self):
# includes both ends
range = QgsIntRange(0, 10)
self.assertTrue(range.overlaps(QgsIntRange(1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(0, 9)))
self.assertTrue(range.overlaps(QgsIntRange(0, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(10, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 0)))
self.assertFalse(range.overlaps(QgsIntRange(-10, -1)))
self.assertFalse(range.overlaps(QgsIntRange(11, 12)))
# includes left end
range = QgsIntRange(0, 10, True, False)
self.assertTrue(range.overlaps(QgsIntRange(1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(0, 9)))
self.assertTrue(range.overlaps(QgsIntRange(0, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 11)))
self.assertFalse(range.overlaps(QgsIntRange(10, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 0)))
self.assertFalse(range.overlaps(QgsIntRange(-10, -1)))
self.assertFalse(range.overlaps(QgsIntRange(11, 12)))
# includes right end
range = QgsIntRange(0, 10, False, True)
self.assertTrue(range.overlaps(QgsIntRange(1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(0, 9)))
self.assertTrue(range.overlaps(QgsIntRange(0, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(10, 11)))
self.assertFalse(range.overlaps(QgsIntRange(-1, 0)))
self.assertFalse(range.overlaps(QgsIntRange(-10, -1)))
self.assertFalse(range.overlaps(QgsIntRange(11, 12)))
# includes neither end
range = QgsIntRange(0, 10, False, False)
self.assertTrue(range.overlaps(QgsIntRange(1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(0, 9)))
self.assertTrue(range.overlaps(QgsIntRange(0, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 11)))
self.assertFalse(range.overlaps(QgsIntRange(10, 11)))
self.assertFalse(range.overlaps(QgsIntRange(-1, 0)))
self.assertFalse(range.overlaps(QgsIntRange(-10, -1)))
self.assertFalse(range.overlaps(QgsIntRange(11, 12)))
class TestQgsDoubleRange(unittest.TestCase):
def testGetters(self):
range = QgsDoubleRange(1.0, 11.0)
self.assertEqual(range.lower(), 1)
self.assertEqual(range.upper(), 11)
self.assertTrue(range.includeLower())
self.assertTrue(range.includeUpper())
range = QgsDoubleRange(-1.0, 3.0, False, False)
self.assertEqual(range.lower(), -1)
self.assertEqual(range.upper(), 3)
self.assertFalse(range.includeLower())
self.assertFalse(range.includeUpper())
def testEquality(self):
self.assertEqual(QgsDoubleRange(1, 10), QgsDoubleRange(1, 10))
self.assertNotEqual(QgsDoubleRange(1, 10), QgsDoubleRange(1, 11))
self.assertNotEqual(QgsDoubleRange(1, 10), QgsDoubleRange(2, 10))
self.assertNotEqual(QgsDoubleRange(1, 10, False), QgsDoubleRange(1, 10))
self.assertNotEqual(QgsDoubleRange(1, 10, True, False), QgsDoubleRange(1, 10))
def testIsInfinite(self):
range = QgsDoubleRange()
self.assertTrue(range.isInfinite())
range2 = QgsDoubleRange(range.lower(), 5)
self.assertFalse(range2.isInfinite())
range2 = QgsDoubleRange(5, range.upper())
self.assertFalse(range2.isInfinite())
class TestQgsDateRange(unittest.TestCase):
def testGetters(self):
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertEqual(range.begin(), QDate(2010, 3, 1))
self.assertEqual(range.end(), QDate(2010, 6, 2))
self.assertTrue(range.includeBeginning())
self.assertTrue(range.includeEnd())
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertFalse(range.begin().isValid())
self.assertEqual(range.end(), QDate(2010, 6, 2))
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertEqual(range.begin(), QDate(2010, 3, 1))
self.assertFalse(range.end().isValid())
def testIsEmpty(self):
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertFalse(range.isEmpty())
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertFalse(range.isEmpty())
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertFalse(range.isEmpty())
# check QgsDateRange docs - this is treated as an infinite range, so is NOT empty
range = QgsDateRange(QDate(), QDate())
self.assertFalse(range.isEmpty())
range = QgsDateRange(QDate(2017, 3, 1), QDate(2010, 6, 2))
self.assertTrue(range.isEmpty())
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1))
self.assertFalse(range.isEmpty())
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1), False, False)
self.assertTrue(range.isEmpty())
def testContains(self):
# includes both ends
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertFalse(range.contains(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertFalse(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertFalse(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertFalse(range.contains(QgsDateRange(QDate(), QDate(2010, 4, 1))))
# infinite left end
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertTrue(range.contains(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertFalse(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertFalse(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertTrue(range.contains(QgsDateRange(QDate(), QDate(2010, 4, 1))))
# infinite right end
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertFalse(range.contains(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertFalse(range.contains(QgsDateRange(QDate(), QDate(2010, 4, 1))))
def testContainsElement(self):
# includes both ends
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertTrue(range.contains(QDate(2010, 3, 1)))
self.assertTrue(range.contains(QDate(2010, 5, 2)))
self.assertTrue(range.contains(QDate(2010, 6, 2)))
self.assertFalse(range.contains(QDate(2009, 6, 2)))
self.assertFalse(range.contains(QDate(2017, 6, 2)))
self.assertFalse(range.contains(QDate()))
# infinite left end
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertTrue(range.contains(QDate(2010, 3, 1)))
self.assertTrue(range.contains(QDate(2010, 5, 2)))
self.assertTrue(range.contains(QDate(2010, 6, 2)))
self.assertTrue(range.contains(QDate(2009, 6, 2)))
self.assertFalse(range.contains(QDate(2017, 6, 2)))
self.assertFalse(range.contains(QDate()))
# infinite right end
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertTrue(range.contains(QDate(2010, 3, 1)))
self.assertTrue(range.contains(QDate(2010, 5, 2)))
self.assertTrue(range.contains(QDate(2010, 6, 2)))
self.assertFalse(range.contains(QDate(2009, 6, 2)))
self.assertTrue(range.contains(QDate(2017, 6, 2)))
self.assertFalse(range.contains(QDate()))
def testOverlaps(self):
# includes both ends
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertTrue(range.overlaps(QgsDateRange(QDate(), QDate(2010, 4, 1))))
self.assertFalse(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2009, 8, 5))))
self.assertFalse(range.overlaps(QgsDateRange(QDate(2019, 4, 1), QDate(2019, 8, 5))))
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertTrue(range.overlaps(QgsDateRange(QDate(), QDate(2010, 4, 1))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2009, 8, 5))))
self.assertFalse(range.overlaps(QgsDateRange(QDate(2019, 4, 1), QDate(2019, 8, 5))))
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertTrue(range.overlaps(QgsDateRange(QDate(), QDate(2010, 4, 1))))
self.assertFalse(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2009, 8, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2019, 4, 1), QDate(2019, 8, 5))))
def testIsInstant(self):
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2)).isInstant())
self.assertTrue(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1)).isInstant())
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1), False, False).isInstant())
self.assertFalse(QgsDateRange(QDate(), QDate()).isInstant())
def testIsInfinite(self):
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2)).isInfinite())
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1)).isInfinite())
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1), False, False).isInfinite())
self.assertTrue(QgsDateRange(QDate(), QDate()).isInfinite())
def testEquality(self):
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False))
self.assertNotEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, True))
self.assertNotEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), True, False))
self.assertNotEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 3), False, False))
self.assertNotEqual(range, QgsDateRange(QDate(2010, 3, 2), QDate(2010, 6, 2), False, False))
def testExtend(self):
range_empty = QgsDateRange(QDate(2010, 6, 2), QDate(2010, 3, 1))
# Empty
self.assertFalse(range_empty.extend(range_empty))
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertFalse(range.extend(range_empty))
range = QgsDateRange(QDate(2010, 6, 2), QDate(2010, 3, 1))
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False))
# Extend low
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 2, 1), QDate(2010, 6, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 2, 1), QDate(2010, 6, 2), False, False))
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 2, 1), QDate(2010, 5, 2), True, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 2, 1), QDate(2010, 6, 2), True, False))
# Extend high
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 7, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 7, 2), False, False))
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, True)))
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, True))
# Extend both
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 2, 1), QDate(2010, 7, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 2, 1), QDate(2010, 7, 2), False, False))
# Extend none
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertFalse(range.extend(QgsDateRange(QDate(2010, 4, 6), QDate(2010, 5, 2), False, False)))
# Test infinity
range = QgsDateRange(QDate(), QDate())
self.assertFalse(range.extend(QgsDateRange(QDate(2010, 4, 6), QDate(2010, 5, 2), False, False)))
range = QgsDateRange(QDate(), QDate(2010, 5, 2))
self.assertFalse(range.extend(QgsDateRange(QDate(2010, 4, 6), QDate(2010, 5, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(), QDate(2010, 5, 2), True, True))
range = QgsDateRange(QDate(2010, 4, 6), QDate())
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 6), QDate(2010, 5, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 6), QDate(), False, True))
range = QgsDateRange(QDate(), QDate(2010, 5, 2))
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 6), QDate(2010, 6, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(), QDate(2010, 6, 2), True, False))
range = QgsDateRange(QDate(2010, 4, 6), QDate())
self.assertTrue(range.extend(QgsDateRange(QDate(), QDate(2010, 5, 2), True, False)))
self.assertEqual(range, QgsDateRange(QDate(), QDate(), True, True))
range = QgsDateRange(QDate(), QDate(2010, 4, 6))
self.assertTrue(range.extend(QgsDateRange(QDate(), QDate(), True, True)))
self.assertEqual(range, QgsDateRange(QDate(), QDate(), True, True))
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
AlCutter/certificate-transparency | python/ct/client/async_log_client_test.py | 8 | 16485 | #!/usr/bin/env trial
import gflags
import json
import mock
import sys
import urlparse
from ct.client import log_client
from ct.client import async_log_client
from ct.client import log_client_test_util as test_util
from ct.client.db import database
from twisted.internet import defer
from twisted.internet import task
from twisted.internet import reactor
from twisted.python import failure
from twisted.test import proto_helpers
from twisted.trial import unittest
FLAGS = gflags.FLAGS
class ResponseBodyHandlerTest(unittest.TestCase):
def test_send(self):
finished = defer.Deferred()
handler = async_log_client.ResponseBodyHandler(finished)
transport = proto_helpers.StringTransportWithDisconnection()
handler.makeConnection(transport)
transport.protocol = handler
handler.dataReceived("test")
transport.loseConnection()
finished.addCallback(self.assertEqual, "test")
return finished
def test_send_chunks(self):
test_msg = "x"*1024
chunk_size = 100
finished = defer.Deferred()
handler = async_log_client.ResponseBodyHandler(finished)
transport = proto_helpers.StringTransportWithDisconnection()
handler.makeConnection(transport)
transport.protocol = handler
sent = 0
while sent < len(test_msg):
handler.dataReceived(test_msg[sent:sent + chunk_size])
sent += chunk_size
transport.loseConnection()
finished.addCallback(self.assertEqual, test_msg)
return finished
def test_buffer_overflow(self):
original = FLAGS.response_buffer_size_bytes
FLAGS.response_buffer_size_bytes = 10
test_msg = "x"*11
finished = defer.Deferred()
handler = async_log_client.ResponseBodyHandler(finished)
transport = proto_helpers.StringTransportWithDisconnection()
handler.makeConnection(transport)
transport.protocol = handler
handler.dataReceived(test_msg)
transport.loseConnection()
# TODO(ekasper): find a more elegant and robust way to save flags.
FLAGS.response_buffer_size_bytes = original
return self.assertFailure(finished,
async_log_client.HTTPResponseSizeExceededError)
class AsyncLogClientTest(unittest.TestCase):
class FakeHandler(test_util.FakeHandlerBase):
# A class that mimics twisted.web.iweb.IResponse. Note: the IResponse
# interface is only partially implemented.
class FakeResponse(object):
def __init__(self, code, reason, json_content=None):
self.code = code
self.phrase = reason
self.headers = AsyncLogClientTest.FakeHandler.FakeHeader()
if json_content is not None:
self._body = json.dumps(json_content)
else:
self._body = ""
def deliverBody(self, protocol):
transport = proto_helpers.StringTransportWithDisconnection()
protocol.makeConnection(transport)
transport.protocol = protocol
protocol.dataReceived(self._body)
transport.loseConnection()
@classmethod
def make_response(cls, code, reason, json_content=None):
return cls.FakeResponse(code, reason, json_content=json_content)
class FakeHeader(object):
def getAllRawHeaders(self):
return []
# Twisted doesn't (yet) have an official fake Agent:
# https://twistedmatrix.com/trac/ticket/4024
class FakeAgent(object):
def __init__(self, responder):
self._responder = responder
def request(self, method, uri):
if method != "GET":
return defer.fail(failure.Failure())
# Naive, for testing.
path, _, params = uri.partition("?")
params = urlparse.parse_qs(params)
# Take the first value of each parameter.
if any([len(params[key]) != 1 for key in params]):
return defer.fail(failure.Failure())
params = {key: params[key][0] for key in params}
response = self._responder.get_response(path, params=params)
return defer.succeed(response)
class FakeDB(object):
def scan_entries(self, first, last):
raise database.KeyError("boom!")
def store_entries(self, entries):
self.entries = list(entries)
def setUp(self):
self.clock = task.Clock()
def one_shot_client(self, json_content):
"""Make a one-shot client and give it a mock response."""
mock_handler = mock.Mock()
response = self.FakeHandler.make_response(200, "OK",
json_content=json_content)
mock_handler.get_response.return_value = response
return async_log_client.AsyncLogClient(self.FakeAgent(mock_handler),
test_util.DEFAULT_URI,
reactor=self.clock)
def default_client(self, entries_db=None, reactor_=None):
# A client whose responder is configured to answer queries for the
# correct uri.
if reactor_ is None:
reactor_ = self.clock
return async_log_client.AsyncLogClient(self.FakeAgent(
self.FakeHandler(test_util.DEFAULT_URI)), test_util.DEFAULT_URI,
entries_db=entries_db,
reactor=reactor_)
def test_get_sth(self):
client = self.default_client()
self.assertEqual(test_util.DEFAULT_STH,
self.successResultOf(client.get_sth()))
def test_get_sth_raises_on_invalid_response(self):
json_sth = test_util.sth_to_json(test_util.DEFAULT_STH)
json_sth.pop("timestamp")
client = self.one_shot_client(json_sth)
return self.assertFailure(client.get_sth(),
log_client.InvalidResponseError)
def test_get_sth_raises_on_invalid_base64(self):
json_sth = test_util.sth_to_json(test_util.DEFAULT_STH)
json_sth["tree_head_signature"] = "garbagebase64^^^"
client = self.one_shot_client(json_sth)
return self.assertFailure(client.get_sth(),
log_client.InvalidResponseError)
class EntryConsumer(object):
def __init__(self):
self.received = []
self.consumed = defer.Deferred()
def done(self, result):
self.result = result
self.consumed.callback("Done")
def consume(self, entries):
self.received += entries
d = defer.Deferred()
d.callback(None)
return d
# Helper method.
def get_entries(self, client, start, end, batch_size=0):
producer = client.get_entries(start, end, batch_size=batch_size)
consumer = self.EntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# Ensure the tasks scheduled in the reactor are invoked.
# Since start of get entries is delayed, we have to pump to make up for
# that delay. If some test is going to force get_entries to do more than
# one fetch, then that test has to take care of additional pumping.
self.pump_get_entries()
return consumer
def pump_get_entries(self,
delay=None,
pumps=1):
if not delay:
delay = FLAGS.get_entries_retry_delay
# Helper method which advances time past get_entries delay
for _ in range(0, pumps):
self.clock.pump([0, delay])
def test_get_entries(self):
client = self.default_client()
consumer = self.get_entries(client, 0, 9)
self.assertEqual(10, consumer.result)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
def test_get_sth_consistency(self):
client = self.default_client()
self.assertEqual([],
self.successResultOf(client.get_sth_consistency(0, 9)))
def test_get_entries_raises_on_invalid_response(self):
json_entries = test_util.entries_to_json(test_util.make_entries(0, 9))
json_entries["entries"][5]["leaf_input"] = "garbagebase64^^^"
client = self.one_shot_client(json_entries)
producer = client.get_entries(0, 9)
# remove exponential back-off
producer._calculate_retry_delay = lambda _: 1
consumer = self.EntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# pump through retries (with retries there are 2 delays per request and
# an initial delay)
self.pump_get_entries(1, FLAGS.get_entries_max_retries * 2 + 1)
self.assertTrue(consumer.result.check(log_client.InvalidResponseError))
# The entire response should be discarded upon error.
self.assertFalse(consumer.received)
def test_get_entries_raises_on_too_large_response(self):
large_response = test_util.entries_to_json(
test_util.make_entries(4, 5))
client = self.one_shot_client(large_response)
producer = client.get_entries(4, 4)
# remove exponential back-off
producer._calculate_retry_delay = lambda _: 1
consumer = self.EntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# pump through retries (with retries there are 2 delays per request and
# an initial delay)
self.pump_get_entries(1, FLAGS.get_entries_max_retries * 2 + 1)
self.assertTrue(consumer.result.check(log_client.InvalidResponseError))
def test_get_entries_succeeds_after_retry(self):
json_entries = test_util.entries_to_json(test_util.make_entries(0, 9))
json_entries["entries"][5]["leaf_input"] = "garbagebase64^^^"
client = self.one_shot_client(json_entries)
producer = client.get_entries(0, 9)
# remove exponential back-off
producer._calculate_retry_delay = lambda _: 1
consumer = self.EntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# pump retries halfway through (there are actually two delays before
# firing requests, so this loop will go only through half of retries)
self.pump_get_entries(1, FLAGS.get_entries_max_retries)
self.assertFalse(hasattr(consumer, 'result'))
json_entries = test_util.entries_to_json(test_util.make_entries(0, 9))
response = self.FakeHandler.make_response(200, "OK",
json_content=json_entries)
client._handler._agent._responder.get_response.return_value = response
self.pump_get_entries(1)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
def test_get_entries_raises_if_query_is_larger_than_tree_size(self):
client = async_log_client.AsyncLogClient(
self.FakeAgent(self.FakeHandler(
test_util.DEFAULT_URI, tree_size=3)), test_util.DEFAULT_URI,
reactor=self.clock)
consumer = self.get_entries(client, 0, 9)
# also pump error
self.pump_get_entries()
self.assertTrue(consumer.result.check(log_client.HTTPClientError))
def test_get_entries_returns_all_in_batches(self):
mock_handler = mock.Mock()
fake_responder = self.FakeHandler(test_util.DEFAULT_URI)
mock_handler.get_response.side_effect = (
fake_responder.get_response)
client = async_log_client.AsyncLogClient(self.FakeAgent(mock_handler),
test_util.DEFAULT_URI,
reactor=self.clock)
consumer = self.get_entries(client, 0, 9, batch_size=4)
self.assertEqual(10, consumer.result)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
self.assertEqual(3, len(mock_handler.get_response.call_args_list))
def test_get_entries_returns_all_for_limiting_server(self):
client = async_log_client.AsyncLogClient(
self.FakeAgent(
self.FakeHandler(test_util.DEFAULT_URI, entry_limit=3)),
test_util.DEFAULT_URI, reactor=self.clock)
consumer = self.get_entries(client, 0, 9)
# 1 pump in get_entries and 3 more so we fetch everything
self.pump_get_entries(pumps=3)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
class PausingConsumer(object):
def __init__(self, pause_at):
self.received = []
self.pause_at = pause_at
self.already_paused = False
self.result = None
def registerProducer(self, producer):
self.producer = producer
def done(self, result):
self.result = result
def consume(self, entries):
self.received += entries
if (not self.already_paused and
len(self.received) >= self.pause_at):
self.producer.pauseProducing()
self.already_paused = True
d = defer.Deferred()
d.callback(None)
return d
def test_get_entries_pause_resume(self):
client = self.default_client()
producer = client.get_entries(0, 9, batch_size=4)
consumer = self.PausingConsumer(4)
consumer.registerProducer(producer)
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# fire all pending callbacks, and then fire request
self.pump_get_entries()
self.assertTrue(test_util.verify_entries(consumer.received, 0, 3))
self.assertEqual(4, len(consumer.received))
self.assertIsNone(consumer.result)
producer.resumeProducing()
# pump next 2 batches
self.pump_get_entries(pumps=2)
self.assertEqual(10, consumer.result)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
def test_get_entries_use_stored_entries(self):
fake_db = self.FakeDB()
# if the client tried to fetch entries instead of taking them from the db,
# it would get entries 0 - 9. If it uses the db it will get entries 10 - 19
fake_db.scan_entries = mock.Mock(
return_value=test_util.make_entries(10, 19))
client = self.default_client(entries_db=fake_db, reactor_=reactor)
consumer = self.get_entries(client, 0, 9)
consumer.consumed.addCallback(lambda _:
self.assertEqual(len(consumer.received), 10))
consumer.consumed.addCallback(lambda _:
[self.assertEqual(test_util.make_entry(i + 10), consumer.received[i])
for i in range(0, 10)])
def test_get_entries_tries_to_fetch_if_not_available_in_db(self):
fake_db = self.FakeDB()
fake_db.scan_entries = mock.Mock(return_value=None)
client = self.default_client(entries_db=fake_db)
consumer = self.get_entries(client, 0, 9)
test_util.verify_entries(consumer.received, 0, 9)
def test_get_entries_stores_entries(self):
fake_db = self.FakeDB()
client = self.default_client(entries_db=fake_db, reactor_=reactor)
consumer = self.get_entries(client, 0, 9)
consumer.consumed.addCallback(lambda _:
test_util.verify_entries(consumer.received, 0, 9))
consumer.consumed.addCallback(lambda _:
test_util.verify_entries(fake_db.entries, 0, 9))
return consumer.consumed
class BadEntryConsumer(EntryConsumer):
def consume(self, entries):
self.received += entries
d = defer.Deferred()
d.errback(ValueError("Boom!"))
return d
def test_get_entries_fires_done_if_consumer_raises(self):
client = self.default_client()
producer = client.get_entries(0, 9)
consumer = self.BadEntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
self.pump_get_entries()
self.assertTrue(consumer.result.check(ValueError))
if __name__ == "__main__" or __name__ == "ct.client.async_log_client_test":
sys.argv = FLAGS(sys.argv)
| apache-2.0 |
GuLinux/PySpectrum | import_image.py | 1 | 5892 | from pyui.import_image import Ui_ImportImage
from PyQt5.QtWidgets import QWidget, QToolBar, QDialog, QDialogButtonBox, QProgressDialog, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, QCoreApplication
from qmathplotwidget import QMathPlotWidget, QImPlotWidget
import matplotlib.pyplot as plt
from qtcommons import QtCommons
from pyspectrum_commons import *
import os
import numpy as np
from astropy.io import fits
from object_properties_dialog import ObjectPropertiesDialog
from object_properties import ObjectProperties
from rotate_image_dialog import RotateImageDialog
from project import Project
class ImportImage(QWidget):
def icon():
return QIcon(':/image_20')
ACTION_TEXT = 'Import Image'
def pick(on_ok, settings):
open_file_sticky('Open FITS Image',FITS_IMG_EXTS, on_ok, settings, IMPORT_IMG )
def __init__(self, fits_file, settings, project = None):
super(ImportImage, self).__init__()
self.settings = settings
self.fits_file = fits_file
self.project = project
try:
image_hdu_index = fits_file.index_of('IMAGE')
except KeyError:
image_hdu_index = 0
original_image = fits.ImageHDU(data=fits_file[image_hdu_index].data, header=fits_file[image_hdu_index].header, name='IMAGE')
for hdu in [h for h in self.fits_file if h.name == 'IMAGE']: self.fits_file.remove(hdu)
self.fits_file.append(original_image)
self.ui = Ui_ImportImage()
self.ui.setupUi(self)
self.rotate_dialog = RotateImageDialog(self.fits_file, image_hdu_index, project=project)
self.rotate_dialog.rotated.connect(self.rotated)
self.image_plot = QtCommons.nestWidget(self.ui.image_widget, QImPlotWidget(self.rotate_dialog.data_rotated, cmap='gray'))
self.spatial_plot = QtCommons.nestWidget(self.ui.spatial_plot_widget, QMathPlotWidget())
self.spectrum_plot = QtCommons.nestWidget(self.ui.spectrum_plot_widget, QMathPlotWidget())
self.image_view = self.image_plot.axes_image
self.toolbar = QToolBar('Image Toolbar')
self.toolbar.addAction(QIcon(':/rotate_20'), "Rotate", lambda: self.rotate_dialog.show())
self.toolbar.addAction(QIcon(':/save_20'), "Save", self.save_profile)
self.toolbar.addAction(QIcon(':/select_all_20'), "Select spectrum data", lambda: self.spatial_plot.add_span_selector('select_spectrum', self.spectrum_span_selected,direction='horizontal'))
self.toolbar.addAction(QIcon.fromTheme('edit-select-invert'), "Select background data", lambda: self.spatial_plot.add_span_selector('select_background', self.background_span_selected,direction='horizontal', rectprops = dict(facecolor='blue', alpha=0.5))).setEnabled(False)
#self.toolbar.addAction('Stack', self.show_stack_images_dialog)
self.toolbar.addSeparator()
self.object_properties = ObjectProperties(self.fits_file, project=project)
self.object_properties_dialog = ObjectPropertiesDialog(settings, self.object_properties)
self.toolbar.addAction("Object properties", self.object_properties_dialog.show)
self.rotated()
def rotated(self):
self.image_view.set_data(self.rotate_dialog.data_rotated)
self.image_view.axes.relim()
self.image_view.axes.autoscale_view()
self.image_view.set_extent([self.rotate_dialog.data_rotated.shape[1],0, self.rotate_dialog.data_rotated.shape[0],0])
self.image_view.figure.canvas.draw()
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
self.draw_plot(self.spatial_plot.axes, self.spatial_profile())
def background_span_selected(self, min, max):
self.background_span_selection = (min, max)
self.spatial_plot.add_span('background_window', min, max, 'v', facecolor='gray', alpha=0.5)
self.image_plot.add_span('background_window', min, max, 'h', facecolor='red', alpha=0.5, clip_on=True)
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
def spectrum_span_selected(self, min, max):
self.spectrum_span_selection = (min, max)
self.spatial_plot.add_span('spectrum_window', min, max, 'v', facecolor='g', alpha=0.5)
self.image_plot.add_span('spectrum_window', min, max, 'h', facecolor='y', alpha=0.25, clip_on=True)
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
def draw_plot(self, axes, data):
axes.clear()
axes.plot(data)
axes.figure.tight_layout()
axes.figure.canvas.draw()
def spatial_profile(self):
return self.rotate_dialog.data_rotated.sum(1)
def spectrum_profile(self):
return self.rotate_dialog.data_rotated[self.spectrum_span_selection[0]:self.spectrum_span_selection[1]+1,:].sum(0) if hasattr(self, 'spectrum_span_selection') else self.rotate_dialog.data_rotated.sum(0)
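# Clarifying note (added; not in the original source): data_rotated is a 2D
# array of shape (rows, columns). spatial_profile() sums over axis 1 and yields
# one value per image row, while spectrum_profile() sums over axis 0 and yields
# one value per column. Assuming the dispersion axis runs along the columns,
# the latter is the extracted 1D spectrum. For example, for
# [[1, 2, 3], [4, 5, 6]] the spatial profile is [6, 15] and the spectrum
# profile is [5, 7, 9].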
def save(self, save_file):
data = self.spectrum_profile()
data -= np.amin(data)
data /= np.amax(data)
hdu = self.fits_file[0]
hdu.data = data
hdu.header['ORIGIN'] = 'PySpectrum'
self.fits_file.writeto(save_file, clobber=True)
def save_profile(self):
if not self.project:
save_file_sticky('Save plot...', 'FITS file (.fit)', lambda f: self.save(f[0]), self.settings, RAW_PROFILE )
return
if not self.object_properties.name:
QMessageBox.information(self, 'Save FITS', 'Please set file information (name, date, etc) using the Object Properties button before saving')
return
file_path = self.project.add_file(Project.RAW_PROFILE, object_properties = self.object_properties, on_added=self.save)
#self.save(file_path)
| gpl-3.0 |
h-hirokawa/swampdragon | swampdragon/tests/test_selfpub_model.py | 1 | 3350 | from ..route_handler import ModelRouter
from ..pubsub_providers.base_provider import PUBACTIONS
from .dragon_test_case import DragonTestCase
from .models import FooSelfPub, BarSelfPub
from .serializers import FooSelfPubSerializer, BarSelfPubSerializer
from datetime import datetime
class FooModelRouter(ModelRouter):
serializer_class = FooSelfPubSerializer
class BarModelRouter(ModelRouter):
serializer_class = BarSelfPubSerializer
class TestSelfPubModel(DragonTestCase):
def test_self_pub_model(self):
router = FooModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan'})
self.assertIsNone(self.connection.last_pub)
FooSelfPub.objects.create(name='test')
self.assertIsNotNone(self.connection.last_pub)
def test_self_pub_model_with_fk(self):
router = BarModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan'})
self.assertIsNone(self.connection.last_pub)
foo = FooSelfPub.objects.create(name='test')
BarSelfPub.objects.create(date=datetime.now(), foo=foo)
self.assertIsNotNone(self.connection.last_pub)
def test_ignore_id_when_getting_updated_fields(self):
FooSelfPubSerializer.Meta.publish_fields += ('pk', )
foo = FooSelfPub.objects.create(name='test')
def test_get_changes(self):
foo = FooSelfPub.objects.create(name='test')
self.assertListEqual(foo.get_changed_fields(), [])
foo.number = 12
self.assertListEqual(foo.get_changed_fields(), ['number'])
foo.name = 'updated'
self.assertIn('number', foo.get_changed_fields())
self.assertIn('name', foo.get_changed_fields())
bar = BarSelfPub.objects.create(date=datetime.now(), foo=foo)
self.assertListEqual(bar.get_changed_fields(), [])
update_date = datetime.now()
bar.date = update_date
self.assertListEqual(bar.get_changed_fields(), ['date'])
def test_raise_validation_error(self):
foo = FooSelfPub.objects.create(name='test')
data = foo.serialize()
self.assertEqual(data['name'], foo.name)
def test_create(self):
router = FooModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan'})
FooSelfPub.objects.create(name='test')
self.assertEqual(self.connection.last_pub['action'], 'created')
def test_update(self):
router = FooModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan'})
foo = FooSelfPub.objects.create(name='test')
foo.name = 'updated'
foo.save()
self.assertEqual(self.connection.last_pub['action'], 'updated')
def test_remove_on_update(self):
router = FooModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan', 'name__contains': 'findme'})
foo = FooSelfPub.objects.create(name='test')
self.assertIsNone(self.connection.last_pub)
foo.name = 'findme'
foo.save()
self.assertEqual(self.connection.last_pub['action'], PUBACTIONS.updated)
foo.name = 'hideme'
foo.save()
self.assertEqual(self.connection.last_pub['action'], PUBACTIONS.deleted)
foo.name = 'findmeagain'
foo.save()
self.assertEqual(self.connection.last_pub['action'], PUBACTIONS.updated)
| bsd-3-clause |
Kalyzee/edx-platform | common/djangoapps/xblock_django/tests/test_user_service.py | 132 | 3992 | """
Tests for the DjangoXBlockUserService.
"""
from django.test import TestCase
from xblock_django.user_service import (
DjangoXBlockUserService,
ATTR_KEY_IS_AUTHENTICATED,
ATTR_KEY_USER_ID,
ATTR_KEY_USERNAME,
ATTR_KEY_USER_IS_STAFF,
)
from student.models import anonymous_id_for_user
from student.tests.factories import UserFactory, AnonymousUserFactory
from opaque_keys.edx.keys import CourseKey
class UserServiceTestCase(TestCase):
"""
Tests for the DjangoXBlockUserService.
"""
def setUp(self):
super(UserServiceTestCase, self).setUp()
self.user = UserFactory(username="tester", email="[email protected]")
self.user.profile.name = "Test Tester"
self.anon_user = AnonymousUserFactory()
def assert_is_anon_xb_user(self, xb_user):
"""
A set of assertions for an anonymous XBlockUser.
"""
self.assertFalse(xb_user.opt_attrs[ATTR_KEY_IS_AUTHENTICATED])
self.assertIsNone(xb_user.full_name)
self.assertListEqual(xb_user.emails, [])
def assert_xblock_user_matches_django(self, xb_user, dj_user):
"""
A set of assertions for comparing a XBlockUser to a django User
"""
self.assertTrue(xb_user.opt_attrs[ATTR_KEY_IS_AUTHENTICATED])
self.assertEqual(xb_user.emails[0], dj_user.email)
self.assertEqual(xb_user.full_name, dj_user.profile.name)
self.assertEqual(xb_user.opt_attrs[ATTR_KEY_USERNAME], dj_user.username)
self.assertEqual(xb_user.opt_attrs[ATTR_KEY_USER_ID], dj_user.id)
self.assertFalse(xb_user.opt_attrs[ATTR_KEY_USER_IS_STAFF])
def test_convert_anon_user(self):
"""
Tests for convert_django_user_to_xblock_user behavior when django user is AnonymousUser.
"""
django_user_service = DjangoXBlockUserService(self.anon_user)
xb_user = django_user_service.get_current_user()
self.assertTrue(xb_user.is_current_user)
self.assert_is_anon_xb_user(xb_user)
def test_convert_authenticate_user(self):
"""
Tests for convert_django_user_to_xblock_user behavior when django user is User.
"""
django_user_service = DjangoXBlockUserService(self.user)
xb_user = django_user_service.get_current_user()
self.assertTrue(xb_user.is_current_user)
self.assert_xblock_user_matches_django(xb_user, self.user)
def test_get_anonymous_user_id_returns_none_for_non_staff_users(self):
"""
Tests for anonymous_user_id method to return None if user is Non-Staff.
"""
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=False)
anonymous_user_id = django_user_service.get_anonymous_user_id(username=self.user.username, course_id='edx/toy/2012_Fall')
self.assertIsNone(anonymous_user_id)
def test_get_anonymous_user_id_returns_none_for_non_existing_users(self):
"""
Tests for anonymous_user_id method to return None if username does not exist in system.
"""
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True)
anonymous_user_id = django_user_service.get_anonymous_user_id(username="No User", course_id='edx/toy/2012_Fall')
self.assertIsNone(anonymous_user_id)
def test_get_anonymous_user_id_returns_id_for_existing_users(self):
"""
Tests for anonymous_user_id method returns anonymous user id for a user.
"""
course_key = CourseKey.from_string('edX/toy/2012_Fall')
anon_user_id = anonymous_id_for_user(
user=self.user,
course_id=course_key,
save=True
)
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True)
anonymous_user_id = django_user_service.get_anonymous_user_id(
username=self.user.username,
course_id='edX/toy/2012_Fall'
)
self.assertEqual(anonymous_user_id, anon_user_id)
| agpl-3.0 |
redhat-openstack/ironic | ironic/tests/conf_fixture.py | 11 | 1431 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_config import cfg
from ironic.common import config
CONF = cfg.CONF
CONF.import_opt('host', 'ironic.common.service')
class ConfFixture(fixtures.Fixture):
"""Fixture to manage global conf settings."""
def __init__(self, conf):
self.conf = conf
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False, group='database')
self.conf.set_default('verbose', True)
config.parse_args([], default_config_files=[])
self.addCleanup(self.conf.reset)
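# Illustrative usage sketch (added; not part of the original module). A test
# case built on fixtures/testtools would typically enable these defaults in
# its setUp, e.g.:
#
# from ironic.tests import conf_fixture
#
# def setUp(self):
#     super(MyTestCase, self).setUp()  # MyTestCase is a placeholder name
#     self.useFixture(conf_fixture.ConfFixture(conf_fixture.CONF))
#
# The fixture registers self.conf.reset as a cleanup, so option values changed
# for one test are undone before the next one runs.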
| apache-2.0 |
MingdaZhou/gnuradio | gr-vocoder/examples/codec2_audio_loopback.py | 47 | 1553 | #!/usr/bin/env python
#
# Copyright 2005,2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import blocks
from gnuradio import vocoder
from gnuradio.vocoder import codec2
def build_graph():
tb = gr.top_block()
src = audio.source(8000)
src_scale = blocks.multiply_const_ff(32767)
f2s = blocks.float_to_short()
enc = vocoder.codec2_encode_sp(codec2.MODE_2400)
dec = vocoder.codec2_decode_ps(codec2.MODE_2400)
s2f = blocks.short_to_float()
sink_scale = blocks.multiply_const_ff(1.0/32767.)
sink = audio.sink(8000)
tb.connect(src, src_scale, f2s, enc, dec, s2f, sink_scale, sink)
return tb
if __name__ == '__main__':
tb = build_graph()
tb.start()
raw_input ('Press Enter to exit: ')
tb.stop()
tb.wait()
| gpl-3.0 |
AakashRaina/radpress | radpress/urls.py | 3 | 1287 | from django.conf.urls import patterns, url
from radpress.views import (
ArticleArchiveView, ArticleDetailView, ArticleListView, PreviewView,
PageDetailView, SearchView, ZenModeView, ZenModeUpdateView)
from radpress.feeds import ArticleFeed
urlpatterns = patterns(
'',
url(r'^$',
view=ArticleListView.as_view(),
name='radpress-article-list'),
url(r'^archives/$',
view=ArticleArchiveView.as_view(),
name='radpress-article-archive'),
url(r'^detail/(?P<slug>[-\w]+)/$',
view=ArticleDetailView.as_view(),
name='radpress-article-detail'),
url(r'^p/(?P<slug>[-\w]+)/$',
view=PageDetailView.as_view(),
name='radpress-page-detail'),
url(r'^preview/$',
view=PreviewView.as_view(),
name='radpress-preview'),
url(r'^search/$',
view=SearchView.as_view(),
name='radpress-search'),
url(r'^zen/$',
view=ZenModeView.as_view(),
name='radpress-zen-mode'),
url(r'^zen/(?P<pk>\d+)/$',
view=ZenModeUpdateView.as_view(),
name='radpress-zen-mode-update'),
url(r'^rss/$',
view=ArticleFeed(),
name='radpress-rss'),
url(r'^rss/(?P<tags>[-/\w]+)/$',
view=ArticleFeed(),
name='radpress-rss')
)
| mit |
nexusz99/boto | boto/services/result.py | 153 | 5596 | #!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from datetime import datetime, timedelta
from boto.utils import parse_ts
import boto
class ResultProcessor(object):
LogFileName = 'log.csv'
def __init__(self, batch_name, sd, mimetype_files=None):
self.sd = sd
self.batch = batch_name
self.log_fp = None
self.num_files = 0
self.total_time = 0
self.min_time = timedelta.max
self.max_time = timedelta.min
self.earliest_time = datetime.max
self.latest_time = datetime.min
self.queue = self.sd.get_obj('output_queue')
self.domain = self.sd.get_obj('output_domain')
def calculate_stats(self, msg):
start_time = parse_ts(msg['Service-Read'])
end_time = parse_ts(msg['Service-Write'])
elapsed_time = end_time - start_time
if elapsed_time > self.max_time:
self.max_time = elapsed_time
if elapsed_time < self.min_time:
self.min_time = elapsed_time
self.total_time += elapsed_time.seconds
if start_time < self.earliest_time:
self.earliest_time = start_time
if end_time > self.latest_time:
self.latest_time = end_time
def log_message(self, msg, path):
keys = sorted(msg.keys())
if not self.log_fp:
self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
line = ','.join(keys)
self.log_fp.write(line+'\n')
values = []
for key in keys:
value = msg[key]
if value.find(',') > 0:
value = '"%s"' % value
values.append(value)
line = ','.join(values)
self.log_fp.write(line+'\n')
def process_record(self, record, path, get_file=True):
self.log_message(record, path)
self.calculate_stats(record)
outputs = record['OutputKey'].split(',')
if 'OutputBucket' in record:
bucket = boto.lookup('s3', record['OutputBucket'])
else:
bucket = boto.lookup('s3', record['Bucket'])
for output in outputs:
if get_file:
key_name = output.split(';')[0]
key = bucket.lookup(key_name)
file_name = os.path.join(path, key_name)
print('retrieving file: %s to %s' % (key_name, file_name))
key.get_contents_to_filename(file_name)
self.num_files += 1
def get_results_from_queue(self, path, get_file=True, delete_msg=True):
m = self.queue.read()
while m:
if 'Batch' in m and m['Batch'] == self.batch:
self.process_record(m, path, get_file)
if delete_msg:
self.queue.delete_message(m)
m = self.queue.read()
def get_results_from_domain(self, path, get_file=True):
rs = self.domain.query("['Batch'='%s']" % self.batch)
for item in rs:
self.process_record(item, path, get_file)
def get_results_from_bucket(self, path):
bucket = self.sd.get_obj('output_bucket')
if bucket:
print('No output queue or domain, just retrieving files from output_bucket')
for key in bucket:
file_name = os.path.join(path, key)
print('retrieving file: %s to %s' % (key, file_name))
key.get_contents_to_filename(file_name)
self.num_files += 1
def get_results(self, path, get_file=True, delete_msg=True):
if not os.path.isdir(path):
os.mkdir(path)
if self.queue:
self.get_results_from_queue(path, get_file)
elif self.domain:
self.get_results_from_domain(path, get_file)
else:
self.get_results_from_bucket(path)
if self.log_fp:
self.log_fp.close()
print('%d results successfully retrieved.' % self.num_files)
if self.num_files > 0:
self.avg_time = float(self.total_time)/self.num_files
print('Minimum Processing Time: %d' % self.min_time.seconds)
print('Maximum Processing Time: %d' % self.max_time.seconds)
print('Average Processing Time: %f' % self.avg_time)
self.elapsed_time = self.latest_time-self.earliest_time
print('Elapsed Time: %d' % self.elapsed_time.seconds)
tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files)
print('Throughput: %f transactions / minute' % tput)
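# Illustrative usage sketch (added; not part of the original module), assuming
# a service definition config readable by boto.services.servicedef.ServiceDef:
#
# from boto.services.servicedef import ServiceDef
#
# sd = ServiceDef('my_service.cfg')                    # hypothetical config file
# processor = ResultProcessor('batch-2014-07-24', sd)  # hypothetical batch name
# processor.get_results('/tmp/results')
#
# get_results() downloads the result files for the batch, appends to log.csv in
# the output directory and prints the timing statistics computed above.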
| mit |
admcrae/tensorflow | tensorflow/examples/adding_an_op/zero_out_2_test.py | 111 | 1988 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for version 2 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.adding_an_op import zero_out_grad_2 # pylint: disable=unused-import
from tensorflow.examples.adding_an_op import zero_out_op_2
class ZeroOut2Test(tf.test.TestCase):
def test(self):
with self.test_session():
result = zero_out_op_2.zero_out([5, 4, 3, 2, 1])
self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
def test_2d(self):
with self.test_session():
result = zero_out_op_2.zero_out([[6, 5, 4], [3, 2, 1]])
self.assertAllEqual(result.eval(), [[6, 0, 0], [0, 0, 0]])
def test_grad(self):
with self.test_session():
shape = (5,)
x = tf.constant([5, 4, 3, 2, 1], dtype=tf.float32)
y = zero_out_op_2.zero_out(x)
err = tf.test.compute_gradient_error(x, shape, y, shape)
self.assertLess(err, 1e-4)
def test_grad_2d(self):
with self.test_session():
shape = (2, 3)
x = tf.constant([[6, 5, 4], [3, 2, 1]], dtype=tf.float32)
y = zero_out_op_2.zero_out(x)
err = tf.test.compute_gradient_error(x, shape, y, shape)
self.assertLess(err, 1e-4)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
mszewczy/odoo | addons/board/__openerp__.py | 261 | 1647 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Dashboards',
'version': '1.0',
'category': 'Hidden',
'description': """
Lets the user create a custom dashboard.
========================================
Allows users to create custom dashboard.
""",
'author': 'OpenERP SA',
'depends': ['base', 'web'],
'data': [
'security/ir.model.access.csv',
'board_view.xml',
'board_mydashboard_view.xml',
'views/board.xml',
],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ecino/compassion-modules | thankyou_letters/__manifest__.py | 2 | 2325 | # -*- coding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2016-2020 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# pylint: disable=C8101
{
'name': 'Thank You Letters',
'version': '10.0.2.1.1',
'category': 'Other',
'author': 'Compassion CH',
'license': 'AGPL-3',
'website': 'http://www.compassion.ch',
'depends': [
'partner_communication',
'advanced_translation',
'web_widget_digitized_signature',
],
'data': [
'security/ir.model.access.csv',
'report/donation_report.xml',
'data/email_template.xml',
'data/communication_config.xml',
'data/ir_cron.xml',
'views/success_story_view.xml',
'views/communication_job_view.xml',
'views/account_invoice_view.xml',
'views/product_view.xml',
'views/res_partner_view.xml',
'views/thankyou_config_view.xml',
'views/generate_communication_wizard_view.xml',
],
'demo': [
'demo/demo_data.xml'
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
Hazelsuko07/17WarmingUp | py3.6/lib/python3.6/site-packages/pip/operations/check.py | 342 | 1590 |
def check_requirements(installed_dists):
missing_reqs_dict = {}
incompatible_reqs_dict = {}
for dist in installed_dists:
key = '%s==%s' % (dist.project_name, dist.version)
missing_reqs = list(get_missing_reqs(dist, installed_dists))
if missing_reqs:
missing_reqs_dict[key] = missing_reqs
incompatible_reqs = list(get_incompatible_reqs(
dist, installed_dists))
if incompatible_reqs:
incompatible_reqs_dict[key] = incompatible_reqs
return (missing_reqs_dict, incompatible_reqs_dict)
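# Illustrative sketch (added; not part of the original module): the expected
# input is a list of installed distributions, e.g. from
# pip.utils.get_installed_distributions(); both returned dicts are keyed by
# "name==version" strings, with pkg_resources Requirement objects (and, for the
# incompatible dict, (requirement, installed_distribution) tuples) as values:
#
# missing, incompatible = check_requirements(get_installed_distributions())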
def get_missing_reqs(dist, installed_dists):
"""Return all of the requirements of `dist` that aren't present in
`installed_dists`.
"""
installed_names = set(d.project_name.lower() for d in installed_dists)
missing_requirements = set()
for requirement in dist.requires():
if requirement.project_name.lower() not in installed_names:
missing_requirements.add(requirement)
yield requirement
def get_incompatible_reqs(dist, installed_dists):
"""Return all of the requirements of `dist` that are present in
`installed_dists`, but have incompatible versions.
"""
installed_dists_by_name = {}
for installed_dist in installed_dists:
installed_dists_by_name[installed_dist.project_name] = installed_dist
for requirement in dist.requires():
present_dist = installed_dists_by_name.get(requirement.project_name)
if present_dist and present_dist not in requirement:
yield (requirement, present_dist)
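# Note added for clarity: the `present_dist not in requirement` check relies on
# pkg_resources.Requirement.__contains__, which tests whether the installed
# distribution's version satisfies the requirement's version specifier. For
# example, a declared requirement "dep>=2.0" with dep 1.5 installed is yielded
# here as an incompatibility.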
| mit |
DefyVentures/edx-platform | lms/djangoapps/survey/models.py | 81 | 7589 | """
Models to support Course Surveys feature
"""
import logging
from lxml import etree
from collections import OrderedDict
from django.db import models
from student.models import User
from django.core.exceptions import ValidationError
from model_utils.models import TimeStampedModel
from survey.exceptions import SurveyFormNameAlreadyExists, SurveyFormNotFound
log = logging.getLogger("edx.survey")
class SurveyForm(TimeStampedModel):
"""
Model to define a Survey Form that contains the HTML form data
that is presented to the end user. A SurveyForm is not tied to
a particular run of a course, to allow for sharing of Surveys
across courses
"""
name = models.CharField(max_length=255, db_index=True, unique=True)
form = models.TextField()
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
"""
Override save method so we can validate that the form HTML is
actually parseable
"""
self.validate_form_html(self.form)
# now call the actual save method
super(SurveyForm, self).save(*args, **kwargs)
@classmethod
def validate_form_html(cls, html):
"""
Makes sure that the html that is contained in the form field is valid
"""
try:
fields = cls.get_field_names_from_html(html)
except Exception as ex:
log.exception("Cannot parse SurveyForm html: {}".format(ex))
raise ValidationError("Cannot parse SurveyForm as HTML: {}".format(ex))
if not len(fields):
raise ValidationError("SurveyForms must contain at least one form input field")
@classmethod
def create(cls, name, form, update_if_exists=False):
"""
Helper class method to create a new Survey Form.
update_if_exists=True means that if a form already exists with that name, then update it.
Otherwise raise a SurveyFormNameAlreadyExists exception
"""
survey = cls.get(name, throw_if_not_found=False)
if not survey:
survey = SurveyForm(name=name, form=form)
else:
if update_if_exists:
survey.form = form
else:
raise SurveyFormNameAlreadyExists()
survey.save()
return survey
@classmethod
def get(cls, name, throw_if_not_found=True):
"""
Helper class method to look up a Survey Form; raises SurveyFormNotFound if it does not exist
in the database, unless throw_if_not_found=False, in which case None is returned
"""
survey = None
exists = SurveyForm.objects.filter(name=name).exists()
if exists:
survey = SurveyForm.objects.get(name=name)
elif throw_if_not_found:
raise SurveyFormNotFound()
return survey
def get_answers(self, user=None, limit_num_users=10000):
"""
Returns all answers for all users for this Survey
"""
return SurveyAnswer.get_answers(self, user, limit_num_users=limit_num_users)
def has_user_answered_survey(self, user):
"""
Returns whether a given user has supplied answers to this
survey
"""
return SurveyAnswer.do_survey_answers_exist(self, user)
def save_user_answers(self, user, answers):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
IMPORTANT: There is no validation of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
SurveyAnswer.save_answers(self, user, answers)
def get_field_names(self):
"""
Returns a list of defined field names for all answers in a survey. This can be
helpful for reporting-like features, e.g. adding headers to the reports.
This is taken from the set of <input> fields inside the form.
"""
return SurveyForm.get_field_names_from_html(self.form)
@classmethod
def get_field_names_from_html(cls, html):
"""
Returns a list of defined field names from a block of HTML
"""
names = []
# make sure the form is wrapped in some outer single element
# otherwise lxml can't parse it
# NOTE: This wrapping doesn't change the ability to query it
tree = etree.fromstring(u'<div>{}</div>'.format(html))
input_fields = tree.findall('.//input') + tree.findall('.//select')
for input_field in input_fields:
if 'name' in input_field.keys() and input_field.attrib['name'] not in names:
names.append(input_field.attrib['name'])
return names
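# Illustrative example (added; not in the original source): only <input> and
# <select> elements with a name attribute are considered and duplicate names
# are collapsed, e.g.
#
# >>> SurveyForm.get_field_names_from_html(
# ...     '<input name="level"/><input name="level"/><select name="color"></select>')
# ['level', 'color']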
class SurveyAnswer(TimeStampedModel):
"""
Model for the answers that a user gives for a particular form in a course
"""
user = models.ForeignKey(User, db_index=True)
form = models.ForeignKey(SurveyForm, db_index=True)
field_name = models.CharField(max_length=255, db_index=True)
field_value = models.CharField(max_length=1024)
@classmethod
def do_survey_answers_exist(cls, form, user):
"""
Returns whether a user has any answers for a given SurveyForm for a course
This can be used to determine if a user has taken a CourseSurvey.
"""
return SurveyAnswer.objects.filter(form=form, user=user).exists()
@classmethod
def get_answers(cls, form, user=None, limit_num_users=10000):
"""
Returns all answers a user (or all users, when user=None) has given to an instance of a SurveyForm
The return value is a nested dict of simple name/value pairs with an outer key which is the
user id. For example (where 'field3' is an optional field):
results = {
'1': {
'field1': 'value1',
'field2': 'value2',
},
'2': {
'field1': 'value3',
'field2': 'value4',
'field3': 'value5',
}
:
:
}
limit_num_users is to prevent an unintentional huge, in-memory dictionary.
"""
if user:
answers = SurveyAnswer.objects.filter(form=form, user=user)
else:
answers = SurveyAnswer.objects.filter(form=form)
results = OrderedDict()
num_users = 0
for answer in answers:
user_id = answer.user.id
if user_id not in results and num_users < limit_num_users:
results[user_id] = OrderedDict()
num_users = num_users + 1
if user_id in results:
results[user_id][answer.field_name] = answer.field_value
return results
@classmethod
def save_answers(cls, form, user, answers):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
IMPORTANT: There is no validation of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
for name in answers.keys():
value = answers[name]
# See if there is an answer stored for this user, form, field_name pair or not
# this will allow for update cases. This does include an additional lookup,
# but write operations will be relatively infrequent
answer, __ = SurveyAnswer.objects.get_or_create(user=user, form=form, field_name=name)
answer.field_value = value
answer.save()
| agpl-3.0 |
INM-6/python-neo | neo/io/neuralynxio_v1.py | 2 | 105289 | """
Class for reading data from Neuralynx files.
This IO supports NCS, NEV and NSE file formats.
This module is an older implementation with old neo.io API.
A new class NeuralynxIO compunded by NeuralynxRawIO and BaseFromIO
superseed this one.
Depends on: numpy
Supported: Read
Author: Julia Sprenger, Carlos Canova
Adapted from the exampleIO of python-neo
"""
import sys
import os
import warnings
import codecs
import copy
import re
import datetime
import pkg_resources
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
import neo.io.neuralynxio
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal, SpikeTrain,
Event, Unit)
from os import listdir, sep
from os.path import isfile, getsize
import hashlib
import pickle
if hasattr(pkg_resources, 'pkg_resources'):
parse_version = pkg_resources.pkg_resources.parse_version
else:
parse_version = pkg_resources.parse_version
class NeuralynxIO(BaseIO):
"""
Class for reading Neuralynx files.
It enables reading:
- :class:'Block'
- :class:'Segment'
- :class:'AnalogSignal'
- :class:'SpikeTrain'
Usage:
from neo import io
import quantities as pq
import matplotlib.pyplot as plt
session_folder = '../Data/2014-07-24_10-31-02'
NIO = io.NeuralynxIO(session_folder,print_diagnostic = True)
block = NIO.read_block(t_starts = 0.1*pq.s, t_stops = 0.2*pq.s,
events=True)
seg = block.segments[0]
analogsignal = seg.analogsignals[0]
plt.plot(analogsignal.times.rescale(pq.ms), analogsignal.magnitude)
plt.show()
"""
is_readable = True # This class can only read data
is_writable = False # write is not supported
# This class is able to directly or indirectly handle the following objects
# You can notice that this greatly simplifies the full Neo object hierarchy
supported_objects = [Segment, AnalogSignal, SpikeTrain, Event]
# This class can return either a Block or a Segment
# The first one is the default ( self.read )
# These lists should go from highest object to lowest object because
# common_io_test assumes it.
readable_objects = [Segment, AnalogSignal, SpikeTrain]
# This class is not able to write objects
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuff : a definition for parameters when reading.
# This dict should be keyed by object (`Block`). Each entry is a list
# of tuple. The first entry in each tuple is the parameter name. The
# second entry is a dict with keys 'value' (for default value),
# and 'label' (for a descriptive name).
# Note that if the highest-level object requires parameters,
# common_io_test will be skipped.
read_params = {
Segment: [('waveforms', {'value': True})],
Block: [('waveforms', {'value': False})]
}
# do not supported write so no GUI stuff
write_params = None
name = 'Neuralynx'
description = 'This IO reads .nse/.ncs/.nev files of the Neuralynx (' \
'Cheetah) recording system (tetrodes).'
extensions = ['nse', 'ncs', 'nev', 'ntt']
# mode can be 'file' or 'dir' or 'fake' or 'database'
# the main case is 'file' but some reader are base on a directory or
# a database this info is for GUI stuff also
mode = 'dir'
# hardcoded parameters from manual, which are not present in Neuralynx
# data files
# unit of timestamps in different files
nev_time_unit = pq.microsecond
ncs_time_unit = pq.microsecond
nse_time_unit = pq.microsecond
ntt_time_unit = pq.microsecond
# unit of sampling rate in different files
ncs_sr_unit = pq.Hz
nse_sr_unit = pq.Hz
ntt_sr_unit = pq.Hz
def __init__(self, sessiondir=None, cachedir=None, use_cache='hash',
print_diagnostic=False, filename=None):
"""
Arguments:
sessiondir: the directory in which the files of the recording session are
collected. Default 'None'.
print_diagnostic: indicates whether information about the loading of
data is printed to the terminal or not. Default 'False'.
cachedir: the directory where metadata about the recording session is
read from and written to.
use_cache: method used for cache identification. Possible values:
'hash'/'always'/'datesize'/'never'. Default 'hash'.
filename: this argument is handled the same as sessiondir and is only
added for external IO interfaces. The value of sessiondir
has priority over filename.
"""
warnings.warn('{} is deprecated and will be removed in neo version 0.10. Use {} instead.'
''.format(self.__class__, neo.io.neuralynxio.NeuralynxIO), FutureWarning)
BaseIO.__init__(self)
# possibility to provide filename instead of sessiondir for IO
# compatibility
if filename is not None and sessiondir is None:
sessiondir = filename
if sessiondir is None:
raise ValueError('Must provide a directory containing data files of'
' of one recording session.')
# remove filename if specific file was passed
if any([sessiondir.endswith('.%s' % ext) for ext in self.extensions]):
sessiondir = sessiondir[:sessiondir.rfind(sep)]
# remove / for consistent directory handling
if sessiondir.endswith(sep):
sessiondir = sessiondir.rstrip(sep)
# set general parameters of this IO
self.sessiondir = sessiondir
self.filename = sessiondir.split(sep)[-1]
self._print_diagnostic = print_diagnostic
self.associated = False
self._associate(cachedir=cachedir, usecache=use_cache)
self._diagnostic_print(
'Initialized IO for session %s' % self.sessiondir)
def read_block(self, lazy=False, cascade=True, t_starts=None,
t_stops=None,
electrode_list=None, unit_list=None, analogsignals=True,
events=False,
waveforms=False):
"""
Reads data in a requested time window and returns a block with as many
segments as necessary to contain these data.
Arguments:
lazy : Postpone actual reading of the data files. Default 'False'.
cascade : Do not postpone reading subsequent neo types (segments).
Default 'True'.
t_starts : list of quantities or quantity describing the start of
the requested time window to load. If None or [None]
the complete session is loaded. Default 'None'.
t_stops : list of quantities or quantity describing the end of the
requested time window to load. Has to contain the
same number of values as t_starts. If None or [None]
the complete session is loaded. Default 'None'.
electrode_list : list of integers containing the IDs of the
requested channels to load. If [] or None all available
channels will be loaded.
Default: None.
unit_list : list of integers containing the IDs of the requested
units to load. If [] or None all available units
will be loaded.
Default: None.
analogsignals : boolean, indication whether analogsignals should be
read. Default: True.
events : Loading events. If True all available events in the given
time window will be read. Default: False.
waveforms : Load waveform for spikes in the requested time
window. Default: False.
Returns: Block object containing the requested data in neo structures.
Usage:
from neo import io
import quantities as pq
import matplotlib.pyplot as plt
session_folder = '../Data/2014-07-24_10-31-02'
NIO = io.NeuralynxIO(session_folder,print_diagnostic = True)
block = NIO.read_block(lazy = False, cascade = True,
t_starts = 0.1*pq.s, t_stops = 0.2*pq.s,
electrode_list = [1,5,10],
unit_list = [1,2,3],
events = True, waveforms = True)
plt.plot(block.segments[0].analogsignals[0])
plt.show()
"""
# Create block
bl = Block(file_origin=self.sessiondir)
bl.name = self.filename
if not cascade:
return bl
# Checking input of t_start and t_stop
# For lazy users that specify x,x instead of [x],[x] for t_starts,
# t_stops
if t_starts is None:
t_starts = [None]
elif type(t_starts) == pq.Quantity:
t_starts = [t_starts]
elif type(t_starts) != list or any(
[(type(i) != pq.Quantity and i is not None) for i in t_starts]):
raise ValueError('Invalid specification of t_starts.')
if t_stops is None:
t_stops = [None]
elif type(t_stops) == pq.Quantity:
t_stops = [t_stops]
elif type(t_stops) != list or any(
[(type(i) != pq.Quantity and i is not None) for i in t_stops]):
raise ValueError('Invalid specification of t_stops.')
# adapting t_starts and t_stops to known gap times (extracted in
# association process / initialization)
for gap in self.parameters_global['gaps']:
# gap=gap_list[0]
for e in range(len(t_starts)):
t1, t2 = t_starts[e], t_stops[e]
gap_start = gap[1] * self.ncs_time_unit - \
self.parameters_global['t_start']
gap_stop = gap[2] * self.ncs_time_unit - self.parameters_global[
't_start']
if ((t1 is None and t2 is None)
or (t1 is None and t2 is not None and t2.rescale(
self.ncs_time_unit) > gap_stop)
or (t2 is None and t1 is not None and t1.rescale(
self.ncs_time_unit) < gap_stop)
or (t1 is not None and t2 is not None and t1.rescale(
self.ncs_time_unit) < gap_start
and t2.rescale(self.ncs_time_unit) > gap_stop)):
# adapting first time segment
t_stops[e] = gap_start
# inserting second time segment
t_starts.insert(e + 1, gap_stop)
t_stops.insert(e + 1, t2)
warnings.warn(
'Substituted t_starts and t_stops in order to skip '
'gap in recording session.')
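# Illustrative example (values assumed): a request of t_starts=[0*pq.s],
# t_stops=[10*pq.s] with a recorded gap from 4 s to 6 s is split into
# t_starts=[0*pq.s, 6*pq.s] and t_stops=[4*pq.s, 10*pq.s], i.e. one
# segment per gap-free period.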
# loading all channels if empty electrode_list
if electrode_list == [] or electrode_list is None:
electrode_list = self.parameters_ncs.keys()
# adding a segment for each t_start, t_stop pair
for t_start, t_stop in zip(t_starts, t_stops):
seg = self.read_segment(lazy=lazy, cascade=cascade,
t_start=t_start, t_stop=t_stop,
electrode_list=electrode_list,
unit_list=unit_list,
analogsignals=analogsignals, events=events,
waveforms=waveforms)
bl.segments.append(seg)
# generate units
units = []
channel_unit_collection = {}
for st in [s for seg in bl.segments for s in seg.spiketrains]:
# collecting spiketrains of same channel and unit id to generate
# common unit
chuid = (st.annotations['channel_index'], st.annotations['unit_id'])
if chuid in channel_unit_collection:
channel_unit_collection[chuid].append(st)
else:
channel_unit_collection[chuid] = [st]
for chuid in channel_unit_collection:
sts = channel_unit_collection[chuid]
unit = Unit(name='Channel %i, Unit %i' % chuid)
unit.spiketrains.extend(sts)
units.append(unit)
# generate one channel index for each analogsignal
for anasig in [a for seg in bl.segments for a in seg.analogsignals]:
channelids = anasig.annotations['channel_index']
channel_names = ['channel %i' % i for i in channelids]
channelidx = ChannelIndex(index=range(len(channelids)),
channel_names=channel_names,
name='channel ids for all analogsignal '
'"%s"' % anasig.name,
channel_ids=channelids)
channelidx.analogsignals.append(anasig)
bl.channel_indexes.append(channelidx)
# generate channel indexes for units
channelids = [unit.spiketrains[0].annotations['channel_index']
for unit in units]
channel_names = ['channel %i' % i for i in channelids]
channelidx = ChannelIndex(index=range(len(channelids)),
channel_names=channel_names,
name='channel ids for all spiketrains',
channel_ids=channelids)
channelidx.units.extend(units)
bl.channel_indexes.append(channelidx)
bl.create_many_to_one_relationship()
# Adding global parameters to block annotation
bl.annotations.update(self.parameters_global)
return bl
def read_segment(self, lazy=False, cascade=True, t_start=None, t_stop=None,
electrode_list=None, unit_list=None, analogsignals=True,
events=False, waveforms=False):
"""Reads one Segment.
The Segment will contain one AnalogSignal for each channel
and will go from t_start to t_stop.
Arguments:
lazy : Postpone actual reading of the data files. Default 'False'.
cascade : Do not postpone reading subsequent neo types (SpikeTrains,
AnalogSignals, Events).
Default 'True'.
t_start : time (quantity) that the Segment begins. Default None.
t_stop : time (quantity) that the Segment ends. Default None.
electrode_list : list of integers containing the IDs of the
requested channels to load. If [] or None all available
channels will be loaded.
Default: None.
unit_list : list of integers containing the IDs of the requested
units to load. If [] or None all available units
will be loaded. If False, no unit will be loaded.
Default: None.
analogsignals : boolean, indicating whether analogsignals should be
read. Default: True.
events : Loading events. If True all available events in the given
time window will be read. Default: False.
waveforms : Load waveform for spikes in the requested time
window. Default: False.
Returns:
Segment object containing neo objects, which contain the data.
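Usage (illustrative sketch; assumes an IO instance 'NIO' created as in
the read_block example above and quantities imported as pq):
seg = NIO.read_segment(t_start=0.1 * pq.s, t_stop=0.2 * pq.s,
electrode_list=[1, 5, 10], unit_list=[1, 2, 3],
events=True, waveforms=True)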
"""
# input check
# loading all channels if empty electrode_list
if electrode_list == [] or electrode_list is None:
electrode_list = self.parameters_ncs.keys()
elif [v for v in electrode_list if
v in self.parameters_ncs.keys()] == []:
# warn if none of the requested channels are present in this session
warnings.warn('Requested channels %s are not present in session '
'(contains only %s)' % (
electrode_list, self.parameters_ncs.keys()))
electrode_list = []
seg = Segment(file_origin=self.filename)
if not cascade:
return seg
# generate empty segment for analogsignal collection
empty_seg = Segment(file_origin=self.filename)
# Reading NCS Files #
# selecting ncs files to load based on electrode_list requested
if analogsignals:
for chid in electrode_list:
if chid in self.parameters_ncs:
file_ncs = self.parameters_ncs[chid]['filename']
self.read_ncs(file_ncs, empty_seg, lazy, cascade,
t_start=t_start, t_stop=t_stop)
else:
self._diagnostic_print('Can not load ncs of channel %i. '
'No corresponding ncs file '
'present.' % (chid))
# supplementary merge function, should be replaced by a neo utility
# function
def merge_analogsignals(anasig_list):
for aid, anasig in enumerate(anasig_list):
anasig.channel_index = None
if aid == 0:
full_analogsignal = anasig
else:
full_analogsignal = full_analogsignal.merge(anasig)
for key in anasig_list[0].annotations.keys():
listified_values = [a.annotations[key] for a in anasig_list]
full_analogsignal.annotations[key] = listified_values
return full_analogsignal
analogsignal = merge_analogsignals(empty_seg.analogsignals)
seg.analogsignals.append(analogsignal)
analogsignal.segment = seg
# Reading NEV Files (Events)#
# reading all files available
if events:
for filename_nev in self.nev_asso:
self.read_nev(filename_nev, seg, lazy, cascade, t_start=t_start,
t_stop=t_stop)
# Reading Spike Data only if requested
if unit_list is not False:
# Reading NSE Files (Spikes)#
# selecting nse files to load based on electrode_list requested
for chid in electrode_list:
if chid in self.parameters_nse:
filename_nse = self.parameters_nse[chid]['filename']
self.read_nse(filename_nse, seg, lazy, cascade,
t_start=t_start, t_stop=t_stop,
waveforms=waveforms)
else:
self._diagnostic_print('Can not load nse of channel %i. '
'No corresponding nse file '
'present.' % (chid))
# Reading ntt Files (Spikes)#
# selecting ntt files to load based on electrode_list requested
for chid in electrode_list:
if chid in self.parameters_ntt:
filename_ntt = self.parameters_ntt[chid]['filename']
self.read_ntt(filename_ntt, seg, lazy, cascade,
t_start=t_start, t_stop=t_stop,
waveforms=waveforms)
else:
self._diagnostic_print('Can not load ntt of channel %i. '
'No corresponding ntt file '
'present.' % (chid))
return seg
def read_ncs(self, filename_ncs, seg, lazy=False, cascade=True,
t_start=None, t_stop=None):
'''
Reading a single .ncs file from the associated Neuralynx recording
session.
In case of a recording gap between t_start and t_stop, data are only
loaded until gap start.
For loading data across recording gaps use read_block(...).
Arguments:
filename_ncs : Name of the .ncs file to be loaded.
seg : Neo Segment, to which the AnalogSignal containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
AnalogSignal. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time or sample (quantity or integer) that the
AnalogSignal begins.
Default None.
t_stop : time or sample (quantity or integer) that the
AnalogSignal ends.
Default None.
Returns:
None
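Usage (illustrative sketch; 'CSC1.ncs' is an assumed file name, 'NIO'
an IO instance created as in the read_block example, and quantities
is imported as pq):
from neo.core import Segment
seg = Segment(file_origin='example')
NIO.read_ncs('CSC1.ncs', seg, t_start=0 * pq.s, t_stop=1 * pq.s)
# seg.analogsignals now contains the AnalogSignal of that channel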
'''
# checking format of filename and correcting if necessary
if filename_ncs[-4:] != '.ncs':
filename_ncs = filename_ncs + '.ncs'
if sep in filename_ncs:
filename_ncs = filename_ncs.split(sep)[-1]
# Extracting the channel id from prescan (association) of ncs files with
# this recording session
chid = self.get_channel_id_by_file_name(filename_ncs)
if chid is None:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_ncs))
if not cascade:
return
# read data
header_time_data = self.__mmap_ncs_packet_timestamps(filename_ncs)
data = self.__mmap_ncs_data(filename_ncs)
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
if isinstance(t_start, int):
t_start = t_start / self.parameters_ncs[chid]['sampling_rate']
if isinstance(t_stop, int):
t_stop = t_stop / self.parameters_ncs[chid]['sampling_rate']
# rescaling to global start time of recording (time of first sample
# in any file type)
if t_start is None or t_start < (
self.parameters_ncs[chid]['t_start']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_ncs[chid]['t_start'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_ncs[chid]['t_stop']
- self.parameters_global[
't_start']):
raise ValueError(
'Requested time window (%s to %s) is later than data were '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ncs[chid]['t_stop']
- self.parameters_global['t_start']),
filename_ncs))
if t_stop is None or t_stop > (
self.parameters_ncs[chid]['t_stop']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_ncs[chid]['t_stop'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_ncs[chid]['t_start']
- self.parameters_global['t_start']):
raise ValueError(
'Requested time window (%s to %s) is earlier than data were '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ncs[chid]['t_start']
- self.parameters_global['t_start']),
filename_ncs))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) '
'for file %s.' % (t_start, t_stop, filename_ncs))
# Extracting data signal in requested time window
unit = pq.dimensionless # default value
if lazy:
sig = []
p_id_start = 0
else:
tstamps = header_time_data * self.ncs_time_unit - \
self.parameters_global['t_start']
# find data packet to start with signal construction
starts = np.where(tstamps <= t_start)[0]
if len(starts) == 0:
self._diagnostic_print(
'Requested AnalogSignal not present in this time '
'interval.')
return
else:
# first packet to be included into signal
p_id_start = starts[-1]
# find data packet where signal ends (due to gap or t_stop)
stops = np.where(tstamps >= t_stop)[0]
if len(stops) != 0:
first_stop = [stops[0]]
else:
first_stop = []
# last packet to be included in signal
p_id_stop = min(first_stop + [len(data)])
# search gaps in recording in time range to load
gap_packets = [gap_id[0] for gap_id in
self.parameters_ncs[chid]['gaps'] if
gap_id[0] > p_id_start]
if len(gap_packets) > 0 and min(gap_packets) < p_id_stop:
p_id_stop = min(gap_packets)
warnings.warn(
'Analogsignalarray was shortened due to gap in '
'recorded '
'data '
' of file %s at packet id %i' % (
filename_ncs, min(gap_packets)))
# search broken packets in time range to load
broken_packets = []
if 'broken_packet' in self.parameters_ncs[chid]:
broken_packets = [packet[0] for packet in
self.parameters_ncs[chid]['broken_packet']
if packet[0] > p_id_start]
if len(broken_packets) > 0 and min(broken_packets) < p_id_stop:
p_id_stop = min(broken_packets)
warnings.warn(
'Analogsignalarray was shortened due to broken data '
'packet in recorded data '
' of file %s at packet id %i' % (
filename_ncs, min(broken_packets)))
# construct signal in valid packet range
sig = np.array(data[p_id_start:p_id_stop + 1], dtype=float)
sig = sig.reshape(len(sig) * len(sig[0]))
# ADBitVolts is not guaranteed to be present in the header!
if 'ADBitVolts' in self.parameters_ncs[chid]:
sig *= self.parameters_ncs[chid]['ADBitVolts']
unit = pq.V
else:
warnings.warn(
'Could not transform data from file %s into a physical '
'signal. Missing "ADBitVolts" value in text header.'
% filename_ncs)
# defining sampling rate for rescaling purposes
sampling_rate = self.parameters_ncs[chid]['sampling_unit'][0]
# creating neo AnalogSignal containing data
anasig = AnalogSignal(signal=pq.Quantity(sig, unit, copy=False),
sampling_rate=1 * sampling_rate,
# rescaling t_start to sampling time units
t_start=(header_time_data[p_id_start] * self.ncs_time_unit
- self.parameters_global['t_start']).rescale(
1 / sampling_rate),
name='channel_%i' % (chid),
channel_index=chid)
# removing protruding parts of first and last data packet
if anasig.t_start < t_start.rescale(anasig.t_start.units):
anasig = anasig.time_slice(t_start.rescale(anasig.t_start.units),
None)
if anasig.t_stop > t_stop.rescale(anasig.t_start.units):
anasig = anasig.time_slice(None,
t_stop.rescale(anasig.t_start.units))
annotations = copy.deepcopy(self.parameters_ncs[chid])
for pop_key in ['sampling_rate', 't_start']:
if pop_key in annotations:
annotations.pop(pop_key)
anasig.annotations.update(annotations)
anasig.annotations['electrode_id'] = chid
# this annotation is necessary for the automatic generation of
# recordingchannels
anasig.annotations['channel_index'] = chid
anasig.segment = seg # needed for merge function of analogsignals
seg.analogsignals.append(anasig)
def read_nev(self, filename_nev, seg, lazy=False, cascade=True,
t_start=None, t_stop=None):
'''
Reads associated nev file and attaches its content as eventarray to
provided neo segment. In contrast to read_ncs, times can not be provided
in number of samples as a nev file has no inherent sampling rate.
Arguments:
filename_nev : Name of the .nev file to be loaded.
seg : Neo Segment, to which the Event containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
Event. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time (quantity) that the Events begin.
Default None.
t_stop : time (quantity) that the Event end.
Default None.
Returns:
None
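Usage (illustrative sketch; 'Events.nev' is an assumed file name,
'NIO' an IO instance created as in the read_block example, and
quantities is imported as pq):
from neo.core import Segment
seg = Segment(file_origin='example')
NIO.read_nev('Events.nev', seg, t_start=0 * pq.s, t_stop=10 * pq.s)
# seg.events now holds one Event object per detected event type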
'''
if filename_nev[-4:] != '.nev':
filename_nev += '.nev'
if sep in filename_nev:
filename_nev = filename_nev.split(sep)[-1]
if filename_nev not in self.nev_asso:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_nev))
# ensure meaningful values for requested start and stop times:
# providing times in samples for a nev file does not make sense as we
# don't know the underlying sampling rate
if isinstance(t_start, int):
raise ValueError(
'Requesting event information from nev file in samples '
'does '
'not make sense. '
'Requested t_start %s' % t_start)
if isinstance(t_stop, int):
raise ValueError(
'Requesting event information from nev file in samples '
'does '
'not make sense. '
'Requested t_stop %s' % t_stop)
# ensure meaningful values for requested start and stop times
if t_start is None or t_start < (
self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start']):
t_start = (self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start'])
if t_start > (self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start']):
raise ValueError(
'Requested time window (%s to %s) is later than data were '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start']),
filename_nev))
if t_stop is None or t_stop > (
self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start']):
t_stop = (self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start'])
if t_stop < (self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start']):
raise ValueError(
'Requested time window (%s to %s) is earlier than data were '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(
self.parameters_nev[filename_nev][
't_start']
- self.parameters_global['t_start']),
filename_nev))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) '
'for file %s.' % (t_start, t_stop, filename_nev))
data = self.__mmap_nev_file(filename_nev)
# Extracting all events for one event type and put it into an event
# array
# TODO: Check if this is the correct way of event creation.
for event_type in self.parameters_nev[filename_nev]['event_types']:
# Extract all time stamps of digital markers and rescaling time
type_mask = [i for i in range(len(data)) if
(data[i][4] == event_type['event_id']
and data[i][5] == event_type['nttl']
and data[i][10].decode('latin-1') == event_type[
'name'])]
marker_times = [t[3] for t in
data[type_mask]] * self.nev_time_unit - \
self.parameters_global['t_start']
# only consider Events in the requested time window [t_start,
# t_stop]
time_mask = [i for i in range(len(marker_times)) if (
marker_times[i] >= t_start and marker_times[i] <= t_stop)]
marker_times = marker_times[time_mask]
# Do not create an eventarray if there are no events of this type
# in the requested time range
if len(marker_times) == 0:
continue
ev = Event(times=pq.Quantity(marker_times, units=self.nev_time_unit,
dtype="int"),
labels=event_type['name'],
name="Digital Marker " + str(event_type),
file_origin=filename_nev,
marker_id=event_type['event_id'],
digital_marker=True,
analog_marker=False,
nttl=event_type['nttl'])
seg.events.append(ev)
def read_nse(self, filename_nse, seg, lazy=False, cascade=True,
t_start=None, t_stop=None, unit_list=None,
waveforms=False):
'''
Reads nse file and attaches content as spike train to provided neo
segment. Times can be provided in samples (integer values). If the
nse file does not contain a sampling rate value, the ncs sampling
rate on the same electrode is used.
Arguments:
filename_nse : Name of the .nse file to be loaded.
seg : Neo Segment, to which the Spiketrain containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
SpikeTrain. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time or sample (quantity or integer) that the
SpikeTrain begins.
Default None.
t_stop : time or sample (quantity or integer) that the SpikeTrain
ends.
Default None.
unit_list : unit ids to be loaded. If [], all units are loaded.
Default None.
waveforms : Load the waveform (up to 32 data points) for each
spike time. Default: False
Returns:
None
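Usage (illustrative sketch; 'SE1.nse' is an assumed file name and
'NIO' an IO instance created as in the read_block example):
from neo.core import Segment
seg = Segment(file_origin='example')
NIO.read_nse('SE1.nse', seg, unit_list=[1, 2], waveforms=True)
# seg.spiketrains now holds one SpikeTrain per requested unit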
'''
if filename_nse[-4:] != '.nse':
filename_nse += '.nse'
if sep in filename_nse:
filename_nse = filename_nse.split(sep)[-1]
# extracting channel id of requested file
channel_id = self.get_channel_id_by_file_name(filename_nse)
if channel_id is not None:
chid = channel_id
else:
# if nse file is empty it is not listed in self.parameters_nse, but
# in self.nse_avail
if filename_nse in self.nse_avail:
warnings.warn('NeuralynxIO is attempting to read an empty '
'(not associated) nse file (%s). '
'Not loading nse file.' % (filename_nse))
return
else:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_nse))
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
# ncs sampling rate is best guess if there is no explicit sampling
# rate given for nse values.
if 'sampling_rate' in self.parameters_nse[chid]:
sr = self.parameters_nse[chid]['sampling_rate']
elif chid in self.parameters_ncs and 'sampling_rate' in \
self.parameters_ncs[chid]:
sr = self.parameters_ncs[chid]['sampling_rate']
else:
raise ValueError(
'No sampling rate present for channel id %i in nse file '
'%s. '
'Could also not find the sampling rate of the respective '
'ncs '
'file.' % (
chid, filename_nse))
if isinstance(t_start, int):
t_start = t_start / sr
if isinstance(t_stop, int):
t_stop = t_stop / sr
# + rescaling global recording start (first sample in any file type)
# This is not optimal, as there is no way to know how long the
# recording lasted after last spike
if t_start is None or t_start < (
self.parameters_nse[chid]['t_first']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_nse[chid]['t_first'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_nse[chid]['t_last']
- self.parameters_global['t_start']):
raise ValueError(
'Requested time window (%s to %s) is later than data were '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nse[chid]['t_last']
- self.parameters_global['t_start']),
filename_nse))
if t_stop is None:
t_stop = (sys.maxsize) * self.nse_time_unit
if t_stop is None or t_stop > (
self.parameters_nse[chid]['t_last']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_nse[chid]['t_last'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_nse[chid]['t_first']
- self.parameters_global[
't_start']):
raise ValueError(
'Requested time window (%s to %s) is earlier than data were '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nse[chid]['t_first']
- self.parameters_global['t_start']),
filename_nse))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) for file %s.' % (t_start, t_stop, filename_nse))
# reading data
[timestamps, channel_ids, cell_numbers, features,
data_points] = self.__mmap_nse_packets(filename_nse)
# load all units available if unit_list==[] or None
if unit_list == [] or unit_list is None:
unit_list = np.unique(cell_numbers)
elif not any([u in cell_numbers for u in unit_list]):
self._diagnostic_print(
'None of the requested unit ids (%s) present '
'in nse file %s (contains unit_list %s)' % (
unit_list, filename_nse, np.unique(cell_numbers)))
# extracting spikes unit-wise and generate spiketrains
for unit_i in unit_list:
if not lazy:
# Extract all time stamps of that neuron on that electrode
unit_mask = np.where(cell_numbers == unit_i)[0]
spike_times = timestamps[unit_mask] * self.nse_time_unit
spike_times = spike_times - self.parameters_global['t_start']
time_mask = np.where(np.logical_and(spike_times >= t_start,
spike_times < t_stop))
spike_times = spike_times[time_mask]
else:
spike_times = pq.Quantity([], units=self.nse_time_unit)
# Create SpikeTrain object
st = SpikeTrain(times=spike_times,
t_start=t_start,
t_stop=t_stop,
sampling_rate=self.parameters_ncs[chid][
'sampling_rate'],
name="Channel %i, Unit %i" % (chid, unit_i),
file_origin=filename_nse,
unit_id=unit_i,
channel_id=chid)
if waveforms and not lazy:
# Collect all waveforms of the specific unit
# For computational reasons: no units, no time axis
st.waveforms = data_points[unit_mask][time_mask]
# TODO: Add units to waveforms (pq.uV?) and add annotation
# left_sweep = x * pq.ms indicating when threshold crossing
# occurred in waveform
st.annotations.update(self.parameters_nse[chid])
st.annotations['electrode_id'] = chid
# This annotation is necessary for the automatic generation of
# recordingchannels
st.annotations['channel_index'] = chid
seg.spiketrains.append(st)
def read_ntt(self, filename_ntt, seg, lazy=False, cascade=True,
t_start=None, t_stop=None, unit_list=None,
waveforms=False):
'''
Reads ntt file and attaches content as spike train to provided neo
segment.
Arguments:
filename_ntt : Name of the .ntt file to be loaded.
seg : Neo Segment, to which the Spiketrain containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
SpikeTrain. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time (quantity) that the SpikeTrain begins. Default None.
t_stop : time (quantity) that the SpikeTrain ends. Default None.
unit_list : unit ids to be loaded. If [] or None all units are
loaded.
Default None.
waveforms : Load the waveform (up to 32 data points) for each
spike time. Default: False
Returns:
None
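Usage (illustrative sketch; 'TT1.ntt' is an assumed file name and
'NIO' an IO instance created as in the read_block example):
from neo.core import Segment
seg = Segment(file_origin='example')
NIO.read_ntt('TT1.ntt', seg, waveforms=True)
# seg.spiketrains now holds one SpikeTrain per unit of the tetrode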
'''
if filename_ntt[-4:] != '.ntt':
filename_ntt += '.ntt'
if sep in filename_ntt:
filename_ntt = filename_ntt.split(sep)[-1]
# extracting channel id of requested file
channel_id = self.get_channel_id_by_file_name(filename_ntt)
if channel_id is not None:
chid = channel_id
else:
# if ntt file is empty it is not listed in self.parameters_ntt, but
# in self.ntt_avail
if filename_ntt in self.ntt_avail:
warnings.warn('NeuralynxIO is attempting to read an empty '
'(not associated) ntt file (%s). '
'Not loading ntt file.' % (filename_ntt))
return
else:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_ntt))
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
# ncs sampling rate is best guess if there is no explicit sampling
# rate given for ntt values.
if 'sampling_rate' in self.parameters_ntt[chid]:
sr = self.parameters_ntt[chid]['sampling_rate']
elif chid in self.parameters_ncs and 'sampling_rate' in \
self.parameters_ncs[chid]:
sr = self.parameters_ncs[chid]['sampling_rate']
else:
raise ValueError(
'No sampling rate present for channel id %i in ntt file '
'%s. '
'Could also not find the sampling rate of the respective '
'ncs '
'file.' % (
chid, filename_ntt))
if isinstance(t_start, int):
t_start = t_start / sr
if isinstance(t_stop, int):
t_stop = t_stop / sr
# + rescaling to global recording start (first sample in any
# recording file)
if t_start is None or t_start < (
self.parameters_ntt[chid]['t_first']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_ntt[chid]['t_first'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_ntt[chid]['t_last']
- self.parameters_global[
't_start']):
raise ValueError(
'Requested time window (%s to %s) is later than data were '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ntt[chid]['t_last']
- self.parameters_global['t_start']),
filename_ntt))
if t_stop is None:
t_stop = (sys.maxsize) * self.ntt_time_unit
if t_stop is None or t_stop > (
self.parameters_ntt[chid]['t_last']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_ntt[chid]['t_last'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_ntt[chid]['t_first']
- self.parameters_global[
't_start']):
raise ValueError(
'Requested time window (%s to %s) is earlier than data were '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ntt[chid]['t_first']
- self.parameters_global['t_start']),
filename_ntt))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) '
'for file %s.' % (t_start, t_stop, filename_ntt))
# reading data
[timestamps, channel_ids, cell_numbers, features,
data_points] = self.__mmap_ntt_packets(filename_ntt)
# TODO: When ntt available: Implement 1 RecordingChannelGroup per
# Tetrode, such that each electrode gets its own recording channel
# load all units available if unit_list == [] or None
if unit_list == [] or unit_list is None:
unit_list = np.unique(cell_numbers)
elif not any([u in cell_numbers for u in unit_list]):
self._diagnostic_print(
'None of the requested unit ids (%s) present '
'in ntt file %s (contains units %s)' % (
unit_list, filename_ntt, np.unique(cell_numbers)))
# loading data for each unit and generating spiketrain
for unit_i in unit_list:
if not lazy:
# Extract all time stamps of that neuron on that electrode
mask = np.where(cell_numbers == unit_i)[0]
spike_times = timestamps[mask] * self.ntt_time_unit
spike_times = spike_times - self.parameters_global['t_start']
spike_times = spike_times[np.where(
np.logical_and(spike_times >= t_start,
spike_times < t_stop))]
else:
spike_times = pq.Quantity([], units=self.ntt_time_unit)
# Create SpikeTrain object
st = SpikeTrain(times=spike_times,
t_start=t_start,
t_stop=t_stop,
sampling_rate=self.parameters_ncs[chid][
'sampling_rate'],
name="Channel %i, Unit %i" % (chid, unit_i),
file_origin=filename_ntt,
unit_id=unit_i,
channel_id=chid)
# Collect all waveforms of the specific unit
if waveforms and not lazy:
# For computational reasons: no units, no time axis
# transposing to adhere to the neo guideline, which states that
# time should be in the first axis.
# This is counter-intuitive for waveform data.
st.waveforms = np.array(
[data_points[t, :, :] for t in range(len(timestamps))
if cell_numbers[t] == unit_i]).transpose()
# TODO: Add units to waveforms (pq.uV?) and add annotation
# left_sweep = x * pq.ms indicating when threshold crossing
# occurred in waveform
st.annotations.update(self.parameters_ntt[chid])
st.annotations['electrode_id'] = chid
# This annotation is necessary for the automatic generation of
# recordingchannels
st.annotations['channel_index'] = chid
seg.spiketrains.append(st)
# private routines
# #################################################
def _associate(self, cachedir=None, usecache='hash'):
"""
Associates the object with a specified Neuralynx session, i.e., a
combination of .ncs, .nse, .ntt and .nev files. The metadata is read
into the object for future reference.
Arguments:
cachedir : Directory for loading and saving hashes of recording
sessions
and pickled meta information about files
extracted during
association process
use_cache: method used for cache identification. Possible values:
'hash'/
'always'/'datesize'/'never'. Default 'hash'
Returns:
-
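Example (illustrative, paths are assumptions): for
sessiondir='/data/2014-07-24_10-31-02' and cachedir='/tmp/nlx_cache',
this step writes '/tmp/nlx_cache/2014-07-24_10-31-02/hashkeys' and
'/tmp/nlx_cache/2014-07-24_10-31-02/parameters.cache' and reuses them
on the next run if the session files are unchanged.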
"""
# If already associated, disassociate first
if self.associated:
raise OSError(
"Trying to associate an already associated NeuralynxIO "
"object.")
# Create parameter containers
# Dictionary that holds different parameters read from the .nev file
self.parameters_nse = {}
# List of parameter dictionaries for all potential file types
self.parameters_ncs = {}
self.parameters_nev = {}
self.parameters_ntt = {}
# combined global parameters
self.parameters_global = {}
# Scanning session directory for recorded files
self.sessionfiles = [f for f in listdir(self.sessiondir) if
isfile(os.path.join(self.sessiondir, f))]
# Listing available files
self.ncs_avail = []
self.nse_avail = []
self.nev_avail = []
self.ntt_avail = []
# Listing associated (=non corrupted, non empty files)
self.ncs_asso = []
self.nse_asso = []
self.nev_asso = []
self.ntt_asso = []
if usecache not in ['hash', 'always', 'datesize', 'never']:
raise ValueError(
"Argument value of usecache '%s' is not valid. Accepted "
"values are 'hash','always','datesize','never'" % usecache)
if cachedir is None and usecache != 'never':
raise ValueError('No cache directory provided.')
# check if there are any changes of the data files -> new data check run
# files are never checked if usecache == 'always'
check_files = usecache != 'always'
if cachedir is not None and usecache != 'never':
self._diagnostic_print(
'Calculating %s of session files to check for cached '
'parameter files.' % usecache)
cachefile = cachedir + sep + self.sessiondir.split(sep)[
-1] + '/hashkeys'
if not os.path.exists(cachedir + sep + self.sessiondir.split(sep)[-1]):
os.makedirs(cachedir + sep + self.sessiondir.split(sep)[-1])
if usecache == 'hash':
hashes_calc = {}
# calculates hash of all available files
for f in self.sessionfiles:
file_hash = self.hashfile(open(self.sessiondir + sep + f,
'rb'), hashlib.sha256())
hashes_calc[f] = file_hash
elif usecache == 'datesize':
hashes_calc = {}
for f in self.sessionfiles:
hashes_calc[f] = self.datesizefile(
self.sessiondir + sep + f)
# load hashes saved for this session in an earlier loading run
if os.path.exists(cachefile):
hashes_read = pickle.load(open(cachefile, 'rb'))
else:
hashes_read = {}
# compare hashes to previously saved metadata and load metadata
# if no changes occurred
if usecache == 'always' or all([f in hashes_calc
and f in hashes_read
and hashes_calc[f] == hashes_read[f]
for f in self.sessionfiles]):
check_files = False
self._diagnostic_print(
'Using cached metadata from earlier analysis run in '
'file '
'%s. Skipping file checks.' % cachefile)
# loading saved parameters
parameterfile = cachedir + sep + self.sessiondir.split(sep)[
-1] + '/parameters.cache'
if os.path.exists(parameterfile):
parameters_read = pickle.load(open(parameterfile, 'rb'))
else:
raise OSError('Inconsistent cache files.')
for IOdict, dictname in [(self.parameters_global, 'global'),
(self.parameters_ncs, 'ncs'),
(self.parameters_nse, 'nse'),
(self.parameters_nev, 'nev'),
(self.parameters_ntt, 'ntt')]:
IOdict.update(parameters_read[dictname])
self.nev_asso = self.parameters_nev.keys()
self.ncs_asso = [val['filename'] for val in
self.parameters_ncs.values()]
self.nse_asso = [val['filename'] for val in
self.parameters_nse.values()]
self.ntt_asso = [val['filename'] for val in
self.parameters_ntt.values()]
for filename in self.sessionfiles:
# Extracting only continuous signal files (.ncs)
if filename[-4:] == '.ncs':
self.ncs_avail.append(filename)
elif filename[-4:] == '.nse':
self.nse_avail.append(filename)
elif filename[-4:] == '.nev':
self.nev_avail.append(filename)
elif filename[-4:] == '.ntt':
self.ntt_avail.append(filename)
else:
self._diagnostic_print(
'Ignoring file of unknown data type %s' % filename)
if check_files:
self._diagnostic_print('Starting individual file checks.')
# =======================================================================
# # Scan NCS files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .ncs file(s).' % (len(self.ncs_avail)))
for ncs_file in self.ncs_avail:
# Loading individual NCS file and extracting parameters
self._diagnostic_print("Scanning " + ncs_file + ".")
# Reading file packet headers
filehandle = self.__mmap_ncs_packet_headers(ncs_file)
if filehandle is None:
continue
try:
# Checking consistency of ncs file
self.__ncs_packet_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % ncs_file)
continue
# Reading data packet header information and store them in
# parameters_ncs
self.__read_ncs_data_headers(filehandle, ncs_file)
# Reading txt file header
channel_id = self.get_channel_id_by_file_name(ncs_file)
self.__read_text_header(ncs_file,
self.parameters_ncs[channel_id])
# Check for invalid starting times of data packets in ncs file
self.__ncs_invalid_first_sample_check(filehandle)
# Check ncs file for gaps
self.__ncs_gap_check(filehandle)
self.ncs_asso.append(ncs_file)
# =======================================================================
# # Scan NSE files
# =======================================================================
# Loading individual NSE file and extracting parameters
self._diagnostic_print(
'\nDetected %i .nse file(s).' % (len(self.nse_avail)))
for nse_file in self.nse_avail:
# Loading individual NSE file and extracting parameters
self._diagnostic_print('Scanning ' + nse_file + '.')
# Reading file
filehandle = self.__mmap_nse_packets(nse_file)
if filehandle is None:
continue
try:
# Checking consistency of nse file
self.__nse_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % nse_file)
continue
# Reading header information and store them in parameters_nse
self.__read_nse_data_header(filehandle, nse_file)
# Reading txt file header
channel_id = self.get_channel_id_by_file_name(nse_file)
self.__read_text_header(nse_file,
self.parameters_nse[channel_id])
# using sampling rate from txt header, as this is not saved
# in data packets
if 'SamplingFrequency' in self.parameters_nse[channel_id]:
self.parameters_nse[channel_id]['sampling_rate'] = \
(self.parameters_nse[channel_id][
'SamplingFrequency'] * self.nse_sr_unit)
self.nse_asso.append(nse_file)
# =======================================================================
# # Scan NEV files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .nev file(s).' % (len(self.nev_avail)))
for nev_file in self.nev_avail:
# Loading individual NEV file and extracting parameters
self._diagnostic_print('Scanning ' + nev_file + '.')
# Reading file
filehandle = self.__mmap_nev_file(nev_file)
if filehandle is None:
continue
try:
# Checking consistency of nev file
self.__nev_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % nev_file)
continue
# Reading header information and store them in parameters_nev
self.__read_nev_data_header(filehandle, nev_file)
# Reading txt file header
self.__read_text_header(nev_file, self.parameters_nev[nev_file])
self.nev_asso.append(nev_file)
# =======================================================================
# # Scan NTT files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .ntt file(s).' % (len(self.ntt_avail)))
for ntt_file in self.ntt_avail:
# Loading individual NTT file and extracting parameters
self._diagnostic_print('Scanning ' + ntt_file + '.')
# Reading file
filehandle = self.__mmap_ntt_file(ntt_file)
if filehandle is None:
continue
try:
# Checking consistency of nev file
self.__ntt_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % ntt_file)
continue
# Reading header information and store them in parameters_nev
self.__read_ntt_data_header(filehandle, ntt_file)
# Reading txt file header
self.__read_ntt_text_header(ntt_file)
# using sampling rate from txt header, as this is not saved
# in data packets
channel_id = self.get_channel_id_by_file_name(ntt_file)
if 'SamplingFrequency' in self.parameters_ntt[channel_id]:
self.parameters_ntt[channel_id]['sampling_rate'] = \
(self.parameters_ntt[channel_id][
'SamplingFrequency'] * self.ntt_sr_unit)
self.ntt_asso.append(ntt_file)
# =======================================================================
# # Check consistency across files
# =======================================================================
# check RECORDING_OPENED / CLOSED times (from txt header) for
# different files
for parameter_collection in [self.parameters_ncs,
self.parameters_nse,
self.parameters_nev,
self.parameters_ntt]:
# check recording_opened times for specific file types
if any(np.abs(np.diff([i['recording_opened'] for i in
parameter_collection.values()]))
> datetime.timedelta(seconds=1)):
raise ValueError(
'Files of this session were opened for recording with a '
'delay greater than 1 second.')
# check recording_closed times for specific file types
if any(np.diff([i['recording_closed'] for i in
parameter_collection.values()
if i['recording_closed'] is not None])
> datetime.timedelta(seconds=0.1)):
raise ValueError(
'NCS files were closed after recording with a '
'delay '
'greater than 0.1 second.')
# get maximal duration of any file in the recording
parameter_collection = list(self.parameters_ncs.values()) + \
list(self.parameters_nse.values()) + \
list(self.parameters_ntt.values()) + \
list(self.parameters_nev.values())
self.parameters_global['recording_opened'] = min(
[i['recording_opened'] for i in parameter_collection])
self.parameters_global['recording_closed'] = max(
[i['recording_closed'] for i in parameter_collection])
# Set up GLOBAL TIMING SCHEME
# #############################
for file_type, parameter_collection in [
('ncs', self.parameters_ncs), ('nse', self.parameters_nse),
('nev', self.parameters_nev), ('ntt', self.parameters_ntt)]:
# check starting times
name_t1, name_t2 = ['t_start', 't_stop'] if (
file_type != 'nse' and file_type != 'ntt') \
else ['t_first', 't_last']
# checking if files of same type start at same time point
if file_type != 'nse' and file_type != 'ntt' \
and len(np.unique(np.array(
[i[name_t1].magnitude for i in
parameter_collection.values()]))) > 1:
raise ValueError(
'%s files do not start at same time point.' %
file_type)
# saving t_start and t_stop for each file type available
if len([i[name_t1] for i in parameter_collection.values()]):
self.parameters_global['%s_t_start' % file_type] = min(
[i[name_t1]
for i in parameter_collection.values()])
self.parameters_global['%s_t_stop' % file_type] = min(
[i[name_t2]
for i in parameter_collection.values()])
# extracting minimial t_start and maximal t_stop value for this
# recording session
self.parameters_global['t_start'] = min(
[self.parameters_global['%s_t_start' % t]
for t in ['ncs', 'nev', 'nse', 'ntt']
if '%s_t_start' % t in self.parameters_global])
self.parameters_global['t_stop'] = max(
[self.parameters_global['%s_t_stop' % t]
for t in ['ncs', 'nev', 'nse', 'ntt']
if '%s_t_start' % t in self.parameters_global])
# checking gap consistency across ncs files
# check number of gaps detected
if len(np.unique([len(i['gaps']) for i in
self.parameters_ncs.values()])) != 1:
raise ValueError('NCS files contain different numbers of gaps!')
# check consistency of gaps across files and create global gap
# collection
self.parameters_global['gaps'] = []
for g in range(len(list(self.parameters_ncs.values())[0]['gaps'])):
integrated = False
gap_stats = np.unique(
[i['gaps'][g] for i in self.parameters_ncs.values()],
return_counts=True)
if len(gap_stats[0]) != 3 or len(np.unique(gap_stats[1])) != 1:
raise ValueError(
'Gap number %i is not consistent across NCS '
'files.' % (
g))
else:
# check if this is second part of already existing gap
for gg in range(len(self.parameters_global['gaps'])):
globalgap = self.parameters_global['gaps'][gg]
# check if stop time of first is start time of second
# -> continuous gap
if globalgap[2] == \
list(self.parameters_ncs.values())[0]['gaps'][
g][1]:
self.parameters_global['gaps'][gg] = \
self.parameters_global['gaps'][gg][:2] + (
list(self.parameters_ncs.values())[0][
'gaps'][g][
2],)
integrated = True
break
if not integrated:
# add as new gap if this is not a continuation of
# existing global gap
self.parameters_global['gaps'].append(
list(self.parameters_ncs.values())[0][
'gaps'][g])
# save results of association for future analysis together with hash
# values for change tracking
if cachedir is not None and usecache != 'never':
pickle.dump({'global': self.parameters_global,
'ncs': self.parameters_ncs,
'nev': self.parameters_nev,
'nse': self.parameters_nse,
'ntt': self.parameters_ntt},
open(cachedir + sep + self.sessiondir.split(sep)[
-1] + '/parameters.cache', 'wb'))
if usecache != 'always':
pickle.dump(hashes_calc, open(
cachedir + sep + self.sessiondir.split(sep)[
-1] + '/hashkeys', 'wb'))
self.associated = True
# private routines
# #########################################################
# Memory Mapping Methods
def __mmap_nse_packets(self, filename):
"""
Memory map of the Neuralynx .nse file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u2',
shape=((filesize - 16384) // 2 // 56, 56),
mode='r', offset=16384)
# reconstructing original data
# first 4 ints -> timestamp in microsec
timestamps = data[:, 0] \
+ data[:, 1] * 2 ** 16 \
+ data[:, 2] * 2 ** 32 \
+ data[:, 3] * 2 ** 48
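# e.g. (values assumed) the uint16 words [1, 0, 0, 0] and [0, 1, 0, 0]
# reconstruct to timestamps of 1 and 65536 microseconds, respectively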
channel_id = data[:, 4] + data[:, 5] * 2 ** 16
cell_number = data[:, 6] + data[:, 7] * 2 ** 16
features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
range(8, 23, 2)]
features = np.array(features, dtype='i4')
data_points = data[:, 24:56].astype('i2')
del data
return timestamps, channel_id, cell_number, features, data_points
else:
return None
def __mmap_ncs_data(self, filename):
""" Memory map of the Neuralynx .ncs file optimized for data
extraction"""
if getsize(self.sessiondir + sep + filename) > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype=np.dtype(('i2', (522))), mode='r',
offset=16384)
# removing data packet headers (first 10 int16 values of each packet)
return data[:, 10:]
else:
return None
def __mmap_ncs_packet_headers(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
shape=((filesize - 16384) // 4 // 261, 261),
mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + (data[:,1] *2**32)
header_u4 = data[:, 2:5]
return timestamps, header_u4
else:
return None
def __mmap_ncs_packet_timestamps(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet timestamps
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
shape=(int((filesize - 16384) / 4 / 261), 261),
mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + data[:,1]*2**32
return timestamps
else:
return None
def __mmap_nev_file(self, filename):
""" Memory map the Neuralynx .nev file """
nev_dtype = np.dtype([
('reserved', '<i2'),
('system_id', '<i2'),
('data_size', '<i2'),
('timestamp', '<u8'),
('event_id', '<i2'),
('ttl_input', '<i2'),
('crc_check', '<i2'),
('dummy1', '<i2'),
('dummy2', '<i2'),
('extra', '<i4', (8,)),
('event_string', 'a128'),
])
if getsize(self.sessiondir + sep + filename) > 16384:
return np.memmap(self.sessiondir + sep + filename,
dtype=nev_dtype, mode='r', offset=16384)
else:
return None
def __mmap_ntt_file(self, filename):
""" Memory map the Neuralynx .nse file """
nse_dtype = np.dtype([
('timestamp', '<u8'),
('sc_number', '<u4'),
('cell_number', '<u4'),
('params', '<u4', (8,)),
('data', '<i2', (32, 4)),
])
if getsize(self.sessiondir + sep + filename) > 16384:
return np.memmap(self.sessiondir + sep + filename,
dtype=ntt_dtype, mode='r', offset=16384)
else:
return None
def __mmap_ntt_packets(self, filename):
"""
Memory map of the Neuralynx .ntt file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u2',
shape=((filesize - 16384) // 2 // 152, 152),
mode='r', offset=16384)
# reconstructing original data
# first 4 ints -> timestamp in microsec
timestamps = data[:, 0] + data[:, 1] * 2 ** 16 + \
data[:, 2] * 2 ** 32 + data[:, 3] * 2 ** 48
channel_id = data[:, 4] + data[:, 5] * 2 ** 16
cell_number = data[:, 6] + data[:, 7] * 2 ** 16
features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
range(8, 23, 2)]
features = np.array(features, dtype='i4')
# each record holds 32 samples x 4 channels (cf. the (32, 4) data
# field in __mmap_ntt_file), so reshape to one (32, 4) block per spike
data_points = data[:, 24:152].astype('i2').reshape((-1, 32, 4))
del data
return timestamps, channel_id, cell_number, features, data_points
else:
return None
# ___________________________ header extraction __________________________
def __read_text_header(self, filename, parameter_dict):
# Reading main file header (plain text, 16kB)
text_header = codecs.open(self.sessiondir + sep + filename, 'r',
'latin-1').read(16384)
parameter_dict['cheetah_version'] = \
self.__get_cheetah_version_from_txt_header(text_header, filename)
parameter_dict.update(self.__get_filename_and_times_from_txt_header(
text_header, parameter_dict['cheetah_version']))
# separating lines of header and ignoring last line (fill), check if
# Linux or Windows OS
if sep == '/':
text_header = text_header.split('\r\n')[:-1]
if sep == '\\':
text_header = text_header.split('\n')[:-1]
# minor parameters possibly saved in header (for any file type)
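# e.g. (assumed example) a header line '-SamplingFrequency 32000'
# matches the key list below and is stored as
# parameter_dict['SamplingFrequency'] = 32000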
minor_keys = ['AcqEntName',
'FileType',
'FileVersion',
'RecordSize',
'HardwareSubSystemName',
'HardwareSubSystemType',
'SamplingFrequency',
'ADMaxValue',
'ADBitVolts',
'NumADChannels',
'ADChannel',
'InputRange',
'InputInverted',
'DSPLowCutFilterEnabled',
'DspLowCutFrequency',
'DspLowCutNumTaps',
'DspLowCutFilterType',
'DSPHighCutFilterEnabled',
'DspHighCutFrequency',
'DspHighCutNumTaps',
'DspHighCutFilterType',
'DspDelayCompensation',
'DspFilterDelay_\xb5s',
'DisabledSubChannels',
'WaveformLength',
'AlignmentPt',
'ThreshVal',
'MinRetriggerSamples',
'SpikeRetriggerTime',
'DualThresholding',
'Feature Peak 0',
'Feature Valley 1',
'Feature Energy 2',
'Feature Height 3',
'Feature NthSample 4',
'Feature NthSample 5',
'Feature NthSample 6',
'Feature NthSample 7',
'SessionUUID',
'FileUUID',
'CheetahRev',
'ProbeName',
'OriginalFileName',
'TimeCreated',
'TimeClosed',
'ApplicationName',
'AcquisitionSystem',
'ReferenceChannel']
# extracting minor key values of header (only taking into account
# non-empty lines)
for i, minor_entry in enumerate(text_header):
if minor_entry == '' or minor_entry[0] == '#':
continue
matching_key = [key for key in minor_keys if
minor_entry.strip('-').startswith(key)]
if len(matching_key) == 1:
matching_key = matching_key[0]
minor_value = minor_entry.split(matching_key)[1].strip(
' ').rstrip(' ')
# determine data type of entry
if minor_value.isdigit():
# converting to int if possible
minor_value = int(minor_value)
else:
# converting to float if possible
try:
minor_value = float(minor_value)
except ValueError:
pass
if matching_key in parameter_dict:
warnings.warn(
'Multiple entries for {} in text header of {}'.format(
matching_key, filename))
else:
parameter_dict[matching_key] = minor_value
elif len(matching_key) > 1:
raise ValueError(
'Inconsistent minor key list for text header '
'interpretation.')
else:
warnings.warn(
'Skipping text header entry %s, because it is not in '
'minor key list' % minor_entry)
self._diagnostic_print(
'Successfully decoded text header of file (%s).' % filename)
def __get_cheetah_version_from_txt_header(self, text_header, filename):
version_regex = re.compile(r'((-CheetahRev )|'
r'(ApplicationName Cheetah "))'
r'(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})')
match = version_regex.search(text_header)
if match:
return match.groupdict()['version']
else:
raise ValueError('Can not extract Cheetah version from file '
'header of file %s' % filename)
def __get_filename_and_times_from_txt_header(self, text_header, version):
if parse_version(version) <= parse_version('5.6.4'):
datetime1_regex = re.compile(r'## Time Opened \(m/d/y\): '
r'(?P<date>\S+)'
r' \(h:m:s\.ms\) '
r'(?P<time>\S+)')
datetime2_regex = re.compile(r'## Time Closed \(m/d/y\): '
r'(?P<date>\S+)'
r' \(h:m:s\.ms\) '
r'(?P<time>\S+)')
filename_regex = re.compile(r'## File Name (?P<filename>\S+)')
datetimeformat = '%m/%d/%Y %H:%M:%S.%f'
else:
datetime1_regex = re.compile(r'-TimeCreated '
r'(?P<date>\S+) '
r'(?P<time>\S+)')
datetime2_regex = re.compile(r'-TimeClosed '
r'(?P<date>\S+) '
r'(?P<time>\S+)')
filename_regex = re.compile(r'-OriginalFileName '
r'"?(?P<filename>\S+)"?')
datetimeformat = '%Y/%m/%d %H:%M:%S'
matchtime1 = datetime1_regex.search(text_header).groupdict()
matchtime2 = datetime2_regex.search(text_header).groupdict()
matchfilename = filename_regex.search(text_header)
filename = matchfilename.groupdict()['filename']
if '## Time Closed File was not closed properly' in text_header:
warnings.warn('Text header of file %s does not contain recording '
'closed time. File was not closed properly.'
'' % filename)
datetime1 = datetime.datetime.strptime(matchtime1['date'] + ' '
+ matchtime1['time'],
datetimeformat)
datetime2 = datetime.datetime.strptime(matchtime2['date'] + ' '
+ matchtime2['time'],
datetimeformat)
output = {'recording_opened': datetime1,
'recording_closed': datetime2,
'file_created': datetime1,
'file_closed': datetime2,
'recording_file_name': filename}
return output
def __read_ncs_data_headers(self, filehandle, filename):
'''
Reads the .ncs data block headers and stores the information in the
object's parameters_ncs dictionary.
Args:
filehandle (file object):
Handle to the already opened .ncs file.
filename (string):
Name of the ncs file.
Returns:
dict of extracted data
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0][0]
sr = header_u4[0][1] # in Hz
t_start = timestamps[0] # in microseconds
# calculating corresponding time stamp of first sample, that was not
# recorded any more
# t_stop= time of first sample in last packet +(#samples per packet *
# conversion factor / sampling rate)
# conversion factor is needed as times are recorded in microseconds
t_stop = timestamps[-1] + (
(header_u4[-1][2]) * (
1 / self.ncs_time_unit.rescale(pq.s)).magnitude
/ header_u4[-1][1])
if channel_id in self.parameters_ncs:
raise ValueError(
'Detected multiple ncs files for channel_id %i.'
% channel_id)
else:
sampling_unit = [pq.CompoundUnit('%f*%s'
'' % (sr,
self.ncs_sr_unit.symbol))]
sampling_rate = sr * self.ncs_sr_unit
self.parameters_ncs[channel_id] = {'filename': filename,
't_start': t_start
* self.ncs_time_unit,
't_stop': t_stop
* self.ncs_time_unit,
'sampling_rate': sampling_rate,
'sampling_unit': sampling_unit,
'gaps': []}
return {channel_id: self.parameters_ncs[channel_id]}
def __read_nse_data_header(self, filehandle, filename):
'''
Reads the .nse data block headers and stores the information in the
object's parameters_nse dictionary.
Args:
filehandle (file object):
Handle to the already opened .nse file.
filename (string):
Name of the nse file.
Returns:
-
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
if filehandle is not None:
t_first = timestamps[0] # in microseconds
t_last = timestamps[-1] # in microseconds
channel_id = channel_ids[0]
cell_count = cell_numbers[0] # number of cells identified
self.parameters_nse[channel_id] = {'filename': filename,
't_first': t_first
* self.nse_time_unit,
't_last': t_last
* self.nse_time_unit,
'cell_count': cell_count}
def __read_ntt_data_header(self, filehandle, filename):
'''
Reads the .ntt data block headers and stores the information in the
object's parameters_ntt dictionary.
Args:
filehandle (file object):
Handle to the already opened .ntt file.
filename (string):
Name of the ntt file.
Returns:
-
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
if filehandle is not None:
t_first = timestamps[0] # in microseconds
t_last = timestamps[-1] # in microseconds
channel_id = channel_ids[0]
cell_count = cell_numbers[0] # number of cells identified
# spike_parameters = filehandle[0][3]
# else:
# t_first = None
# channel_id = None
# cell_count = 0
# # spike_parameters = None
#
# self._diagnostic_print('Empty file: No information
# contained in %s'%filename)
self.parameters_ntt[channel_id] = {'filename': filename,
't_first': t_first
* self.ntt_time_unit,
't_last': t_last
* self.ntt_time_unit,
'cell_count': cell_count}
def __read_nev_data_header(self, filehandle, filename):
'''
Reads the .nev data block headers and stores the relevant information
in the
object's parameters_nev dictionary.
Args:
filehandle (file object):
Handle to the already opened .nev file.
filename (string):
Name of the nev file.
Returns:
-
'''
# Extracting basic recording events to be able to check recording
# consistency
if filename in self.parameters_nev:
raise ValueError(
'Detected multiple nev files of name %s.' % (filename))
else:
self.parameters_nev[filename] = {}
if 'Starting_Recording' in self.parameters_nev[filename]:
raise ValueError('Trying to read second nev file of name %s. '
' Only one can be handled.' % filename)
self.parameters_nev[filename]['Starting_Recording'] = []
self.parameters_nev[filename]['events'] = []
for event in filehandle:
# separately extracting 'Starting Recording'
if ((event[4] in [11, 19])
and (event[10].decode('latin-1') == 'Starting Recording')):
self.parameters_nev[filename]['Starting_Recording'].append(
event[3] * self.nev_time_unit)
# adding all events to parameter collection
self.parameters_nev[filename]['events'].append(
{'timestamp': event[3] * self.nev_time_unit,
'event_id': event[4],
'nttl': event[5],
'name': event[10].decode('latin-1')})
if len(self.parameters_nev[filename]['Starting_Recording']) < 1:
raise ValueError(
'No Event "Starting_Recording" detected in %s' % (
filename))
self.parameters_nev[filename]['t_start'] = min(
self.parameters_nev[filename]['Starting_Recording'])
# t_stop = time stamp of last event in file
self.parameters_nev[filename]['t_stop'] = max(
[e['timestamp'] for e in
self.parameters_nev[filename]['events']])
# extract all occurring event types (= combination of nttl,
# event_id and name/string)
event_types = copy.deepcopy(self.parameters_nev[filename]['events'])
for d in event_types:
d.pop('timestamp')
        self.parameters_nev[filename]['event_types'] = [
            dict(y) for y in {tuple(x.items()) for x in event_types}]
# ________________ File Checks __________________________________
def __ncs_packet_check(self, filehandle):
'''
Checks consistency of data in ncs file and raises assertion error if a
check fails. Detected recording gaps are added to parameter_ncs
Args:
filehandle (file object):
Handle to the already opened .ncs file.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
# checking sampling rate of data packets
sr0 = header_u4[0, 1]
assert all(header_u4[:, 1] == sr0)
# checking channel id of data packets
channel_id = header_u4[0, 0]
assert all(header_u4[:, 0] == channel_id)
# time offset of data packets
# TODO: Check if there is a safer way to do the delta_t check for ncs
# data packets
        # NOTE: this relies on the (not entirely safe) assumption that the
        # first two data packets have correct time stamps
delta_t = timestamps[1] - timestamps[0]
# valid samples of first data packet
temp_valid_samples = header_u4[0, 2]
# unit test
# time difference between packets corresponds to number of recorded
# samples
assert delta_t == (
temp_valid_samples / (
self.ncs_time_unit.rescale(pq.s).magnitude * sr0))
self._diagnostic_print('NCS packet check successful.')
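    # Worked example for the packet-timing check above (illustrative only;
    # the numbers assume the common Neuralynx configuration of 512 samples
    # per .ncs data packet recorded at 32 kHz, which is not guaranteed here):
    # expected spacing between packet time stamps
    #   = 512 / (1e-6 * 32000) = 16000 microseconds,
    # i.e. delta_t between the first two packets should equal 16000 us.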
def __nse_check(self, filehandle):
'''
        Checks consistency of data in nse file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nse file.
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
assert all(channel_ids == channel_ids[0])
assert all([len(dp) == len(data_points[0]) for dp in data_points])
self._diagnostic_print('NSE file check successful.')
def __nev_check(self, filehandle):
'''
Checks consistency of data in nev file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nev file.
'''
        # this entry should always equal 2 (see Neuralynx File Description),
        # but in practice it is sometimes 0 instead
assert all([f[2] == 2 or f[2] == 0 for f in filehandle])
# TODO: check with more nev files, if index 0,1,2,6,7,8 and 9 can be
# non-zero. Interpretation? Include in event extraction.
# only observed 0 for index 0,1,2,6,7,8,9 in nev files.
# If they are non-zero, this needs to be included in event extraction
assert all([f[0] == 0 for f in filehandle])
assert all([f[1] == 0 for f in filehandle])
assert all([f[2] in [0, 2] for f in filehandle])
assert all([f[6] == 0 for f in filehandle])
assert all([f[7] == 0 for f in filehandle])
assert all([f[8] == 0 for f in filehandle])
assert all([all(f[9] == 0) for f in filehandle])
self._diagnostic_print('NEV file check successful.')
def __ntt_check(self, filehandle):
'''
        Checks consistency of data in ntt file and raises assertion error if a
        check fails.
        Args:
            filehandle (file object):
                Handle to the already opened .ntt file.
'''
# TODO: check this when first .ntt files are available
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
assert all(channel_ids == channel_ids[0])
assert all([len(dp) == len(data_points[0]) for dp in data_points])
self._diagnostic_print('NTT file check successful.')
def __ncs_gap_check(self, filehandle):
'''
Checks individual data blocks of ncs files for consistent starting
times with respect to sample count.
        This covers intended recording gaps as well as shortened data packets,
        which are incomplete.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0, 0]
if channel_id not in self.parameters_ncs:
self.parameters_ncs[channel_id] = {}
# time stamps of data packets
delta_t = timestamps[1] - timestamps[0] # in microsec
data_packet_offsets = np.diff(timestamps) # in microsec
# check if delta_t corresponds to number of valid samples present in
# data packets
# NOTE: This also detects recording gaps!
valid_samples = header_u4[:-1, 2]
sampling_rate = header_u4[0, 1]
packet_checks = (valid_samples / (self.ncs_time_unit.rescale(
pq.s).magnitude * sampling_rate)) == data_packet_offsets
if not all(packet_checks):
if 'broken_packets' not in self.parameters_ncs[channel_id]:
self.parameters_ncs[channel_id]['broken_packets'] = []
            # note: an identity comparison with False never matches for a
            # boolean array; use logical negation to find the failed checks
            broken_packets = np.where(np.logical_not(packet_checks))[0]
for broken_packet in broken_packets:
self.parameters_ncs[channel_id]['broken_packets'].append(
(broken_packet,
valid_samples[broken_packet],
data_packet_offsets[broken_packet]))
self._diagnostic_print('Detected broken packet in NCS file at '
'packet id %i (sample number %i '
'time offset id %i)'
'' % (broken_packet,
valid_samples[broken_packet],
data_packet_offsets[broken_packet])
) # in microsec
# checking for irregular data packet durations -> gaps / shortened
# data packets
if not all(data_packet_offsets == delta_t):
if 'gaps' not in self.parameters_ncs[channel_id]:
self.parameters_ncs[channel_id]['gaps'] = []
# gap identification by (sample of gap start, duration)
# gap packets
gap_packet_ids = np.where(data_packet_offsets != delta_t)[0]
for gap_packet_id in gap_packet_ids:
# skip if this packet starting time is known to be corrupted
# hoping no corruption and gap occurs simultaneously
# corrupted time stamp affects two delta_t comparisons:
if gap_packet_id in self.parameters_ncs[channel_id][
'invalid_first_samples'] \
or gap_packet_id + 1 in self.parameters_ncs[channel_id][
'invalid_first_samples']:
continue
gap_start = timestamps[
gap_packet_id] # t_start of last packet [microsec]
gap_stop = timestamps[
gap_packet_id + 1] # t_stop of first packet [microsec]
self.parameters_ncs[channel_id]['gaps'].append((gap_packet_id,
gap_start,
gap_stop)) #
# [,microsec,microsec]
                self._diagnostic_print('Detected gap in NCS file between '
'sample time %i and %i (last correct '
'packet id %i)' % (gap_start, gap_stop,
gap_packet_id))
def __ncs_invalid_first_sample_check(self, filehandle):
'''
Checks data blocks of ncs files for corrupted starting times indicating
a missing first sample in the data packet. These are then excluded from
the gap check, but ignored for further analysis.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0, 0]
self.parameters_ncs[channel_id]['invalid_first_samples'] = []
        # time stamps >= 2**55 have the error flag bit set, which indicates a
        # corrupted (missing) first sample in that data packet
invalid_packet_ids = np.where(timestamps >= 2 ** 55)[0]
if len(invalid_packet_ids) > 0:
            warnings.warn('Invalid first sample(s) detected in ncs file '
                          '(packet id(s) %s)! This error is ignored in '
                          'subsequent routines.' % (invalid_packet_ids,))
self.parameters_ncs[channel_id][
'invalid_first_samples'] = invalid_packet_ids
# checking consistency of data around corrupted packet time
for invalid_packet_id in invalid_packet_ids:
if invalid_packet_id < 2 or invalid_packet_id > len(
filehandle) - 2:
raise ValueError(
                        'Corrupted ncs data packet at the beginning '
'or end of file.')
elif (timestamps[invalid_packet_id + 1] - timestamps[
invalid_packet_id - 1] != 2 * (
timestamps[invalid_packet_id - 1] - timestamps[
invalid_packet_id - 2])):
                raise ValueError('Starting times of ncs data packets around '
'corrupted data packet are not '
'consistent!')
    # Supplementary Functions
def get_channel_id_by_file_name(self, filename):
"""
        Check the parameters of the NCS, NSE and NTT files for the given
        filename and return the channel_id if the result is consistent.
:param filename:
:return:
"""
channel_ids = []
channel_ids += [k for k in self.parameters_ncs if
self.parameters_ncs[k]['filename'] == filename]
channel_ids += [k for k in self.parameters_nse if
self.parameters_nse[k]['filename'] == filename]
channel_ids += [k for k in self.parameters_ntt if
self.parameters_ntt[k]['filename'] == filename]
if len(np.unique(np.asarray(channel_ids))) == 1:
return channel_ids[0]
elif len(channel_ids) > 1:
raise ValueError(
'Ambiguous channel ids detected. Filename %s is associated'
' to different channels of NCS and NSE and NTT %s'
'' % (filename, channel_ids))
else: # if filename was not detected
return None
def hashfile(self, afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.digest()
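    # Illustrative usage of hashfile (sketch only; assumes hashlib is
    # available in the calling scope):
    #     import hashlib
    #     with open(filename, 'rb') as f:
    #         digest = self.hashfile(f, hashlib.sha256())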
def datesizefile(self, filename):
return str(os.path.getmtime(filename)) + '_' + str(
os.path.getsize(filename))
def _diagnostic_print(self, text):
'''
Print a diagnostic message.
Args:
text (string):
Diagnostic text to print.
Returns:
-
'''
if self._print_diagnostic:
print('NeuralynxIO: ' + text)
| bsd-3-clause |
captainpete/rethinkdb | external/v8_3.30.33.16/build/landmines.py | 49 | 4879 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs every build as a hook. If it detects that the build should
be clobbered, it will touch the file <build_dir>/.landmine_triggered. The
various build scripts will then check for the presence of this file and clobber
accordingly. The script will also emit the reasons for the clobber to stdout.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
build is clobbered.
"""
import difflib
import logging
import optparse
import os
import sys
import subprocess
import time
import landmine_utils
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_target_build_dir(build_tool, target):
"""
Returns output directory absolute path dependent on build and targets.
Examples:
r'c:\b\build\slave\win\build\src\out\Release'
'/mnt/data/b/build/slave/linux/build/src/out/Debug'
'/b/build/slave/ios_rel_device/build/src/xcodebuild/Release-iphoneos'
Keep this function in sync with tools/build/scripts/slave/compile.py
"""
ret = None
if build_tool == 'xcode':
ret = os.path.join(SRC_DIR, 'xcodebuild', target)
elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
ret = os.path.join(SRC_DIR, 'out', target)
elif build_tool in ['msvs', 'vs', 'ib']:
ret = os.path.join(SRC_DIR, 'build', target)
else:
raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
return os.path.abspath(ret)
def set_up_landmines(target, new_landmines):
"""Does the work of setting, planting, and triggering landmines."""
out_dir = get_target_build_dir(landmine_utils.builder(), target)
landmines_path = os.path.join(out_dir, '.landmines')
if not os.path.exists(out_dir):
return
if not os.path.exists(landmines_path):
print "Landmines tracker didn't exists."
# FIXME(machenbach): Clobber deletes the .landmines tracker. Difficult
# to know if we are right after a clobber or if it is first-time landmines
# deployment. Also, a landmine-triggered clobber right after a clobber is
# not possible. Different clobber methods for msvs, xcode and make all
# have different blacklists of files that are not deleted.
if os.path.exists(landmines_path):
triggered = os.path.join(out_dir, '.landmines_triggered')
with open(landmines_path, 'r') as f:
old_landmines = f.readlines()
if old_landmines != new_landmines:
old_date = time.ctime(os.stat(landmines_path).st_ctime)
diff = difflib.unified_diff(old_landmines, new_landmines,
fromfile='old_landmines', tofile='new_landmines',
fromfiledate=old_date, tofiledate=time.ctime(), n=0)
with open(triggered, 'w') as f:
f.writelines(diff)
print "Setting landmine: %s" % triggered
elif os.path.exists(triggered):
# Remove false triggered landmines.
os.remove(triggered)
print "Removing landmine: %s" % triggered
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
def process_options():
"""Returns a list of landmine emitting scripts."""
parser = optparse.OptionParser()
parser.add_option(
'-s', '--landmine-scripts', action='append',
default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
help='Path to the script which emits landmines to stdout. The target '
'is passed to this script via option -t. Note that an extra '
'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
parser.add_option('-v', '--verbose', action='store_true',
default=('LANDMINES_VERBOSE' in os.environ),
help=('Emit some extra debugging information (default off). This option '
'is also enabled by the presence of a LANDMINES_VERBOSE environment '
'variable.'))
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
logging.basicConfig(
level=logging.DEBUG if options.verbose else logging.ERROR)
extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
if extra_script:
return options.landmine_scripts + [extra_script]
else:
return options.landmine_scripts
def main():
landmine_scripts = process_options()
if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
return 0
landmines = []
for s in landmine_scripts:
proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
output, _ = proc.communicate()
landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
for target in ('Debug', 'Release'):
set_up_landmines(target, landmines)
return 0
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 |
dhutchis/accumulo | test/system/bench/cloudstone5/cloudstone5.py | 7 | 1070 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from lib import cloudshell
from lib.TableSplitsBenchmark import TableSplitsBenchmark
class CloudStone5(TableSplitsBenchmark):
"Creates a table with many splits"
def suite():
result = unittest.TestSuite([
CloudStone5(),
])
return result
| apache-2.0 |
blamedcloud/AISuite | weight_heuristic.py | 2 | 2291 | #!/usr/bin/env python
#weight_heuristic.py
import random
from alphabeta import UPPER_BOUND
from alphabeta import LOWER_BOUND
class WeightHeuristic(object):
def __init__(self, weight_m):
self.weights = weight_m
self.wins = 0
self.losses = 0
def __call__(self, game_state):
value = 0
state = self.parse(game_state)
winner = state[0]
turn = state[1]
matrix = state[2]
#check if the game is over
if winner == 1:
return UPPER_BOUND
elif winner == 2:
return LOWER_BOUND
elif winner == 0:
return 0
#evaluate based on weights
for y in range(len(matrix)):
for x in range(len(matrix[y])):
token = matrix[y][x]
value += self.weights[token][y][x]
#respect the bounds
if value >= UPPER_BOUND:
value = UPPER_BOUND-1
elif value <= LOWER_BOUND:
value = LOWER_BOUND+1
return value
def get_weights(self):
return self.weights
#method to parse the game_state into a tuple
#containing (winner, turn, matrix)
#parse : Game_State -> (Int, Int, List)
def parse(self, game_state):
pass
def record_game(self, win = False): # this counts draws as losses, which should be fine since it is across the board.
if win:
self.wins += 1
else:
self.losses += 1
def get_fitness(self):
if self.wins + self.losses == 0:
return 0
else:
return float(self.wins)/float(self.wins + self.losses)
def reproduce(self, other, mutation_rate = .001):
child_w = {}
ow = other.get_weights()
for token in self.weights:
matrix = []
for y in range(len(self.weights[token])):
row = []
for x in range(len(self.weights[token][y])):
new_value = 0
if random.random() < mutation_rate: # mutation occured
new_value = random.randint(LOWER_BOUND,UPPER_BOUND)
else:
my_w = self.weights[token][y][x]
other_w = ow[token][y][x]
if my_w*other_w < 0: # they have opposite signs.
new_value = random.choice([my_w,other_w])
elif my_w*other_w > 0: # they have the same sign.
new_value = (my_w + other_w)/2
else: # at least one is zero.
if my_w != 0:
new_value = my_w
else:
new_value = other_w
row += [new_value]
matrix += [row]
child_w[token] = matrix
return self.__class__(child_w)
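# A minimal sketch of a concrete subclass (illustrative only; the accessor
# names used below are assumptions, not part of this project):
#
#     class TicTacToeWeightHeuristic(WeightHeuristic):
#         def parse(self, game_state):
#             # must return (winner, turn, matrix), where matrix is a grid
#             # of tokens matching the keys of the weight dictionary
#             return (game_state.get_winner(),
#                     game_state.get_turn(),
#                     game_state.get_matrix())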
| mit |
bkirui/odoo | addons/account/company.py | 384 | 2814 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'expects_chart_of_accounts': fields.boolean('Expects a Chart of Accounts'),
'tax_calculation_rounding_method': fields.selection([
('round_per_line', 'Round per Line'),
('round_globally', 'Round Globally'),
], 'Tax Calculation Rounding Method',
help="If you select 'Round per Line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round Globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
'paypal_account': fields.char("Paypal Account", size=128, help="Paypal username (usually email) for receiving online payments."),
'overdue_msg': fields.text('Overdue Payments Message', translate=True),
}
_defaults = {
'expects_chart_of_accounts': True,
'tax_calculation_rounding_method': 'round_per_line',
'overdue_msg': '''Dear Sir/Madam,
Our records indicate that some payments on your account are still due. Please find details below.
If the amount has already been paid, please disregard this notice. Otherwise, please forward us the total amount stated below.
If you have any queries regarding your account, Please contact us.
Thank you in advance for your cooperation.
Best Regards,'''
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jaapz/werkzeug | examples/couchy/views.py | 44 | 1999 | from werkzeug.utils import redirect
from werkzeug.exceptions import NotFound
from couchy.utils import render_template, expose, \
validate_url, url_for, Pagination
from couchy.models import URL
@expose('/')
def new(request):
error = url = ''
if request.method == 'POST':
url = request.form.get('url')
alias = request.form.get('alias')
if not validate_url(url):
error = "I'm sorry but you cannot shorten this URL."
elif alias:
if len(alias) > 140:
error = 'Your alias is too long'
elif '/' in alias:
error = 'Your alias might not include a slash'
elif URL.load(alias):
error = 'The alias you have requested exists already'
if not error:
url = URL(target=url, public='private' not in request.form, shorty_id=alias if alias else None)
url.store()
uid = url.id
return redirect(url_for('display', uid=uid))
return render_template('new.html', error=error, url=url)
@expose('/display/<uid>')
def display(request, uid):
url = URL.load(uid)
if not url:
raise NotFound()
return render_template('display.html', url=url)
@expose('/u/<uid>')
def link(request, uid):
url = URL.load(uid)
if not url:
raise NotFound()
return redirect(url.target, 301)
@expose('/list/', defaults={'page': 1})
@expose('/list/<int:page>')
def list(request, page):
def wrap(doc):
data = doc.value
data['_id'] = doc.id
return URL.wrap(data)
code = '''function(doc) { if (doc.public){ map([doc._id], doc); }}'''
docResults = URL.query(code)
results = [wrap(doc) for doc in docResults]
pagination = Pagination(results, 1, page, 'list')
if pagination.page > 1 and not pagination.entries:
raise NotFound()
return render_template('list.html', pagination=pagination)
def not_found(request):
return render_template('not_found.html')
| bsd-3-clause |
nesi/easybuild-framework | easybuild/framework/easyconfig/constants.py | 6 | 2350 | #
# Copyright 2013-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
#
"""
Easyconfig constants module that provides all constants that can
be used within an Easyconfig file.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import platform
from vsc.utils import fancylogger
from easybuild.tools.systemtools import get_shared_lib_ext, get_os_name, get_os_type, get_os_version
_log = fancylogger.getLogger('easyconfig.constants', fname=False)
EXTERNAL_MODULE_MARKER = 'EXTERNAL_MODULE'
# constants that can be used in easyconfig
EASYCONFIG_CONSTANTS = {
'EXTERNAL_MODULE': (EXTERNAL_MODULE_MARKER, "External module marker"),
'SYS_PYTHON_VERSION': (platform.python_version(), "System Python version (platform.python_version())"),
'OS_TYPE': (get_os_type(), "System type (e.g. 'Linux' or 'Darwin')"),
'OS_NAME': (get_os_name(), "System name (e.g. 'fedora' or 'RHEL')"),
'OS_VERSION': (get_os_version(), "System version"),
}
def constant_documentation():
"""Generate the easyconfig constant documentation"""
indent_l0 = " " * 2
indent_l1 = indent_l0 + " " * 2
doc = []
doc.append("Constants that can be used in easyconfigs")
for cst, (val, descr) in EASYCONFIG_CONSTANTS.items():
doc.append('%s%s: %s (%s)' % (indent_l1, cst, val, descr))
return "\n".join(doc)
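# For illustration, the generated documentation looks roughly like this
# (values and ordering depend on the host system):
#
#     Constants that can be used in easyconfigs
#         EXTERNAL_MODULE: EXTERNAL_MODULE (External module marker)
#         OS_TYPE: Linux (System type (e.g. 'Linux' or 'Darwin'))
#         ...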
| gpl-2.0 |
bbc/kamaelia | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/File/WholeFileWriter.py | 9 | 2403 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
"""\
=======================
Whole File Writer
=======================
This component accepts file creation jobs and signals the completion of each
jobs. Creation jobs consist of a list [ filename, contents ] added to "inbox".
Completion signals consist of the string "done" being sent to "outbox".
All jobs are processed sequentially.
This component does not terminate.
"""
from Axon.Component import component
class WholeFileWriter(component):
"""\
WholeFileWriter() -> component that creates and writes files
Uses [ filename, contents ] structure to file creation messages in "inbox"
"""
Inboxes = {
"inbox" : "file creation jobs",
"control" : "UNUSED"
}
Outboxes = {
"outbox" : "filename written",
"signal" : "UNUSED"
}
def __init__(self):
super(WholeFileWriter, self).__init__()
def writeFile(self, filename, data):
"""Writes the data to a new file"""
file = open(filename, "wb", 0)
data = file.write(data)
file.close()
def main(self):
"""Main loop"""
while 1:
yield 1
if self.dataReady("inbox"):
command = self.recv("inbox")
self.writeFile(command[0], command[1])
self.send(command[0], "outbox")
else:
self.pause()
__kamaelia_components__ = ( WholeFileWriter, )
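# Minimal usage sketch (not part of this module): each job sent to "inbox"
# is a [filename, contents] list and the filename is echoed on "outbox"
# once written. The import paths below are assumed from a standard
# Kamaelia installation.
#
#     from Kamaelia.Chassis.Pipeline import Pipeline
#     from Kamaelia.Util.DataSource import DataSource
#
#     Pipeline(
#         DataSource([["/tmp/example.txt", "hello world\n"]]),
#         WholeFileWriter(),
#     ).run()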
| apache-2.0 |
polojacky/ehfpi | ehf/rest_framework/tests/test_nullable_fields.py | 16 | 1069 | from django.core.urlresolvers import reverse
from rest_framework.compat import patterns, url
from rest_framework.test import APITestCase
from rest_framework.tests.models import NullableForeignKeySource
from rest_framework.tests.serializers import NullableFKSourceSerializer
from rest_framework.tests.views import NullableFKSourceDetail
urlpatterns = patterns(
'',
url(r'^objects/(?P<pk>\d+)/$', NullableFKSourceDetail.as_view(), name='object-detail'),
)
class NullableForeignKeyTests(APITestCase):
"""
DRF should be able to handle nullable foreign keys when a test
Client POST/PUT request is made with its own serialized object.
"""
urls = 'rest_framework.tests.test_nullable_fields'
def test_updating_object_with_null_fk(self):
obj = NullableForeignKeySource(name='example', target=None)
obj.save()
serialized_data = NullableFKSourceSerializer(obj).data
response = self.client.put(reverse('object-detail', args=[obj.pk]), serialized_data)
self.assertEqual(response.data, serialized_data)
| apache-2.0 |
tsdmgz/ansible | lib/ansible/utils/module_docs_fragments/aci.py | 36 | 2382 | # -*- coding: utf-8 -*-
# Copyright 2017 Dag Wieers <[email protected]>
# Copyright 2017 Swetha Chunduri (@schunduri)
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = '''
options:
hostname:
description:
- IP Address or hostname of APIC resolvable by Ansible control host.
required: yes
aliases: [ host ]
username:
description:
- The username to use for authentication.
required: yes
default: admin
aliases: [ user ]
password:
description:
- The password to use for authentication.
required: yes
timeout:
description:
- The socket level timeout in seconds.
default: 30
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
default: 'yes'
type: bool
use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
notes:
- By default, if an environment variable C(<protocol>_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
(see `setting the environment
<http://docs.ansible.com/playbooks_environment.html>`_),
or by using the C(use_proxy) option.
- HTTP redirects can redirect from HTTP to HTTPS so you should be sure that
your proxy environment for both protocols is correct.
'''
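# Modules reuse the options above through Ansible's doc-fragment mechanism,
# e.g. a module's own DOCUMENTATION block would declare (illustrative):
#
#     extends_documentation_fragment: aci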
| gpl-3.0 |
aabbox/kbengine | kbe/src/lib/python/Lib/test/test_asyncio/test_windows_events.py | 60 | 4592 | import os
import sys
import unittest
if sys.platform != 'win32':
raise unittest.SkipTest('Windows only')
import _winapi
import asyncio
from asyncio import _overlapped
from asyncio import test_utils
from asyncio import windows_events
class UpperProto(asyncio.Protocol):
def __init__(self):
self.buf = []
def connection_made(self, trans):
self.trans = trans
def data_received(self, data):
self.buf.append(data)
if b'\n' in data:
self.trans.write(b''.join(self.buf).upper())
self.trans.close()
class ProactorTests(test_utils.TestCase):
def setUp(self):
self.loop = asyncio.ProactorEventLoop()
self.set_event_loop(self.loop)
def test_close(self):
a, b = self.loop._socketpair()
trans = self.loop._make_socket_transport(a, asyncio.Protocol())
f = asyncio.async(self.loop.sock_recv(b, 100))
trans.close()
self.loop.run_until_complete(f)
self.assertEqual(f.result(), b'')
b.close()
def test_double_bind(self):
ADDRESS = r'\\.\pipe\test_double_bind-%s' % os.getpid()
server1 = windows_events.PipeServer(ADDRESS)
with self.assertRaises(PermissionError):
windows_events.PipeServer(ADDRESS)
server1.close()
def test_pipe(self):
res = self.loop.run_until_complete(self._test_pipe())
self.assertEqual(res, 'done')
def _test_pipe(self):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError):
yield from self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
[server] = yield from self.loop.start_serving_pipe(
UpperProto, ADDRESS)
self.assertIsInstance(server, windows_events.PipeServer)
clients = []
for i in range(5):
stream_reader = asyncio.StreamReader(loop=self.loop)
protocol = asyncio.StreamReaderProtocol(stream_reader)
trans, proto = yield from self.loop.create_pipe_connection(
lambda: protocol, ADDRESS)
self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto)
clients.append((stream_reader, trans))
for i, (r, w) in enumerate(clients):
w.write('lower-{}\n'.format(i).encode())
for i, (r, w) in enumerate(clients):
response = yield from r.readline()
self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
w.close()
server.close()
with self.assertRaises(FileNotFoundError):
yield from self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
return 'done'
def test_wait_for_handle(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with 0.5s timeout;
# result should be False at timeout
fut = self.loop._proactor.wait_for_handle(event, 0.5)
start = self.loop.time()
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertFalse(fut.result())
self.assertTrue(0.48 < elapsed < 0.9, elapsed)
_overlapped.SetEvent(event)
        # Wait for set event;
# result should be True immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
start = self.loop.time()
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertTrue(fut.result())
self.assertTrue(0 <= elapsed < 0.3, elapsed)
# Tulip issue #195: cancelling a done _WaitHandleFuture must not crash
fut.cancel()
def test_wait_for_handle_cancel(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with a cancelled future;
# CancelledError should be raised immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
fut.cancel()
start = self.loop.time()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertTrue(0 <= elapsed < 0.1, elapsed)
# Tulip issue #195: cancelling a _WaitHandleFuture twice must not crash
fut = self.loop._proactor.wait_for_handle(event)
fut.cancel()
fut.cancel()
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
grlee77/scipy | scipy/optimize/_lsq/trf.py | 21 | 19479 | """Trust Region Reflective algorithm for least-squares optimization.
The algorithm is based on ideas from paper [STIR]_. The main idea is to
account for the presence of the bounds by appropriate scaling of the variables (or,
equivalently, changing a trust-region shape). Let's introduce a vector v:
           | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
    v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
           | 1,            otherwise
where g is the gradient of a cost function and lb, ub are the bounds. Its
components are distances to the bounds at which the anti-gradient points (if
this distance is finite). Define a scaling matrix D = diag(v**0.5).
First-order optimality conditions can be stated as
D^2 g(x) = 0.
Meaning that components of the gradient should be zero for strictly interior
variables, and components must point inside the feasible region for variables
on the bound.
Now consider this system of equations as a new optimization problem. If the
point x is strictly interior (not on the bound), then the left-hand side is
differentiable and the Newton step for it satisfies
(D^2 H + diag(g) Jv) p = -D^2 g
where H is the Hessian matrix (or its J^T J approximation in least squares),
Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
elements of matrix C = diag(g) Jv are non-negative. Introduce the change
of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables,
we have a Newton step satisfying
B_h p_h = -g_h,
where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
to "hat" variables. To guarantee global convergence we formulate a
trust-region problem based on the Newton step in the new variables:
0.5 * p_h^T B_h p + g_h^T p_h -> min, ||p_h|| <= Delta
In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
problem is
0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
Here, the meaning of the matrix D becomes more clear: it alters the shape
of a trust-region, such that large steps towards the bounds are not allowed.
In the implementation, the trust-region problem is solved in "hat" space,
but handling of the bounds is done in the original space (see below and read
the code).
The introduction of the matrix D does not allow the bounds to be ignored: the
algorithm must keep its iterates strictly feasible (to satisfy the
differentiability requirement above), and the parameter theta controls the
step back from the boundary (see the code for details).
The algorithm does another important trick. If the trust-region solution
doesn't fit into the bounds, then a reflected (from a firstly encountered
bound) search direction is considered. For motivation and analysis refer to
[STIR]_ paper (and other papers of the authors). In practice, it doesn't need
a lot of justifications, the algorithm simply chooses the best step among
three: a constrained trust-region step, a reflected step and a constrained
Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
space).
Another feature is that a trust-region radius control strategy is modified to
account for appearance of the diagonal C matrix (called diag_h in the code).
Note that all described peculiarities are completely gone as we consider
problems without bounds (the algorithm becomes a standard trust-region type
algorithm very similar to ones implemented in MINPACK).
The implementation supports two methods of solving the trust-region problem.
The first, called 'exact', applies SVD on Jacobian and then solves the problem
very accurately using the algorithm described in [JJMore]_. It is not
applicable to large problem. The second, called 'lsmr', uses the 2-D subspace
approach (sometimes called "indefinite dogleg"), where the problem is solved
in a subspace spanned by the gradient and the approximate Gauss-Newton step
found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is
reformulated as a 4th order algebraic equation and solved very accurately by
``numpy.roots``. The subspace approach allows to solve very large problems
(up to couple of millions of residuals on a regular PC), provided the Jacobian
matrix is sufficiently sparse.
References
----------
.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
              and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
              Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
"""
import numpy as np
from numpy.linalg import norm
from scipy.linalg import svd, qr
from scipy.sparse.linalg import lsmr
from scipy.optimize import OptimizeResult
from .common import (
step_size_to_bound, find_active_constraints, in_bounds,
make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
print_iteration_nonlinear)
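# Illustrative, self-contained sketch of the scaling vector v defined in the
# module docstring above. The actual computation used by this module is
# CL_scaling_vector (imported from .common); this helper only makes the
# piecewise definition concrete and is not part of the public API.
def _cl_scaling_sketch(x, g, lb, ub):
    """Return v with v[i] as defined in the module docstring."""
    v = np.ones_like(x, dtype=float)
    mask = (g < 0) & np.isfinite(ub)  # anti-gradient points towards ub
    v[mask] = ub[mask] - x[mask]
    mask = (g > 0) & np.isfinite(lb)  # anti-gradient points towards lb
    v[mask] = x[mask] - lb[mask]
    return v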
def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose):
# For efficiency, it makes sense to run the simplified version of the
# algorithm when no bounds are imposed. We decided to write the two
# separate functions. It violates the DRY principle, but the individual
# functions are kept the most readable.
if np.all(lb == -np.inf) and np.all(ub == np.inf):
return trf_no_bounds(
fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose)
else:
return trf_bounds(
fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose)
def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
"""Select the best step according to Trust Region Reflective algorithm."""
if in_bounds(x + p, lb, ub):
p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
return p, p_h, -p_value
p_stride, hits = step_size_to_bound(x, p, lb, ub)
# Compute the reflected direction.
r_h = np.copy(p_h)
r_h[hits.astype(bool)] *= -1
r = d * r_h
# Restrict trust-region step, such that it hits the bound.
p *= p_stride
p_h *= p_stride
x_on_bound = x + p
# Reflected direction will cross first either feasible region or trust
# region boundary.
_, to_tr = intersect_trust_region(p_h, r_h, Delta)
to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)
# Find lower and upper bounds on a step size along the reflected
# direction, considering the strict feasibility requirement. There is no
# single correct way to do that, the chosen approach seems to work best
# on test problems.
r_stride = min(to_bound, to_tr)
if r_stride > 0:
r_stride_l = (1 - theta) * p_stride / r_stride
if r_stride == to_bound:
r_stride_u = theta * to_bound
else:
r_stride_u = to_tr
else:
r_stride_l = 0
r_stride_u = -1
# Check if reflection step is available.
if r_stride_l <= r_stride_u:
a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
r_stride, r_value = minimize_quadratic_1d(
a, b, r_stride_l, r_stride_u, c=c)
r_h *= r_stride
r_h += p_h
r = r_h * d
else:
r_value = np.inf
# Now correct p_h to make it strictly interior.
p *= theta
p_h *= theta
p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
ag_h = -g_h
ag = d * ag_h
to_tr = Delta / norm(ag_h)
to_bound, _ = step_size_to_bound(x, ag, lb, ub)
if to_bound < to_tr:
ag_stride = theta * to_bound
else:
ag_stride = to_tr
a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride)
ag_h *= ag_stride
ag *= ag_stride
if p_value < r_value and p_value < ag_value:
return p, p_h, -p_value
elif r_value < p_value and r_value < ag_value:
return r, r_h, -r_value
else:
return ag, ag_h, -ag_value
def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
x_scale, loss_function, tr_solver, tr_options, verbose):
x = x0.copy()
f = f0
f_true = f.copy()
nfev = 1
J = J0
njev = 1
m, n = J.shape
if loss_function is not None:
rho = loss_function(f)
cost = 0.5 * np.sum(rho[0])
J, f = scale_for_robust_loss_function(J, f, rho)
else:
cost = 0.5 * np.dot(f, f)
g = compute_grad(J, f)
jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
if jac_scale:
scale, scale_inv = compute_jac_scale(J)
else:
scale, scale_inv = x_scale, 1 / x_scale
v, dv = CL_scaling_vector(x, g, lb, ub)
v[dv != 0] *= scale_inv[dv != 0]
Delta = norm(x0 * scale_inv / v**0.5)
if Delta == 0:
Delta = 1.0
g_norm = norm(g * v, ord=np.inf)
f_augmented = np.zeros((m + n))
if tr_solver == 'exact':
J_augmented = np.empty((m + n, n))
elif tr_solver == 'lsmr':
reg_term = 0.0
regularize = tr_options.pop('regularize', True)
if max_nfev is None:
max_nfev = x0.size * 100
alpha = 0.0 # "Levenberg-Marquardt" parameter
termination_status = None
iteration = 0
step_norm = None
actual_reduction = None
if verbose == 2:
print_header_nonlinear()
while True:
v, dv = CL_scaling_vector(x, g, lb, ub)
g_norm = norm(g * v, ord=np.inf)
if g_norm < gtol:
termination_status = 1
if verbose == 2:
print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
step_norm, g_norm)
if termination_status is not None or nfev == max_nfev:
break
# Now compute variables in "hat" space. Here, we also account for
# scaling introduced by `x_scale` parameter. This part is a bit tricky,
# you have to write down the formulas and see how the trust-region
# problem is formulated when the two types of scaling are applied.
# The idea is that first we apply `x_scale` and then apply Coleman-Li
# approach in the new variables.
# v is recomputed in the variables after applying `x_scale`, note that
# components which were identically 1 not affected.
v[dv != 0] *= scale_inv[dv != 0]
# Here, we apply two types of scaling.
d = v**0.5 * scale
# C = diag(g * scale) Jv
diag_h = g * dv * scale
# After all this has been done, we continue normally.
# "hat" gradient.
g_h = d * g
f_augmented[:m] = f
if tr_solver == 'exact':
J_augmented[:m] = J * d
J_h = J_augmented[:m] # Memory view.
J_augmented[m:] = np.diag(diag_h**0.5)
U, s, V = svd(J_augmented, full_matrices=False)
V = V.T
uf = U.T.dot(f_augmented)
elif tr_solver == 'lsmr':
J_h = right_multiplied_operator(J, d)
if regularize:
a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
to_tr = Delta / norm(g_h)
ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
reg_term = -ag_value / Delta**2
lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
S = np.vstack((g_h, gn_h)).T
S, _ = qr(S, mode='economic')
JS = J_h.dot(S) # LinearOperator does dot too.
B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
g_S = S.T.dot(g_h)
# theta controls step back step ratio from the bounds.
theta = max(0.995, 1 - g_norm)
actual_reduction = -1
while actual_reduction <= 0 and nfev < max_nfev:
if tr_solver == 'exact':
p_h, alpha, n_iter = solve_lsq_trust_region(
n, m, uf, s, V, Delta, initial_alpha=alpha)
elif tr_solver == 'lsmr':
p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
p_h = S.dot(p_S)
p = d * p_h # Trust-region solution in the original space.
step, step_h, predicted_reduction = select_step(
x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)
x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
f_new = fun(x_new)
nfev += 1
step_h_norm = norm(step_h)
if not np.all(np.isfinite(f_new)):
Delta = 0.25 * step_h_norm
continue
# Usual trust-region step quality estimation.
if loss_function is not None:
cost_new = loss_function(f_new, cost_only=True)
else:
cost_new = 0.5 * np.dot(f_new, f_new)
actual_reduction = cost - cost_new
Delta_new, ratio = update_tr_radius(
Delta, actual_reduction, predicted_reduction,
step_h_norm, step_h_norm > 0.95 * Delta)
step_norm = norm(step)
termination_status = check_termination(
actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
if termination_status is not None:
break
alpha *= Delta / Delta_new
Delta = Delta_new
if actual_reduction > 0:
x = x_new
f = f_new
f_true = f.copy()
cost = cost_new
J = jac(x, f)
njev += 1
if loss_function is not None:
rho = loss_function(f)
J, f = scale_for_robust_loss_function(J, f, rho)
g = compute_grad(J, f)
if jac_scale:
scale, scale_inv = compute_jac_scale(J, scale_inv)
else:
step_norm = 0
actual_reduction = 0
iteration += 1
if termination_status is None:
termination_status = 0
active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
return OptimizeResult(
x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev,
status=termination_status)
def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
x_scale, loss_function, tr_solver, tr_options, verbose):
x = x0.copy()
f = f0
f_true = f.copy()
nfev = 1
J = J0
njev = 1
m, n = J.shape
if loss_function is not None:
rho = loss_function(f)
cost = 0.5 * np.sum(rho[0])
J, f = scale_for_robust_loss_function(J, f, rho)
else:
cost = 0.5 * np.dot(f, f)
g = compute_grad(J, f)
jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
if jac_scale:
scale, scale_inv = compute_jac_scale(J)
else:
scale, scale_inv = x_scale, 1 / x_scale
Delta = norm(x0 * scale_inv)
if Delta == 0:
Delta = 1.0
if tr_solver == 'lsmr':
reg_term = 0
damp = tr_options.pop('damp', 0.0)
regularize = tr_options.pop('regularize', True)
if max_nfev is None:
max_nfev = x0.size * 100
alpha = 0.0 # "Levenberg-Marquardt" parameter
termination_status = None
iteration = 0
step_norm = None
actual_reduction = None
if verbose == 2:
print_header_nonlinear()
while True:
g_norm = norm(g, ord=np.inf)
if g_norm < gtol:
termination_status = 1
if verbose == 2:
print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
step_norm, g_norm)
if termination_status is not None or nfev == max_nfev:
break
d = scale
g_h = d * g
if tr_solver == 'exact':
J_h = J * d
U, s, V = svd(J_h, full_matrices=False)
V = V.T
uf = U.T.dot(f)
elif tr_solver == 'lsmr':
J_h = right_multiplied_operator(J, d)
if regularize:
a, b = build_quadratic_1d(J_h, g_h, -g_h)
to_tr = Delta / norm(g_h)
ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
reg_term = -ag_value / Delta**2
damp_full = (damp**2 + reg_term)**0.5
gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
S = np.vstack((g_h, gn_h)).T
S, _ = qr(S, mode='economic')
JS = J_h.dot(S)
B_S = np.dot(JS.T, JS)
g_S = S.T.dot(g_h)
actual_reduction = -1
while actual_reduction <= 0 and nfev < max_nfev:
if tr_solver == 'exact':
step_h, alpha, n_iter = solve_lsq_trust_region(
n, m, uf, s, V, Delta, initial_alpha=alpha)
elif tr_solver == 'lsmr':
p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
step_h = S.dot(p_S)
predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
step = d * step_h
x_new = x + step
f_new = fun(x_new)
nfev += 1
step_h_norm = norm(step_h)
if not np.all(np.isfinite(f_new)):
Delta = 0.25 * step_h_norm
continue
# Usual trust-region step quality estimation.
if loss_function is not None:
cost_new = loss_function(f_new, cost_only=True)
else:
cost_new = 0.5 * np.dot(f_new, f_new)
actual_reduction = cost - cost_new
Delta_new, ratio = update_tr_radius(
Delta, actual_reduction, predicted_reduction,
step_h_norm, step_h_norm > 0.95 * Delta)
step_norm = norm(step)
termination_status = check_termination(
actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
if termination_status is not None:
break
alpha *= Delta / Delta_new
Delta = Delta_new
if actual_reduction > 0:
x = x_new
f = f_new
f_true = f.copy()
cost = cost_new
J = jac(x, f)
njev += 1
if loss_function is not None:
rho = loss_function(f)
J, f = scale_for_robust_loss_function(J, f, rho)
g = compute_grad(J, f)
if jac_scale:
scale, scale_inv = compute_jac_scale(J, scale_inv)
else:
step_norm = 0
actual_reduction = 0
iteration += 1
if termination_status is None:
termination_status = 0
active_mask = np.zeros_like(x)
return OptimizeResult(
x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev,
status=termination_status)
| bsd-3-clause |
adlius/osf.io | api/base/exceptions.py | 2 | 12106 | from past.builtins import basestring
from rest_framework import status as http_status
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException, AuthenticationFailed, ErrorDetail
def get_resource_object_member(error_key, context):
from api.base.serializers import RelationshipField
field = context['view'].serializer_class._declared_fields.get(error_key, None)
if field:
return 'relationships' if isinstance(field, RelationshipField) else 'attributes'
# If field cannot be found (where read/write operations have different serializers,
# or fields serialized on __init__, assume error was in 'attributes' by default
return 'attributes'
def dict_error_formatting(errors, context, index=None):
"""
Formats all dictionary error messages for both single and bulk requests
"""
formatted_error_list = []
# Error objects may have the following members. Title and id removed to avoid clash with "title" and "id" field errors.
top_level_error_keys = ['links', 'status', 'code', 'detail', 'source', 'meta']
# Resource objects must contain at least 'id' and 'type'
resource_object_identifiers = ['type', 'id']
if index is None:
index = ''
else:
index = str(index) + '/'
for error_key, error_description in errors.items():
if isinstance(error_description, ErrorDetail):
error_description = [error_description]
if error_key in top_level_error_keys:
formatted_error_list.extend({error_key: description} for description in error_description)
elif error_key in resource_object_identifiers:
formatted_error_list.extend([{'source': {'pointer': '/data/{}'.format(index) + error_key}, 'detail': reason} for reason in error_description])
        elif error_key == 'non_field_errors':
            formatted_error_list.extend([{'detail': description} for description in error_description])
elif isinstance(error_description, list):
for error in error_description:
formatted_error_list += format_validators_errors(error, error_key, context, index)
else:
formatted_error_list += format_validators_errors(error_description, error_key, context, index)
return formatted_error_list
def format_validators_errors(error_description, error_key, context, index):
errors = []
if isinstance(error_description, ErrorDetail):
errors.append({
'source': {
'pointer': f'/data/{index}{get_resource_object_member(error_key, context)}/' + error_key,
},
'detail': error_description,
})
else:
for key, value in error_description.items():
errors.append({
'source': {
'pointer': f'/data/{index}{get_resource_object_member(error_key, context)}/' + error_key,
},
'detail': value,
})
return errors
def json_api_exception_handler(exc, context):
"""
Custom exception handler that returns errors object as an array
"""
# We're deliberately not stripping html from exception detail.
# This creates potential vulnerabilities to script injection attacks
# when returning raw user input into error messages.
#
    # Fortunately, Django's templating language strips markup by default,
# but if our frontend changes we may lose that protection.
# TODO: write tests to ensure our html frontend strips html
# Import inside method to avoid errors when the OSF is loaded without Django
from rest_framework.views import exception_handler
response = exception_handler(exc, context)
errors = []
if response:
message = response.data
if isinstance(exc, TwoFactorRequiredError):
response['X-OSF-OTP'] = 'required; app'
if isinstance(exc, JSONAPIException):
errors.extend([{'source': exc.source or {}, 'detail': exc.detail, 'meta': exc.meta or {}}])
elif isinstance(message, dict):
errors.extend(dict_error_formatting(message, context, index=None))
else:
if isinstance(message, basestring):
message = [message]
for index, error in enumerate(message):
if isinstance(error, dict):
errors.extend(dict_error_formatting(error, context, index=index))
else:
errors.append({'detail': error})
response.data = {'errors': errors}
return response
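# For illustration: a serializer ValidationError whose detail is
# {'title': [ErrorDetail('This field is required.')]} is reshaped by the
# handler above into a JSON API error array (the 'attributes' member is
# resolved through the view's serializer):
#
#     {'errors': [{'source': {'pointer': '/data/attributes/title'},
#                  'detail': 'This field is required.'}]}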
def format_validation_error(e):
error_list = []
for key, value in e.message_dict.items():
error_list.append('There was an issue with the {} field. {}'.format(key, value[0]))
return error_list
class EndpointNotImplementedError(APIException):
status_code = status.HTTP_501_NOT_IMPLEMENTED
default_detail = _('This endpoint is not yet implemented.')
class ServiceUnavailableError(APIException):
status_code = status.HTTP_503_SERVICE_UNAVAILABLE
default_detail = _('Service is unavailable at this time.')
class JSONAPIException(APIException):
"""Inherits from the base DRF API exception and adds extra metadata to support JSONAPI error objects
:param str detail: a human-readable explanation specific to this occurrence of the problem
:param dict source: A dictionary containing references to the source of the error.
See http://jsonapi.org/format/#error-objects.
Example: ``source={'pointer': '/data/attributes/title'}``
:param dict meta: A meta object containing non-standard meta info about the error.
"""
status_code = status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, source=None, meta=None):
super(JSONAPIException, self).__init__(detail=detail)
self.source = source
self.meta = meta
# Custom Exceptions the Django Rest Framework does not support
class Gone(JSONAPIException):
status_code = status.HTTP_410_GONE
default_detail = ('The requested resource is no longer available.')
def UserGone(user):
return Gone(
detail='The requested user is no longer available.',
meta={
'full_name': user.fullname, 'family_name': user.family_name, 'given_name': user.given_name,
'middle_names': user.middle_names, 'profile_image': user.profile_image_url(),
},
)
class Conflict(JSONAPIException):
status_code = status.HTTP_409_CONFLICT
default_detail = ('Resource identifier does not match server endpoint.')
class JSONAPIParameterException(JSONAPIException):
def __init__(self, detail=None, parameter=None):
source = {
'parameter': parameter,
}
super(JSONAPIParameterException, self).__init__(detail=detail, source=source)
class JSONAPIAttributeException(JSONAPIException):
def __init__(self, detail=None, attribute=None):
source = {
'pointer': '/data/attributes/{}'.format(attribute),
}
super(JSONAPIAttributeException, self).__init__(detail=detail, source=source)
class InvalidQueryStringError(JSONAPIParameterException):
"""Raised when client passes an invalid value to a query string parameter."""
default_detail = 'Query string contains an invalid value.'
status_code = http_status.HTTP_400_BAD_REQUEST
class InvalidFilterOperator(JSONAPIParameterException):
"""Raised when client passes an invalid operator to a query param filter."""
status_code = http_status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, value=None, valid_operators=('eq', 'lt', 'lte', 'gt', 'gte', 'contains', 'icontains')):
if value and not detail:
valid_operators = ', '.join(valid_operators)
detail = "Value '{0}' is not a supported filter operator; use one of {1}.".format(
value,
valid_operators,
)
super(InvalidFilterOperator, self).__init__(detail=detail, parameter='filter')
class InvalidFilterValue(JSONAPIParameterException):
"""Raised when client passes an invalid value to a query param filter."""
status_code = http_status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, value=None, field_type=None):
if not detail:
detail = "Value '{0}' is not valid".format(value)
if field_type:
detail += ' for a filter on type {0}'.format(
field_type,
)
detail += '.'
super(InvalidFilterValue, self).__init__(detail=detail, parameter='filter')
class InvalidFilterError(JSONAPIParameterException):
    """Raised when client passes a malformed filter in the query string."""
default_detail = _('Query string contains a malformed filter.')
status_code = http_status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None):
super(InvalidFilterError, self).__init__(detail=detail, parameter='filter')
class InvalidFilterComparisonType(JSONAPIParameterException):
"""Raised when client tries to filter on a field that is not a date or number type"""
default_detail = _('Comparison operators are only supported for dates and numbers.')
status_code = http_status.HTTP_400_BAD_REQUEST
class InvalidFilterMatchType(JSONAPIParameterException):
"""Raised when client tries to do a match filter on a field that is not a string or a list"""
default_detail = _('Match operators are only supported for strings and lists.')
status_code = http_status.HTTP_400_BAD_REQUEST
class InvalidFilterFieldError(JSONAPIParameterException):
"""Raised when client tries to filter on a field that is not supported"""
default_detail = _('Query contained one or more filters for invalid fields.')
status_code = http_status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, parameter=None, value=None):
if value and not detail:
detail = "Value '{}' is not a filterable field.".format(value)
super(InvalidFilterFieldError, self).__init__(detail=detail, parameter=parameter)
class UnconfirmedAccountError(APIException):
status_code = 400
default_detail = _('Please confirm your account before using the API.')
class UnclaimedAccountError(APIException):
status_code = 400
default_detail = _('Please claim your account before using the API.')
class DeactivatedAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with a deactivated account is not allowed.')
class MergedAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with a merged account is not allowed.')
class InvalidAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with an invalid account is not allowed.')
class TwoFactorRequiredError(AuthenticationFailed):
default_detail = _('Must specify two-factor authentication OTP code.')
pass
class InvalidModelValueError(JSONAPIException):
status_code = 400
default_detail = _('Invalid value in POST/PUT/PATCH request.')
class TargetNotSupportedError(Exception):
"""Raised if a TargetField is used for a resource that isn't supported."""
pass
class RelationshipPostMakesNoChanges(Exception):
"""Raised when a post is on a relationship that already exists, so view can return a 204"""
pass
class NonDescendantNodeError(APIException):
"""Raised when a client attempts to associate a non-descendant node with a view only link"""
status_code = 400
default_detail = _('The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.')
def __init__(self, node_id, detail=None):
if not detail:
detail = self.default_detail.format(node_id)
super(NonDescendantNodeError, self).__init__(detail=detail)
| apache-2.0 |
fosfataza/protwis | common/sequence_signature.py | 1 | 29836 | """
A module for generating sequence signatures for the given two sets of proteins.
"""
from django.conf import settings
from django.core import exceptions
from alignment.functions import strip_html_tags, get_format_props
Alignment = getattr(__import__(
'common.alignment_' + settings.SITE_NAME,
fromlist=['Alignment']
), 'Alignment')
from common.definitions import AMINO_ACIDS, AMINO_ACID_GROUPS, AMINO_ACID_GROUP_NAMES
from protein.models import Protein, ProteinConformation
from residue.models import Residue
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import re
import time
class SequenceSignature:
"""
A class handling the sequence signature.
"""
def __init__(self):
self.aln_pos = Alignment()
self.aln_neg = Alignment()
self.features_normalized_pos = OrderedDict()
self.features_normalized_neg = OrderedDict()
self.features_frequency_difference = OrderedDict()
self.features_frequency_diff_display = []
self.freq_cutoff = 30
self.common_gn = OrderedDict()
def setup_alignments(self, segments, protein_set_positive = None, protein_set_negative = None):
if protein_set_positive:
self.aln_pos.load_proteins(protein_set_positive)
if protein_set_negative:
self.aln_neg.load_proteins(protein_set_negative)
# In case positive and negative sets come from different classes
# unify the numbering schemes
self.common_schemes = self.merge_numbering_schemes()
self.aln_pos.numbering_schemes = self.common_schemes
self.aln_neg.numbering_schemes = self.common_schemes
# now load the segments and generic numbers
self.aln_pos.load_segments(segments)
self.aln_neg.load_segments(segments)
self.aln_pos.build_alignment()
self.aln_neg.build_alignment()
self.common_gn = deepcopy(self.aln_pos.generic_numbers)
for scheme in self.aln_neg.numbering_schemes:
for segment in self.aln_neg.segments:
for pos in self.aln_neg.generic_numbers[scheme[0]][segment].items():
if pos[0] not in self.common_gn[scheme[0]][segment].keys():
self.common_gn[scheme[0]][segment][pos[0]] = pos[1]
self.common_gn[scheme[0]][segment] = OrderedDict(sorted(
self.common_gn[scheme[0]][segment].items(),
key=lambda x: x[0].split('x')
))
self.common_segments = OrderedDict([
(x, sorted(list(set(self.aln_pos.segments[x]) | set(self.aln_neg.segments[x])), key=lambda x: x.split('x'))) for x in self.aln_neg.segments
])
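        # common_segments now maps each segment to the union of generic-number
        # positions present in either protein set, sorted by their 'NxM' labels,
        # so both alignments can be padded to the same set of columns below.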
# tweaking alignment
self._update_alignment(self.aln_pos)
self.aln_pos.calculate_statistics()
# tweaking consensus seq
self._update_consensus_sequence(self.aln_pos)
# tweaking negative alignment
self._update_alignment(self.aln_neg)
self.aln_neg.calculate_statistics()
# tweaking consensus seq
self._update_consensus_sequence(self.aln_neg)
def _update_alignment(self, alignment):
for prot in alignment.proteins:
for seg, resi in prot.alignment.items():
consensus = []
aln_list = [x[0] for x in resi]
aln_dict = dict([
(x[0], x) for x in resi
])
for pos in self.common_segments[seg]:
if pos not in aln_list:
consensus.append([pos, False, '-', 0])
else:
consensus.append(aln_dict[pos])
prot.alignment[seg] = consensus
def _update_consensus_sequence(self, alignment):
for seg, resi in alignment.consensus.items():
consensus = OrderedDict()
aln_list = [x for x in resi.keys()]
aln_dict = dict([
(x, resi[x]) for x in resi.keys()
])
for pos in self.common_segments[seg]:
if pos not in aln_list:
consensus[pos] = ['_', 0, 100]
else:
consensus[pos] = aln_dict[pos]
alignment.consensus[seg] = consensus
def _convert_feature_stats(self, fstats, aln):
tmp_fstats = []
for row in range(len(AMINO_ACID_GROUPS.keys())):
tmp_row = []
for segment in self.common_segments:
print(fstats[segment][row])
tmp_row.append([[
str(x),
str(int(x/10)),
] for x in fstats[segment][row]])
tmp_fstats.append(tmp_row)
aln.feature_stats = tmp_fstats
def setup_alignments_from_selection(self, positive_selection, negative_selection):
"""
The function gathers necessary information from provided selections
and runs the calculations of the sequence alignments independently for
both protein sets. It also finds the common set of residue positions.
Arguments:
positive_selection {Selection} -- selection containing first group of proteins
            negative_selection {[type]} -- selection containing the second group of proteins along with the user-selected sequence segments for the alignment
"""
self.aln_pos.load_proteins_from_selection(positive_selection)
self.aln_neg.load_proteins_from_selection(negative_selection)
# local segment list
segments = []
# read selection
for segment in negative_selection.segments:
segments.append(segment)
self.setup_alignments(segments)
def calculate_signature(self):
"""
Calculates the feature frequency difference between two protein sets.
Generates the full differential matrix as well as maximum difference for a position (for scatter plot).
"""
for sid, segment in enumerate(self.aln_neg.segments):
self.features_normalized_pos[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_pos.feature_stats],
dtype='int'
)
self.features_normalized_neg[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_neg.feature_stats],
dtype='int'
)
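            # Both matrices are (number of feature groups) x (number of positions)
            # integer arrays of per-position feature frequencies (percentages),
            # later subtracted element-wise into features_frequency_difference.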
for segment in self.aln_neg.segments:
            #TODO: get the correct default numbering scheme from settings
for idx, res in enumerate(self.common_gn[self.common_schemes[0][0]][segment].keys()):
if res not in self.aln_pos.generic_numbers[self.common_schemes[0][0]][segment].keys():
self.features_normalized_pos[segment] = np.insert(self.features_normalized_pos[segment], idx, 0, axis=1)
                    # Set 100% occurrence for a gap feature
self.features_normalized_pos[segment][-1, idx] = 100
elif res not in self.aln_neg.generic_numbers[self.common_schemes[0][0]][segment].keys():
self.features_normalized_neg[segment] = np.insert(self.features_normalized_neg[segment], idx, 0, axis=1)
                    # Set 100% occurrence for a gap feature
self.features_normalized_neg[segment][-1, idx] = 100
# now the difference
self.features_frequency_difference[segment] = np.subtract(
self.features_normalized_pos[segment],
self.features_normalized_neg[segment]
)
self._convert_feature_stats(self.features_normalized_pos, self.aln_pos)
self._convert_feature_stats(self.features_normalized_neg, self.aln_neg)
# Version with display data
for row in range(len(AMINO_ACID_GROUPS.keys())):
tmp_row = []
for segment in self.aln_neg.segments:
#first item is the real value,
                # second is the assignment of color (via css)
# 0 - red, 5 - yellow, 10 - green
#third item is a tooltip
tmp_row.append([[
x,
int(x/20)+5,
"{} - {}".format(
self.features_normalized_pos[segment][row][y],
self.features_normalized_neg[segment][row][y]
)
] for y, x in enumerate(self.features_frequency_difference[segment][row])])
self.features_frequency_diff_display.append(tmp_row)
self.signature = OrderedDict([(x, []) for x in self.aln_neg.segments])
for segment in self.aln_neg.segments:
tmp = np.array(self.features_frequency_difference[segment])
signature_map = np.absolute(tmp).argmax(axis=0)
self.signature[segment] = []
for col, pos in enumerate(list(signature_map)):
self.signature[segment].append([
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
self.features_frequency_difference[segment][pos][col],
int(self.features_frequency_difference[segment][pos][col]/20)+5
])
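            # Each signature entry is [feature key, feature name, frequency
            # difference, colour bin]; int(diff/20)+5 maps the -100..100 range
            # onto 0..10 (red through yellow to green, as noted above).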
features_pos = OrderedDict()
features_neg = OrderedDict()
self.features_consensus_pos = OrderedDict([(x, []) for x in self.aln_neg.segments])
self.features_consensus_neg = OrderedDict([(x, []) for x in self.aln_neg.segments])
for sid, segment in enumerate(self.aln_neg.segments):
features_pos[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_pos.feature_stats],
dtype='int'
)
features_neg[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_neg.feature_stats],
dtype='int'
)
features_cons_pos = np.absolute(features_pos[segment]).argmax(axis=0)
features_cons_neg = np.absolute(features_neg[segment]).argmax(axis=0)
for col, pos in enumerate(list(features_cons_pos)):
self.features_consensus_pos[segment].append([
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
features_pos[segment][pos][col],
int(features_pos[segment][pos][col]/20)+5
])
for col, pos in enumerate(list(features_cons_neg)):
self.features_consensus_neg[segment].append([
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
features_neg[segment][pos][col],
int(features_neg[segment][pos][col]/20)+5
])
self._convert_feature_stats(self.features_normalized_pos, self.aln_pos)
self._convert_feature_stats(self.features_normalized_neg, self.aln_neg)
def prepare_display_data(self):
options = {
'num_residue_columns': len(sum([[x for x in self.common_gn[self.common_schemes[0][0]][segment]] for segment in self.aln_neg.segments], [])),
'num_of_sequences_pos': len(self.aln_pos.proteins),
'num_residue_columns_pos': len(self.aln_pos.positions),
'num_of_sequences_neg': len(self.aln_neg.proteins),
'num_residue_columns_neg': len(self.aln_neg.positions),
'common_segments': self.common_segments,
'common_generic_numbers': self.common_gn,
'feats_signature': self.features_frequency_diff_display,
'signature_consensus': self.signature,
'feats_cons_pos': self.features_consensus_pos,
'feats_cons_neg': self.features_consensus_neg,
'a_pos': self.aln_pos,
'a_neg': self.aln_neg,
}
return options
def prepare_session_data(self):
session_signature = {
'common_positions': self.common_gn,
'diff_matrix': self.features_frequency_difference,
'numbering_schemes': self.common_schemes,
'common_segments': self.common_segments,
}
return session_signature
def merge_numbering_schemes(self):
"""
Extract all of the numbering schemes used for a set of proteins.
Arguments:
proteins {selection} -- A set of proteins to analyze
"""
numbering_schemes = {}
for prot in self.aln_pos.proteins + self.aln_neg.proteins:
if prot.protein.residue_numbering_scheme.slug not in numbering_schemes:
rnsn = prot.protein.residue_numbering_scheme.name
numbering_schemes[prot.protein.residue_numbering_scheme.slug] = rnsn
# order and convert numbering scheme dict to tuple
return sorted(numbering_schemes.items(), key=lambda x: x[0])
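    # The result is a list of (slug, name) tuples sorted by slug, e.g. a
    # single-scheme case might look like [('gpcrdb', 'GPCRdb numbering')]
    # (the names here are hypothetical placeholders, not actual database values).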
def prepare_excel_worksheet(self, workbook, worksheet_name, aln='positive', data='alignment'):
"""
A function saving alignment data subset into the excel spreadsheet.
It adds a worksheet to an existing workbook and saves only a selected subset of alignment data.
For a complete save of the alignment it needs to be wrapped with additional code.
The outline of the excel worksheet is similar to the one of html page.
        First column shows numbering schemes, protein list, etc.
The frequency data start from column B
Arguments:
            workbook {xlsxwriter.Workbook} -- object to add worksheet to
            worksheet_name {string} -- name for the new worksheet
Keyword Arguments:
            aln {string} -- alignment to extract data from.
Possible choices: positive, negative, signature
            data {string} -- data type to save to worksheet: 'alignment' or 'features' frequencies
"""
props = AMINO_ACID_GROUP_NAMES.values()
worksheet = workbook.add_worksheet(worksheet_name)
if aln == 'positive':
numbering_schemes = self.aln_pos.numbering_schemes
generic_numbers_set = self.aln_pos.generic_numbers
alignment = self.aln_pos
if data == 'features':
data_block = self.aln_pos.feature_stats
elif aln == 'negative':
numbering_schemes = self.aln_neg.numbering_schemes
generic_numbers_set = self.aln_neg.generic_numbers
alignment = self.aln_neg
if data == 'features':
data_block = self.aln_neg.feature_stats
else:
numbering_schemes = self.common_schemes
generic_numbers_set = self.common_gn
if data == 'features':
data_block = self.features_frequency_diff_display
# First column, numbering schemes
for row, scheme in enumerate(numbering_schemes):
worksheet.write(1 + 3*row, 0, scheme[1])
# First column, stats
if data == 'features':
for offset, prop in enumerate(props):
worksheet.write(1 + 3 * len(numbering_schemes) + offset, 0, prop)
# First column, protein list (for alignment) and line for consensus sequence
else:
for offset, prot in enumerate(alignment.proteins):
worksheet.write(
1 + 3 * len(numbering_schemes) + offset,
0,
prot.protein.entry_name
)
worksheet.write(
1 + len(numbering_schemes) + len(alignment.proteins),
0,
'CONSENSUS'
)
# Second column and on
# Segments
offset = 0
for segment in generic_numbers_set[numbering_schemes[0][0]].keys():
worksheet.merge_range(
0,
1 + offset,
0,
len(generic_numbers_set[numbering_schemes[0][0]][segment]) + offset - 1,
segment
)
offset += len(generic_numbers_set[numbering_schemes[0][0]][segment])
# Generic numbers
# for row, item in enumerate(generic_numbers_set.items()):
for row, item in enumerate(numbering_schemes):
scheme = item[0]
offset = 1
for sn, gn_list in generic_numbers_set[scheme].items():
for col, gn_pair in enumerate(gn_list.items()):
try:
tm, bw, gpcrdb = re.split('\.|x', strip_html_tags(gn_pair[1]))
except:
tm, bw, gpcrdb = ('', '', '')
worksheet.write(
1 + 3 * row,
col + offset,
tm
)
worksheet.write(
2 + 3 * row,
col + offset,
bw
)
worksheet.write(
3 + 3*row,
col + offset,
gpcrdb
)
offset += len(gn_list.items())
# Stats
if data == 'features':
offset = 1 + 3 * len(numbering_schemes)
for row, prop in enumerate(data_block):
col_offset = 0
for segment in prop:
for col, freq in enumerate(segment):
cell_format = workbook.add_format(get_format_props(freq[1]))
worksheet.write(
offset + row,
1 + col + col_offset,
freq[0] if isinstance(freq[0], int) else int(freq[0]),
cell_format
)
col_offset += len(segment)
col_offset = 0
for segment, cons_feat in self.signature.items():
for col, chunk in enumerate(cons_feat):
worksheet.write(
offset + len(AMINO_ACID_GROUPS),
1 + col + col_offset,
chunk[0]
)
cell_format = workbook.add_format(get_format_props(int(chunk[2]/20)+5))
worksheet.write(
1 + offset + len(AMINO_ACID_GROUPS),
1 + col + col_offset,
chunk[2],
cell_format
)
col_offset += len(cons_feat)
# Alignment
else:
offset = 1 + 3 * len(alignment.numbering_schemes)
for row, data in enumerate(alignment.proteins):
col_offset = 0
for segment, sequence in data.alignment.items():
for col, res in enumerate(sequence):
cell_format = workbook.add_format(get_format_props(res=res[2]))
worksheet.write(
offset + row,
1 + col + col_offset,
res[2],
cell_format
)
col_offset += len(sequence)
# Consensus sequence
row = 1 + 3 * len(alignment.numbering_schemes) + len(alignment.proteins)
col_offset = 0
for segment, sequence in alignment.consensus.items():
for col, data in enumerate(sequence.items()):
res = data[1]
cell_format = workbook.add_format(get_format_props(res=res[0]))
worksheet.write(
row,
1 + col + col_offset,
res[0],
cell_format
)
cell_format = workbook.add_format(get_format_props(res[1]))
worksheet.write(
row + 1,
1 + col + col_offset,
res[2],
cell_format
)
col_offset += len(sequence.items())
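# Example wrapper (a sketch; assumes the xlsxwriter package, which provides
# Workbook(filename) and close()):
#     import xlsxwriter
#     workbook = xlsxwriter.Workbook('signature.xlsx')
#     signature.prepare_excel_worksheet(workbook, 'features_pos', aln='positive', data='features')
#     signature.prepare_excel_worksheet(workbook, 'signature', aln='signature', data='features')
#     workbook.close()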
class SignatureMatch():
def __init__(self, common_positions, numbering_schemes, segments, difference_matrix, protein_set, cutoff=40):
self.cutoff = cutoff
self.common_gn = common_positions
self.schemes = numbering_schemes
self.segments = segments
self.diff_matrix = difference_matrix
self.signature_matrix_filtered = OrderedDict()
self.signature_consensus = OrderedDict()
self.protein_set = protein_set
self.relevant_gn = OrderedDict([(x[0], OrderedDict()) for x in self.schemes])
self.relevant_segments = OrderedDict()
self.scored_proteins = []
self.protein_report = OrderedDict()
self.protein_signatures = OrderedDict()
self.find_relevant_gns()
self.residue_to_feat = dict(
[(x, set()) for x in AMINO_ACIDS.keys()]
)
for fidx, feat in enumerate(AMINO_ACID_GROUPS.items()):
for res in feat[1].split(','):
self.residue_to_feat[res].add(fidx)
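        # residue_to_feat maps each one-letter amino acid code to the set of
        # indices of the AMINO_ACID_GROUPS feature groups containing it; it is
        # used by score_protein to test whether a residue matches the signature.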
def find_relevant_gns(self):
matrix_consensus = OrderedDict()
for segment in self.segments:
print(segment)
segment_consensus = []
signature_map = np.absolute(self.diff_matrix[segment]).argmax(axis=0)
for col, pos in enumerate(list(signature_map)):
if abs(self.diff_matrix[segment][pos][col]) > self.cutoff:
segment_consensus.append(self.diff_matrix[segment][ : , col])
for scheme in self.schemes:
gnum = list(self.common_gn[scheme[0]][segment].items())[col]
try:
self.relevant_gn[scheme[0]][segment][gnum[0]] = gnum[1]
except:
self.relevant_gn[scheme[0]][segment] = OrderedDict()
self.relevant_gn[scheme[0]][segment][gnum[0]] = gnum[1]
segment_consensus = np.array(segment_consensus).T
            if segment_consensus.size > 0:
matrix_consensus[segment] = segment_consensus
self.signature_matrix_filtered = matrix_consensus
self.relevant_segments = OrderedDict([
(
x[0],
self.relevant_gn[self.schemes[0][0]][x[0]].keys()
) for x in self.signature_matrix_filtered.items()
])
signature = OrderedDict([(x[0], []) for x in matrix_consensus.items()])
for segment in self.relevant_segments:
signature_map = np.absolute(self.signature_matrix_filtered[segment]).argmax(axis=0)
tmp = np.array(self.signature_matrix_filtered[segment])
for col, pos in enumerate(list(signature_map)):
signature[segment].append([
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
tmp[pos][col],
int(tmp[pos][col]/20)+5
])
self.signature_consensus = signature
def score_protein_class(self, pclass_slug='001'):
start = time.time()
protein_scores = {}
protein_signature_match = {}
class_proteins = Protein.objects.filter(
species__common_name='Human',
family__slug__startswith=pclass_slug
).exclude(
id__in=[x.id for x in self.protein_set]
)
class_a_pcf = ProteinConformation.objects.order_by('protein__family__slug',
'protein__entry_name').filter(protein__in=class_proteins, protein__sequence_type__slug='wt').exclude(protein__entry_name__endswith='-consensus')
for pcf in class_a_pcf:
p_start = time.time()
score, signature_match = self.score_protein(pcf)
protein_scores[pcf] = score
protein_signature_match[pcf] = signature_match
p_end = time.time()
print("Time elapsed for {}: ".format(pcf.protein.entry_name), p_end - p_start)
end = time.time()
self.protein_report = OrderedDict(sorted(protein_scores.items(), key=lambda x: x[1], reverse=True))
for prot in self.protein_report.items():
self.protein_signatures[prot[0]] = protein_signature_match[prot[0]]
self.scored_proteins = list(self.protein_report.keys())
print("Total time: ", end - start)
def score_protein(self, pcf):
prot_score = 0.0
consensus_match = OrderedDict([(x, []) for x in self.relevant_segments])
for segment in self.relevant_segments:
tmp = []
signature_map = np.absolute(self.signature_matrix_filtered[segment]).argmax(axis=0)
resi = Residue.objects.filter(
protein_segment__slug=segment,
protein_conformation=pcf,
generic_number__label__in=self.relevant_gn[self.schemes[0][0]][segment].keys(),
)
for idx, pos in enumerate(self.relevant_gn[self.schemes[0][0]][segment].keys()):
feat = signature_map[idx]
feat_abr = list(AMINO_ACID_GROUPS.keys())[feat]
feat_name = list(AMINO_ACID_GROUP_NAMES.values())[feat]
val = self.signature_matrix_filtered[segment][feat][idx]
try:
res = resi.get(generic_number__label=pos)
r_name = res.amino_acid if res.amino_acid != 'Gap' else '_'
if feat in self.residue_to_feat[res.amino_acid]:
prot_score += val
tmp.append([feat_abr, feat_name, val, "green", res.amino_acid, pos]) if val > 0 else tmp.append([feat_abr, feat_name, val, "white", res.amino_acid, pos])
else:
prot_score -= val
tmp.append([feat_abr, feat_name, val, "red", res.amino_acid, pos]) if val > 0 else tmp.append([feat_abr, feat_name, val, "white", res.amino_acid, pos])
except (exceptions.ObjectDoesNotExist, exceptions.MultipleObjectsReturned):
prot_score -= val
tmp.append([feat_abr, feat_name, val, "red", '_', pos]) if val > 0 else tmp.append([feat_abr, feat_name, val, "white", '_', pos])
consensus_match[segment] = tmp
return (prot_score/100, consensus_match)
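    # Scoring sketch: at every retained signature position the dominant feature's
    # frequency difference is added when the protein's residue belongs to that
    # feature group and subtracted otherwise (missing residues count as gaps);
    # the total, divided by 100, is returned with the per-position annotation.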
def signature_score_excel(workbook, scores, protein_signatures, signature_filtered, relevant_gn, relevant_segments, numbering_schemes):
worksheet = workbook.add_worksheet('scored_proteins')
# First column, numbering schemes
for row, scheme in enumerate(numbering_schemes):
worksheet.write(1 + 3*row, 0, scheme[1])
# Score header
worksheet.write(1, 1, 'Score')
offset = 0
# Segments
for segment, resi in relevant_segments.items():
worksheet.merge_range(
0,
2 + offset,
0,
len(resi) + offset,
segment
)
offset += len(resi)
# Generic numbers
# for row, item in enumerate(generic_numbers_set.items()):
for row, item in enumerate(numbering_schemes):
scheme = item[0]
offset = 1
for sn, gn_list in relevant_gn[scheme].items():
for col, gn_pair in enumerate(gn_list.items()):
try:
tm, bw, gpcrdb = re.split('\.|x', strip_html_tags(gn_pair[1]))
except:
tm, bw, gpcrdb = ('', '', '')
worksheet.write(
1 + 3 * row,
1 + col + offset,
tm
)
worksheet.write(
2 + 3 * row,
1 + col + offset,
bw
)
worksheet.write(
3 + 3*row,
1 + col + offset,
gpcrdb
)
offset += len(gn_list.items())
# Line for sequence signature
worksheet.write(
1 + 3 * len(numbering_schemes),
0,
'CONSENSUS'
)
col_offset = 0
for segment, cons_feat in signature_filtered.items():
for col, chunk in enumerate(cons_feat):
worksheet.write(
2 + 3 * len(numbering_schemes),
2 + col + col_offset,
chunk[0]
)
cell_format = workbook.add_format(get_format_props(int(chunk[2]/20)+5))
worksheet.write(
1 + 3 * len(numbering_schemes),
2 + col + col_offset,
chunk[2],
cell_format
)
col_offset += len(cons_feat)
# Score lines
row_offset = 0
for protein, score in scores.items():
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
0,
protein.protein.entry_name,
)
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
1,
score,
)
col_offset = 0
for segment, data in protein_signatures[protein].items():
for col, res in enumerate(data):
cell_format = workbook.add_format({'bg_color': res[3],})
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
2 + col + col_offset,
res[4],
cell_format
)
col_offset += len(data)
row_offset += 1
| apache-2.0 |
anirudhSK/chromium | tools/perf/benchmarks/thread_times.py | 2 | 1721 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from benchmarks import silk_flags
from measurements import thread_times
class ThreadTimesKeySilkCases(test.Test):
"""Measures timeline metrics while performing smoothness action on key silk
cases."""
test = thread_times.ThreadTimes
page_set = 'page_sets/key_silk_cases.json'
options = {"report_silk_results": True}
class ThreadTimesFastPathKeySilkCases(test.Test):
"""Measures timeline metrics while performing smoothness action on key silk
cases using bleeding edge rendering fast paths."""
tag = 'fast_path'
test = thread_times.ThreadTimes
page_set = 'page_sets/key_silk_cases.json'
options = {"report_silk_results": True}
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForFastPath(options)
class LegacySilkBenchmark(ThreadTimesKeySilkCases):
"""Same as thread_times.key_silk_cases but with the old name."""
@classmethod
def GetName(cls):
return "silk.key_silk_cases"
class ThreadTimesFastPathMobileSites(test.Test):
"""Measures timeline metrics while performing smoothness action on
key mobile sites labeled with fast-path tag.
http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
test = thread_times.ThreadTimes
page_set = 'page_sets/key_mobile_sites.json'
options = {'page_label_filter' : 'fastpath'}
class LegacyFastPathBenchmark(ThreadTimesFastPathMobileSites):
"""Same as thread_times.fast_path_mobile_sites but with the old name."""
@classmethod
def GetName(cls):
return "fast_path.key_mobile_sites"
| bsd-3-clause |
cmelange/ansible | lib/ansible/modules/system/aix_inittab.py | 26 | 7988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Joris Weijters <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: "Joris Weijters (@molekuul)"
module: aix_inittab
short_description: Manages the inittab on AIX.
description:
- Manages the inittab on AIX.
version_added: "2.3"
options:
name:
description: Name of the inittab entry.
required: True
aliases: ['service']
runlevel:
description: Runlevel of the entry.
required: True
action:
description: Action what the init has to do with this entry.
required: True
choices: [
'respawn',
'wait',
'once',
'boot',
'bootwait',
'powerfail',
'powerwait',
'off',
'hold',
'ondemand',
'initdefault',
'sysinit'
]
command:
description: What command has to run.
required: True
insertafter:
    description: After which inittab line the new entry should be inserted.
state:
description: Whether the entry should be present or absent in the inittab file
choices: [ "present", "absent" ]
default: present
notes:
- The changes are persistent across reboots, you need root rights to read or adjust the inittab with the lsitab, chitab,
mkitab or rmitab commands.
- tested on AIX 7.1.
requirements: [ 'itertools']
'''
EXAMPLES = '''
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 4
action: once
command: "echo hello"
insertafter: existingservice
state: present
become: yes
# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
- name: Change startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: "echo hello"
state: present
become: yes
# Remove inittab entry startmyservice.
- name: remove startmyservice from inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: "echo hello"
state: absent
become: yes
'''
RETURN = '''
name:
description: name of the adjusted inittab entry
returned: always
type: string
sample: startmyservice
msg:
description: action done with the inittab entry
returned: changed
type: string
sample: changed inittab entry startmyservice
changed:
description: whether the inittab changed or not
    returned: always
type: boolean
sample: true
'''
# Import necessary libraries
import itertools
from ansible.module_utils.basic import AnsibleModule
# end import modules
# start defining the functions
def check_current_entry(module):
# Check if entry exists, if not return False in exists in return dict,
# if true return True and the entry in return dict
existsdict = {'exist': False}
lsitab = module.get_bin_path('lsitab')
(rc, out, err) = module.run_command([lsitab, module.params['name']])
if rc == 0:
keys = ('name', 'runlevel', 'action', 'command')
values = out.split(":")
        # strip non-readable characters such as \n
values = map(lambda s: s.strip(), values)
        existsdict = dict(zip(keys, values))
existsdict.update({'exist': True})
return existsdict
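    # Example (values taken from the EXAMPLES block above): an inittab line
    # 'startmyservice:4:once:echo hello' reported by lsitab is split on ':' into
    # {'name': 'startmyservice', 'runlevel': '4', 'action': 'once',
    #  'command': 'echo hello', 'exist': True}.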
def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['service']),
runlevel=dict(required=True, type='str'),
action=dict(choices=[
'respawn',
'wait',
'once',
'boot',
'bootwait',
'powerfail',
'powerwait',
'off',
'hold',
'ondemand',
'initdefault',
'sysinit'
], type='str'),
command=dict(required=True, type='str'),
insertafter=dict(type='str'),
state=dict(choices=[
'present',
'absent',
], required=True, type='str'),
),
supports_check_mode=True,
)
result = {
'name': module.params['name'],
'changed': False,
'msg': ""
}
# Find commandline strings
mkitab = module.get_bin_path('mkitab')
rmitab = module.get_bin_path('rmitab')
chitab = module.get_bin_path('chitab')
rc = 0
# check if the new entry exists
current_entry = check_current_entry(module)
# if action is install or change,
if module.params['state'] == 'present':
# create new entry string
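        # e.g. 'startmyservice:4:once:echo hello' -- the colon-separated record
        # format expected by mkitab/chitab (see the EXAMPLES block above)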
new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
":" + module.params['action'] + ":" + module.params['command']
        # If the current entry exists or fields are different (if the entry does
        # not exist, the entry will be created)
if (not current_entry['exist']) or (
module.params['runlevel'] != current_entry['runlevel'] or
module.params['action'] != current_entry['action'] or
module.params['command'] != current_entry['command']):
# If the entry does exist then change the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command([chitab, new_entry])
if rc != 0:
module.fail_json(
msg="could not change inittab", rc=rc, err=err)
result['msg'] = "changed inittab entry" + " " + current_entry['name']
result['changed'] = True
# If the entry does not exist create the entry
elif not current_entry['exist']:
if module.params['insertafter']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, '-i', module.params['insertafter'], new_entry])
else:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, new_entry])
if rc != 0:
                    module.fail_json(
                        msg="could not adjust inittab", rc=rc, err=err)
result['msg'] = "add inittab entry" + " " + module.params['name']
result['changed'] = True
elif module.params['state'] == 'absent':
# If the action is remove and the entry exists then remove the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[rmitab, module.params['name']])
if rc != 0:
module.fail_json(
                    msg="could not remove entry from inittab", rc=rc, err=err)
result['msg'] = "removed inittab entry" + " " + current_entry['name']
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
wrigri/libcloud | libcloud/test/storage/test_oss.py | 11 | 36276 | # -*- coding=utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import os
import sys
import unittest
try:
import mock
except ImportError:
from unittest import mock
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import PY3
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import MalformedResponseError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.oss import OSSConnection
from libcloud.storage.drivers.oss import OSSStorageDriver
from libcloud.storage.drivers.oss import CHUNK_SIZE
from libcloud.storage.drivers.dummy import DummyIterator
from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611
from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_OSS_PARAMS
class OSSConnectionTestCase(unittest.TestCase):
def setUp(self):
self.conn = OSSConnection('44CF9590006BF252F707',
'OtxrzxIsfpFjA7SwPzILwy8Bw21TLhquhboDYROV')
def test_signature(self):
expected = b('26NBxoKdsyly4EDv6inkoDft/yA=')
headers = {
'Content-MD5': 'ODBGOERFMDMzQTczRUY3NUE3NzA5QzdFNUYzMDQxNEM=',
'Content-Type': 'text/html',
'Expires': 'Thu, 17 Nov 2005 18:49:58 GMT',
'X-OSS-Meta-Author': '[email protected]',
'X-OSS-Magic': 'abracadabra',
'Host': 'oss-example.oss-cn-hangzhou.aliyuncs.com'
}
action = '/oss-example/nelson'
actual = OSSConnection._get_auth_signature('PUT', headers, {},
headers['Expires'],
self.conn.key,
action,
'x-oss-')
self.assertEqual(expected, actual)
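        # The expected digest above is, in outline, the Base64-encoded HMAC-SHA1 of
        # the canonical OSS string-to-sign (verb, Content-MD5, Content-Type, Expires,
        # canonicalized x-oss-* headers and the resource path) computed with the
        # secret key -- a sketch of the scheme; see the OSS signing docs for details.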
class ObjectTestCase(unittest.TestCase):
def test_object_with_chinese_name(self):
driver = OSSStorageDriver(*STORAGE_OSS_PARAMS)
obj = Object(name='中文', size=0, hash=None, extra=None,
meta_data=None, container=None, driver=driver)
self.assertTrue(obj.__repr__() is not None)
class OSSMockHttp(StorageMockHttp, MockHttpTestCase):
fixtures = StorageFileFixtures('oss')
base_headers = {}
def _unauthorized(self, method, url, body, headers):
return (httplib.UNAUTHORIZED,
'',
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers_empty(self, method, url, body, headers):
body = self.fixtures.load('list_containers_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers(self, method, url, body, headers):
body = self.fixtures.load('list_containers.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_empty(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_chinese(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects_chinese.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_prefix(self, method, url, body, headers):
params = {'prefix': self.test.prefix}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('list_container_objects_prefix.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _get_container(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _get_object(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _notexisted_get_object(self, method, url, body, headers):
return (httplib.NOT_FOUND,
body,
self.base_headers,
httplib.responses[httplib.NOT_FOUND])
def _test_get_object(self, method, url, body, headers):
self.base_headers.update(
{'accept-ranges': 'bytes',
'connection': 'keep-alive',
'content-length': '0',
'content-type': 'application/octet-stream',
'date': 'Sat, 16 Jan 2016 15:38:14 GMT',
'etag': '"D41D8CD98F00B204E9800998ECF8427E"',
'last-modified': 'Fri, 15 Jan 2016 14:43:15 GMT',
'server': 'AliyunOSS',
'x-oss-object-type': 'Normal',
'x-oss-request-id': '569A63E6257784731E3D877F',
'x-oss-meta-rabbits': 'monkeys'})
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _invalid_name(self, method, url, body, headers):
# test_create_container_bad_request
return (httplib.BAD_REQUEST,
body,
headers,
httplib.responses[httplib.OK])
def _already_exists(self, method, url, body, headers):
# test_create_container_already_existed
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _create_container(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
self.assertEqual('', body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _create_container_location(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
location_constraint = ('<CreateBucketConfiguration>'
'<LocationConstraint>%s</LocationConstraint>'
'</CreateBucketConfiguration>' %
self.test.ex_location)
self.assertEqual(location_constraint, body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_doesnt_exist(self, method, url, body, headers):
# test_delete_container_doesnt_exist
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_not_empty(self, method, url, body, headers):
# test_delete_container_not_empty
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container(self, method, url, body, headers):
return (httplib.NO_CONTENT,
body,
self.base_headers,
httplib.responses[httplib.NO_CONTENT])
def _foo_bar_object_not_found(self, method, url, body, headers):
# test_delete_object_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object(self, method, url, body, headers):
# test_delete_object
return (httplib.NO_CONTENT,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_stream_data_multipart(self, method, url, body, headers):
headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
TEST_UPLOAD_ID = '0004B9894A22E5B1888A1E29F8236E2D'
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if not query.get('uploadId', False):
self.fail('Request doesnt contain uploadId query parameter')
upload_id = query['uploadId'][0]
if upload_id != TEST_UPLOAD_ID:
self.fail('first uploadId doesnt match')
if method == 'PUT':
# PUT is used for uploading the part. part number is mandatory
if not query.get('partNumber', False):
self.fail('Request is missing partNumber query parameter')
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
elif method == 'DELETE':
# DELETE is done for aborting the upload
body = ''
return (httplib.NO_CONTENT,
body,
headers,
httplib.responses[httplib.NO_CONTENT])
else:
commit = ET.fromstring(body)
count = 0
for part in commit.findall('Part'):
count += 1
part_no = part.find('PartNumber').text
etag = part.find('ETag').text
self.assertEqual(part_no, str(count))
self.assertEqual(etag, headers['etag'])
# Make sure that manifest contains at least one part
self.assertTrue(count >= 1)
body = self.fixtures.load('complete_multipart_upload.xml')
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _list_multipart(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'key-marker' not in query:
body = self.fixtures.load('ex_iterate_multipart_uploads_p1.xml')
else:
body = self.fixtures.load('ex_iterate_multipart_uploads_p2.xml')
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
class OSSMockRawResponse(MockRawResponse, MockHttpTestCase):
fixtures = StorageFileFixtures('oss')
def parse_body(self):
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
if PY3:
parser = ET.XMLParser(encoding='utf-8')
body = ET.XML(self.body.encode('utf-8'), parser=parser)
else:
body = ET.XML(self.body)
except:
raise MalformedResponseError("Failed to parse XML",
body=self.body,
driver=self.connection.driver)
return body
def _foo_bar_object(self, method, url, body, headers):
# test_download_object_success
body = self._generate_random_data(1000)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object_invalid_size(self, method, url, body, headers):
# test_upload_object_invalid_file_size
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object_not_found(self, method, url, body, headers):
# test_upload_object_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_test_upload_invalid_hash1(self, method, url, body, headers):
body = ''
headers = {}
headers['etag'] = '"foobar"'
# test_upload_object_invalid_hash1
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_upload(self, method, url, body, headers):
# test_upload_object_success
body = ''
headers = {'etag': '"0CC175B9C0F1B6A831C399E269772661"'}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_upload_acl(self, method, url, body, headers):
# test_upload_object_with_acl
body = ''
headers = {'etag': '"0CC175B9C0F1B6A831C399E269772661"'}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_stream_data(self, method, url, body, headers):
# test_upload_object_via_stream
body = ''
headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_stream_data_multipart(self, method, url, body, headers):
headers = {}
# POST is done for initiating multipart upload
if method == 'POST':
body = self.fixtures.load('initiate_multipart_upload.xml')
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
else:
body = ''
return (httplib.BAD_REQUEST,
body,
headers,
httplib.responses[httplib.BAD_REQUEST])
class OSSStorageDriverTestCase(unittest.TestCase):
driver_type = OSSStorageDriver
driver_args = STORAGE_OSS_PARAMS
mock_response_klass = OSSMockHttp
mock_raw_response_klass = OSSMockRawResponse
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args)
def setUp(self):
self.driver_type.connectionCls.conn_classes = (
None, self.mock_response_klass)
self.driver_type.connectionCls.rawResponseCls = \
self.mock_raw_response_klass
self.mock_response_klass.type = None
self.mock_response_klass.test = self
self.mock_raw_response_klass.type = None
self.mock_raw_response_klass.test = self
self.driver = self.create_driver()
def tearDown(self):
self._remove_test_file()
def _remove_test_file(self):
file_path = os.path.abspath(__file__) + '.temp'
try:
os.unlink(file_path)
except OSError:
pass
def test_invalid_credentials(self):
self.mock_response_klass.type = 'unauthorized'
self.assertRaises(InvalidCredsError, self.driver.list_containers)
def test_list_containers_empty(self):
self.mock_response_klass.type = 'list_containers_empty'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
def test_list_containers_success(self):
self.mock_response_klass.type = 'list_containers'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 2)
container = containers[0]
self.assertEqual('xz02tphky6fjfiuc0', container.name)
self.assertTrue('creation_date' in container.extra)
self.assertEqual('2014-05-15T11:18:32.000Z',
container.extra['creation_date'])
self.assertTrue('location' in container.extra)
self.assertEqual('oss-cn-hangzhou-a', container.extra['location'])
self.assertEqual(self.driver, container.driver)
def test_list_container_objects_empty(self):
self.mock_response_klass.type = 'list_container_objects_empty'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
def test_list_container_objects_success(self):
self.mock_response_klass.type = 'list_container_objects'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 2)
obj = objects[0]
self.assertEqual(obj.name, 'en/')
self.assertEqual(obj.hash, 'D41D8CD98F00B204E9800998ECF8427E')
self.assertEqual(obj.size, 0)
self.assertEqual(obj.container.name, 'test_container')
self.assertEqual(
obj.extra['last_modified'], '2016-01-15T14:43:15.000Z')
self.assertTrue('owner' in obj.meta_data)
def test_list_container_objects_with_chinese(self):
self.mock_response_klass.type = 'list_container_objects_chinese'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 2)
obj = [o for o in objects
if o.name == 'WEB控制台.odp'][0]
self.assertEqual(obj.hash, '281371EA1618CF0E645D6BB90A158276')
self.assertEqual(obj.size, 1234567)
self.assertEqual(obj.container.name, 'test_container')
self.assertEqual(
obj.extra['last_modified'], '2016-01-15T14:43:06.000Z')
self.assertTrue('owner' in obj.meta_data)
def test_list_container_objects_with_prefix(self):
self.mock_response_klass.type = 'list_container_objects_prefix'
container = Container(name='test_container', extra={},
driver=self.driver)
self.prefix = 'test_prefix'
objects = self.driver.list_container_objects(container=container,
ex_prefix=self.prefix)
self.assertEqual(len(objects), 2)
def test_get_container_doesnt_exist(self):
self.mock_response_klass.type = 'get_container'
self.assertRaises(ContainerDoesNotExistError,
self.driver.get_container,
container_name='not-existed')
def test_get_container_success(self):
self.mock_response_klass.type = 'get_container'
container = self.driver.get_container(
container_name='xz02tphky6fjfiuc0')
self.assertTrue(container.name, 'xz02tphky6fjfiuc0')
def test_get_object_container_doesnt_exist(self):
self.mock_response_klass.type = 'get_object'
self.assertRaises(ObjectDoesNotExistError,
self.driver.get_object,
container_name='xz02tphky6fjfiuc0',
object_name='notexisted')
def test_get_object_success(self):
self.mock_response_klass.type = 'get_object'
obj = self.driver.get_object(container_name='xz02tphky6fjfiuc0',
object_name='test')
self.assertEqual(obj.name, 'test')
self.assertEqual(obj.container.name, 'xz02tphky6fjfiuc0')
self.assertEqual(obj.size, 0)
self.assertEqual(obj.hash, 'D41D8CD98F00B204E9800998ECF8427E')
self.assertEqual(obj.extra['last_modified'],
'Fri, 15 Jan 2016 14:43:15 GMT')
self.assertEqual(obj.extra['content_type'], 'application/octet-stream')
self.assertEqual(obj.meta_data['rabbits'], 'monkeys')
def test_create_container_bad_request(self):
# invalid container name, returns a 400 bad request
self.mock_response_klass.type = 'invalid_name'
self.assertRaises(ContainerError,
self.driver.create_container,
container_name='invalid_name')
def test_create_container_already_exists(self):
# container with this name already exists
self.mock_response_klass.type = 'already_exists'
self.assertRaises(InvalidContainerNameError,
self.driver.create_container,
container_name='new-container')
def test_create_container_success(self):
# success
self.mock_response_klass.type = 'create_container'
name = 'new_container'
container = self.driver.create_container(container_name=name)
self.assertEqual(container.name, name)
def test_create_container_with_ex_location(self):
self.mock_response_klass.type = 'create_container_location'
name = 'new_container'
self.ex_location = 'oss-cn-beijing'
container = self.driver.create_container(container_name=name,
ex_location=self.ex_location)
self.assertEqual(container.name, name)
self.assertTrue(container.extra['location'], self.ex_location)
def test_delete_container_doesnt_exist(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
self.mock_response_klass.type = 'delete_container_doesnt_exist'
self.assertRaises(ContainerDoesNotExistError,
self.driver.delete_container,
container=container)
def test_delete_container_not_empty(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
self.mock_response_klass.type = 'delete_container_not_empty'
self.assertRaises(ContainerIsNotEmptyError,
self.driver.delete_container,
container=container)
def test_delete_container_success(self):
self.mock_response_klass.type = 'delete_container'
container = Container(name='new_container', extra=None,
driver=self.driver)
self.assertTrue(self.driver.delete_container(container=container))
def test_download_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_invalid_file_size(self):
self.mock_raw_response_klass.type = 'invalid_size'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertFalse(result)
def test_download_object_not_found(self):
self.mock_raw_response_klass.type = 'not_found'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
self.assertRaises(ObjectDoesNotExistError,
self.driver.download_object,
obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
def test_download_object_as_stream_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
stream = self.driver.download_object_as_stream(obj=obj,
chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_upload_object_invalid_hash1(self):
def upload_file(self, response, file_path, chunked=False,
calculate_hash=True):
return True, 'hash343hhash89h932439jsaa89', 1000
self.mock_raw_response_klass.type = 'invalid_hash1'
old_func = self.driver_type._upload_file
try:
self.driver_type._upload_file = upload_file
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
self.assertRaises(ObjectHashMismatchError,
self.driver.upload_object,
file_path=file_path,
container=container,
object_name=object_name,
verify_hash=True)
finally:
self.driver_type._upload_file = old_func
def test_upload_object_success(self):
def upload_file(self, response, file_path, chunked=False,
calculate_hash=True):
return True, '0cc175b9c0f1b6a831c399e269772661', 1000
old_func = self.driver_type._upload_file
try:
self.driver_type._upload_file = upload_file
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, 1000)
self.assertTrue('some-value' in obj.meta_data)
finally:
self.driver_type._upload_file = old_func
def test_upload_object_with_acl(self):
def upload_file(self, response, file_path, chunked=False,
calculate_hash=True):
return True, '0cc175b9c0f1b6a831c399e269772661', 1000
old_func = self.driver_type._upload_file
try:
self.driver_type._upload_file = upload_file
self.mock_raw_response_klass.type = 'acl'
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'acl': 'public-read'}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, 1000)
self.assertEqual(obj.extra['acl'], 'public-read')
finally:
self.driver_type._upload_file = old_func
def test_upload_object_with_invalid_acl(self):
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'acl': 'invalid-acl'}
self.assertRaises(AttributeError,
self.driver.upload_object,
file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
def test_upload_empty_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_raw_response_klass.type = 'multipart'
self.mock_response_klass.type = 'multipart'
else:
self.mock_raw_response_klass.type = None
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(data=[''])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 0)
def test_upload_small_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_raw_response_klass.type = 'multipart'
self.mock_response_klass.type = 'multipart'
else:
self.mock_raw_response_klass.type = None
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(data=['2', '3', '5'])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 3)
def test_upload_big_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_raw_response_klass.type = 'multipart'
self.mock_response_klass.type = 'multipart'
else:
self.mock_raw_response_klass.type = None
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(
data=['2' * CHUNK_SIZE, '3' * CHUNK_SIZE, '5'])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, CHUNK_SIZE * 2 + 1)
def test_upload_object_via_stream_abort(self):
if not self.driver.supports_multipart_upload:
return
self.mock_raw_response_klass.type = 'MULTIPART'
self.mock_response_klass.type = 'MULTIPART'
def _faulty_iterator():
for i in range(0, 5):
yield str(i)
raise RuntimeError('Error in fetching data')
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = _faulty_iterator()
extra = {'content_type': 'text/plain'}
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
except Exception:
pass
return
def test_ex_iterate_multipart_uploads(self):
if not self.driver.supports_multipart_upload:
return
self.mock_response_klass.type = 'list_multipart'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
for upload in self.driver.ex_iterate_multipart_uploads(container,
max_uploads=2):
self.assertTrue(upload.key is not None)
self.assertTrue(upload.id is not None)
self.assertTrue(upload.initiated is not None)
def test_ex_abort_all_multipart_uploads(self):
if not self.driver.supports_multipart_upload:
return
self.mock_response_klass.type = 'list_multipart'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
with mock.patch('libcloud.storage.drivers.oss.OSSStorageDriver'
'._abort_multipart', autospec=True) as mock_abort:
self.driver.ex_abort_all_multipart_uploads(container)
self.assertEqual(3, mock_abort.call_count)
def test_delete_object_not_found(self):
self.mock_response_klass.type = 'not_found'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
self.assertRaises(ObjectDoesNotExistError,
self.driver.delete_object,
obj=obj)
def test_delete_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
result = self.driver.delete_object(obj=obj)
self.assertTrue(result)
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
dims/cinder | cinder/tests/unit/fake_vmem_client.py | 23 | 1779 | # Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake VMEM REST client for testing drivers.
"""
import sys
import mock
# The following gymnastics to fake an exception class globally are done because
# we want to globally model and make available certain exceptions. If we do
# not do this, then the real driver's import will not see our fakes.
class NoMatchingObjectIdError(Exception):
pass
error = mock.Mock()
error.NoMatchingObjectIdError = NoMatchingObjectIdError
core = mock.Mock()
core.attach_mock(error, 'error')
vmemclient = mock.Mock()
vmemclient.__version__ = "unknown"
vmemclient.attach_mock(core, 'core')
sys.modules['vmemclient'] = vmemclient
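# Illustrative only (hypothetical usage, not part of this module): once the fake
# is registered above, any later ``import vmemclient`` in driver code resolves to
# the mock, e.g.:
#
#   import vmemclient
#   issubclass(vmemclient.core.error.NoMatchingObjectIdError, Exception)  # True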
mock_client_conf = [
'basic',
'basic.login',
'basic.get_node_values',
'basic.save_config',
'lun',
'lun.export_lun',
'lun.unexport_lun',
'snapshot',
'snapshot.export_lun_snapshot',
'snapshot.unexport_lun_snapshot',
'iscsi',
'iscsi.bind_ip_to_target',
'iscsi.create_iscsi_target',
'iscsi.delete_iscsi_target',
'igroup',
'client',
'client.get_client_info',
'client.create_client',
'client.delete_client',
'adapter',
'adapter.get_fc_info'
]
| apache-2.0 |
brianjimenez/lightdock | bin/simulation/lightdock_setup.py | 1 | 6309 | #!/usr/bin/env python
"""Before launching the LightDock simulation, a setup step is required.
This step parses the input PDB structures, calculates the minimum ellipsoid
containing each of them, calculates the swarms on the surface of the
receptor and populates each swarm with random coordinates for each glowworm's
optimization vector.
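The overall flow below: parse both input structures, translate them to the origin,
compute their minimum-volume ellipsoids, optionally compute ANM normal modes,
apply any residue restraints, generate the swarm starting positions over the
receptor surface, and write the simulation setup file.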
"""
import argparse
import numpy as np
from lightdock.util.parser import SetupCommandLineParser
from lightdock.prep.simulation import read_input_structure, save_lightdock_structure, \
calculate_starting_positions, prepare_results_environment, \
create_setup_file, calculate_anm, parse_restraints_file, \
get_restraints
from lightdock.constants import DEFAULT_LIGHTDOCK_PREFIX, DEFAULT_ELLIPSOID_DATA_EXTENSION, \
DEFAULT_NMODES_REC, DEFAULT_REC_NM_FILE, DEFAULT_NMODES_LIG, DEFAULT_LIG_NM_FILE
from lightdock.mathutil.ellipsoid import MinimumVolumeEllipsoid
from lightdock.util.logger import LoggingManager
from lightdock.error.lightdock_errors import LightDockError
log = LoggingManager.get_logger('lightdock_setup')
if __name__ == "__main__":
try:
parser = SetupCommandLineParser()
args = parser.args
# Read input structures
receptor = read_input_structure(args.receptor_pdb, args.noxt, args.noh, args.verbose_parser)
ligand = read_input_structure(args.ligand_pdb, args.noxt, args.noh, args.verbose_parser)
# Move structures to origin
rec_translation = receptor.move_to_origin()
lig_translation = ligand.move_to_origin()
# Calculate reference points for receptor
log.info("Calculating reference points for receptor %s..." % args.receptor_pdb)
rec_ellipsoid = MinimumVolumeEllipsoid(receptor.representative().coordinates)
ellipsoid_data_file = "%s%s" % (DEFAULT_LIGHTDOCK_PREFIX % receptor.structure_file_names[0],
DEFAULT_ELLIPSOID_DATA_EXTENSION)
np.save(ellipsoid_data_file, np.array([rec_ellipsoid.center.copy()]))
log.info("Done.")
# Calculate reference points for ligand
log.info("Calculating reference points for ligand %s..." % args.ligand_pdb)
lig_ellipsoid = MinimumVolumeEllipsoid(ligand.representative().coordinates)
ellipsoid_data_file = "%s%s" % (DEFAULT_LIGHTDOCK_PREFIX % ligand.structure_file_names[0],
DEFAULT_ELLIPSOID_DATA_EXTENSION)
np.save(ellipsoid_data_file, np.array([lig_ellipsoid.center.copy()]))
log.info("Done.")
# Save to file parsed structures
save_lightdock_structure(receptor)
save_lightdock_structure(ligand)
# Calculate and save ANM if required
if args.use_anm:
if args.anm_rec > 0:
log.info("Calculating ANM for receptor molecule...")
calculate_anm(receptor, args.anm_rec, DEFAULT_REC_NM_FILE)
if args.anm_lig > 0:
log.info("Calculating ANM for ligand molecule...")
calculate_anm(ligand, args.anm_lig, DEFAULT_LIG_NM_FILE)
# Parse restraints if any:
receptor_restraints = ligand_restraints = None
if args.restraints:
log.info("Reading restraints from %s" % args.restraints)
restraints = parse_restraints_file(args.restraints)
# Calculate number of restraints in order to check them
num_rec_active = len(restraints['receptor']['active'])
num_rec_passive = len(restraints['receptor']['passive'])
num_lig_active = len(restraints['ligand']['active'])
num_lig_passive = len(restraints['ligand']['passive'])
# Complain if not a single restraint has been defined, but restraints are enabled
if not num_rec_active and not num_rec_passive and not num_lig_active and not num_lig_passive:
raise LightDockError("Restraints file specified, but not a single restraint found")
# Check if restraints correspond with real residues
receptor_restraints = get_restraints(receptor, restraints['receptor'])
args.receptor_restraints = restraints['receptor']
ligand_restraints = get_restraints(ligand, restraints['ligand'])
args.ligand_restraints = restraints['ligand']
log.info("Number of receptor restraints is: %d (active), %d (passive)" % (num_rec_active, num_rec_passive))
log.info("Number of ligand restraints is: %d (active), %d (passive)" % (num_lig_active, num_lig_passive))
rec_restraints = None
try:
rec_restraints = receptor_restraints['active'] + receptor_restraints['passive']
except:
pass
lig_restraints = None
try:
lig_restraints = ligand_restraints['active'] + ligand_restraints['passive']
except:
pass
# Calculate surface points (swarm centers) over receptor structure
starting_points_files = calculate_starting_positions(receptor, ligand,
args.swarms, args.glowworms,
args.starting_points_seed,
rec_restraints, lig_restraints,
rec_translation, lig_translation,
args.ftdock_file, args.use_anm, args.anm_seed,
args.anm_rec, args.anm_lig, args.membrane)
if len(starting_points_files) != args.swarms:
args.swarms = len(starting_points_files)
log.info('Number of swarms is %d after applying restraints' % args.swarms)
# Create simulation folders
prepare_results_environment(args.swarms)
# Dump to a setup file the actual configuration
create_setup_file(args)
log.info("LightDock setup OK")
except LightDockError, error:
log.error("LightDock setup failed. Please see:")
log.error(error)
| gpl-3.0 |
m1ck/bookadoptions | django/conf/locale/pl/formats.py | 238 | 1288 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u' '
NUMBER_GROUPING = 3
| bsd-3-clause |
garbled1/ansible | lib/ansible/module_utils/pure.py | 71 | 3161 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Simon Dodsley <[email protected]>,2017
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
HAS_PURESTORAGE = True
try:
from purestorage import purestorage
except ImportError:
HAS_PURESTORAGE = False
from functools import wraps
from os import environ
from os import path
import platform
VERSION = 1.0
USER_AGENT_BASE = 'Ansible'
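# get_system() below resolves credentials from the module's fa_url / api_token
# arguments first, then falls back to the PUREFA_URL / PUREFA_API environment
# variables, and fails the module if neither is available.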
def get_system(module):
"""Return System Object or Fail"""
user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
'base': USER_AGENT_BASE,
'class': __name__,
'version': VERSION,
'platform': platform.platform()
}
array_name = module.params['fa_url']
api = module.params['api_token']
if array_name and api:
system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent)
elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'):
system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent)
else:
module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments")
try:
system.get()
except Exception:
module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
return system
def purefa_argument_spec():
"""Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
return dict(
fa_url=dict(),
api_token=dict(no_log=True),
)
| gpl-3.0 |
faun/django_test | build/lib/django/contrib/gis/geos/mutable_list.py | 405 | 10386 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
    Note that if _get_single_external and _get_single_internal return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
class _IndexError:
        The type of exception to be raised on an invalid index [Optional]
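    Example (illustrative only; a hypothetical minimal subclass):
        class SimpleList(ListMixin):
            def __init__(self, items=None):
                self._items = list(items or [])
                super(SimpleList, self).__init__()
            def __len__(self):
                return len(self._items)
            def _get_single_external(self, i):
                return self._items[i]
            def _set_list(self, length, items):
                self._items = list(items)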
"""
_minlength = 0
_maxlength = None
_IndexError = IndexError
### Python initialization and special list interface methods ###
def __init__(self, *args, **kwargs):
if not hasattr(self, '_get_single_internal'):
self._get_single_internal = self._get_single_external
if not hasattr(self, '_set_single'):
self._set_single = self._set_single_rebuild
self._assign_extended_slice = self._assign_extended_slice_rebuild
super(ListMixin, self).__init__(*args, **kwargs)
def __getitem__(self, index):
"Get the item(s) at the specified index/slice."
if isinstance(index, slice):
return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
else:
index = self._checkindex(index)
return self._get_single_external(index)
def __delitem__(self, index):
"Delete the item(s) at the specified index/slice."
if not isinstance(index, (int, long, slice)):
raise TypeError("%s is not a legal index" % index)
# calculate new length and dimensions
origLen = len(self)
if isinstance(index, (int, long)):
index = self._checkindex(index)
indexRange = [index]
else:
indexRange = range(*index.indices(origLen))
newLen = origLen - len(indexRange)
newItems = ( self._get_single_internal(i)
for i in xrange(origLen)
if i not in indexRange )
self._rebuild(newLen, newItems)
def __setitem__(self, index, val):
"Set the item(s) at the specified index/slice."
if isinstance(index, slice):
self._set_slice(index, val)
else:
index = self._checkindex(index)
self._check_allowed((val,))
self._set_single(index, val)
def __iter__(self):
"Iterate over the items in the list"
for i in xrange(len(self)):
yield self[i]
### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n-1):
self.extend(cache)
return self
def __cmp__(self, other):
'cmp'
slen = len(self)
for i in range(slen):
try:
c = cmp(self[i], other[i])
except IndexError:
# must be other is shorter
return 1
else:
# elements not equal
if c: return c
return cmp(slen, len(other))
### Public list interface Methods ###
## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i: count += 1
return count
def index(self, val):
"Standard list index method"
for i in xrange(0, len(self)):
if self[i] == val: return i
raise ValueError('%s not found in object' % str(val))
## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, (int, long)):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=cmp, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v),v) for v in self]
temp.sort(cmp=cmp, key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
temp.sort(cmp=cmp, reverse=reverse)
self[:] = temp
### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
def _set_single_rebuild(self, index, value):
self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise self._IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except TypeError:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
'Assign an extended slice by rebuilding entire list'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
# we're not changing the length of the sequence
newLen = len(self)
newVals = dict(zip(indexList, valueList))
def newItems():
for i in xrange(newLen):
if i in newVals:
yield newVals[i]
else:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
def _assign_simple_slice(self, start, stop, valueList):
'Assign a simple slice; Can assign slice of any length'
origLen = len(self)
stop = max(start, stop)
newLen = origLen - stop + start + len(valueList)
def newItems():
for i in xrange(origLen + 1):
if i == start:
for val in valueList:
yield val
if i < origLen:
if i < start or i >= stop:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
| bsd-3-clause |
thedrow/django | tests/gis_tests/geo3d/tests.py | 199 | 17484 | from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.db.models import Extent3D, Union
from django.contrib.gis.db.models.functions import (
AsGeoJSON, AsKML, Length, Perimeter, Scale, Translate,
)
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from .models import (
City3D, Interstate2D, Interstate3D, InterstateProj2D, InterstateProj3D,
MultiPoint3D, Point2D, Point3D, Polygon2D, Polygon3D,
)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its altitude (Z value).
city_dict = {name: coords for name, coords in city_data}
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
'-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
'-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
'-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
'-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
'-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
'-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
'-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
'-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
'-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
'-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
(11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
'942051.75 4208366.38,941527.97 4225693.20))',
(21.71, 13.21, 9.12, 16.40, 21.71)
)
class Geo3DLoadingHelper(object):
def _load_interstate_data(self):
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
def _load_city_data(self):
for name, pnt_data in city_data:
City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))
def _load_polygon_data(self):
bbox_wkt, bbox_z = bbox_data
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_storage")
class Geo3DTest(Geo3DLoadingHelper, TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
"""
def test_3d_hasz(self):
"""
Make sure data is 3D and has expected Z values -- shouldn't change
because of coordinate system.
"""
self._load_interstate_data()
for name, line, exp_z in interstate_data:
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
self._load_city_data()
for name, pnt_data in city_data:
city = City3D.objects.get(name=name)
z = pnt_data[2]
self.assertTrue(city.point.hasz)
self.assertEqual(z, city.point.z)
def test_3d_polygons(self):
"""
Test the creation of polygon 3D models.
"""
self._load_polygon_data()
p3d = Polygon3D.objects.get(name='3D BBox')
self.assertTrue(p3d.poly.hasz)
self.assertIsInstance(p3d.poly, Polygon)
self.assertEqual(p3d.poly.srid, 32140)
def test_3d_layermapping(self):
"""
Testing LayerMapping on 3D models.
"""
point_mapping = {'point': 'POINT'}
mpoint_mapping = {'mpoint': 'MULTIPOINT'}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
self.assertRaises(LayerMapError, LayerMapping,
Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
@ignore_warnings(category=RemovedInDjango20Warning)
def test_kml(self):
"""
Test GeoQuerySet.kml() with Z values.
"""
self._load_city_data()
h = City3D.objects.kml(precision=6).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_geojson(self):
"""
Test GeoQuerySet.geojson() with Z values.
"""
self._load_city_data()
h = City3D.objects.geojson(precision=6).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
@skipUnlessDBFeature("supports_3d_functions")
def test_union(self):
"""
Testing the Union aggregate of 3D models.
"""
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
self._load_city_data()
ref_ewkt = (
'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,'
'-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,'
'-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
)
ref_union = GEOSGeometry(ref_ewkt)
union = City3D.objects.aggregate(Union('point'))['point__union']
self.assertTrue(union.hasz)
# Ordering of points in the resulting geometry may vary between implementations
self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})
@skipUnlessDBFeature("supports_3d_functions")
@ignore_warnings(category=RemovedInDjango110Warning)
def test_extent(self):
"""
Testing the Extent3D aggregate for 3D models.
"""
self._load_city_data()
# `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
extent2 = City3D.objects.extent3d()
def check_extent3d(extent3d, tol=6):
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, tol)
for e3d in [extent1, extent2]:
check_extent3d(e3d)
self.assertIsNone(City3D.objects.none().extent3d())
self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d'])
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnlessDBFeature("supports_3d_functions")
def test_perimeter(self):
"""
Testing GeoQuerySet.perimeter() on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
self.assertAlmostEqual(ref_perim_2d,
Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
tol)
self.assertAlmostEqual(ref_perim_3d,
Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
tol)
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnlessDBFeature("supports_3d_functions")
def test_length(self):
"""
Testing GeoQuerySet.length() on 3D fields.
"""
        # ST_Length_Spheroid is Z-aware, and thus does not need to use
# a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
self.assertAlmostEqual(ref_length_2d,
Interstate2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
Interstate3D.objects.length().get(name='I-45').length.m,
tol)
        # Making sure `ST_Length3D` is used for a projected
# and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
self.assertAlmostEqual(ref_length_2d,
InterstateProj2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
InterstateProj3D.objects.length().get(name='I-45').length.m,
tol)
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnlessDBFeature("supports_3d_functions")
def test_scale(self):
"""
Testing GeoQuerySet.scale() on Z values.
"""
self._load_city_data()
# Mapping of City name to reference Z values.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.scale(1.0, 1.0, zscale):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnlessDBFeature("supports_3d_functions")
def test_translate(self):
"""
Testing GeoQuerySet.translate() on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.translate(0, 0, ztrans):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_functions")
class Geo3DFunctionsTests(Geo3DLoadingHelper, TestCase):
def test_kml(self):
"""
Test KML() function with Z values.
"""
self._load_city_data()
h = City3D.objects.annotate(kml=AsKML('point', precision=6)).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
def test_geojson(self):
"""
Test GeoJSON() function with Z values.
"""
self._load_city_data()
h = City3D.objects.annotate(geojson=AsGeoJSON('point', precision=6)).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
def test_perimeter(self):
"""
Testing Perimeter() function on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
poly2d = Polygon2D.objects.annotate(perimeter=Perimeter('poly')).get(name='2D BBox')
self.assertAlmostEqual(ref_perim_2d, poly2d.perimeter.m, tol)
poly3d = Polygon3D.objects.annotate(perimeter=Perimeter('poly')).get(name='3D BBox')
self.assertAlmostEqual(ref_perim_3d, poly3d.perimeter.m, tol)
def test_length(self):
"""
Testing Length() function on 3D fields.
"""
        # ST_Length_Spheroid is Z-aware, and thus does not need to use
# a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
inter2d = Interstate2D.objects.annotate(length=Length('line')).get(name='I-45')
self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
inter3d = Interstate3D.objects.annotate(length=Length('line')).get(name='I-45')
self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)
        # Making sure `ST_Length3D` is used for a projected
# and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
inter2d = InterstateProj2D.objects.annotate(length=Length('line')).get(name='I-45')
self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
inter3d = InterstateProj3D.objects.annotate(length=Length('line')).get(name='I-45')
self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)
def test_scale(self):
"""
Testing Scale() function on Z values.
"""
self._load_city_data()
# Mapping of City name to reference Z values.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.annotate(scale=Scale('point', 1.0, 1.0, zscale)):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
def test_translate(self):
"""
Testing Translate() function on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.annotate(translate=Translate('point', 0, 0, ztrans)):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
| bsd-3-clause |
whereismyjetpack/ansible | lib/ansible/plugins/connection/zone.py | 45 | 7978 | # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# and chroot.py (c) 2013, Maykel Moya <[email protected]>
# and jail.py (c) 2013, Michael Scherer <[email protected]>
# (c) 2015, Dagobert Michelsen <[email protected]>
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import subprocess
import traceback
from ansible import constants as C
from ansible.compat.six.moves import shlex_quote
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.module_utils._text import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' Local zone based connections '''
transport = 'zone'
has_pipelining = True
become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.zone = self._play_context.remote_addr
if os.geteuid() != 0:
raise AnsibleError("zone connection requires running as root")
self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm'))
self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
if self.zone not in self.list_zones():
raise AnsibleError("incorrect zone name %s" % self.zone)
@staticmethod
def _search_executable(executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
            raise AnsibleError("%s command not found in PATH" % executable)
return cmd
def list_zones(self):
process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
zones = []
for l in process.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
s = l.split(':')
if s[1] != 'global':
zones.append(s[1])
return zones
def get_zone_path(self):
#solaris10vm# zoneadm -z cswbuild list -p
#-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#stdout, stderr = p.communicate()
path = process.stdout.readlines()[0].split(':')[3]
return path + '/root'
def _connect(self):
''' connect to the zone; nothing to do here '''
super(Connection, self)._connect()
if not self._connected:
display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
''' run a command on the zone. This is only needed for implementing
            put_file() and get_file() so that we don't have to read the whole file
            into memory.
            Compared to exec_command() it loses some niceties, like being able to
return the process's exit code immediately.
'''
# Note: zlogin invokes a shell (just like ssh does) so we do not pass
# this through /bin/sh -c here. Instead it goes through the shell
# that zlogin selects.
local_cmd = [self.zlogin_cmd, self.zone, cmd]
local_cmd = map(to_bytes, local_cmd)
display.vvv("EXEC %s" % (local_cmd), host=self.zone)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the zone '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
            exist in any given zone. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
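        # Illustrative behaviour (assuming a POSIX os.path):
        #   'tmp/foo'          -> '/tmp/foo'
        #   '/var//log/../tmp' -> '/var/tmp'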
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
def put_file(self, in_path, out_path):
''' transfer a file from local to zone '''
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
with open(in_path, 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
except OSError:
                    raise AnsibleError("zone connection requires dd command in the zone")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from zone to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
except OSError:
raise AnsibleError("zone connection requires dd command in the zone")
with open(out_path, 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
| gpl-3.0 |
StanfordBioinformatics/loom | server/loomengine_server/core/settings.py | 2 | 14306 | # Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
import datetime
import json
import logging
import os
import random
import socket
import sys
import tempfile
import warnings
from django.core.exceptions import ValidationError
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
def to_boolean(value):
if value in [None, '', False]:
return False
if value == True:
return True
if str(value).lower() == 'false':
return False
if str(value).lower() == 'true':
return True
    raise Exception("Invalid value %s. Expected True or False" % value)
def to_float(value):
if value is None:
return None
if value == '':
return None
return float(value)
def to_int(value):
if value is None:
return None
if value == '':
return None
return int(value)
def to_list(value):
if value is None:
return []
value = value.strip(' "\'')
list_str = value.lstrip('[').rstrip(']')
if list_str == '':
return []
list = list_str.split(',')
return [item.strip(' "\'') for item in list]
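# Illustrative behaviour of the helpers above (not exhaustive):
#   to_boolean('True') -> True, to_boolean('') -> False, to_boolean(None) -> False
#   to_int('') -> None, to_int('42') -> 42
#   to_list('[a, "b"]') -> ['a', 'b'], to_list(None) -> []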
SETTINGS_DIR = os.path.dirname(__file__)
BASE_DIR = (os.path.join(SETTINGS_DIR, '..'))
sys.path.append(BASE_DIR)
PORTAL_ROOT = os.path.join(BASE_DIR, '..', '..', 'portal')
# Security settings
DEBUG = to_boolean(os.getenv('LOOM_DEBUG'))
secret_file = os.path.join(os.path.dirname(__file__),'secret.txt')
if os.path.exists(secret_file):
with open(secret_file) as f:
SECRET_KEY = f.read()
else:
SECRET_KEY = os.getenv(
'LOOM_SERVER_SECRET_KEY',
''.join([random.SystemRandom()\
.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
for i in range(50)]))
with open(secret_file, 'w') as f:
f.write(SECRET_KEY)
CORS_ORIGIN_ALLOW_ALL = to_boolean(
os.getenv('LOOM_SERVER_CORS_ORIGIN_ALLOW_ALL', 'False'))
CORS_ORIGIN_WHITELIST = to_list(os.getenv('LOOM_SERVER_CORS_ORIGIN_WHITELIST', '[]'))
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
ALLOWED_HOSTS = to_list(os.getenv('LOOM_SERVER_ALLOWED_HOSTS', '[*]'))
LOGIN_REQUIRED = to_boolean(os.getenv('LOOM_LOGIN_REQUIRED', 'True'))
LOG_LEVEL = os.getenv('LOG_LEVEL', 'WARNING').upper()
STORAGE_TYPE = os.getenv('LOOM_STORAGE_TYPE', 'local').lower()
STATIC_ROOT = os.getenv('LOOM_SERVER_STATIC_ROOT', '/var/www/loom/static')
SERVER_NAME = os.getenv('LOOM_SERVER_NAME', 'loom') # used in attempt container names
SERVER_URL_FOR_WORKER = os.getenv('SERVER_URL_FOR_WORKER', 'http://127.0.0.1:8000')
SERVER_URL_FOR_CLIENT = os.getenv('SERVER_URL_FOR_CLIENT', 'http://127.0.0.1:8000')
# GCP settings
GCE_EMAIL = os.getenv('GCE_EMAIL')
GCE_PROJECT = os.getenv('GCE_PROJECT', '')
GCE_PEM_FILE_PATH = os.getenv('GCE_PEM_FILE_PATH')
GOOGLE_STORAGE_BUCKET = os.getenv('LOOM_GOOGLE_STORAGE_BUCKET', '')
SETTINGS_HOME = os.getenv('LOOM_SETTINGS_HOME', os.path.expanduser('~/.loom'))
PLAYBOOK_PATH = os.path.join(SETTINGS_HOME, os.getenv('LOOM_PLAYBOOK_DIR', 'playbooks'))
RUN_TASK_ATTEMPT_PLAYBOOK = os.getenv('LOOM_RUN_TASK_ATTEMPT_PLAYBOOK')
CLEANUP_TASK_ATTEMPT_PLAYBOOK = os.getenv('LOOM_CLEANUP_TASK_ATTEMPT_PLAYBOOK')
def _add_url_prefix(path):
if STORAGE_TYPE.lower() == 'local':
return 'file://' + path
elif STORAGE_TYPE.lower() == 'google_storage':
return 'gs://' + GOOGLE_STORAGE_BUCKET + path
else:
raise ValidationError(
'Couldn\'t recognize value for setting STORAGE_TYPE="%s"'\
% STORAGE_TYPE)
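# For example (illustrative): with STORAGE_TYPE 'local', '/loomdata' maps to
# 'file:///loomdata'; with 'google_storage' and bucket 'my-bucket' it maps to
# 'gs://my-bucket/loomdata'.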
STORAGE_ROOT = os.path.expanduser(os.getenv('LOOM_STORAGE_ROOT', '~/loomdata'))
INTERNAL_STORAGE_ROOT = os.path.expanduser(
os.getenv('LOOM_INTERNAL_STORAGE_ROOT', STORAGE_ROOT))
STORAGE_ROOT_WITH_PREFIX =_add_url_prefix(STORAGE_ROOT)
INTERNAL_STORAGE_ROOT_WITH_PREFIX =_add_url_prefix(INTERNAL_STORAGE_ROOT)
DISABLE_DELETE = to_boolean(os.getenv('LOOM_DISABLE_DELETE', 'False'))
FORCE_RERUN = to_boolean(os.getenv('LOOM_FORCE_RERUN', 'False'))
TASKRUNNER_HEARTBEAT_INTERVAL_SECONDS = float(os.getenv('LOOM_TASKRUNNER_HEARTBEAT_INTERVAL_SECONDS', '60'))
TASKRUNNER_HEARTBEAT_TIMEOUT_SECONDS = float(os.getenv('LOOM_TASKRUNNER_HEARTBEAT_TIMEOUT_SECONDS', TASKRUNNER_HEARTBEAT_INTERVAL_SECONDS*2.5))
SYSTEM_CHECK_INTERVAL_MINUTES = float(os.getenv('LOOM_SYSTEM_CHECK_INTERVAL_MINUTES', '15'))
PRESERVE_ON_FAILURE = to_boolean(os.getenv('LOOM_PRESERVE_ON_FAILURE', 'False'))
PRESERVE_ALL = to_boolean(os.getenv('LOOM_PRESERVE_ALL', 'False'))
TASK_TIMEOUT_HOURS = float(os.getenv(
'LOOM_TASK_TIMEOUT_HOURS', '24.0'))
MAXIMUM_RETRIES_FOR_ANALYSIS_FAILURE = int(os.getenv(
'LOOM_MAXIMUM_TASK_RETRIES_FOR_ANALYSIS_FAILURE', '1'))
MAXIMUM_RETRIES_FOR_SYSTEM_FAILURE = int(os.getenv(
'LOOM_MAXIMUM_TASK_RETRIES_FOR_SYSTEM_FAILURE', '10'))
MAXIMUM_RETRIES_FOR_TIMEOUT_FAILURE = int(os.getenv(
'LOOM_MAXIMUM_TASK_RETRIES_FOR_TIMEOUT_FAILURE', '0'))
MAXIMUM_TREE_DEPTH = int(os.getenv('LOOM_MAXIMUM_TREE_DEPTH', '10'))
DEFAULT_DOCKER_REGISTRY = os.getenv('LOOM_DEFAULT_DOCKER_REGISTRY', '')
# Database settings
# Any defaults must match defaults in playbook
MYSQL_HOST = os.getenv('LOOM_MYSQL_HOST')
MYSQL_USER = os.getenv('LOOM_MYSQL_USER', 'loom')
MYSQL_PASSWORD = os.getenv('LOOM_MYSQL_PASSWORD', 'loompass')
MYSQL_DATABASE = os.getenv('LOOM_MYSQL_DATABASE', 'loomdb')
MYSQL_PORT = int(os.getenv('LOOM_MYSQL_PORT', 3306))
MYSQL_SSL_CA_CERT_PATH = os.getenv('LOOM_MYSQL_SSL_CA_CERT_PATH')
MYSQL_SSL_CLIENT_CERT_PATH = os.getenv('LOOM_MYSQL_SSL_CLIENT_CERT_PATH')
MYSQL_SSL_CLIENT_KEY_PATH = os.getenv('LOOM_MYSQL_SSL_CLIENT_KEY_PATH')
# Email settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.getenv('LOOM_EMAIL_HOST', None)
EMAIL_PORT = to_int(os.getenv('LOOM_EMAIL_PORT', 587))
EMAIL_HOST_USER = os.getenv('LOOM_EMAIL_HOST_USER', None)
EMAIL_HOST_PASSWORD = os.getenv('LOOM_EMAIL_HOST_PASSWORD', None)
EMAIL_USE_TLS = to_boolean(os.getenv('LOOM_EMAIL_USE_TLS', True))
EMAIL_USE_SSL = to_boolean(os.getenv('LOOM_EMAIL_USE_SSL', True))
EMAIL_TIMEOUT = to_float(os.getenv('LOOM_EMAIL_TIMEOUT', 0.0))
EMAIL_SSL_KEYFILE = os.getenv('LOOM_EMAIL_SSL_KEYFILE', None)
EMAIL_SSL_CERTFILE = os.getenv('LOOM_EMAIL_SSL_CERTFILE', None)
DEFAULT_FROM_EMAIL = os.getenv('LOOM_DEFAULT_FROM_EMAIL', EMAIL_HOST_USER)
NOTIFICATION_ADDRESSES = to_list(os.getenv('LOOM_NOTIFICATION_ADDRESSES', '[]'))
NOTIFICATION_HTTPS_VERIFY_CERTIFICATE = to_boolean(os.getenv('LOOM_NOTIFICATION_HTTPS_VERIFY_CERTIFICATE', True))
# Message broker settings
LOOM_RABBITMQ_PASSWORD = os.getenv('LOOM_RABBITMQ_PASSWORD', 'guest')
LOOM_RABBITMQ_USER = os.getenv('LOOM_RABBITMQ_USER', 'guest')
LOOM_RABBITMQ_VHOST = os.getenv('LOOM_RABBITMQ_VHOST', '/')
LOOM_RABBITMQ_HOST = os.getenv('LOOM_RABBITMQ_HOST', 'rabbitmq')
LOOM_RABBITMQ_PORT = os.getenv('LOOM_RABBITMQ_PORT', '5672')
def _get_ansible_inventory():
ansible_inventory = os.getenv('LOOM_ANSIBLE_INVENTORY', 'localhost,')
if ',' not in ansible_inventory:
ansible_inventory = os.path.join(
PLAYBOOK_PATH,
os.getenv('LOOM_ANSIBLE_INVENTORY'))
return ansible_inventory
ANSIBLE_INVENTORY = _get_ansible_inventory()
LOOM_SSH_PRIVATE_KEY_PATH = os.getenv('LOOM_SSH_PRIVATE_KEY_PATH')
# For testing only
TEST_DISABLE_ASYNC_DELAY = to_boolean(os.getenv('TEST_DISABLE_ASYNC_DELAY', False))
TEST_NO_CREATE_TASK = to_boolean(os.getenv('TEST_NO_CREATE_TASK', False))
TEST_NO_RUN_TASK_ATTEMPT = to_boolean(os.getenv('TEST_NO_RUN_TASK_ATTEMPT', False))
TEST_NO_TASK_ATTEMPT_CLEANUP = to_boolean(os.getenv(
'TEST_NO_TASK_ATTEMPT_CLEANUP', False))
TEST_NO_PUSH_INPUTS= to_boolean(os.getenv('TEST_NO_PUSH_INPUTS', False))
# Fixed settings
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_TZ = True
CELERY_ALWAYS_EAGER = True
APPEND_SLASH = True
ROOT_URLCONF = 'loomengine_server.core.urls'
# Celery
CELERY_RESULT_BACKEND = 'django-cache'
CELERY_BROKER_URL = 'amqp://%s:%s@%s:%s/%s' \
% (LOOM_RABBITMQ_USER, LOOM_RABBITMQ_PASSWORD,
LOOM_RABBITMQ_HOST, LOOM_RABBITMQ_PORT,
LOOM_RABBITMQ_VHOST)
CELERY_BROKER_POOL_LIMIT = 50
CELERYD_TASK_SOFT_TIME_LIMIT = 60
LOGIN_REDIRECT_URL = '/'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django_extensions',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'django_celery_results',
'api',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
if LOGIN_REQUIRED:
drf_permission_classes = ('rest_framework.permissions.IsAuthenticated',)
else:
drf_permission_classes = ('rest_framework.permissions.AllowAny',)
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': drf_permission_classes,
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'loomengine_server.core.wsgi.application'
def _get_sqlite_databases():
return {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'loomdb.sqlite3'),
}
}
def _get_mysql_databases():
if not MYSQL_USER:
raise Exception(
"LOOM_MYSQL_USER is a required setting if LOOM_MYSQL_HOST is set")
if not MYSQL_DATABASE:
raise Exception(
"LOOM_MYSQL_DATABASE is a required setting if LOOM_MYSQL_HOST is set")
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': MYSQL_HOST,
'NAME': MYSQL_DATABASE,
'USER': MYSQL_USER,
'PORT': MYSQL_PORT,
}
}
if MYSQL_PASSWORD:
DATABASES['default'].update({
'PASSWORD': MYSQL_PASSWORD
})
if MYSQL_SSL_CA_CERT_PATH \
or MYSQL_SSL_CLIENT_CERT_PATH \
or MYSQL_SSL_CLIENT_KEY_PATH:
if not (MYSQL_SSL_CA_CERT_PATH \
and MYSQL_SSL_CLIENT_CERT_PATH \
and MYSQL_SSL_CLIENT_KEY_PATH):
raise Exception(
'One or more required values missing: '\
'LOOM_MYSQL_SSL_CA_CERT_PATH="%s", '\
'LOOM_MYSQL_SSL_CLIENT_CERT_PATH="%s", '\
'LOOM_MYSQL_SSL_CLIENT_KEY_PATH="%s"' % (
MYSQL_SSL_CA_CERT_PATH,
MYSQL_SSL_CLIENT_CERT_PATH,
MYSQL_SSL_CLIENT_KEY_PATH))
else:
DATABASES['default'].update({
'OPTIONS': {
'ssl': {
'ca': MYSQL_SSL_CA_CERT_PATH,
'cert': MYSQL_SSL_CLIENT_CERT_PATH,
'key': MYSQL_SSL_CLIENT_KEY_PATH
}
}
})
return DATABASES
# Database
if MYSQL_HOST:
DATABASES = _get_mysql_databases()
else:
DATABASES = _get_sqlite_databases()
# Logging
if len(sys.argv) > 1 and sys.argv[1] == 'test':
DISABLE_LOGGING = True
else:
DISABLE_LOGGING = False
if DISABLE_LOGGING:
LOGGING = {}
else:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(levelname)s [%(asctime)s] %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
}
},
'loggers': {
'django': {
'handlers': ['console'],
'level': LOG_LEVEL,
},
'loomengine': {
'handlers': ['console'],
'level': LOG_LEVEL,
},
'api': {
'handlers': ['console'],
'level': LOG_LEVEL,
},
},
}
STATIC_URL = '/%s/' % os.path.basename(STATIC_ROOT)
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# This is needed for nginx reverse proxy to work
INTERNAL_IPS = ["127.0.0.1",]
if DEBUG or (len(sys.argv) > 1 and sys.argv[1] == 'collectstatic'):
INSTALLED_APPS.append('debug_toolbar')
MIDDLEWARE_CLASSES.append('debug_toolbar.middleware.DebugToolbarMiddleware')
def custom_show_toolbar(request):
return True
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
'MEDIA_URL': '/__debug__/m/',
'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': '232871b2',
'TIMEOUT': 0,
}
}
| agpl-3.0 |
senuido/stash-scanner | lib/ModsHelper.py | 1 | 3577 | import json
import re
from lib.ModFilter import ModFilterType
from lib.ModFilterGroup import PSEUDO_MODS
from lib.Utility import AppException
class ModsHelper:
MODS_FNAME = 'res\\mods.json'
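    # Captures a leading "(tag)" prefix and the remaining text, e.g.
    # "(pseudo) +#% total Elemental Resistance" -> ("pseudo", "+#% total Elemental Resistance")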
    MOD_TEXT_REGEX = re.compile(r'\(([^()]+)\)\s+(.*)')
def __init__(self):
self.mod_list = None
self.mod_set = None
def init(self):
try:
self.load()
except Exception as e:
raise AppException('Failed to load item mods information.\n{}'.format(e))
def load(self):
mod_set = set()
mod_list = []
cat_ordered = ['[pseudo] mods', '[total] mods', 'explicit', 'crafted', 'implicit', 'enchantments',
'unique explicit', 'map mods', 'prophecies', 'leaguestone']
cat_ignore = []
with open(self.MODS_FNAME) as f:
data = json.load(f)
for cat in cat_ordered:
if cat in cat_ignore:
continue
cat_mods = []
for mod in data['mods'][cat]:
mod_type, text = self.textToMod(mod)
if mod_type == ModFilterType.Pseudo and text not in PSEUDO_MODS:
                        # convert mod to a non-pseudo if it has another tag
inner_tag, inner_text = self._getTagText(text)
if inner_tag is None:
continue
mod = text
cat_mods.append(mod)
for mod in sorted(cat_mods):
mod_set.add(mod)
if len(mod_set) > len(mod_list):
mod_list.append(mod)
self.mod_list = mod_list
self.mod_set = mod_set
def modToText(self, mod_type, expr):
if mod_type == ModFilterType.Pseudo:
pat = expr
else:
pat = expr.replace('([0-9]+)', '#')
pat = pat.replace('\+', '+') # un-escape characters
if pat.endswith('$'):
pat = pat[:-1]
if mod_type == ModFilterType.Explicit:
return pat
return '({}) {}'.format(mod_type.value, pat)
def isCustom(self, mod_type, expr):
return self.modToText(mod_type, expr) not in self.mod_set
def isPredefined(self, mod_text):
return mod_text in self.mod_set
def textToMod(self, mod_text):
tag, text = self._getTagText(mod_text)
if tag is None:
mod_type = ModFilterType.Explicit
else:
mod_type = ModFilterType(tag)
expr = text
if expr and mod_type != ModFilterType.Pseudo:
expr = expr.replace('+', '\+') # escape characters
expr = expr.replace('#', '([0-9]+)') + '$'
return mod_type, expr
def _getTagText(self, text):
match = self.MOD_TEXT_REGEX.match(text)
if match:
return match.groups()
return None, text
def stripTags(self, mod_text):
while True:
tag, mod_text = self._getTagText(mod_text)
if tag is None:
return mod_text
def modToParam(self, mod_type, expr):
text = self.modToText(mod_type, expr)
        # Prevent custom mod conversion: searching for sortable mods works in
        # most cases, but custom mods will break the search.
if not self.isPredefined(text):
raise ValueError('Cannot convert custom mod {} to param.'.format(text))
if mod_type == ModFilterType.Total:
text = '({}) {}'.format(ModFilterType.Pseudo.name.lower(), text)
return text
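# Usage sketch (illustrative example): textToMod() and modToText() round-trip a
# mod line without touching res\mods.json; only init()/isPredefined()/isCustom()
# need that file. The mod text below is just an example line.
if __name__ == '__main__':
    _helper = ModsHelper()
    _mod_type, _expr = _helper.textToMod('+# to maximum Life')
    # _mod_type is ModFilterType.Explicit; _expr is the escaped pattern '\+([0-9]+) to maximum Life$'
    print(_helper.modToText(_mod_type, _expr))  # prints '+# to maximum Life'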
mod_helper = ModsHelper() | gpl-3.0 |
jashworth-isb/cmonkey-python | test/util_test.py | 1 | 11520 | """util_test.py - test classes for util module
This file is part of cMonkey Python. Please see README and LICENSE for
more information and licensing details.
"""
import unittest
import util
import operator
import numpy as np
class DelimitedFileTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for DelimitedFile"""
def test_read_with_tabs(self):
"""Reads a tab delimited file"""
dfile = util.DelimitedFile.read("testdata/simple.tsv")
lines = dfile.lines()
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
self.assertIsNone(dfile.header())
def test_read_with_tabs_and_header(self):
"""Reads a tab delimited file with a header"""
dfile = util.DelimitedFile.read("testdata/simple.tsv", has_header=True)
lines = dfile.lines()
self.assertEquals(1, len(lines))
self.assertEquals(["value11", "value12"], dfile.header())
def test_read_with_semicolon_header_and_comments(self):
"""Reads a semicolon delimited file with a header and comments"""
dfile = util.DelimitedFile.read("testdata/withcomments.ssv", sep=';',
has_header=True, comment='#')
lines = dfile.lines()
self.assertEquals(2, len(lines))
self.assertEquals(["header1", "header2"], dfile.header())
def test_read_with_quotes(self):
"""Reads a semicolon delimited file with quotes"""
dfile = util.DelimitedFile.read("testdata/withquotes.ssv", sep=';',
has_header=False, comment='#', quote='"')
lines = dfile.lines()
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
def test_read_with_empty_lines(self):
"""Reads a semicolon delimited file containing emptylines"""
dfile = util.DelimitedFile.read("testdata/withemptylines.ssv", sep=';',
has_header=True, comment='#', quote='"')
lines = dfile.lines()
self.assertEquals(["header1", "header2"], dfile.header())
self.assertEquals(2, len(lines))
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
def test_create_from_text(self):
"""Reads a tab delimited file from a text"""
dfile = util.DelimitedFile.create_from_text(
"value11\tvalue12\nvalue21\tvalue22")
lines = dfile.lines()
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
self.assertIsNone(dfile.header())
def test_create_from_text_empty_line_at_end(self):
"""Reads a tab delimited file from a text"""
dfile = util.DelimitedFile.create_from_text(
"value11\tvalue12\nvalue21\tvalue22\n")
lines = dfile.lines()
self.assertEquals(2, len(lines))
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
self.assertIsNone(dfile.header())
class LevenshteinDistanceTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for levenshtein_distance"""
def test_kitten_sitting(self):
"""compare kitten with sitting"""
self.assertEquals(3, util.levenshtein_distance('sitting', 'kitten'))
def test_saturday_sunday(self):
"""compare Saturday with Sunday"""
self.assertEquals(3, util.levenshtein_distance('Sunday', 'Saturday'))
RSAT_LIST_FILE_PATH = "testdata/RSAT_genomes_listing.txt"
class BestMatchingLinksTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for best_matching_links"""
def test_best_rsat_matches(self):
"""test the best_matching_links function"""
with open(RSAT_LIST_FILE_PATH) as inputfile:
html = inputfile.read()
matches = util.best_matching_links('Halobacterium', html)
self.assertEquals("Halobacterium_sp/", matches[0])
class UtilsTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for utility functions"""
def test_quantile(self):
"""tests the quantile function"""
data = [1, 2, 3, 4, 5]
self.assertEquals(1, util.quantile(data, 0))
self.assertEquals(1.8, util.quantile(data, 0.2))
self.assertEquals(2, util.quantile(data, 0.25))
self.assertEquals(3, util.quantile(data, 0.5))
self.assertEquals(4, util.quantile(data, 0.75))
self.assertEquals(5, util.quantile(data, 1))
def test_r_stddev(self):
"""tests the standard deviation function"""
self.assertEquals(0.1, util.r_stddev([0.1, 0.2, 0.3]))
def test_r_stddev_with_nan(self):
"""tests the standard deviation function"""
self.assertEquals(0.1, util.r_stddev([0.1, 0.2, 0.3, np.nan]))
def test_r_variance_columns(self):
"""tests the column variance function"""
matrix = [[0.0010, 0.1234, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, 0.00312321]]
result = util.r_variance_columns(matrix)
self.assertAlmostEqual(0.1157139233, result[0])
self.assertAlmostEqual(0.1482354433, result[1])
self.assertAlmostEqual(0.8356519353, result[2])
self.assertAlmostEqual(0.0007737516, result[3])
def test_r_variance_columns_with_nans(self):
"""tests the column variance function"""
matrix = [[np.nan, 0.1234, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, np.nan]]
result = util.r_variance_columns(matrix)
self.assertAlmostEqual(0.1661836837, result[0])
self.assertAlmostEqual(0.1482354433, result[1])
self.assertAlmostEqual(0.8356519353, result[2])
self.assertAlmostEqual(0.0011550937, result[3])
def test_column_means(self):
"""tests the column_means() function"""
matrix = [[0.0010, 0.1234, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, 0.00312321]]
result = util.column_means(matrix)
self.assertAlmostEqual(-0.08003333, result[0])
self.assertAlmostEqual(0.15483333, result[1])
self.assertAlmostEqual(0.00171, result[2])
self.assertAlmostEqual(0.00534107, result[3])
def test_column_means_with_nans(self):
"""tests the column_means() function, containing NaNs"""
matrix = [[0.0010, 0.1234, 0.21370, np.nan],
[0.2123, np.nan, -0.99980, -0.0213],
[np.nan, 0.5546, 0.79123, 0.00312321]]
result = util.column_means(matrix)
self.assertAlmostEqual(0.10664999, result[0])
self.assertAlmostEqual(0.33899999, result[1])
self.assertAlmostEqual(0.00171, result[2])
self.assertAlmostEqual(-0.00908839499, result[3])
def test_row_means(self):
"""tests the row_means() function"""
matrix = [[0.0010, 0.1234, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, 0.00312321]]
result = util.row_means(matrix)
self.assertAlmostEqual(0.0930750, result[0])
self.assertAlmostEqual(-0.255575, result[1])
self.assertAlmostEqual(0.2238883025, result[2])
def test_row_means_with_nans(self):
"""tests the row_means() function"""
matrix = [[0.0010, np.nan, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, np.nan]]
result = util.row_means(matrix)
self.assertAlmostEqual(0.08296666, result[0])
self.assertAlmostEqual(-0.255575, result[1])
self.assertAlmostEqual(0.297476666, result[2])
def test_trim_mean_nonmedian(self):
self.assertAlmostEqual(
40.625,
util.trim_mean([2, 4, 6, 7, 11, 21, 81, 90, 105, 121], 0.1))
def test_trim_mean_median(self):
self.assertAlmostEqual(3.5, util.trim_mean([.1, .2, 3, 4, 5, 6], 0.5))
def test_trim_mean_no_values(self):
self.assertEqual(0, util.trim_mean([], 0.05))
def test_trim_mean_real(self):
values = [0.0, 0.0, -8.7520618359684352, -8.7520618359684352, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self.assertAlmostEqual(-1.4586770, util.trim_mean(values, 0.05))
def test_mean_with_nans(self):
"""tests the mean() function"""
array = np.array([2.0, 3.0, np.nan, 1.0])
result = util.mean(array)
self.assertAlmostEqual(2.0, result)
def test_density(self):
kvalues = [3.4268700450682301, 3.3655160468930152, -8.0654569044842539,
2.0762815314005487, 4.8537715329554203, 1.2374476248622075]
cluster_values = [-3.5923001345962162, 0.77069901513184735,
-4.942909785931378, -3.1580950032999096]
bandwidth = 2.69474878768
dmin = -13.8848342423
dmax = 12.6744452247
result = util.density(kvalues, cluster_values, bandwidth, dmin, dmax)
self.assertAlmostEquals(0.08663036966690765, result[0])
self.assertAlmostEquals(0.08809242907902183, result[1])
self.assertAlmostEquals(0.49712338305039777, result[2])
self.assertAlmostEquals(0.12248549621579163, result[3])
self.assertAlmostEquals(0.05708884005243133, result[4])
self.assertAlmostEquals(0.14857948193544993, result[5])
def test_sd_rnorm(self):
result = util.sd_rnorm([1.3, 1.6, 1.2, 1.05], 9, 0.748951)
# the results are fairly random, make sure we have the right
# number of values
self.assertEquals(9, len(result))
def test_max_row_var(self):
"""tests maximum row variance function"""
matrix = [[1, 5, 9, 13],
[2, 6, 10, 14],
[3, 7, 11, 15],
[4, 8, 12, 16]]
result = util.max_row_var(matrix)
self.assertAlmostEqual(26.666666666666664, result)
def test_max_row_var_with_nans(self):
"""tests maximum row variance with NaNs"""
matrix = [[1, np.nan, 9],
[np.nan, 6, 10],
[3, 7, np.nan],
[4, 8, 12]]
result = util.max_row_var(matrix)
self.assertAlmostEqual(16.0, result)
def test_r_outer(self):
"""tests the r_outer function"""
result = util.r_outer([5.5, 6.5], [4.5, 7.5], operator.add)
self.assertAlmostEqual(10.0, result[0][0])
self.assertAlmostEqual(13.0, result[0][1])
self.assertAlmostEqual(11.0, result[1][0])
self.assertAlmostEqual(14.0, result[1][1])
class Order2StringTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for order2string"""
def test_order2string(self):
self.assertEquals("1st", util.order2string(1))
self.assertEquals("2nd", util.order2string(2))
self.assertEquals("3rd", util.order2string(3))
self.assertEquals("4th", util.order2string(4))
self.assertEquals("11th", util.order2string(11))
self.assertEquals("12th", util.order2string(12))
self.assertEquals("21st", util.order2string(21))
self.assertEquals("22nd", util.order2string(22))
self.assertEquals("23rd", util.order2string(23))
| lgpl-3.0 |
Srisai85/scipy | scipy/stats/tests/test_contingency.py | 126 | 5959 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (run_module_suite, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_approx_equal, assert_raises,
assert_allclose)
from scipy.special import xlogy
from scipy.stats.contingency import margins, expected_freq, chi2_contingency
def test_margins():
a = np.array([1])
m = margins(a)
assert_equal(len(m), 1)
m0 = m[0]
assert_array_equal(m0, np.array([1]))
a = np.array([[1]])
m0, m1 = margins(a)
expected0 = np.array([[1]])
expected1 = np.array([[1]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(12).reshape(2, 6)
m0, m1 = margins(a)
expected0 = np.array([[15], [51]])
expected1 = np.array([[6, 8, 10, 12, 14, 16]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(24).reshape(2, 3, 4)
m0, m1, m2 = margins(a)
expected0 = np.array([[[66]], [[210]]])
expected1 = np.array([[[60], [92], [124]]])
expected2 = np.array([[[60, 66, 72, 78]]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
assert_array_equal(m2, expected2)
def test_expected_freq():
assert_array_equal(expected_freq([1]), np.array([1.0]))
observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
e = expected_freq(observed)
assert_array_equal(e, np.ones_like(observed))
observed = np.array([[10, 10, 20], [20, 20, 20]])
e = expected_freq(observed)
correct = np.array([[12., 12., 16.], [18., 18., 24.]])
assert_array_almost_equal(e, correct)
def test_chi2_contingency_trivial():
# Some very simple tests for chi2_contingency.
# A trivial case
obs = np.array([[1, 2], [1, 2]])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 1)
assert_array_equal(obs, expected)
# A *really* trivial case: 1-D data.
obs = np.array([1, 2, 3])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 0)
assert_array_equal(obs, expected)
def test_chi2_contingency_R():
# Some test cases that were computed independently, using R.
Rcode = \
"""
# Data vector.
data <- c(
12, 34, 23, 4, 47, 11,
35, 31, 11, 34, 10, 18,
12, 32, 9, 18, 13, 19,
12, 12, 14, 9, 33, 25
)
# Create factor tags:r=rows, c=columns, t=tiers
r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3")))
t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2")))
# 3-way Chi squared test of independence
s = summary(xtabs(data~r+c+t))
print(s)
"""
Routput = \
"""
Call: xtabs(formula = data ~ r + c + t)
Number of cases in table: 478
Number of factors: 3
Test for independence of all factors:
Chisq = 102.17, df = 17, p-value = 3.514e-14
"""
obs = np.array(
[[[12, 34, 23],
[35, 31, 11],
[12, 32, 9],
[12, 12, 14]],
[[4, 47, 11],
[34, 10, 18],
[18, 13, 19],
[9, 33, 25]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 102.17, significant=5)
assert_approx_equal(p, 3.514e-14, significant=4)
assert_equal(dof, 17)
Rcode = \
"""
# Data vector.
data <- c(
#
12, 17,
11, 16,
#
11, 12,
15, 16,
#
23, 15,
30, 22,
#
14, 17,
15, 16
)
# Create factor tags:r=rows, c=columns, d=depths(?), t=tiers
r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2")))
c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2")))
d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2")))
t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2")))
# 4-way Chi squared test of independence
s = summary(xtabs(data~r+c+d+t))
print(s)
"""
Routput = \
"""
Call: xtabs(formula = data ~ r + c + d + t)
Number of cases in table: 262
Number of factors: 4
Test for independence of all factors:
Chisq = 8.758, df = 11, p-value = 0.6442
"""
obs = np.array(
[[[[12, 17],
[11, 16]],
[[11, 12],
[15, 16]]],
[[[23, 15],
[30, 22]],
[[14, 17],
[15, 16]]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 8.758, significant=4)
assert_approx_equal(p, 0.6442, significant=4)
assert_equal(dof, 11)
def test_chi2_contingency_g():
c = np.array([[15, 60], [15, 90]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=False)
assert_allclose(g, 2*xlogy(c, c/e).sum())
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=True)
c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]])
assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum())
c = np.array([[10, 12, 10], [12, 10, 10]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood')
assert_allclose(g, 2*xlogy(c, c/e).sum())
def test_chi2_contingency_bad_args():
# Test that "bad" inputs raise a ValueError.
# Negative value in the array of observed frequencies.
obs = np.array([[-1, 10], [1, 2]])
assert_raises(ValueError, chi2_contingency, obs)
# The zeros in this will result in zeros in the array
# of expected frequencies.
obs = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, chi2_contingency, obs)
# A degenerate case: `observed` has size 0.
obs = np.empty((0, 8))
assert_raises(ValueError, chi2_contingency, obs)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
Lkhagvadelger/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py | 122 | 10361 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.net.layouttestresults import LayoutTestResults
class UnableToApplyPatch(Exception):
def __init__(self, patch):
Exception.__init__(self)
self.patch = patch
class PatchAnalysisTaskDelegate(object):
def parent_command(self):
raise NotImplementedError("subclasses must implement")
def run_command(self, command):
raise NotImplementedError("subclasses must implement")
def command_passed(self, message, patch):
raise NotImplementedError("subclasses must implement")
def command_failed(self, message, script_error, patch):
raise NotImplementedError("subclasses must implement")
def refetch_patch(self, patch):
raise NotImplementedError("subclasses must implement")
def expected_failures(self):
raise NotImplementedError("subclasses must implement")
def test_results(self):
raise NotImplementedError("subclasses must implement")
def archive_last_test_results(self, patch):
raise NotImplementedError("subclasses must implement")
def build_style(self):
raise NotImplementedError("subclasses must implement")
# We could make results_archive optional, but for now it's required.
def report_flaky_tests(self, patch, flaky_tests, results_archive):
raise NotImplementedError("subclasses must implement")
class PatchAnalysisTask(object):
def __init__(self, delegate, patch):
self._delegate = delegate
self._patch = patch
self._script_error = None
self._results_archive_from_patch_test_run = None
self._results_from_patch_test_run = None
self._expected_failures = delegate.expected_failures()
def _run_command(self, command, success_message, failure_message):
try:
self._delegate.run_command(command)
self._delegate.command_passed(success_message, patch=self._patch)
return True
except ScriptError, e:
self._script_error = e
self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch)
return False
def _clean(self):
return self._run_command([
"clean",
],
"Cleaned working directory",
"Unable to clean working directory")
def _update(self):
# FIXME: Ideally the status server log message should include which revision we updated to.
return self._run_command([
"update",
],
"Updated working directory",
"Unable to update working directory")
def _apply(self):
return self._run_command([
"apply-attachment",
"--no-update",
"--non-interactive",
self._patch.id(),
],
"Applied patch",
"Patch does not apply")
def _build(self):
return self._run_command([
"build",
"--no-clean",
"--no-update",
"--build-style=%s" % self._delegate.build_style(),
],
"Built patch",
"Patch does not build")
def _build_without_patch(self):
return self._run_command([
"build",
"--force-clean",
"--no-update",
"--build-style=%s" % self._delegate.build_style(),
],
"Able to build without patch",
"Unable to build without patch")
def _test(self):
return self._run_command([
"build-and-test",
"--no-clean",
"--no-update",
# Notice that we don't pass --build, which means we won't build!
"--test",
"--non-interactive",
],
"Passed tests",
"Patch does not pass tests")
def _build_and_test_without_patch(self):
return self._run_command([
"build-and-test",
"--force-clean",
"--no-update",
"--build",
"--test",
"--non-interactive",
],
"Able to pass tests without patch",
"Unable to pass tests without patch (tree is red?)")
def _land(self):
# Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should.
return self._run_command([
"land-attachment",
"--force-clean",
"--non-interactive",
"--parent-command=" + self._delegate.parent_command(),
self._patch.id(),
],
"Landed patch",
"Unable to land patch")
def _report_flaky_tests(self, flaky_test_results, results_archive):
self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive)
def _results_failed_different_tests(self, first, second):
first_failing_tests = [] if not first else first.failing_tests()
second_failing_tests = [] if not second else second.failing_tests()
return first_failing_tests != second_failing_tests
def _test_patch(self):
if self._test():
return True
        # Note: archive_last_test_results deletes the results directory, making these calls order-sensitive.
# We could remove this dependency by building the test_results from the archive.
first_results = self._delegate.test_results()
first_results_archive = self._delegate.archive_last_test_results(self._patch)
first_script_error = self._script_error
first_failure_status_id = self.failure_status_id
if self._expected_failures.failures_were_expected(first_results):
return True
if self._test():
# Only report flaky tests if we were successful at parsing results.json and archiving results.
if first_results and first_results_archive:
self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)
return True
second_results = self._delegate.test_results()
if self._results_failed_different_tests(first_results, second_results):
# We could report flaky tests here, but we would need to be careful
# to use similar checks to ExpectedFailures._can_trust_results
# to make sure we don't report constant failures as flakes when
# we happen to hit the --exit-after-N-failures limit.
# See https://bugs.webkit.org/show_bug.cgi?id=51272
return False
# Archive (and remove) second results so test_results() after
# build_and_test_without_patch won't use second results instead of the clean-tree results.
second_results_archive = self._delegate.archive_last_test_results(self._patch)
if self._build_and_test_without_patch():
# The error from the previous ._test() run is real, report it.
return self.report_failure(first_results_archive, first_results, first_script_error)
clean_tree_results = self._delegate.test_results()
self._expected_failures.update(clean_tree_results)
# Re-check if the original results are now to be expected to avoid a full re-try.
if self._expected_failures.failures_were_expected(first_results):
return True
# Now that we have updated information about failing tests with a clean checkout, we can
# tell if our original failures were unexpected and fail the patch if necessary.
if self._expected_failures.unexpected_failures_observed(first_results):
self.failure_status_id = first_failure_status_id
return self.report_failure(first_results_archive, first_results, first_script_error)
# We don't know what's going on. The tree is likely very red (beyond our layout-test-results
        # failure limit), just keep retrying the patch until someone fixes the tree.
return False
def results_archive_from_patch_test_run(self, patch):
        assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-usable.
return self._results_archive_from_patch_test_run
def results_from_patch_test_run(self, patch):
        assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-usable.
return self._results_from_patch_test_run
def report_failure(self, results_archive=None, results=None, script_error=None):
if not self.validate():
return False
self._results_archive_from_patch_test_run = results_archive
self._results_from_patch_test_run = results
raise script_error or self._script_error
def validate(self):
raise NotImplementedError("subclasses must implement")
def run(self):
raise NotImplementedError("subclasses must implement")
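# Hypothetical sketch (not a webkitpy class) showing how a concrete task can wire
# the hooks above together; real subclasses such as the commit-queue task follow
# the same clean/update/apply/build/test/land sequence.
class _ExampleLandingTask(PatchAnalysisTask):
    def validate(self):
        # A real task would confirm the patch is still eligible for landing,
        # e.g. by re-fetching it through the delegate.
        return self._delegate.refetch_patch(self._patch) is not None
    def run(self):
        if not self._clean():
            return False
        if not self._update():
            return False
        if not self._apply():
            raise UnableToApplyPatch(self._patch)
        if not self._build():
            if self._build_without_patch():
                # The build failure is the patch's fault; report_failure()
                # re-raises the recorded ScriptError.
                return self.report_failure()
            return False
        if not self._test_patch():
            return False
        return self._land()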
| bsd-3-clause |
baxter-cs/BaxterEPCSWeek1 | studentInfo.py | 1 | 1635 | import random
def main():
students = [
Student("Larsson", 37),
Student("BonJovi", 55),
]
printHeader()
selection = getUserSelection()
if selection == 0:
printStudentsByAge(students)
elif selection == 1:
pass
elif selection == 2:
pass
else:
print "SELECTION NOT RECOGNIZED"
class Student:
def __init__(self, lastName, age):
self.lastName = lastName
self.age = age
self.firstName = "JOHN"
def assignRandomName(self):
pass
def assignRandomAge(self):
self.age = random.randint(0,100)
def assignRandomWeight(self, isMetric):
pass
def assignRandomHeight(self, isMetric):
pass
inputQuestions = [
"For STUDENTS BY AGE, type 0",
"For STUDENTS BY LAST NAME, type 1",
"For STUDENTS BY FIRST NAME, type 3",
"For SUM of STUDENT AGES type 4",
"For AVERAGE of STUDENT AGES type 5",
]
def getUserSelection():
print (inputQuestions[0])
print (inputQuestions[1])
print (inputQuestions[2])
return input("Type selection and press enter:")
def printHeader():
print("HEADER TEXT HERE")
def printStudentsByAge(students):
print ("----Students By Age-----")
sortStudents = sorted(students, key=lambda student: student.age)
  for student in sortStudents:
    print(student.lastName + ", " + student.firstName + ", " + str(student.age))
def printStudentsByLName(students):
print ("----Students By -----")
def printStudentsByFName(students):
print ("----Students By -----")
def printSumAge(students):
print ("Answer:")
def printAvgAge(students):
print ("Answer:")
def ageRange(studentA, studentB):
  return abs(studentA.age - studentB.age)
main() | mit |