code (string, length 2 to 1.05M) | repo_name (string, length 5 to 104) | path (string, length 4 to 251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
---|---|---|---|---|---|
# Copyright (C) 2013 Johnny Vestergaard <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import json
import ast
import textwrap
from mixbox import idgen
from mixbox.namespaces import Namespace
from stix.core import STIXHeader, STIXPackage
from stix.common import InformationSource
from stix.common.vocabs import VocabString
from stix.incident import Incident
from stix.incident.time import Time as StixTime
from stix.indicator import Indicator
from stix.ttp import TTP, VictimTargeting
from stix.extensions.identity.ciq_identity_3_0 import CIQIdentity3_0Instance, STIXCIQIdentity3_0, OrganisationInfo
from cybox.core import Observable
from cybox.objects.socket_address_object import SocketAddress
from cybox.objects.address_object import Address
from cybox.objects.port_object import Port
from cybox.objects.network_connection_object import NetworkConnection
from cybox.objects.artifact_object import Artifact, ZlibCompression, Base64Encoding
from cybox.common import ToolInformationList, ToolInformation
from cybox.common import Time as CyboxTime
from datetime import datetime
import conpot
CONPOT_NAMESPACE = 'mushmush-conpot'
CONPOT_NAMESPACE_URL = 'http://mushmush.org/conpot'
class StixTransformer(object):
def __init__(self, config, dom):
self.protocol_to_port_mapping = dict(
modbus=502,
snmp=161,
http=80,
s7comm=102,
)
port_path_list = map(lambda x: '//conpot_template/protocols/'+x+'/@port', self.protocol_to_port_mapping.keys())
for port_path in port_path_list:
try:
protocol_port = ast.literal_eval(dom.xpath(port_path)[0])
protocol_name = port_path.rsplit("/", 2)[1]
self.protocol_to_port_mapping[protocol_name] = protocol_port
except IndexError:
continue
conpot_namespace = Namespace(CONPOT_NAMESPACE_URL, CONPOT_NAMESPACE, '')
idgen.set_id_namespace(conpot_namespace)
def _add_header(self, stix_package, title, desc):
stix_header = STIXHeader()
stix_header.title = title
stix_header.description = desc
stix_header.information_source = InformationSource()
stix_header.information_source.time = CyboxTime()
stix_header.information_source.time.produced_time = datetime.now()
stix_package.stix_header = stix_header
def transform(self, event):
stix_package = STIXPackage()
self._add_header(stix_package, "Unauthorized traffic to honeypot", "Describes one or more honeypot incidents")
incident = Incident(id_="%s:%s-%s" % (CONPOT_NAMESPACE, 'incident', event['session_id']))
initial_time = StixTime()
initial_time.initial_compromise = event['timestamp'].isoformat()
incident.time = initial_time
incident.title = "Conpot Event"
incident.short_description = "Traffic to Conpot ICS honeypot"
incident.add_category(VocabString(value='Scans/Probes/Attempted Access'))
tool_list = ToolInformationList()
tool_list.append(ToolInformation.from_dict({
'name': "Conpot",
'vendor': "Conpot Team",
'version': conpot.__version__,
'description': textwrap.dedent('Conpot is a low interactive server side Industrial Control Systems '
'honeypot designed to be easy to deploy, modify and extend.')
}))
incident.reporter = InformationSource(tools=tool_list)
incident.add_discovery_method("Monitoring Service")
incident.confidence = "High"
# Victim Targeting by Sector
ciq_identity = CIQIdentity3_0Instance()
#identity_spec = STIXCIQIdentity3_0()
#identity_spec.organisation_info = OrganisationInfo(industry_type="Electricity, Industrial Control Systems")
#ciq_identity.specification = identity_spec
ttp = TTP(title="Victim Targeting: Electricity Sector and Industrial Control System Sector")
ttp.victim_targeting = VictimTargeting()
ttp.victim_targeting.identity = ciq_identity
incident.leveraged_ttps.append(ttp)
indicator = Indicator(title="Conpot Event")
indicator.description = "Conpot network event"
indicator.confidence = "High"
source_port = Port.from_dict({'port_value': event['remote'][1], 'layer4_protocol': 'tcp'})
dest_port = Port.from_dict({'port_value': self.protocol_to_port_mapping[event['data_type']],
'layer4_protocol': 'tcp'})
source_ip = Address.from_dict({'address_value': event['remote'][0], 'category': Address.CAT_IPV4})
dest_ip = Address.from_dict({'address_value': event['public_ip'], 'category': Address.CAT_IPV4})
source_address = SocketAddress.from_dict({'ip_address': source_ip.to_dict(), 'port': source_port.to_dict()})
dest_address = SocketAddress.from_dict({'ip_address': dest_ip.to_dict(), 'port': dest_port.to_dict()})
network_connection = NetworkConnection.from_dict(
{'source_socket_address': source_address.to_dict(),
'destination_socket_address': dest_address.to_dict(),
'layer3_protocol': u"IPv4",
'layer4_protocol': u"TCP",
'layer7_protocol': event['data_type'],
'source_tcp_state': u"ESTABLISHED",
'destination_tcp_state': u"ESTABLISHED",
}
)
indicator.add_observable(Observable(network_connection))
artifact = Artifact()
artifact.data = json.dumps(event['data'])
artifact.packaging.append(ZlibCompression())
artifact.packaging.append(Base64Encoding())
indicator.add_observable(Observable(artifact))
incident.related_indicators.append(indicator)
stix_package.add_incident(incident)
stix_package_xml = stix_package.to_xml()
return stix_package_xml
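# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the event dictionary shape that transform() expects, based on the
# keys accessed above. The DOM stub, addresses and payload below are hypothetical,
# and running this requires the same stix/cybox/conpot dependencies as the module.
if __name__ == '__main__':
    class _DomStub(object):
        def xpath(self, path):
            # no template loaded, so every port lookup falls back to the defaults
            return []

    example_event = {
        'session_id': 'deadbeef',
        'timestamp': datetime.now(),
        'remote': ('203.0.113.5', 54321),  # (source ip, source port)
        'public_ip': '198.51.100.7',
        'data_type': 'modbus',             # must be a key of protocol_to_port_mapping
        'data': {'request': 'read_coils'},
    }
    transformer = StixTransformer(config=None, dom=_DomStub())
    print(transformer.transform(example_event)[:200])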
| markes1977/conpot-master | conpot/core/loggers/stix_transform.py | Python | gpl-2.0 | 6,622 |
#!/usr/bin/env python
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master/wkpf'))
print os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master/wkpf')
from wkpf.pynvc import *
from wkpf.wkpfcomm import *
comm = getComm()
print "node ids", comm.getNodeIds()
comm.setFeature(2, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(2, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(2, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(2, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(2, "WuKong")
comm.setFeature(7, WKPF_FEATURE_LIGHT_SENSOR, 1)
comm.setFeature(7, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(7, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(7, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(7, "WuKong")
comm.setFeature(4, WKPF_FEATURE_LIGHT_SENSOR, 1)
comm.setFeature(4, WKPF_FEATURE_LIGHT_ACTUATOR, 0)
comm.setFeature(4, WKPF_FEATURE_NUMERIC_CONTROLLER, 1)
comm.setFeature(4, WKPF_FEATURE_NATIVE_THRESHOLD, 1)
comm.setLocation(4, "WuKong")
comm.setFeature(5, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(5, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(5, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(5, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(5, "WuKong")
comm.setFeature(6, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(6, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(6, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(6, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(6, "WuKong")
comm.setFeature(13, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(13, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(13, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(13, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(13, "WuKong")
comm.setFeature(14, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(14, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(14, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(14, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(14, "WuKong")
comm.setFeature(15, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(15, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(15, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(15, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(15, "WuKong")
comm.setFeature(10, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(10, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(10, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(10, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(10, "WuKong")
comm.setFeature(12, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(12, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(12, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(12, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(12, "WuKong")
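# --- Hedged refactoring sketch (not part of the original script) ---
# The per-node calls above can be driven from a table; the tuples mirror the
# (light_sensor, light_actuator, numeric_controller, native_threshold) values
# written out node by node above, so this is an equivalent, more compact form.
node_features = {
    2:  (0, 1, 0, 0),
    7:  (1, 1, 0, 0),
    4:  (1, 0, 1, 1),
    5:  (0, 1, 0, 0),
    6:  (0, 1, 0, 0),
    13: (0, 1, 0, 0),
    14: (0, 1, 0, 0),
    15: (0, 1, 0, 0),
    10: (0, 1, 0, 0),
    12: (0, 1, 0, 0),
}
for node_id, (sensor, actuator, controller, threshold) in node_features.items():
    comm.setFeature(node_id, WKPF_FEATURE_LIGHT_SENSOR, sensor)
    comm.setFeature(node_id, WKPF_FEATURE_LIGHT_ACTUATOR, actuator)
    comm.setFeature(node_id, WKPF_FEATURE_NUMERIC_CONTROLLER, controller)
    comm.setFeature(node_id, WKPF_FEATURE_NATIVE_THRESHOLD, threshold)
    comm.setLocation(node_id, "WuKong")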
| wukong-m2m/NanoKong | tools/python/scripts/installer.py | Python | gpl-2.0 | 2,839 |
import os
import sys
# help() prints directly to stdout and returns None, so wrapping it in print only adds a stray "None"
help(sys)
help(os)
#!/usr/bin/env python
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import bs4 as BeautifulSoup
import logging
from thug.DOM.W3C.Element import Element
from thug.DOM.W3C.Style.CSS.ElementCSSInlineStyle import ElementCSSInlineStyle
from .attr_property import attr_property
log = logging.getLogger("Thug")
class HTMLElement(Element, ElementCSSInlineStyle):
id = attr_property("id")
title = attr_property("title")
lang = attr_property("lang")
dir = attr_property("dir")
className = attr_property("class", default = "")
def __init__(self, doc, tag):
Element.__init__(self, doc, tag)
ElementCSSInlineStyle.__init__(self, doc, tag)
def getInnerHTML(self):
if not self.hasChildNodes():
return ""
html = StringIO()
for tag in self.tag.contents:
html.write(unicode(tag))
return html.getvalue()
def setInnerHTML(self, html):
self.tag.clear()
soup = BeautifulSoup.BeautifulSoup(html, "html5lib")
for node in list(soup.head.descendants):
self.tag.append(node)
name = getattr(node, 'name', None)
if name is None:
continue
handler = getattr(log.DFT, 'handle_%s' % (name, ), None)
if handler:
handler(node)
for node in list(soup.body.children):
self.tag.append(node)
name = getattr(node, 'name', None)
if name is None:
continue
handler = getattr(log.DFT, 'handle_%s' % (name, ), None)
if handler:
handler(node)
# soup.head.unwrap()
# soup.body.unwrap()
# soup.html.wrap(self.tag)
# self.tag.html.unwrap()
for node in self.tag.descendants:
name = getattr(node, 'name', None)
if not name:
continue
p = getattr(self.doc.window.doc.DFT, 'handle_%s' % (name, ), None)
if p is None:
p = getattr(log.DFT, 'handle_%s' % (name, ), None)
if p:
p(node)
innerHTML = property(getInnerHTML, setInnerHTML)
# WARNING: NOT DEFINED IN W3C SPECS!
def focus(self):
pass
@property
def sourceIndex(self):
return None
| tweemeterjop/thug | thug/DOM/W3C/HTML/HTMLElement.py | Python | gpl-2.0 | 2,479 |
import sys
import pygame
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Drawing Lines")
screen.fill((0, 80, 0))
# draw the line
color = 100, 255, 200
width = 8
pygame.draw.line(screen, color, (100, 100), (500, 400), width)
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type in (QUIT, KEYDOWN):
sys.exit() | PoisonBOx/PyGames | 2.Pie/drawLine.py | Python | gpl-2.0 | 453 |
"""
Function-like objects that create cubic clusters.
"""
import numpy as np
from ase.data import reference_states as _refstate
from ase.cluster.factory import ClusterFactory
class SimpleCubicFactory(ClusterFactory):
spacegroup = 221
xtal_name = 'sc'
def get_lattice_constant(self):
"Get the lattice constant of an element with cubic crystal structure."
symmetry = _refstate[self.atomic_numbers[0]]['symmetry']
if symmetry != self.xtal_name:
raise ValueError, ("Cannot guess the %s " % (self.xtal_name,) +
"lattice constant of an element with crystal " +
"structure %s." % (symmetry,))
return _refstate[self.atomic_numbers[0]]['a']
def set_basis(self):
a = self.lattice_constant
if not isinstance(a, (int, float)):
raise ValueError("Improper lattice constant for %s crystal." % (self.xtal_name,))
self.lattice_basis = np.array([[a, 0., 0.],
[0., a, 0.],
[0., 0., a]])
self.resiproc_basis = self.get_resiproc_basis(self.lattice_basis)
SimpleCubic = SimpleCubicFactory()
class BodyCenteredCubicFactory(SimpleCubicFactory):
xtal_name = 'bcc'
atomic_basis = np.array([[0., 0., 0.],
[.5, .5, .5]])
BodyCenteredCubic = BodyCenteredCubicFactory()
class FaceCenteredCubicFactory(SimpleCubicFactory):
xtal_name = 'fcc'
atomic_basis = np.array([[0., 0., 0.],
[0., .5, .5],
[.5, 0., .5],
[.5, .5, 0.]])
FaceCenteredCubic = FaceCenteredCubicFactory()
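# Hedged usage sketch (the call signature is assumed from the ASE cluster-factory
# interface; the element, surfaces and layers below are purely illustrative):
#
#     from ase.cluster.cubic import FaceCenteredCubic
#     atoms = FaceCenteredCubic('Cu',
#                               surfaces=[(1, 0, 0), (1, 1, 0), (1, 1, 1)],
#                               layers=[6, 9, 5])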
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/cluster/cubic.py | Python | gpl-2.0 | 1,732 |
#!/usr/bin/python3
import math
import random
def finding_prime(number):
num=abs(number)
if num<4: return True
for x in range(2,num):
if num%x == 0:
return False
return True
def finding_prime_sqrt(number):
num=abs(number)
if num<4: return True
for x in range(2,int(math.sqrt(num))+1):
if number%x == 0:
return False
return True
def finding_prime_fermat(number):
if number<=102:
for a in range(2,number):
if pow(a,number-1,number)!=1:
return False
return True
else:
for i in range(100):
a=random.randint(2,number-1)
if pow(a,number-1,number)!=1:
return False
return True
def test_finding_prime():
number1=17
number2=20
assert(finding_prime(number1)==True)
assert(finding_prime(number2)==False)
assert(finding_prime_sqrt(number1)==True)
assert(finding_prime_sqrt(number2)==False)
assert(finding_prime_fermat(number1)==True)
assert(finding_prime_fermat(number2)==False)
print('Tests passed!')
if __name__=='__main__':
test_finding_prime() | Urinx/SomeCodes | Python/others/practice/finding_if_prime.py | Python | gpl-2.0 | 995 |
# encoding: utf-8
# module _dbus_bindings
# from /usr/lib/python2.7/dist-packages/_dbus_bindings.so
# by generator 1.135
"""
Low-level Python bindings for libdbus. Don't use this module directly -
the public API is provided by the `dbus`, `dbus.service`, `dbus.mainloop`
and `dbus.mainloop.glib` modules, with a lower-level API provided by the
`dbus.lowlevel` module.
"""
# imports
import dbus.lowlevel as __dbus_lowlevel
from _LongBase import _LongBase
class UInt64(_LongBase):
"""
An unsigned 64-bit integer between 0 and 0xFFFF FFFF FFFF FFFF,
represented as a subtype of `long`.
This type only exists on platforms where the C compiler has suitable
64-bit types, such as C99 ``unsigned long long``.
Constructor::
dbus.UInt64(value: long[, variant_level: int]) -> UInt64
``value`` must be within the allowed range, or `OverflowError` will be
raised.
``variant_level`` must be non-negative; the default is 0.
:IVariables:
`variant_level` : int
Indicates how many nested Variant containers this object
is contained in: if a message's wire format has a variant containing a
variant containing a uint64, this is represented in Python by a
UInt64 with variant_level==2.
"""
def __init__(self, value, variant_level=None): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/_dbus_bindings/UInt64.py | Python | gpl-2.0 | 1,618 |
bl_info = {
"name" : "text objects to-from xml",
"author" : "chebhou",
"version" : (1, 0),
"blender" : (2, 7, 3),
"location" : "file->export->text to-from xml",
"discription" : "copys an text objectx from-to xml file",
"wiki_url" : " https://github.com/chebhou",
"tracker_url" : "https://github.com/chebhou",
"category" : "Import-Export"
}
import bpy
from bpy.types import Operator
from bpy_extras.io_utils import ExportHelper
from bpy.props import EnumProperty, BoolProperty
from xml.dom import minidom
from xml.dom.minidom import Document
def txt_sync(filepath):
dom = minidom.parse(filepath)
scenes =dom.getElementsByTagName('scene')
for scene in scenes:
scene_name=scene.getAttribute('name')
print("\n",scene_name)
bl_scene = bpy.data.scenes[scene_name]
txt_objs =scene.getElementsByTagName('object')
for obj in txt_objs:
obj_name = obj.getAttribute('name')
obj_body = obj.childNodes[0].nodeValue
bl_obj = bl_scene.objects[obj_name].data.body = obj_body
print(obj_name," ",obj_body)
def txt_export(filepath):
doc = Document()
root = doc.createElement('data')
doc.appendChild(root)
for sce in bpy.data.scenes :
#create a scene
scene = doc.createElement('scene')
scene.setAttribute('name', sce.name)
root.appendChild(scene)
for obj in sce.objects :
if obj.type == 'FONT':
#add object element
object = doc.createElement('object')
object.setAttribute('name', obj.name)
txt_node = doc.createTextNode(obj.data.body)
object.appendChild(txt_node)
scene.appendChild(object)
#write to a file
file_handle = open(filepath,"wb")
file_handle.write(bytes(doc.toprettyxml(indent='\t'), 'UTF-8'))
file_handle.close()
class text_export(Operator, ExportHelper):
"""write and read text objects to a file"""
bl_idname = "export_scene.text_xml"
bl_label = "text from-to xml"
bl_options = {'REGISTER', 'UNDO'} #should remove undo ?
# ExportHelper mixin class uses this
filename_ext = ".xml"
#parameters and variables
convert = EnumProperty(
name="Convert",
description="Choose conversion",
items=(('W', "write objects", "write text objects to xml"),
('R', "read objects", "read text objects from xml")),
default='W',
)
#main function
def execute(self, context):
bpy.ops.object.mode_set(mode = 'OBJECT')
if self.convert == 'W':
txt_export(self.filepath)
else:
txt_sync(self.filepath)
bpy.context.scene.update()
self.report({'INFO'},"Conversion is Done")
return {'FINISHED'}
def menu_func_export(self, context):
self.layout.operator(text_export.bl_idname, text="Text to-from xml")
def register():
bpy.utils.register_class(text_export)
bpy.types.INFO_MT_file_export.append(menu_func_export)
bpy.types.INFO_MT_file_import.append(menu_func_export)
def unregister():
bpy.utils.unregister_class(text_export)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
bpy.types.INFO_MT_file_import.append(menu_func_export)
if __name__ == "__main__":
register()
| chebhou/Text-from-to-.XML | text_io_xml.py | Python | gpl-2.0 | 3,503 |
from __future__ import print_function, division, absolute_import
import difflib
import locale
import os
import pprint
import six
import sys
import tempfile
try:
import unittest2 as unittest
except ImportError:
import unittest
# just log py.warnings (and pygtk warnings in particular)
import logging
try:
# 2.7+
logging.captureWarnings(True)
except AttributeError:
pass
from mock import Mock, MagicMock, NonCallableMock, patch, mock_open
from contextlib import contextmanager
from . import stubs
import subscription_manager.injection as inj
import subscription_manager.managercli
from rhsmlib.services import config
# use instead of the normal pid file based ActionLock
from threading import RLock
if six.PY2:
OPEN_FUNCTION = '__builtin__.open'
else:
OPEN_FUNCTION = 'builtins.open'
@contextmanager
def open_mock(content=None, **kwargs):
content_out = six.StringIO()
m = mock_open(read_data=content)
with patch(OPEN_FUNCTION, m, create=True, **kwargs) as mo:
stream = six.StringIO(content)
rv = mo.return_value
rv.write = lambda x: content_out.write(x)
rv.content_out = lambda: content_out.getvalue()
rv.__iter__ = lambda x: iter(stream.readlines())
yield rv
@contextmanager
def open_mock_many(file_content_map=None, **kwargs):
"""
Mock out access to one or many files opened using the builtin "open".
:param file_content_map: A dictionary of path : file_contents
:type file_content_map: dict[str,str]
:param kwargs:
:return:
"""
file_content_map = file_content_map or {}
for key, value in file_content_map.items():
file_content_map[key] = (mock_open(read_data=value), value, six.StringIO())
def get_file(path, *args, **kwargs):
"""
The side effect that will allow us to "open" the right "file".
Not for use outside open_mock_many.
:param path: The path which is passed in to the built
:param args:
:param kwargs:
:return:
"""
try:
rv, file_contents, content_out = file_content_map[path]
except KeyError:
if six.PY2:
raise IOError(2, 'No such file or directory')
else:
raise OSError(2, 'No such file or directory')
rv = rv.return_value
rv.write = lambda x: content_out.write(x)
rv.content_out = lambda: content_out.getvalue()
return rv
with patch(OPEN_FUNCTION, **kwargs) as mo:
mo.side_effect = get_file
yield mo
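# Hedged usage sketch (paths and contents are hypothetical): open_mock_many
# patches the builtin open() so that each mapped path yields its own canned
# content, while unmapped paths raise the usual "No such file or directory":
#
#     with open_mock_many({'/etc/one.conf': 'a=1', '/etc/two.conf': 'b=2'}):
#         with open('/etc/one.conf') as f:
#             f.read()        # -> 'a=1'
#         open('/missing')    # raises IOError (py2) / OSError (py3)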
@contextmanager
def temp_file(content, *args, **kwargs):
try:
kwargs['delete'] = False
kwargs.setdefault('prefix', 'sub-man-test')
fh = tempfile.NamedTemporaryFile(mode='w+', *args, **kwargs)
fh.write(content)
fh.close()
yield fh.name
finally:
os.unlink(fh.name)
@contextmanager
def locale_context(new_locale, category=None):
old_category = category or locale.LC_CTYPE
old_locale = locale.getlocale(old_category)
category = category or locale.LC_ALL
locale.setlocale(category, new_locale)
try:
yield
finally:
locale.setlocale(category, old_locale)
class FakeLogger(object):
def __init__(self):
self.expected_msg = ""
self.msg = None
self.logged_exception = None
def debug(self, buf, *args, **kwargs):
self.msg = buf
def error(self, buf, *args, **kwargs):
self.msg = buf
def exception(self, e, *args, **kwargs):
self.logged_exception = e
def set_expected_msg(self, msg):
self.expected_msg = msg
def info(self, buf, *args, **kwargs):
self.msg = buf
def warning(self, buf, *args, **kwargs):
self.msg = buf
class FakeException(Exception):
def __init__(self, msg=None):
self.msg = msg
def __str__(self):
return repr(self.msg)
class Matcher(object):
@staticmethod
def set_eq(first, second):
"""Useful for dealing with sets that have been cast to or instantiated as lists."""
return set(first) == set(second)
def __init__(self, compare, some_obj):
self.compare = compare
self.some_obj = some_obj
def __eq__(self, other):
return self.compare(self.some_obj, other)
class SubManFixture(unittest.TestCase):
def set_facts(self):
"""Override if you need to set facts for a test."""
return {"mock.facts": "true"}
"""
Can be extended by any subscription manager test case to make
sure nothing on the actual system is read/touched, and appropriate
mocks/stubs are in place.
"""
def setUp(self):
# No matter what, stop all patching (even if we have a failure in setUp itself)
self.addCleanup(patch.stopall)
# Never attempt to use the actual managercli.cfg which points to a
# real file in etc.
self.mock_cfg_parser = stubs.StubConfig()
original_conf = subscription_manager.managercli.conf
def unstub_conf():
subscription_manager.managercli.conf = original_conf
# Mock makes it damn near impossible to mock a module attribute (which we shouldn't be using
# in the first place because it's terrible) so we monkey-patch it ourselves.
# TODO Fix this idiocy by not reading the damn config on module import
subscription_manager.managercli.conf = config.Config(self.mock_cfg_parser)
self.addCleanup(unstub_conf)
facts_host_patcher = patch('rhsmlib.dbus.facts.FactsClient', auto_spec=True)
self.mock_facts_host = facts_host_patcher.start()
self.mock_facts_host.return_value.GetFacts.return_value = self.set_facts()
# By default mock that we are registered. Individual test cases
# can override if they are testing disconnected scenario.
id_mock = NonCallableMock(name='FixtureIdentityMock')
id_mock.exists_and_valid = Mock(return_value=True)
id_mock.uuid = 'fixture_identity_mock_uuid'
id_mock.name = 'fixture_identity_mock_name'
id_mock.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
id_mock.keypath.return_value = "/not/a/real/key/path"
id_mock.certpath.return_value = "/not/a/real/cert/path"
# Don't really care about date ranges here:
self.mock_calc = NonCallableMock()
self.mock_calc.calculate.return_value = None
# Avoid trying to read real /etc/yum.repos.d/redhat.repo
self.mock_repofile_path_exists_patcher = patch('subscription_manager.repolib.YumRepoFile.path_exists')
mock_repofile_path_exists = self.mock_repofile_path_exists_patcher.start()
mock_repofile_path_exists.return_value = True
inj.provide(inj.IDENTITY, id_mock)
inj.provide(inj.PRODUCT_DATE_RANGE_CALCULATOR, self.mock_calc)
inj.provide(inj.ENTITLEMENT_STATUS_CACHE, stubs.StubEntitlementStatusCache())
inj.provide(inj.POOL_STATUS_CACHE, stubs.StubPoolStatusCache())
inj.provide(inj.PROD_STATUS_CACHE, stubs.StubProductStatusCache())
inj.provide(inj.CONTENT_ACCESS_MODE_CACHE, stubs.StubContentAccessModeCache())
inj.provide(inj.SUPPORTED_RESOURCES_CACHE, stubs.StubSupportedResourcesCache())
inj.provide(inj.SYSPURPOSE_VALID_FIELDS_CACHE, stubs.StubSyspurposeValidFieldsCache())
inj.provide(inj.CURRENT_OWNER_CACHE, stubs.StubCurrentOwnerCache)
inj.provide(inj.OVERRIDE_STATUS_CACHE, stubs.StubOverrideStatusCache())
inj.provide(inj.RELEASE_STATUS_CACHE, stubs.StubReleaseStatusCache())
inj.provide(inj.AVAILABLE_ENTITLEMENT_CACHE, stubs.StubAvailableEntitlementsCache())
inj.provide(inj.PROFILE_MANAGER, stubs.StubProfileManager())
# By default set up an empty stub entitlement and product dir.
# Tests need to modify or create their own but nothing should hit
# the system.
self.ent_dir = stubs.StubEntitlementDirectory()
inj.provide(inj.ENT_DIR, self.ent_dir)
self.prod_dir = stubs.StubProductDirectory()
inj.provide(inj.PROD_DIR, self.prod_dir)
# Installed products manager needs PROD_DIR injected first
inj.provide(inj.INSTALLED_PRODUCTS_MANAGER, stubs.StubInstalledProductsManager())
self.stub_cp_provider = stubs.StubCPProvider()
self._release_versions = []
self.stub_cp_provider.content_connection.get_versions = self._get_release_versions
inj.provide(inj.CP_PROVIDER, self.stub_cp_provider)
inj.provide(inj.CERT_SORTER, stubs.StubCertSorter())
# setup and mock the plugin_manager
plugin_manager_mock = MagicMock(name='FixturePluginManagerMock')
plugin_manager_mock.runiter.return_value = iter([])
inj.provide(inj.PLUGIN_MANAGER, plugin_manager_mock)
inj.provide(inj.DBUS_IFACE, Mock(name='FixtureDbusIfaceMock'))
pooltype_cache = Mock()
inj.provide(inj.POOLTYPE_CACHE, pooltype_cache)
# don't use file based locks for tests
inj.provide(inj.ACTION_LOCK, RLock)
self.stub_facts = stubs.StubFacts()
inj.provide(inj.FACTS, self.stub_facts)
content_access_cache_mock = MagicMock(name='ContentAccessCacheMock')
inj.provide(inj.CONTENT_ACCESS_CACHE, content_access_cache_mock)
self.dbus_patcher = patch('subscription_manager.managercli.CliCommand._request_validity_check')
self.dbus_patcher.start()
# No tests should be trying to connect to any configure or test server
# so really, everything needs this mock. May need to be in __init__, or
# better, all test classes need to use SubManFixture
self.is_valid_server_patcher = patch("subscription_manager.managercli.is_valid_server_info")
is_valid_server_mock = self.is_valid_server_patcher.start()
is_valid_server_mock.return_value = True
# No tests should be trying to test the proxy connection
# so really, everything needs this mock. May need to be in __init__, or
# better, all test classes need to use SubManFixture
self.test_proxy_connection_patcher = patch("subscription_manager.managercli.CliCommand.test_proxy_connection")
test_proxy_connection_mock = self.test_proxy_connection_patcher.start()
test_proxy_connection_mock.return_value = True
self.syncedstore_patcher = patch('subscription_manager.syspurposelib.SyncedStore')
syncedstore_mock = self.syncedstore_patcher.start()
set_up_mock_sp_store(syncedstore_mock)
self.files_to_cleanup = []
def tearDown(self):
if not hasattr(self, 'files_to_cleanup'):
return
for f in self.files_to_cleanup:
# Assuming these are tempfile.NamedTemporaryFile, created with
# the write_tempfile() method in this class.
f.close()
def write_tempfile(self, contents):
"""
Write out a tempfile and append it to the list of those to be
cleaned up in tearDown.
"""
fid = tempfile.NamedTemporaryFile(mode='w+', suffix='.tmp')
fid.write(contents)
fid.seek(0)
self.files_to_cleanup.append(fid)
return fid
def set_consumer_auth_cp(self, consumer_auth_cp):
cp_provider = inj.require(inj.CP_PROVIDER)
cp_provider.consumer_auth_cp = consumer_auth_cp
def get_consumer_cp(self):
cp_provider = inj.require(inj.CP_PROVIDER)
consumer_cp = cp_provider.get_consumer_auth_cp()
return consumer_cp
# The ContentConnection used for reading release versions from
# the cdn. The injected one uses this.
def _get_release_versions(self, listing_path):
return self._release_versions
# For changing the injected consumer id to one that passes "is_valid"
def _inject_mock_valid_consumer(self, uuid=None):
"""For changing the injected consumer identity to one that passes is_valid().
Returns the injected identity in case it needs to be examined.
"""
identity = NonCallableMock(name='ValidIdentityMock')
identity.uuid = uuid or "VALIDCONSUMERUUID"
identity.is_valid = Mock(return_value=True)
identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
inj.provide(inj.IDENTITY, identity)
return identity
def _inject_mock_invalid_consumer(self, uuid=None):
"""For chaining injected consumer identity to one that fails is_valid()
Returns the injected identity if it need to be examined.
"""
invalid_identity = NonCallableMock(name='InvalidIdentityMock')
invalid_identity.is_valid = Mock(return_value=False)
invalid_identity.uuid = uuid or "INVALIDCONSUMERUUID"
invalid_identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
inj.provide(inj.IDENTITY, invalid_identity)
return invalid_identity
# use our naming convention here to make it clear
# this is our extension. Note that Python 2.7 adds an
# assertMultiLineEqual that assertEqual on strings uses
# automatically
def assert_string_equals(self, expected_str, actual_str, msg=None):
if expected_str != actual_str:
expected_lines = expected_str.splitlines(True)
actual_lines = actual_str.splitlines(True)
delta = difflib.unified_diff(expected_lines, actual_lines, "expected", "actual")
message = ''.join(delta)
if msg:
message += " : " + msg
self.fail("Multi-line strings are unequal:\n" + message)
def assert_equal_dict(self, expected_dict, actual_dict):
mismatches = []
missing_keys = []
extra = []
for key in expected_dict:
if key not in actual_dict:
missing_keys.append(key)
continue
if expected_dict[key] != actual_dict[key]:
mismatches.append((key, expected_dict[key], actual_dict[key]))
for key in actual_dict:
if key not in expected_dict:
extra.append(key)
message = ""
if missing_keys or extra:
message += "Keys in only one dict: \n"
if missing_keys:
for key in missing_keys:
message += "actual_dict: %s\n" % key
if extra:
for key in extra:
message += "expected_dict: %s\n" % key
if mismatches:
message += "Unequal values: \n"
for info in mismatches:
message += "%s: %s != %s\n" % info
# pprint the dicts
message += "\n"
message += "expected_dict:\n"
message += pprint.pformat(expected_dict)
message += "\n"
message += "actual_dict:\n"
message += pprint.pformat(actual_dict)
if mismatches or missing_keys or extra:
self.fail(message)
def assert_items_equals(self, a, b):
"""Assert that two lists contain the same items regardless of order."""
if sorted(a, key=lambda item: str(item)) != sorted(b, key=lambda item: str(item)):
self.fail("%s != %s" % (a, b))
return True
class Capture(object):
class Tee(object):
def __init__(self, stream, silent):
self.buf = six.StringIO()
self.stream = stream
self.silent = silent
def write(self, data):
self.buf.write(data)
if not self.silent:
self.stream.write(data)
def flush(self):
pass
def getvalue(self):
return self.buf.getvalue()
def isatty(self):
return False
def __init__(self, silent=False):
self.silent = silent
def __enter__(self):
self.buffs = (self.Tee(sys.stdout, self.silent), self.Tee(sys.stderr, self.silent))
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout, sys.stderr = self.buffs
return self
@property
def out(self):
return self.buffs[0].getvalue()
@property
def err(self):
return self.buffs[1].getvalue()
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.stdout
sys.stderr = self.stderr
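# Hedged usage sketch: Capture tees stdout/stderr into buffers so tests can make
# assertions about printed output (silent=True suppresses the real streams):
#
#     with Capture(silent=True) as cap:
#         print("hello")
#     assert "hello" in cap.out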
def set_up_mock_sp_store(mock_sp_store):
"""
Sets up the mock syspurpose store with methods that are mock versions of the real deal.
Allows us to test in the absence of the syspurpose module.
This documents the essential expected behaviour of the methods subman relies upon
from the syspurpose codebase.
:return:
"""
contents = {}
mock_sp_store_contents = contents
def set(item, value):
contents[item] = value
def read(path, raise_on_error=False):
return mock_sp_store
def unset(item):
contents[item] = None
def add(item, value):
current = contents.get(item, [])
if value not in current:
current.append(value)
contents[item] = current
def remove(item, value):
current = contents.get(item)
if current is not None and isinstance(current, list) and value in current:
current.remove(value)
def get_local_contents():
return contents
def get_cached_contents():
return contents
def update_local(data):
global contents
contents = data
mock_sp_store.return_value.set = Mock(side_effect=set)
mock_sp_store.return_value.read = Mock(side_effect=read)
mock_sp_store.return_value.unset = Mock(side_effect=unset)
mock_sp_store.return_value.add = Mock(side_effect=add)
mock_sp_store.return_value.remove = Mock(side_effect=remove)
mock_sp_store.return_value.local_contents = mock_sp_store_contents
mock_sp_store.return_value.get_local_contents = Mock(side_effect=get_local_contents)
mock_sp_store.return_value.update_local = Mock(side_effect=update_local)
mock_sp_store.return_value.get_cached_contents = Mock(side_effect=get_cached_contents)
return mock_sp_store, mock_sp_store_contents
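# Hedged usage sketch: once SyncedStore is patched (see SubManFixture.setUp above),
# the mock behaves like a small in-memory syspurpose store; the field names and
# values below are illustrative only:
#
#     mock_store, contents = set_up_mock_sp_store(syncedstore_mock)
#     store = syncedstore_mock.return_value
#     store.set('role', 'SomeRole')
#     store.add('addons', 'addon1')
#     store.get_local_contents()   # -> {'role': 'SomeRole', 'addons': ['addon1']}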
| Lorquas/subscription-manager | test/fixture.py | Python | gpl-2.0 | 18,129 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 11:09:05 2013
@author: jotterbach
"""
from numpy import *
from ED_HalfFilling import EigSys_HalfFilling
from DotProduct import scalar_prod
from multiprocessing import *
from multiprocessing import Pool
import matplotlib.pyplot as plt
from ParallelizationTools import info
from os.path import *
from scipy.special import *
from scipy.linalg import qr
from DotProduct import scalar_prod
from Correlation_Generator import *
from datetime import datetime
''' define the datestamp for the filenames '''
date = str(datetime.now())
now = date[0:10]+'_'+date[11:13]+'h'+date[14:16]+'m'
def AngleSpectrum(number_particles, noEV, gamma, hopping, angle):
"""
AngleSpectrum(number_particles, noEV, gamma, hopping, angle):
computes the energy eigenspectrum as a function of the angle of the dipoles
with the chain axis given an unit interaction V and a hopping J
parameters of the function:
number_particles: number of particles in the problem
noEV: number of eigenvalues being calculated
gamma: opening angle of the zig-zag chain
hopping: hopping parameter in units of interaction V
angle: array containing the angles as a multiple of **PI**
"""
''' default values for other methods that are being called by the current
function '''
spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
independent_v1_v2 = 1 #makes v1 and v2 independent of each other
number_sites = 2*number_particles #condition for half-filling
interaction_strength = 1 #unit of energy
# number_particles = 6
# noEV = 5*number_sites #degeneracy of GS requires noEV>number_sites
# hopping = .1
# gamma = 2*pi/3
# angle = linspace(-.8,-.7,41)
''' initialization of variables that will be stored for later use '''
eigval = zeros((angle.shape[0],noEV), dtype = float)
degeneracies = zeros((angle.shape[0],1))
v1 = zeros((angle.shape[0],1))
v2 = zeros((angle.shape[0],1))
v3 = zeros((angle.shape[0],1))
''' actual method call '''
if __name__ == 'DiagonalizationMethods':
info('main line')
pool = Pool()
''' invocation of the eigenvalue procedure '''
it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping, interaction_strength, angle[angle_idx], noEV, spectrum, gamma, independent_v1_v2)) for angle_idx in range(0,angle.shape[0])]
for ridx in it:
angle_idx = nonzero(angle == ridx.get()[0])
eigval[angle_idx,:]= ridx.get()[1]#floor(10*around(real(ridx.get()[1]),decimals = 2))/10
degeneracies[angle_idx] = sum((eigval[angle_idx,:] == eigval[angle_idx,0]).astype(int))
v1[angle_idx]=ridx.get()[2]
v2[angle_idx]=ridx.get()[3]
v3[angle_idx]=ridx.get()[4]
print 'angle:', angle[angle_idx], '\nground-state degeneracy:', degeneracies[angle_idx]
filename = 'FigureData/'+now+'_AngleSpectrum_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_vdd'+str(interaction_strength).replace('.','-')
save(filename+'_EigVals', eigval)
save(filename+'_angle', angle)
print 'saved: '+filename
def InteractionSpectrum(number_particles, noEV, gamma, angle, interaction_strength):
''' computes the eigenvalue spectrum for a given angle
as a function of the interaction strength in units of J
parameters of the function:
number_particles: number of particles in the problem
noEV: number of eigenvalues being calculated
gamma: opening angle of the zig-zag chain
angle: array containing the angles as a multiple of **PI**
interaction_strength: interaction in units of hopping J
'''
''' default values for other methods that are being called by the current
function '''
spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
independent_v1_v2 = 1 #makes v1 and v2 independent of each other
number_sites = 2*number_particles #condition for half-filling
hopping = 1 #unit of energy
''' initialization of variables that will be stored for later use '''
eigval = zeros((len(interaction_strength),noEV), dtype = float)
v1 = zeros((interaction_strength.shape[0],1))
v2 = zeros((interaction_strength.shape[0],1))
v3 = zeros((interaction_strength.shape[0],1))
''' actual method call '''
if __name__ == 'DiagonalizationMethods':
info('main line')
pool = Pool()
''' invocation of eigenvalue procedure '''
it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping, interaction_strength[idx], angle, noEV, spectrum, gamma, independent_v1_v2)) for idx in range(len(interaction_strength))]
for ridx in it:
idx = nonzero(interaction_strength == ridx.get()[6])
v1=ridx.get()[2]
v2=ridx.get()[3]
v3=ridx.get()[4]
eigval[idx,:]= ridx.get()[1]#floor(10*around(real(ridx.get()[1]),decimals = 2))/10
print 'interaction:', interaction_strength[idx], 'interaction constants: ', v1,v2,v3
filename = 'FigureData/'+now+'_InteractionSpectrum_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_Theta'+str(angle).replace('.','-')
save(filename+'_EigVals', eigval)
save(filename+'_interaction',interaction_strength)
print 'saved: '+filename
def HoppingSpectrum(number_particles, noEV, gamma, angle, hopping):
''' computes the eigenvalue spectrum for given interactions as a function
of the hopping in units of interaction V
parameters of the function:
number_particles: number of particles in the problem
noEV: number of eigenvalues being calculated
gamma: opening angle of the zig-zag chain
angle: array containing the angles as a multiple of **PI**
hopping: hopping in units of interaction V
'''
''' default values for other methods that are being called by the current
function '''
spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
independent_v1_v2 = 1 #makes v1 and v2 independent of each other
number_sites = 2*number_particles #condition for half-filling
interaction_strength = 1 #unit of energy
''' initialization of variables that will be stored for later use '''
eigval = zeros((len(hopping),noEV), dtype = float)
v1 = zeros((hopping.shape[0],1))
v2 = zeros((hopping.shape[0],1))
v3 = zeros((hopping.shape[0],1))
''' actual method call '''
if __name__ == 'DiagonalizationMethods':
info('main line')
pool = Pool()
''' invocation of eigenvalue procedure '''
it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping[idx], interaction_strength, angle, noEV, spectrum, gamma, independent_v1_v2)) for idx in range(len(hopping))]
for ridx in it:
idx = nonzero(hopping == ridx.get()[5])
v1=ridx.get()[2]
v2=ridx.get()[3]
v3=ridx.get()[4]
eigval[idx,:]= ridx.get()[1]
print 'hopping:', hopping[idx], 'interactions: ', v1,v2,v3
filename = 'FigureData/'+now+'_HoppingSpectrum-nnhopping_N'+str(number_particles)+'_vdd'+str(interaction_strength).replace('.','-')+'_Theta'+str(angle).replace('.','-')
save(filename+'_EigVals', eigval)
save(filename+'_hopping', hopping)
print 'saved: '+filename
def DensityCorrelations(number_particles, noEV, gamma, angle, hopping, degeneracy):
''' computes the density correlation function for a given set of angle,
interaction and hopping'''
''' default values for other methods that are being called by the current
function '''
spectrum = 0 #ensures that the spectrum AND the eigenvectors are calculated in EigSys_HalfFilling
independent_v1_v2 = 1 #makes v1 and v2 independent of each other
number_sites = 2*number_particles #condition for half-filling
interaction_strength = 1 #unit of energy
''' function-specific parameter initialization '''
eigval, eigvec, basisstates = EigSys_HalfFilling(number_particles, number_sites, hopping, interaction_strength, angle, noEV, spectrum, gamma, independent_v1_v2)
eigval = around(real(eigval),decimals = 2)
print '\nlow-energy spectrum: \n', eigval
print 'GS degeneracy:', degeneracy
eigvec = eigvec.astype(complex)
if degeneracy > 1:
print '\nOrthogonalizing GS manifold'
eigvec_GS = zeros((eigvec.shape[0],degeneracy), dtype = complex)
for m in range(degeneracy):
eigvec_GS[:,m] = eigvec[:,m]
Q, R = qr(eigvec_GS, mode = 'economic')
for m in range(degeneracy):
eigvec[:,m] = Q[:,m]
del Q, R, eigvec_GS
number_states = basisstates.shape[0]
if __name__ == 'DiagonalizationMethods':
''' local density '''
print '\nCalculating local density'
local_density = zeros((2*number_particles,1), dtype = float)
pool = Pool()
for deg_idx in range(0,degeneracy):
print 'state index: ', deg_idx
it = [pool.apply_async(loc_den, (basisstates, number_particles, number_states, eigvec[:,deg_idx], site_idx)) for site_idx in range(0,2*number_particles)]
for ridx in it:
site_idx = ridx.get()[0]
local_density[site_idx] += real(ridx.get()[1])/degeneracy
''' density-density correlation '''
print '\nCalculating density-density correlations'
g2 = zeros((number_sites,1), dtype = float)
for deg_idx in range(0,degeneracy):
print 'state index: ', deg_idx
it = [pool.apply_async(pair_corr, (basisstates, number_particles, number_sites, number_states, eigvec[:,deg_idx], site_idx)) for site_idx in range(0,number_sites)]
for ridx in it:
site_idx = ridx.get()[0]
g2[site_idx] += real(ridx.get()[1])/degeneracy
filename='FigureData/'+now+'_Correlations_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_vdd'+str(interaction_strength).replace('.','-')+'_Theta'+str(angle).replace('.','-')
save(filename+'_local_density', local_density)
save(filename+'_g2', g2)
print 'saved: '+filename
| jotterbach/ExactDiagonalization_PolarizedFermionicDipolesOnZigZagChain | DiagonalizationMethods.py | Python | gpl-2.0 | 10,568 |
#!/usr/bin/python
"""Do not call std::string::find_first_of or std::string::find with a string of
characters to locate that has the size 1.
Use the version of std::string::find that takes a single character to
locate instead. Same for find_last_of/rfind.
"""
error_msg = "Do not use find(\"a\"), use find('a')."
regexp = r"""(?x)
r?find(_(first|last)_of)?\s*
\(
"([^\\]|(\\[nt\\"]))"[,)]"""
forbidden = [
r'find_first_of("a")',
r'find_last_of("a")',
r'find("a")',
r'rfind("a")',
r'find_first_of("\n")',
r'find_last_of("\n")',
r'find("\n")',
r'rfind("\n")',
r'find_first_of("\t")',
r'find_last_of("\t")',
r'find("\t")',
r'rfind("\t")',
r'find_first_of("\\")',
r'find_last_of("\\")',
r'find("\\")',
r'rfind("\\")',
r'find_first_of("\"")',
r'find_last_of("\"")',
r'find("\"")',
r'rfind("\"")',
r'find_first_of("a", 1)',
r'find_last_of("a", 1)',
r'find("a", 1)',
r'rfind("a", 1)',
]
allowed = [
r'find("ab")',
r"find('a')",
r"rfind('a')",
r'rfind("ab")',
r"find('\n')",
r'find("\nx")',
r"rfind('\n')",
r'rfind("\nx")',
r"find('\t')",
r'find("\tx")',
r"rfind('\t')",
r'rfind("\tx")',
r"find('\\')",
r'find("\\x")',
r"rfind('\\')",
r'rfind("\\x")',
r"find('\"')",
r'find("\"x")',
r"rfind('\"')",
r'rfind("\"x")',
r"find('a', 1)",
r'find("ab", 1)',
r"rfind('a', 1)",
r'rfind("ab", 1)',
]
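# --- Hedged self-check sketch (not part of the original rule file) ---
# The codecheck framework is assumed to apply `regexp` line by line; this small
# harness only confirms that the pattern flags every `forbidden` sample and none
# of the `allowed` ones.
if __name__ == '__main__':
    import re
    pattern = re.compile(regexp)
    assert all(pattern.search(line) for line in forbidden)
    assert not any(pattern.search(line) for line in allowed)
    print(error_msg)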
| widelands/widelands | cmake/codecheck/rules/contrived_std_string_find.py | Python | gpl-2.0 | 1,480 |
global mods
mods = []
| TheCherry/ark-server-manager | src/config.py | Python | gpl-2.0 | 22 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import string
import os
import re
import time
import urllib
import urlparse
import zlib
import sys
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## import Invenio stuff:
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_BIBFORMAT_HIDDEN_TAGS, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS
from invenio.search_engine_config import InvenioWebSearchUnknownCollectionError, InvenioWebSearchWildcardLimitError
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibrecord import create_record
from invenio.bibrank_record_sorter import get_bibrank_methods, is_method_valid, rank_records as rank_records_bibrank
from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_engine_tokenizer import wash_author_name, author_name_requires_phrase_search
from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel
from invenio.bibformat_config import CFG_BIBFORMAT_USE_OLD_BIBFORMAT
from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box
from invenio.bibknowledge import get_kbr_values
from invenio.data_cacher import DataCacher
from invenio.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.intbitset import intbitset
from invenio.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.textutils import encode_for_xml, wash_for_utf8, strip_accents
from invenio.htmlutils import get_mathjax_header
from invenio.htmlutils import nmtoken_from_string
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
webcomment_templates = invenio.template.load('webcomment')
from invenio.bibrank_citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, get_self_cited_by, \
get_refersto_hitset, get_citedby_hitset
from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box
from invenio.dbquery import run_sql, run_sql_with_limit, \
get_table_update_time, Error
from invenio.webuser import getUid, collect_user_info, session_param_set
from invenio.webpage import pageheaderonly, pagefooteronly, create_error_box
from invenio.messages import gettext_set_language
from invenio.search_engine_query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio import webinterface_handler_config as apache
from invenio.solrutils import solr_get_bitset
try:
import invenio.template
websearch_templates = invenio.template.load('websearch')
except:
pass
from invenio.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit on the number of records to check when browsing a certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
re_word = re.compile('[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_equal = re.compile('\=')
re_logical_and = re.compile('\sand\s', re.I)
re_logical_or = re.compile('\sor\s', re.I)
re_logical_not = re.compile('\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_regexp_quotes = re.compile("\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile("\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
class RestrictedCollectionDataCacher(DataCacher):
def __init__(self):
def cache_filler():
ret = []
try:
res = run_sql("""SELECT DISTINCT ar.value
FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,))
except Exception:
# database problems, return empty cache
return []
for coll in res:
ret.append(coll[0])
return ret
def timestamp_verifier():
return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
return collection in restricted_collection_cache.cache
try:
restricted_collection_cache.is_ok_p
except Exception:
restricted_collection_cache = RestrictedCollectionDataCacher()
def ziplist(*lists):
"""Just like zip(), but returns lists of lists instead of lists of tuples
Example:
zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]
FIXME: This is handy to have, and should live somewhere else, like
miscutil.really_useful_functions or something.
XXX: Starting in python 2.6, the same can be achieved (faster) by
using itertools.izip_longest(); when the minimum recommended Python
is bumped, we should use that instead.
"""
def l(*items):
return list(items)
return map(l, *lists)
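# Hedged sketch of the itertools alternative mentioned in the docstring above
# (Python >= 2.6). Note that map(l, *lists) pads shorter lists with None, which
# matches izip_longest's default fillvalue:
#
#     from itertools import izip_longest
#     def ziplist26(*lists):
#         return [list(items) for items in izip_longest(*lists)]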
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
"""Return a list of collection that are restricted but for which the user
is authorized."""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
ret = []
for collection in restricted_collection_cache.cache:
if acc_authorize_action(user_info, 'viewrestrcoll', collection=collection)[0] == 0:
ret.append(collection)
return ret
def get_all_restricted_recids():
"""
Return the set of all the restricted recids, i.e. the ids of those records
which belong to at least one restricted collection.
"""
ret = intbitset()
for collection in restricted_collection_cache.cache:
ret |= get_collection_reclist(collection)
return ret
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
"""
Return the list of restricted collection names to which recid belongs.
"""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
collection_reclist_cache.recreate_cache_if_needed()
return [collection for collection in restricted_collection_cache.cache if recid in get_collection_reclist(collection, recreate_cache_if_needed=False)]
def is_user_owner_of_record(user_info, recid):
"""
Check if the user is the owner of the record, i.e. the submitter,
and/or belongs to an owner-like group authorized to 'see' the record.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'owner' of the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
def check_user_can_view_record(user_info, recid):
"""
Check if the user is authorized to view the given recid. The function
grants access in two cases: either user has author rights on this
record, or he has view rights to the primary collection this record
belongs to.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: (0, ''), when authorization is granted, (>0, 'message') when
authorization is not granted
@rtype: (int, string)
"""
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if isinstance(recid, str):
recid = int(recid)
if record_public_p(recid):
## The record is already known to be public.
return (0, '')
## At this point, either webcoll has not yet run or there are some
## restricted collections. Let's see first if the user own the record.
if is_user_owner_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
if restricted_collections:
## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
auth_code, auth_msg = 0, ''
for collection in get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False):
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
if auth_code and policy != 'ANY':
## Ouch! the user is not authorized to this collection
return (auth_code, auth_msg)
elif auth_code == 0 and policy == 'ANY':
## Good! At least one collection is authorized
return (0, '')
## Depending on the policy, the user will be either authorized or not
return auth_code, auth_msg
if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
## the record is not in any restricted collection
return (0, '')
elif record_exists(recid) > 0:
## We are in the case where webcoll has not run.
## Let's authorize SUPERADMIN
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
if auth_code == 0:
return (0, '')
else:
## Too bad. Let's print a nice message:
return (1, """The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
until the assignment will be fully completed. Please come back later to
properly access this record.""")
else:
        ## The record either does not exist or has been deleted.
## Let's handle these situations outside of this code.
return (0, '')
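# Illustrative sketch (not part of the original code): how the
# CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY above plays out for a record that sits
# in two restricted collections (hypothetical names), assuming
# acc_authorize_action() returns auth_code 0 on success:
#
#   policy == 'ANY': the loop returns (0, '') as soon as one collection,
#                    e.g. 'ALEPH Papers', authorizes the user, even if
#                    'Restricted Theses' denied access first.
#   policy != 'ANY': the first denying collection short-circuits the loop
#                    and its (auth_code, auth_msg) is returned.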
class IndexStemmingDataCacher(DataCacher):
"""
Provides cache for stemming information for word/phrase indexes.
This class is not to be used directly; use function
get_index_stemming_language() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
index_stemming_cache.is_ok_p
except Exception:
index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
"""Return stemming langugage for given index."""
if recreate_cache_if_needed:
index_stemming_cache.recreate_cache_if_needed()
return index_stemming_cache.cache[index_id]
class CollectionRecListDataCacher(DataCacher):
"""
Provides cache for collection reclist hitsets. This class is not
to be used directly; use function get_collection_reclist() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT name,reclist FROM collection")
except Exception:
# database problems, return empty cache
return {}
for name, reclist in res:
ret[name] = None # this will be filled later during runtime by calling get_collection_reclist(coll)
return ret
def timestamp_verifier():
return get_table_update_time('collection')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_reclist_cache.is_ok_p:
raise Exception
except Exception:
collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
"""Return hitset of recIDs that belong to the collection 'coll'."""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
    if not collection_reclist_cache.cache[coll]:
        # not yet in the cache, so calculate it and fill the cache:
        reclist = intbitset()
        query = "SELECT nbrecs,reclist FROM collection WHERE name=%s"
        res = run_sql(query, (coll, ), 1)
        if res:
            try:
                reclist = intbitset(res[0][1])
            except Exception:
                pass
        collection_reclist_cache.cache[coll] = reclist
    # finally, return reclist:
    return collection_reclist_cache.cache[coll]
def get_available_output_formats(visible_only=False):
"""
Return the list of available output formats. When visible_only is
True, returns only those output formats that have visibility flag
set to 1.
"""
formats = []
query = "SELECT code,name FROM format"
if visible_only:
query += " WHERE visibility='1'"
query += " ORDER BY name ASC"
res = run_sql(query)
if res:
# propose found formats:
for code, name in res:
formats.append({ 'value' : code,
'text' : name
})
else:
formats.append({'value' : 'hb',
'text' : "HTML brief"
})
return formats
class SearchResultsCache(DataCacher):
"""
Provides temporary lazy cache for Search Results.
Useful when users click on `next page'.
"""
def __init__(self):
def cache_filler():
return {}
def timestamp_verifier():
return '1970-01-01 00:00:00' # lazy cache is always okay;
# its filling is governed by
# CFG_WEBSEARCH_SEARCH_CACHE_SIZE
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not search_results_cache.is_ok_p:
raise Exception
except Exception:
search_results_cache = SearchResultsCache()
class CollectionI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N collection names. This class is not to be
used directly; use function get_coll_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
except Exception:
# database problems
return {}
for c, ln, i18nname in res:
if i18nname:
if not ret.has_key(c):
ret[c] = {}
ret[c][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('collectionname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_i18nname_cache.is_ok_p:
raise Exception
except Exception:
collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted collection name (of the name type `ln'
(=long name)) for collection C in language LN.
This function uses collection_i18nname_cache, but it verifies
whether the cache is up-to-date first by default. This
verification step is performed by checking the DB table update
    time. So, if you call this function 1000 times, it can get very
    slow because it will do 1000 table update time verifications, even
    though collection names do not change that often.
Hence the parameter VERIFY_CACHE_TIMESTAMP which, when set to
False, will assume the cache is already up-to-date. This is
useful namely in the generation of collection lists for the search
results page.
"""
if verify_cache_timestamp:
collection_i18nname_cache.recreate_cache_if_needed()
out = c
try:
out = collection_i18nname_cache.cache[c][ln]
except KeyError:
pass # translation in LN does not exist
return out
class FieldI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N field names. This class is not to be used
directly; use function get_field_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
except Exception:
# database problems, return empty cache
return {}
for f, ln, i18nname in res:
if i18nname:
if not ret.has_key(f):
ret[f] = {}
ret[f][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('fieldname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not field_i18nname_cache.is_ok_p:
raise Exception
except Exception:
field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted field name (of type 'ln', 'long name') for
field F in language LN.
If VERIFY_CACHE_TIMESTAMP is set to True, then verify DB timestamp
and field I18N name cache timestamp and refresh cache from the DB
if needed. Otherwise don't bother checking DB timestamp and
return the cached value. (This is useful when get_field_i18nname
is called inside a loop.)
"""
if verify_cache_timestamp:
field_i18nname_cache.recreate_cache_if_needed()
out = f
try:
out = field_i18nname_cache.cache[f][ln]
except KeyError:
pass # translation in LN does not exist
return out
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
out = []
res = run_sql("SELECT id,name FROM collection ORDER BY name ASC")
for c_id, c_name in res:
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c_name, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
out.append([c_name, c_printable])
return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
colls_nicely_ordered = []
res = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score DESC""", (collid, ))
for c, cid in res:
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
colls_nicely_ordered.append([c, c_printable])
colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln)
return colls_nicely_ordered
def get_index_id_from_field(field):
"""
Return index id with name corresponding to FIELD, or the first
index id where the logical field code named FIELD is indexed.
Return zero in case there is no index defined for this field.
Example: field='author', output=4.
"""
out = 0
if not field:
field = 'global' # empty string field means 'global' index (field 'anyfield')
# first look in the index table:
res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
if res:
out = res[0][0]
return out
# not found in the index table, now look in the field table:
res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
LIMIT 1""", (field,))
if res:
out = res[0][0]
return out
def get_words_from_pattern(pattern):
"Returns list of whitespace-separated words from pattern."
words = {}
for word in string.split(pattern):
if not words.has_key(word):
words[word] = 1
return words.keys()
def create_basic_search_units(req, p, f, m=None, of='hb'):
"""Splits search pattern and search field into a list of independently searchable units.
- A search unit consists of '(operator, pattern, field, type, hitset)' tuples where
'operator' is set union (|), set intersection (+) or set exclusion (-);
'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
'field' is either a code like 'title' or MARC tag like '100__a';
'type' is the search type ('w' for word file search, 'a' for access file search).
- Optionally, the function accepts the match type argument 'm'.
If it is set (e.g. from advanced search interface), then it
performs this kind of matching. If it is not set, then a guess is made.
'm' can have values: 'a'='all of the words', 'o'='any of the words',
'p'='phrase/substring', 'r'='regular expression',
'e'='exact value'.
- Warnings are printed on req (when not None) in case of HTML output formats."""
opfts = [] # will hold (o,p,f,t,h) units
# FIXME: quick hack for the journal index
if f == 'journal':
opfts.append(['+', p, f, 'w'])
return opfts
## check arguments: is desired matching type set?
if m:
## A - matching type is known; good!
if m == 'e':
# A1 - exact value:
opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
elif m == 'p':
# A2 - phrase/substring:
opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
elif m == 'r':
# A3 - regular expression:
opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
elif m == 'a' or m == 'w':
# A4 - all of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
opfts.append(['+', word, f, 'w']) # '+' in all units
elif m == 'o':
# A5 - any of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
if len(opfts)==0:
opfts.append(['+', word, f, 'w']) # '+' in the first unit
else:
opfts.append(['|', word, f, 'w']) # '|' in further units
else:
if of.startswith("h"):
print_warning(req, "Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning")
opfts.append(['+', "%" + p + "%", f, 'w'])
else:
## B - matching type is not known: let us try to determine it by some heuristics
if f and p[0] == '"' and p[-1] == '"':
## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
opfts.append(['+', p[1:-1], f, 'a'])
elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor') and author_name_requires_phrase_search(p):
## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
## => doing washed ACC search
opfts.append(['+', p, f, 'a'])
elif f and p[0] == "'" and p[-1] == "'":
## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
elif f and p[0] == "/" and p[-1] == "/":
## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
opfts.append(['+', p[1:-1], f, 'r'])
elif f and string.find(p, ',') >= 0:
## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
opfts.append(['+', p, f, 'a'])
elif f and str(f[0:2]).isdigit():
## B2 - does 'f' exist and starts by two digits? => doing ACC search
opfts.append(['+', p, f, 'a'])
else:
## B3 - doing WRD search, but maybe ACC too
# search units are separated by spaces unless the space is within single or double quotes
# so, let us replace temporarily any space within quotes by '__SPACE__'
p = re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", p)
# and spaces after colon as well:
p = re_pattern_spaces_after_colon.sub(lambda x: string.replace(x.group(1), ' ', '__SPACE__'), p)
# wash argument:
p = re_equal.sub(":", p)
p = re_logical_and.sub(" ", p)
p = re_logical_or.sub(" |", p)
p = re_logical_not.sub(" -", p)
p = re_operators.sub(r' \1', p)
for pi in string.split(p): # iterate through separated units (or items, as "pi" stands for "p item")
pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
# firstly, determine set operator
if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
oi = pi[0]
pi = pi[1:]
else:
# okay, there is no operator, so let us decide what to do by default
oi = '+' # by default we are doing set intersection...
# secondly, determine search pattern and field:
if string.find(pi, ":") > 0:
fi, pi = string.split(pi, ":", 1)
fi = wash_field(fi)
# test whether fi is a real index code or a MARC-tag defined code:
if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
pass
else:
# it is not, so join it back:
fi, pi = f, fi + ":" + pi
else:
fi, pi = f, pi
# wash 'fi' argument:
fi = wash_field(fi)
# wash 'pi' argument:
pi = pi.strip() # strip eventual spaces
if re_quotes.match(pi):
# B3a - quotes are found => do ACC search (phrase search)
if pi[0] == '"' and pi[-1] == '"':
pi = string.replace(pi, '"', '') # remove quote signs
opfts.append([oi, pi, fi, 'a'])
elif pi[0] == "'" and pi[-1] == "'":
pi = string.replace(pi, "'", "") # remove quote signs
opfts.append([oi, "%" + pi + "%", fi, 'a'])
else: # unbalanced quotes, so fall back to WRD query:
opfts.append([oi, pi, fi, 'w'])
elif pi.startswith('/') and pi.endswith('/'):
# B3b - pi has slashes around => do regexp search
opfts.append([oi, pi[1:-1], fi, 'r'])
elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
# B3c - fi exists and starts by two digits => do ACC search
opfts.append([oi, pi, fi, 'a'])
elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
# B3d - logical field fi exists but there is no WRD index for fi => try ACC search
opfts.append([oi, pi, fi, 'a'])
else:
# B3e - general case => do WRD search
pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
for pii in get_words_from_pattern(pi):
opfts.append([oi, pii, fi, 'w'])
## sanity check:
for i in range(0, len(opfts)):
try:
pi = opfts[i][1]
if pi == '*':
if of.startswith("h"):
print_warning(req, "Ignoring standalone wildcard word.", "Warning")
del opfts[i]
if pi == '' or pi == ' ':
fi = opfts[i][2]
if fi:
if of.startswith("h"):
print_warning(req, "Ignoring empty <em>%s</em> search term." % fi, "Warning")
del opfts[i]
except:
pass
## replace old logical field names if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
opfts = [[o,p,wash_field(f),t] for o,p,f,t in opfts]
## return search units:
return opfts
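# Illustrative sketch (not part of the original code): how
# create_basic_search_units() is expected to decompose queries, based on the
# heuristics above. Exact results depend on the configured indexes and on
# CFG_WEBSEARCH_FIELDS_CONVERT, so the values below are indicative only:
#
#   create_basic_search_units(None, 'muon decay', 'title')
#     -> [['+', 'muon', 'title', 'w'], ['+', 'decay', 'title', 'w']]
#   create_basic_search_units(None, 'author:"Ellis, J" -muon', '')
#     -> [['+', 'Ellis, J', 'author', 'a'], ['-', 'muon', '', 'w']]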
def page_start(req, of, cc, aas, ln, uid, title_message=None,
description='', keywords='', recID=-1, tab='', p=''):
"""
Start page according to given output format.
@param title_message: title of the page, not escaped for HTML
@param description: description of the page, not escaped for HTML
@param keywords: keywords of the page, not escaped for HTML
"""
_ = gettext_set_language(ln)
if not req or isinstance(req, cStringIO.OutputType):
return # we were called from CLI
if not title_message:
title_message = _("Search Results")
content_type = get_output_format_content_type(of)
if of.startswith('x'):
if of == 'xr':
# we are doing RSS output
req.content_type = "application/rss+xml"
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
else:
# we are doing XML output:
req.content_type = "text/xml"
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
elif of.startswith('t') or str(of[0:3]).isdigit():
# we are doing plain text output:
req.content_type = "text/plain"
req.send_http_header()
elif of == "id":
pass # nothing to do, we shall only return list of recIDs
elif content_type == 'text/html':
# we are doing HTML output:
req.content_type = "text/html"
req.send_http_header()
if not description:
description = "%s %s." % (cc, _("Search Results"))
if not keywords:
keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
## generate RSS URL:
argd = {}
if req.args:
argd = cgi.parse_qs(req.args)
rssurl = websearch_templates.build_rss_url(argd)
## add MathJax if displaying single records (FIXME: find
## eventual better place to this code)
if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
metaheaderadd = get_mathjax_header(req.is_https())
else:
metaheaderadd = ''
## generate navtrail:
navtrail = create_navtrail_links(cc, aas, ln)
if navtrail != '':
navtrail += ' > '
if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
recID != -1:
# If we are not in information tab in HD format, customize
# the nav. trail to have a link back to main record. (Due
# to the way perform_request_search() works, hb
# (lowercase) is equal to hd)
navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
(CFG_SITE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
if (of != '' or of.lower() != 'hd') and of != 'hb':
# Export
format_name = of
query = "SELECT name FROM format WHERE code=%s"
res = run_sql(query, (of,))
if res:
format_name = res[0][0]
navtrail += ' > ' + format_name
else:
# Discussion, citations, etc. tabs
tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
navtrail += ' > ' + _(tab_label)
else:
navtrail += cgi.escape(title_message)
if p:
# we are serving search/browse results pages, so insert pattern:
navtrail += ": " + cgi.escape(p)
title_message = p + " - " + title_message
body_css_classes = []
if cc:
        # we know the collection, let's allow page styles based on cc
        # collection names may not satisfy rules for css classes which
        # are something like: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
        # however it isn't clear what we should do about cases with
        # numbers, so we leave them to fail. Everything else becomes "_"
css = nmtoken_from_string(cc).replace('.','_').replace('-','_').replace(':','_')
body_css_classes.append(css)
## finally, print page header:
req.write(pageheaderonly(req=req, title=title_message,
navtrail=navtrail,
description=description,
keywords=keywords,
metaheaderadd=metaheaderadd,
uid=uid,
language=ln,
navmenuid='search',
navtrail_append_title_p=0,
rssurl=rssurl,
body_css_classes=body_css_classes))
req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
#else:
# req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG):
"End page according to given output format: e.g. close XML tags, add HTML footer, etc."
if of == "id":
return [] # empty recID list
if not req:
return # we were called from CLI
if of.startswith('h'):
req.write(websearch_templates.tmpl_search_pageend(ln = ln)) # pagebody end
req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
return
def create_page_title_search_pattern_info(p, p1, p2, p3):
"""Create the search pattern bit for the page <title> web page
HTML header. Basically combine p and (p1,p2,p3) together so that
the page header may be filled whether we are in the Simple Search
or Advanced Search interface contexts."""
out = ""
if p:
out = p
else:
out = p1
if p2:
out += ' ' + p2
if p3:
out += ' ' + p3
return out
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
"Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options."
_ = gettext_set_language(ln)
box = ""
# day
box += """<select name="%sd">""" % name
box += """<option value="">%s""" % _("any day")
for day in range(1, 32):
box += """<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day)
box += """</select>"""
# month
box += """<select name="%sm">""" % name
box += """<option value="">%s""" % _("any month")
# trailing space in May distinguishes short/long form of the month name
for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")), \
(5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")), \
(9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
box += """<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip())
box += """</select>"""
# year
box += """<select name="%sy">""" % name
box += """<option value="">%s""" % _("any year")
this_year = int(time.strftime("%Y", time.localtime()))
for year in range(this_year-20, this_year+1):
box += """<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year)
box += """</select>"""
return box
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
action=""):
"""Create search box for 'search again in the results page' functionality."""
# load the right message language
_ = gettext_set_language(ln)
# some computations
cc_intl = get_coll_i18nname(cc, ln, False)
cc_colID = get_colID(cc)
colls_nicely_ordered = []
if cfg_nicely_ordered_collection_list:
colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
else:
colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
colls_nice = []
for (cx, cx_printable) in colls_nicely_ordered:
if not cx.startswith("Unnamed collection"):
colls_nice.append({ 'value' : cx,
'text' : cx_printable
})
coll_selects = []
if colls and colls[0] != CFG_SITE_NAME:
# some collections are defined, so print these first, and only then print 'add another collection' heading:
for c in colls:
if c:
temp = []
temp.append({ 'value' : CFG_SITE_NAME,
'text' : '*** %s ***' % _("any public collection")
})
# this field is used to remove the current collection from the ones to be searched.
temp.append({ 'value' : '',
'text' : '*** %s ***' % _("remove this collection")
})
for val in colls_nice:
# print collection:
if not cx.startswith("Unnamed collection"):
temp.append({ 'value' : val['value'],
'text' : val['text'],
'selected' : (c == re.sub("^[\s\-]*","", val['value']))
})
coll_selects.append(temp)
coll_selects.append([{ 'value' : '',
'text' : '*** %s ***' % _("add another collection")
}] + colls_nice)
else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
coll_selects.append([{ 'value' : CFG_SITE_NAME,
'text' : '*** %s ***' % _("any public collection")
}] + colls_nice)
## ranking methods
ranks = [{
'value' : '',
'text' : "- %s %s -" % (_("OR").lower (), _("rank by")),
}]
for (code, name) in get_bibrank_methods(cc_colID, ln):
# propose found rank methods:
ranks.append({
'value' : code,
'text' : name,
})
formats = get_available_output_formats(visible_only=True)
# show collections in the search box? (not if there is only one
# collection defined, and not if we are in light search)
show_colls = True
show_title = True
if len(collection_reclist_cache.cache.keys()) == 1 or \
aas == -1:
show_colls = False
show_title = False
if cc == CFG_SITE_NAME:
show_title = False
if CFG_INSPIRE_SITE:
show_title = False
return websearch_templates.tmpl_search_box(
ln = ln,
aas = aas,
cc_intl = cc_intl,
cc = cc,
ot = ot,
sp = sp,
action = action,
fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
p1 = p1,
p2 = p2,
p3 = p3,
op1 = op1,
op2 = op2,
rm = rm,
p = p,
f = f,
coll_selects = coll_selects,
d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
dt = dt,
sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
sf = sf,
so = so,
ranks = ranks,
sc = sc,
rg = rg,
formats = formats,
of = of,
pl = pl,
jrec = jrec,
ec = ec,
show_colls = show_colls,
show_title = show_title,
)
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
"""Creates navigation trail links, i.e. links to collection
ancestors (except Home collection). If aas==1, then links to
Advanced Search interfaces; otherwise Simple Search.
"""
dads = []
for dad in get_coll_ancestors(cc):
if dad != CFG_SITE_NAME: # exclude Home collection
dads.append ((dad, get_coll_i18nname(dad, ln, False)))
if self_p and cc != CFG_SITE_NAME:
dads.append((cc, get_coll_i18nname(cc, ln, False)))
return websearch_templates.tmpl_navtrail_links(
aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'search within' selection box for the collection ID colID."""
res = None
if colID:
res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
fields = [{
'value' : '',
'text' : get_field_i18nname("any field", ln, False)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({ 'value' : field_code,
'text' : get_field_i18nname(field_name, ln, False)
})
return fields
def get_sortby_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'sort by' selection box for the collection ID colID."""
_ = gettext_set_language(ln)
res = None
if colID:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
# no sort fields defined for this colID, try to take Home collection:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (1,))
if not res:
# no sort fields defined for the Home collection, take all sort fields defined wherever they are:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""",)
fields = [{
'value' : '',
'text' : _("latest first")
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({ 'value' : field_code,
'text' : get_field_i18nname(field_name, ln, False)
})
return fields
def create_andornot_box(name='op', value='', ln='en'):
"Returns HTML code for the AND/OR/NOT selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="n"%s>%s
</select>
""" % (name,
is_selected('a', value), _("AND"),
is_selected('o', value), _("OR"),
is_selected('n', value), _("AND NOT"))
return out
def create_matchtype_box(name='m', value='', ln='en'):
"Returns HTML code for the 'match type' selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="e"%s>%s
<option value="p"%s>%s
<option value="r"%s>%s
</select>
""" % (name,
is_selected('a', value), _("All of the words:"),
is_selected('o', value), _("Any of the words:"),
is_selected('e', value), _("Exact phrase:"),
is_selected('p', value), _("Partial phrase:"),
is_selected('r', value), _("Regular expression:"))
return out
def is_selected(var, fld):
"Checks if the two are equal, and if yes, returns ' selected'. Useful for select boxes."
if type(var) is int and type(fld) is int:
if var == fld:
return " selected"
elif str(var) == str(fld):
return " selected"
elif fld and len(fld)==3 and fld[0] == "w" and var == fld[1:]:
return " selected"
return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
"""Wash collection list by checking whether user has deselected
anything under 'Narrow search'. Checks also if cc is a list or not.
    Return the tuple (cc, colls_to_display, colls_to_search, hosted_colls,
    debug), since the list of collections to display is different from
    the list of collections to search in.
This is because users might have chosen 'split by collection'
functionality.
The behaviour of "collections to display" depends solely whether
user has deselected a particular collection: e.g. if it started
from 'Articles and Preprints' page, and deselected 'Preprints',
then collection to display is 'Articles'. If he did not deselect
anything, then collection to display is 'Articles & Preprints'.
The behaviour of "collections to search in" depends on the
'split_colls' parameter:
          * if it is equal to 1, then we can wash the colls list down
            and search solely in the collection the user started from;
          * if it is equal to 0, then we are splitting to the first level
            of collections, i.e. collections as they appear on the page
            we started to search from;
The function raises exception
InvenioWebSearchUnknownCollectionError
if cc or one of c collections is not known.
"""
colls_out = []
colls_out_for_display = []
# list to hold the hosted collections to be searched and displayed
hosted_colls_out = []
debug = ""
if verbose:
debug += "<br />"
debug += "<br />1) --- initial parameters ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# check what type is 'cc':
if type(cc) is list:
for ci in cc:
if collection_reclist_cache.cache.has_key(ci):
# yes this collection is real, so use it:
cc = ci
break
else:
# check once if cc is real:
if not collection_reclist_cache.cache.has_key(cc):
if cc:
raise InvenioWebSearchUnknownCollectionError(cc)
else:
cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
# check type of 'c' argument:
if type(c) is list:
colls = c
else:
colls = [c]
if verbose:
debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# remove all 'unreal' collections:
colls_real = []
for coll in colls:
if collection_reclist_cache.cache.has_key(coll):
colls_real.append(coll)
else:
if coll:
raise InvenioWebSearchUnknownCollectionError(coll)
colls = colls_real
if verbose:
debug += "<br />3) --- keeping only the real colls of c ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# check if some real collections remain:
if len(colls)==0:
colls = [cc]
if verbose:
debug += "<br />4) --- in case no colls were left we use cc directly ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
res = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'""", (cc,))
# list that holds all the non restricted sons of cc that are also not hosted collections
l_cc_nonrestricted_sons_and_nonhosted_colls = []
res_hosted = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'
AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
for row_hosted in res_hosted:
l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
l_cc_nonrestricted_sons = []
l_c = colls[:]
for row in res:
if not collection_restricted_p(row[0]):
l_cc_nonrestricted_sons.append(row[0])
l_c.sort()
l_cc_nonrestricted_sons.sort()
if l_cc_nonrestricted_sons == l_c:
colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
    # the following elif is a hack that preserves the above functionality when we start searching from
# the frontpage with some hosted collections deselected (either by default or manually)
elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
colls_out_for_display = colls
split_colls = 0
else:
colls_out_for_display = colls # nope, we need to display all 'colls' successively
# remove duplicates:
#colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
#colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
#colls_out_for_display = list(set(colls_out_for_display))
#remove duplicates while preserving the order
set_out = set()
colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# FIXME: The below quoted part of the code has been commented out
# because it prevents searching in individual restricted daughter
# collections when both parent and all its public daughter
# collections were asked for, in addition to some restricted
# daughter collections. The removal was introduced for hosted
# collections, so we may want to double check in this context.
# the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
# list to hold the collections to be removed
#colls_to_be_removed = []
# first calculate the collections that can safely be removed
#for coll in colls_out_for_display:
# for ancestor in get_coll_ancestors(coll):
# #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
# if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
# secondly remove the collections
#for coll in colls_to_be_removed:
# colls_out_for_display.remove(coll)
if verbose:
debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# calculate the hosted collections to be searched.
if colls_out_for_display == [cc]:
if is_hosted_collection(cc):
hosted_colls_out.append(cc)
else:
for coll in get_coll_sons(cc):
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
else:
for coll in colls_out_for_display:
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
if verbose:
debug += "<br />7) --- calculate the hosted_colls_out ---"
debug += "<br />hosted_colls_out : %s" % hosted_colls_out
debug += "<br />"
# second, let us decide on collection splitting:
if split_colls == 0:
# type A - no sons are wanted
colls_out = colls_out_for_display
else:
# type B - sons (first-level descendants) are wanted
for coll in colls_out_for_display:
coll_sons = get_coll_sons(coll)
if coll_sons == []:
colls_out.append(coll)
else:
for coll_son in coll_sons:
if not is_hosted_collection(coll_son):
colls_out.append(coll_son)
#else:
# colls_out = colls_out + coll_sons
# remove duplicates:
#colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
#colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
#colls_out = list(set(colls_out))
#remove duplicates while preserving the order
set_out = set()
colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
debug += "<br />colls_out : %s" % colls_out
debug += "<br />"
# remove the hosted collections from the collections to be searched
if hosted_colls_out:
for coll in hosted_colls_out:
try:
colls_out.remove(coll)
except ValueError:
# in case coll was not found in colls_out
pass
if verbose:
debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
debug += "<br />colls_out : %s" % colls_out
return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
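# Illustrative sketch (not part of the original code): expected shape of the
# wash_colls() return value for a hypothetical 'Articles & Preprints'
# collection whose public sons are 'Articles' and 'Preprints', with no
# hosted collections and nothing deselected:
#
#   wash_colls('Articles & Preprints', [], split_colls=0)
#     -> ('Articles & Preprints', ['Articles & Preprints'],
#         ['Articles & Preprints'], [], '')
#   wash_colls('Articles & Preprints', [], split_colls=1)
#     -> ('Articles & Preprints', ['Articles & Preprints'],
#         ['Articles', 'Preprints'], [], '')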
def wash_index_term(term, max_char_length=50, lower_term=True):
"""
Return washed form of the index term TERM that would be suitable
for storing into idxWORD* tables. I.e., lower the TERM if
LOWER_TERM is True, and truncate it safely to MAX_CHAR_LENGTH
UTF-8 characters (meaning, in principle, 4*MAX_CHAR_LENGTH bytes).
The function works by an internal conversion of TERM, when needed,
from its input Python UTF-8 binary string format into Python
Unicode format, and then truncating it safely to the given number
of UTF-8 characters, without possible mis-truncation in the middle
    of a multi-byte UTF-8 character that could otherwise happen if we
    had been working with the UTF-8 binary representation directly.
Note that MAX_CHAR_LENGTH corresponds to the length of the term
column in idxINDEX* tables.
"""
if lower_term:
washed_term = unicode(term, 'utf-8').lower()
else:
washed_term = unicode(term, 'utf-8')
if len(washed_term) <= max_char_length:
# no need to truncate the term, because it will fit
# nicely even if it uses four-byte UTF-8 characters
return washed_term.encode('utf-8')
else:
# truncate the term in a safe position:
return washed_term[:max_char_length].encode('utf-8')
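# Illustrative sketch (not part of the original code): wash_index_term()
# truncates on UTF-8 character boundaries rather than on raw bytes, so an
# accented term longer than MAX_CHAR_LENGTH is cut to 50 characters without
# splitting a multi-byte character:
#
#   wash_index_term('Schrödinger' * 10)        # 110 characters
#     -> the first 50 characters, lower-cased, re-encoded as UTF-8
#   wash_index_term('CERN', lower_term=False)  -> 'CERN'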
def lower_index_term(term):
"""
Return safely lowered index term TERM. This is done by converting
to UTF-8 first, because standard Python lower() function is not
UTF-8 safe. To be called by both the search engine and the
indexer when appropriate (e.g. before stemming).
In case of problems with UTF-8 compliance, this function raises
UnicodeDecodeError, so the client code may want to catch it.
"""
return unicode(term, 'utf-8').lower().encode('utf-8')
def get_synonym_terms(term, kbr_name, match_type):
"""
Return list of synonyms for TERM by looking in KBR_NAME in
MATCH_TYPE style.
@param term: search-time term or index-time term
@type term: str
@param kbr_name: knowledge base name
@type kbr_name: str
@param match_type: specifies how the term matches against the KBR
before doing the lookup. Could be `exact' (default),
'leading_to_comma', `leading_to_number'.
@type match_type: str
@return: list of term synonyms
@rtype: list of strings
"""
dterms = {}
## exact match is default:
term_for_lookup = term
term_remainder = ''
## but maybe match different term:
if match_type == 'leading_to_comma':
mmm = re.match(r'^(.*?)(\s*,.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
elif match_type == 'leading_to_number':
mmm = re.match(r'^(.*?)(\s*\d.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
## FIXME: workaround: escaping SQL wild-card signs, since KBR's
## exact search is doing LIKE query, so would match everything:
term_for_lookup = term_for_lookup.replace('%', '\%')
## OK, now find synonyms:
for kbr_values in get_kbr_values(kbr_name,
searchkey=term_for_lookup,
searchtype='e'):
for kbr_value in kbr_values:
dterms[kbr_value + term_remainder] = 1
## return list of term synonyms:
return dterms.keys()
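# Illustrative sketch (not part of the original code): how the match_type
# argument of get_synonym_terms() splits the lookup term, assuming a
# hypothetical knowledge base 'INDEX-SYNONYM-TITLE':
#
#   get_synonym_terms('beta', 'INDEX-SYNONYM-TITLE', 'exact')
#     -> the whole term 'beta' is looked up in the KB
#   get_synonym_terms('beta decay, rare', 'INDEX-SYNONYM-TITLE',
#                     'leading_to_comma')
#     -> only 'beta decay' is looked up; ', rare' is re-appended to every
#        synonym that is found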
def wash_output_format(format):
"""Wash output format FORMAT. Currently only prevents input like
'of=9' for backwards-compatible format that prints certain fields
only. (for this task, 'of=tm' is preferred)"""
if str(format[0:3]).isdigit() and len(format) != 6:
# asked to print MARC tags, but not enough digits,
# so let's switch back to HTML brief default
return 'hb'
else:
return format
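# Illustrative sketch (not part of the original code): wash_output_format()
# only rejects "print these MARC tags" formats that are too short to be a
# six-character tag specification; everything else passes through unchanged:
#
#   wash_output_format('hx')     -> 'hx'
#   wash_output_format('902')    -> 'hb'      (3 digits, not 6 -> fall back)
#   wash_output_format('909C4u') -> '909C4u'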
def wash_pattern(p):
"""Wash pattern passed by URL. Check for sanity of the wildcard by
removing wildcards if they are appended to extremely short words
    (1-3 letters). TODO: instead of this approximate treatment, it
    would be much better to introduce a time limit, e.g. to kill a
    query if it does not finish in 10 seconds."""
# strip accents:
# p = strip_accents(p) # FIXME: when available, strip accents all the time
# add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
p = " " + p + " "
# replace spaces within quotes by __SPACE__ temporarily:
p = re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", p)
# get rid of unquoted wildcards after spaces:
p = re_pattern_wildcards_after_spaces.sub("\\1", p)
# get rid of extremely short words (1-3 letters with wildcards):
#p = re_pattern_short_words.sub("\\1", p)
# replace back __SPACE__ by spaces:
p = re_pattern_space.sub(" ", p)
# replace special terms:
p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
# remove unnecessary whitespace:
p = string.strip(p)
# remove potentially wrong UTF-8 characters:
p = wash_for_utf8(p)
return p
def wash_field(f):
"""Wash field passed by URL."""
if f:
# get rid of unnecessary whitespace and make it lowercase
# (e.g. Author -> author) to better suit iPhone etc input
# mode:
f = f.strip().lower()
# wash legacy 'f' field names, e.g. replace 'wau' or `au' by
# 'author', if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
f = CFG_WEBSEARCH_FIELDS_CONVERT.get(f, f)
return f
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
"""
Take user-submitted date arguments D1 (full datetime string) or
    (D1Y, D1M, D1D) year, month, day tuple and D2 or (D2Y, D2M, D2D)
    and return (datetext1, datetext2) datetime strings in the
    YYYY-MM-DD HH:MM:SS format suitable for time-restricted searching.
Note that when both D1 and (D1Y, D1M, D1D) parameters are present,
the precedence goes to D1. Ditto for D2*.
Note that when (D1Y, D1M, D1D) are taken into account, some values
may be missing and are completed e.g. to 01 or 12 according to
whether it is the starting or the ending date.
"""
datetext1, datetext2 = "", ""
# sanity checking:
if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
return ("", "") # nothing selected, so return empty values
# wash first (starting) date:
if d1:
# full datetime string takes precedence:
datetext1 = d1
else:
# okay, first date passed as (year,month,day):
if d1y:
datetext1 += "%04d" % d1y
else:
datetext1 += "0000"
if d1m:
datetext1 += "-%02d" % d1m
else:
datetext1 += "-01"
if d1d:
datetext1 += "-%02d" % d1d
else:
datetext1 += "-01"
datetext1 += " 00:00:00"
# wash second (ending) date:
if d2:
# full datetime string takes precedence:
datetext2 = d2
else:
# okay, second date passed as (year,month,day):
if d2y:
datetext2 += "%04d" % d2y
else:
datetext2 += "9999"
if d2m:
datetext2 += "-%02d" % d2m
else:
datetext2 += "-12"
if d2d:
datetext2 += "-%02d" % d2d
else:
datetext2 += "-31" # NOTE: perhaps we should add max(datenumber) in
                               #       given month, but for our querying it's not
# needed, 31 will always do
datetext2 += " 00:00:00"
# okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
return (datetext1, datetext2)
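# Illustrative sketch (not part of the original code): how wash_dates()
# completes partially specified dates.  Missing parts default to the
# earliest value for the starting date and to the latest value for the
# ending date:
#
#   wash_dates(d1y=2004, d1m=3, d2y=2004)
#     -> ('2004-03-01 00:00:00', '2004-12-31 00:00:00')
#   wash_dates(d1="2004-03-10 12:00:00")
#     -> ('2004-03-10 12:00:00', '9999-12-31 00:00:00')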
def is_hosted_collection(coll):
"""Check if the given collection is a hosted one; i.e. its dbquery starts with hostedcollection:
Returns True if it is, False if it's not or if the result is empty or if the query failed"""
res = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
try:
return res[0][0].startswith("hostedcollection:")
except:
return False
def get_colID(c):
"Return collection ID for collection name C. Return None if no match found."
colID = None
res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
if res:
colID = res[0][0]
return colID
def get_coll_normalised_name(c):
"""Returns normalised collection name (case sensitive) for collection name
C (case insensitive).
Returns None if no match found."""
try:
return run_sql("SELECT name FROM collection WHERE name=%s", (c,))[0][0]
except:
return None
def get_coll_ancestors(coll):
"Returns a list of ancestors for collection 'coll'."
coll_ancestors = []
coll_ancestor = coll
while 1:
res = run_sql("""SELECT c.name FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
(coll_ancestor,))
if res:
coll_name = res[0][0]
coll_ancestors.append(coll_name)
coll_ancestor = coll_name
else:
break
# ancestors found, return reversed list:
coll_ancestors.reverse()
return coll_ancestors
def get_coll_sons(coll, type='r', public_only=1):
"""Return a list of sons (first-level descendants) of type 'type' for collection 'coll'.
If public_only, then return only non-restricted son collections.
"""
coll_sons = []
query = "SELECT c.name FROM collection AS c "\
"LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
"LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
"WHERE cc.type=%s AND ccc.name=%s"
query += " ORDER BY cc.score DESC"
res = run_sql(query, (type, coll))
for name in res:
if not public_only or not collection_restricted_p(name[0]):
coll_sons.append(name[0])
return coll_sons
def get_coll_real_descendants(coll, type='_', get_hosted_colls=True):
"""Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'.
IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided
that "A & B" has no associated database query defined.
"""
coll_sons = []
res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score DESC""",
(coll, type,))
for name, dbquery in res:
if dbquery: # this is 'real' collection, so return it:
if get_hosted_colls:
coll_sons.append(name)
else:
if not dbquery.startswith("hostedcollection:"):
coll_sons.append(name)
else: # this is 'composed' collection, so recurse:
coll_sons.extend(get_coll_real_descendants(name))
return coll_sons
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Browse either biliographic phrases or words indexes, and display it."""
# load the right message language
_ = gettext_set_language(ln)
## is p enclosed in quotes? (coming from exact search)
if p.startswith('"') and p.endswith('"'):
p = p[1:-1]
p_orig = p
## okay, "real browse" follows:
## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
## do we search in words indexes?
if not f:
return browse_in_bibwords(req, p, f)
index_id = get_index_id_from_field(f)
if index_id != 0:
coll = intbitset()
for coll_name in colls:
coll |= get_collection_reclist(coll_name)
browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll)
else:
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
while not browsed_phrases:
# try again and again with shorter and shorter pattern:
try:
p = p[:-1]
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
except:
# probably there are no hits at all:
req.write(_("No values found."))
return
## try to check hits in these particular collection selection:
browsed_phrases_in_colls = []
if 0:
for phrase in browsed_phrases:
phrase_hitset = intbitset()
phrase_hitsets = search_pattern("", phrase, f, 'e')
for coll in colls:
phrase_hitset.union_update(phrase_hitsets[coll])
if len(phrase_hitset) > 0:
# okay, this phrase has some hits in colls, so add it:
browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
## were there hits in collections?
if browsed_phrases_in_colls == []:
if browsed_phrases != []:
#print_warning(req, """<p>No match close to <em>%s</em> found in given collections.
#Please try different term.<p>Displaying matches in any collection...""" % p_orig)
## try to get nbhits for these phrases in any collection:
for phrase in browsed_phrases:
browsed_phrases_in_colls.append([phrase, get_nbhits_in_bibxxx(phrase, f)])
## display results now:
out = websearch_templates.tmpl_browse_pattern(
f=f,
fn=get_field_i18nname(get_field_name(f) or f, ln, False),
ln=ln,
browsed_phrases_in_colls=browsed_phrases_in_colls,
colls=colls,
rg=rg,
)
req.write(out)
return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
"""Browse inside words indexes."""
if not p:
return
_ = gettext_set_language(ln)
urlargd = {}
urlargd.update(req.argd)
urlargd['action'] = 'search'
nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
req.write(websearch_templates.tmpl_search_in_bibwords(
p = p,
f = f,
ln = ln,
nearest_box = nearest_box
))
return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' within field 'f' according to
matching type 'm'. Return hitset of recIDs.
The function uses multi-stage searching algorithm in case of no
exact match found. See the Search Internals document for
detailed description.
    The 'ap' argument governs whether alternative patterns are to
    be used in case there is no direct hit for (p,f,m). For
    example, whether to replace non-alphanumeric characters by
    spaces if it would give some hits. See the Search Internals
    document for a detailed description. (ap=0 forbids the
    alternative pattern usage, ap=1 permits it.)
The 'of' argument governs whether to print or not some
information to the user in case of no match found. (Usually it
prints the information in case of HTML formats, otherwise it's
silent).
The 'verbose' argument controls the level of debugging information
to be printed (0=least, 9=most).
All the parameters are assumed to have been previously washed.
This function is suitable as a mid-level API.
"""
_ = gettext_set_language(ln)
hitset_empty = intbitset()
# sanity check:
if not p:
hitset_full = intbitset(trailing_bits=1)
hitset_full.discard(0)
# no pattern, so return all universe
return hitset_full
# search stage 1: break up arguments into basic search units:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units = create_basic_search_units(req, p, f, m, of)
if verbose and of.startswith("h"):
t2 = os.times()[4]
print_warning(req, "Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)))
print_warning(req, "Search stage 1: execution took %.2f seconds." % (t2 - t1))
# search stage 2: do search for each search unit and verify hit presence:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units_hitsets = []
#prepare hiddenfield-related..
myhiddens = CFG_BIBFORMAT_HIDDEN_TAGS
can_see_hidden = False
if req:
user_info = collect_user_info(req)
can_see_hidden = (acc_authorize_action(user_info, 'runbibedit')[0] == 0)
if can_see_hidden:
myhiddens = []
if CFG_INSPIRE_SITE and of.startswith('h'):
# fulltext/caption search warnings for INSPIRE:
fields_to_be_searched = [f for o,p,f,m in basic_search_units]
if 'fulltext' in fields_to_be_searched:
print_warning(req, _("Warning: full-text search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") % \
{'x_range_from_year': '2006',
'x_range_to_year': '2012'})
elif 'caption' in fields_to_be_searched:
print_warning(req, _("Warning: figure caption search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") % \
{'x_range_from_year': '2008',
'x_range_to_year': '2012'})
for idx_unit in xrange(len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_f and len(bsu_f) < 2:
if of.startswith("h"):
print_warning(req, _("There is no index %s. Searching for %s in all fields." % (bsu_f, bsu_p)))
bsu_f = ''
bsu_m = 'w'
if of.startswith("h") and verbose:
print_warning(req, _('Instead searching %s.' % str([bsu_o, bsu_p, bsu_f, bsu_m])))
try:
basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
except InvenioWebSearchWildcardLimitError, excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
print_warning(req, _("Search term too generic, displaying only partial results..."))
# FIXME: print warning if we use native full-text indexing
if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
print_warning(req, _("No phrase index available for fulltext yet, looking for word combination..."))
#check that the user is allowed to search with this tag
#if he/she tries it
if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
for htag in myhiddens:
ltag = len(htag)
samelenfield = bsu_f[0:ltag]
if samelenfield == htag: #user searches by a hidden tag
#we won't show you anything..
basic_search_unit_hitset = intbitset()
if verbose >= 9 and of.startswith("h"):
print_warning(req, "Pattern %s hitlist omitted since \
it queries in a hidden tag %s" %
(repr(bsu_p), repr(myhiddens)))
display_nearest_terms_box=False #..and stop spying, too.
if verbose >= 9 and of.startswith("h"):
print_warning(req, "Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset))
if len(basic_search_unit_hitset) > 0 or \
ap==0 or \
bsu_o=="|" or \
((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
# stage 2-1: this basic search unit is retained, since
# either the hitset is non-empty, or the approximate
# pattern treatment is switched off, or the search unit
# was joined by an OR operator to preceding/following
# units so we do not require that it exists
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
else: # it is WRD query
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
if verbose and of.startswith('h') and req:
print_warning(req, "Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)))
basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
if len(basic_search_unit_hitset) > 0:
# we retain the new unit instead
if of.startswith('h'):
print_warning(req, _("No exact match found for %(x_query1)s, using %(x_query2)s instead...") % \
{'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"})
basic_search_units[idx_unit][1] = bsu_pn
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
print_warning(req, _("Requested record does not seem to exist."))
else:
print_warning(req, create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln))
return hitset_empty
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
print_warning(req, _("Requested record does not seem to exist."))
else:
print_warning(req, create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln))
return hitset_empty
if verbose and of.startswith("h"):
t2 = os.times()[4]
for idx_unit in range(0, len(basic_search_units)):
print_warning(req, "Search stage 2: basic search unit %s gave %d hits." %
(basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])))
print_warning(req, "Search stage 2: execution took %.2f seconds." % (t2 - t1))
# search stage 3: apply boolean query for each search unit:
if verbose and of.startswith("h"):
t1 = os.times()[4]
# let the initial set be the complete universe:
hitset_in_any_collection = intbitset(trailing_bits=1)
hitset_in_any_collection.discard(0)
for idx_unit in xrange(len(basic_search_units)):
this_unit_operation = basic_search_units[idx_unit][0]
this_unit_hitset = basic_search_units_hitsets[idx_unit]
if this_unit_operation == '+':
hitset_in_any_collection.intersection_update(this_unit_hitset)
elif this_unit_operation == '-':
hitset_in_any_collection.difference_update(this_unit_hitset)
elif this_unit_operation == '|':
hitset_in_any_collection.union_update(this_unit_hitset)
else:
if of.startswith("h"):
print_warning(req, "Invalid set operation %s." % cgi.escape(this_unit_operation), "Error")
if len(hitset_in_any_collection) == 0:
# no hits found, propose alternative boolean query:
if of.startswith('h') and display_nearest_terms_box:
nearestterms = []
for idx_unit in range(0, len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_p.startswith("%") and bsu_p.endswith("%"):
bsu_p = "'" + bsu_p[1:-1] + "'"
bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
# create a similar query, but with the basic search unit only
argd = {}
argd.update(req.argd)
argd['p'] = bsu_p
argd['f'] = bsu_f
nearestterms.append((bsu_p, bsu_nbhits, argd))
text = websearch_templates.tmpl_search_no_boolean_hits(
ln=ln, nearestterms=nearestterms)
print_warning(req, text)
if verbose and of.startswith("h"):
t2 = os.times()[4]
print_warning(req, "Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection))
print_warning(req, "Search stage 3: execution took %.2f seconds." % (t2 - t1))
return hitset_in_any_collection
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' containing parenthesis within field 'f' according to
matching type 'm'. Return hitset of recIDs.
For more details on the parameters see 'search_pattern'
"""
_ = gettext_set_language(ln)
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
spires_syntax_query = False
# if the pattern uses SPIRES search syntax, convert it to Invenio syntax
if spires_syntax_converter.is_applicable(p):
spires_syntax_query = True
p = spires_syntax_converter.convert_query(p)
# sanity check: do not call parenthesised parser for search terms
# like U(1):
if not re_pattern_parens.search(p):
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# Try searching with parentheses
try:
parser = SearchQueryParenthesisedParser()
# get a hitset with all recids
result_hitset = intbitset(trailing_bits=1)
# parse the query. The result is list of [op1, expr1, op2, expr2, ..., opN, exprN]
parsing_result = parser.parse_query(p)
if verbose and of.startswith("h"):
print_warning(req, "Search stage 1: search_pattern_parenthesised() searched %s." % repr(p))
print_warning(req, "Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result))
# go through every pattern
# calculate hitset for it
# combine pattern's hitset with the result using the corresponding operator
for index in xrange(0, len(parsing_result)-1, 2 ):
current_operator = parsing_result[index]
current_pattern = parsing_result[index+1]
if CFG_INSPIRE_SITE and spires_syntax_query:
# set ap=0 to turn off approximate matching when a pattern gives 0 results;
# approximate matching does not work well when sub-results are combined.
# FIXME: The right fix involves collecting statuses for each
# hitset, then showing a nearest terms box exactly once,
# outside this loop.
ap = 0
display_nearest_terms_box=False
# obtain a hitset for the current pattern
current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# combine the current hitset with resulting hitset using the current operator
if current_operator == '+':
result_hitset = result_hitset & current_hitset
elif current_operator == '-':
result_hitset = result_hitset - current_hitset
elif current_operator == '|':
result_hitset = result_hitset | current_hitset
else:
assert False, "Unknown operator in search_pattern_parenthesised()"
return result_hitset
# If searching with parentheses fails, perform the search ignoring parentheses
except SyntaxError:
print_warning(req, _("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."))
# remove the parentheses in the query. The current implementation removes all the parentheses,
# but it could be improved to remove only those that are not inside quotes
p = p.replace('(', ' ')
p = p.replace(')', ' ')
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
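# Illustrative sketch (not part of the original module; the query string below
# is a hypothetical example assuming a populated Invenio installation):
#
#   hits = search_pattern_parenthesised(p='(author:ellis and title:muon) or title:neutrino')
#   # -> the query is split by SearchQueryParenthesisedParser into a list of
#   #    [op1, expr1, op2, expr2, ...]; each sub-expression is resolved via
#   #    search_pattern() and the intbitsets are combined with &, - or |.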
def search_unit(p, f=None, m=None, wl=0):
"""Search for basic search unit defined by pattern 'p' and field
'f' and matching type 'm'. Return hitset of recIDs.
All the parameters are assumed to have been previously washed.
'p' is assumed to be already a ``basic search unit'' so that it
is searched as such and is not broken up in any way. Only
wildcard and span queries are being detected inside 'p'.
If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
one of the indexes that has defined runtime synonym knowledge
base, then look up there and automatically enrich search
results with results for synonyms.
In case the wildcard limit (wl) is greater than 0 and this limit
is reached an InvenioWebSearchWildcardLimitError will be raised.
In case you want to call this function with no limit for the
wildcard queries, wl should be 0.
This function is suitable as a low-level API.
"""
## create empty output results set:
hitset = intbitset()
if not p: # sanity checking
return hitset
## eventually look up runtime synonyms:
hitset_synonyms = intbitset()
if CFG_WEBSEARCH_SYNONYM_KBRS.has_key(f):
for p_synonym in get_synonym_terms(p,
CFG_WEBSEARCH_SYNONYM_KBRS[f][0],
CFG_WEBSEARCH_SYNONYM_KBRS[f][1]):
if p_synonym != p:
hitset_synonyms |= search_unit(p_synonym, f, m, wl)
## look up hits:
if CFG_SOLR_URL and f == 'fulltext':
# redirect to Solr/Lucene
try:
return search_unit_in_solr(p, f, m)
except:
# There were troubles with getting full-text search
# results from Solr. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception(alert_admin=True)
return hitset
if f == 'datecreated':
hitset = search_unit_in_bibrec(p, p, 'c')
elif f == 'datemodified':
hitset = search_unit_in_bibrec(p, p, 'm')
elif f == 'refersto':
# we are doing search by the citation count
hitset = search_unit_refersto(p)
elif f == 'citedby':
# we are doing search by the citation count
hitset = search_unit_citedby(p)
elif m == 'a' or m == 'r':
# we are doing either phrase search or regexp search
if f == 'fulltext':
# FIXME: workaround for not having phrase index yet
return search_pattern(None, p, f, 'w')
index_id = get_index_id_from_field(f)
if index_id != 0:
hitset = search_unit_in_idxphrases(p, f, m, wl)
else:
hitset = search_unit_in_bibxxx(p, f, m, wl)
elif p.startswith("cited:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[6:])
else:
# we are doing bibwords search by default
hitset = search_unit_in_bibwords(p, f, m, wl=wl)
## merge synonym results and return total:
hitset |= hitset_synonyms
return hitset
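# Illustrative usage of search_unit() (hedged sketch, not part of the original
# source; field and pattern values are examples only):
#
#   search_unit('ellis', f='author', m='a')            # phrase search via idxPHRASE
#   search_unit('elli*', f='author')                   # word search with wildcard
#   search_unit('2010-01->2010-12', f='datecreated')   # date span via bibrec
#
# Each call returns an intbitset of recIDs; when a synonym knowledge base is
# configured for the field, hits for the synonyms are OR-ed into the result.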
def search_unit_in_bibwords(word, f, m=None, decompress=zlib.decompress, wl=0):
"""Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs."""
set = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
# if no field is specified, search in the global index.
f = f or 'anyfield'
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
stemming_language = get_index_stemming_language(index_id)
else:
return intbitset() # word index f does not exist
# wash 'word' argument and run query:
if f == 'authorcount' and word.endswith('+'):
# field count query of the form N+ so transform N+ to N->99999:
word = word[:-1] + '->99999'
word = string.replace(word, '*', '%') # we now use '*' as the truncation character
words = string.split(word, "->", 1) # check for span query
if len(words) == 2:
word0 = re_word.sub('', words[0])
word1 = re_word.sub('', words[1])
if stemming_language:
word0 = lower_index_term(word0)
word1 = lower_index_term(word1)
word0 = stem(word0, stemming_language)
word1 = stem(word1, stemming_language)
word0_washed = wash_index_term(word0)
word1_washed = wash_index_term(word1)
if f == 'authorcount':
# field count query; convert to integers in order
# to have numerical behaviour for 'BETWEEN n1 AND n2' query
try:
word0_washed = int(word0_washed)
word1_washed = int(word1_washed)
except ValueError:
pass
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX,
(word0_washed, word1_washed), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
if f == 'journal':
pass # FIXME: quick hack for the journal index
else:
word = re_word.sub('', word)
if stemming_language:
word = lower_index_term(word)
word = stem(word, stemming_language)
if string.find(word, '%') >= 0: # do we have wildcard in the word?
if f == 'journal':
# FIXME: quick hack for the journal index
# FIXME: we can run a sanity check here for all indexes
res = ()
else:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX,
(wash_index_term(word),), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
(wash_index_term(word),))
# fill the result set:
for word, hitlist in res:
hitset_bibwrd = intbitset(hitlist)
# add the results:
if set_used:
set.union_update(hitset_bibwrd)
else:
set = hitset_bibwrd
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(set)
# okay, return result set:
return set
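# Sketch of the span and wildcard handling above (illustrative values only):
#
#   search_unit_in_bibwords('muon*', 'title', wl=50)
#   # -> '*' becomes '%' and the query runs as "term LIKE 'muon%'", limited to
#   #    50 matching terms; if the limit is hit, InvenioWebSearchWildcardLimitError
#   #    is raised carrying the hitset collected so far.
#   search_unit_in_bibwords('2->5', 'authorcount')
#   # -> span query; both bounds are cast to int so that BETWEEN compares
#   #    numerically rather than lexicographically.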
def search_unit_in_idxphrases(p, f, type, wl=0):
"""Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. 'r' for a regexp search)."""
# call word search method in some cases:
if f == 'authorcount':
return search_unit_in_bibwords(p, f, wl=wl)
set = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
# deduce in which idxPHRASE table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return intbitset() # phrase index f does not exist
# detect query type (exact phrase, partial phrase, regexp):
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# special washing for fuzzy author index:
if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
query_params_washed = ()
for query_param in query_params:
query_params_washed += (wash_author_name(query_param),)
query_params = query_params_washed
# perform search:
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons),
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons), query_params)
# fill the result set:
for word, hitlist in res:
hitset_bibphrase = intbitset(hitlist)
# add the results:
if set_used:
set.union_update(hitset_bibphrase)
else:
set = hitset_bibphrase
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(set)
# okay, return result set:
return set
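# Illustrative calls (hedged; they assume a phrase index exists for 'author'):
#
#   search_unit_in_idxphrases('ellis, j', 'author', 'a')
#   # -> exact lookup ("term = %s") after wash_author_name() has been applied.
#   search_unit_in_idxphrases('ellis*', 'author', 'a', wl=100)
#   # -> '*' becomes '%', so a LIKE query is run, subject to the same wildcard
#   #    limit mechanism as the word search above.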
def search_unit_in_bibxxx(p, f, type, wl=0):
"""Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. 'r' for a regexp search)."""
# call word search method in some cases:
if f == 'journal' or f == 'authorcount':
return search_unit_in_bibwords(p, f, wl=wl)
p_orig = p # saving for eventual future 'no match' reporting
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
query_addons = "" # will hold additional SQL code for the query
query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
# wash arguments:
f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
if not tl:
# f index does not exist, nevermind
pass
# okay, start search:
l = [] # will hold list of recID that matched
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# construct and run query:
if t == "001":
if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
# verify that the params are integers (to avoid returning record 123 when searching for 123foo)
try:
query_params = tuple(int(param) for param in query_params)
except ValueError:
return intbitset()
if use_query_limit:
try:
res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params)
else:
query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
(bx, bibx, query_addons)
if len(t) != 6 or t[-1:]=='%':
# wildcard query, or only the beginning of field 't'
# is defined, so add wildcard character:
query += " AND bx.tag LIKE %s"
query_params_and_tag = query_params + (t + '%',)
else:
# exact query for 't':
query += " AND bx.tag=%s"
query_params_and_tag = query_params + (t,)
if use_query_limit:
try:
res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql(query, query_params_and_tag)
# fill the result set:
for id_bibrec in res:
if id_bibrec[0]:
l.append(id_bibrec[0])
# check no of hits found:
nb_hits = len(l)
# okay, return result set:
set = intbitset(l)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(set)
return set
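# Illustrative calls into the MARC tables (hedged; values are examples only):
#
#   search_unit_in_bibxxx('CERN-TH-4036', 'reportnumber', 'a')
#   # -> get_field_tags('reportnumber') yields the MARC tags to scan; for each
#   #    tag NN... the query joins bibNNx with bibrec_bibNNx on the tag value.
#   search_unit_in_bibxxx('123', '001', 'a')
#   # -> the record-ID tag is special-cased and queried directly on bibrec.id,
#   #    after checking that the pattern really is an integer.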
def search_unit_in_solr(p, f=None, m=None):
"""
Query the Solr full-text index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return solr_get_bitset(p, CFG_SOLR_URL)
def search_unit_in_bibrec(datetext1, datetext2, type='c'):
"""
Return hitset of recIDs found that were either created or modified
(according to 'type' arg being 'c' or 'm') from datetext1 until datetext2, inclusive.
Does not pay attention to pattern, collection, anything. Useful
to intersect later on with the 'real' query.
"""
set = intbitset()
if type.startswith("m"):
type = "modification_date"
else:
type = "creation_date" # by default we are searching for creation dates
parts = datetext1.split('->')
if len(parts) > 1 and datetext1 == datetext2:
datetext1 = parts[0]
datetext2 = parts[1]
if datetext1 == datetext2:
res = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (type,),
(datetext1 + '%',))
else:
res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (type, type),
(datetext1, datetext2))
for row in res:
set += row[0]
return set
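# Illustrative date-range lookups (hedged; the dates are examples only):
#
#   search_unit_in_bibrec('2010-05', '2010-05')             # prefix match on one month
#   search_unit_in_bibrec('2010-01-01', '2010-12-31', 'm')  # modification-date interval
#
# The function deliberately ignores patterns and collections; callers intersect
# the returned intbitset with the "real" query afterwards.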
def search_unit_by_times_cited(p):
"""
Return hitset of recIDs found that are cited P times.
Usually P looks like '10->23'.
"""
numstr = '"'+p+'"'
#this is sort of stupid but since we may need to
#get the records that do _not_ have cites, we have to
#know the ids of all records, too
#but this is needed only if p is 0, starts with '0->' or ends with '->0'
allrecs = []
if p == 0 or p == "0" or \
p.startswith("0->") or p.endswith("->0"):
allrecs = intbitset(run_sql("SELECT id FROM bibrec"))
return get_records_with_num_cites(numstr, allrecs)
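# Illustrative citation-count queries (hedged; values are examples only):
#
#   search_unit_by_times_cited('10->23')   # records cited between 10 and 23 times
#   search_unit_by_times_cited('0')        # needs the set of all recIDs as well,
#                                          # since uncited records have no citation rows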
def search_unit_refersto(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records referred to by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
return get_refersto_hitset(ahitset)
else:
return intbitset([])
else:
return intbitset([])
def search_unit_citedby(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
return get_citedby_hitset(ahitset)
else:
return intbitset([])
else:
return intbitset([])
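# Illustrative use of the citation-graph units (hedged; queries are examples):
#
#   search_unit_refersto('author:ellis')   # records referred to by the matching records
#   search_unit_citedby('author:ellis')    # records cited by the matching records
#
# Both first resolve the inner query with search_pattern() and then follow the
# citation links via get_refersto_hitset() / get_citedby_hitset().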
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, ap=0, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
"""Return dict of hitsets given by intersection of hitset with the collection universes."""
_ = gettext_set_language(ln)
# search stage 4: intersect with the collection universe:
if verbose and of.startswith("h"):
t1 = os.times()[4]
results = {}
results_nbhits = 0
for coll in colls:
results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
results_nbhits += len(results[coll])
if results_nbhits == 0:
# no hits found, try to search in Home:
results_in_Home = hitset_in_any_collection & get_collection_reclist(CFG_SITE_NAME)
if len(results_in_Home) > 0:
# some hits found in Home, so propose this search:
if of.startswith("h") and display_nearest_terms_box:
url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
print_warning(req, _("No match found in collection %(x_collection)s. Other public collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %\
{'x_collection': '<em>' + string.join([get_coll_i18nname(coll, ln, False) for coll in colls], ', ') + '</em>',
'x_url_open': '<a class="nearestterms" href="%s">' % (url),
'x_nb_hits': len(results_in_Home),
'x_url_close': '</a>'})
results = {}
else:
# no hits found in Home, recommend different search terms:
if of.startswith("h") and display_nearest_terms_box:
print_warning(req, _("No public collection matched your query. "
"If you were looking for a non-public document, please choose "
"the desired restricted collection first."))
results = {}
if verbose and of.startswith("h"):
t2 = os.times()[4]
print_warning(req, "Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits)
print_warning(req, "Search stage 4: execution took %.2f seconds." % (t2 - t1))
return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
"""Return intersection of search 'results' (a dict of hitsets
with collection as key) with the 'hitset', i.e. apply
'hitset' intersection to each collection within search
'results'.
If the final 'results' set would be empty and 'ap'
(approximate pattern) is true, then print the 'aptext' warning
and return the original 'results' set unchanged. If 'ap' is
false, then return the empty results set.
"""
if ap:
results_ap = copy.deepcopy(results)
else:
results_ap = {} # will return empty dict in case of no hits found
nb_total = 0
for coll in results.keys():
results[coll].intersection_update(hitset)
nb_total += len(results[coll])
if nb_total == 0:
if of.startswith("h"):
print_warning(req, aptext)
results = results_ap
return results
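# Illustrative behaviour of intersect_results_with_hitset() (hedged sketch):
#
#   results = {'Articles': intbitset([1, 2, 3])}
#   out = intersect_results_with_hitset(req, results, intbitset([7]), ap=1,
#                                       aptext="no overlap, keeping original")
#   # -> the intersection is empty, but because 'ap' is true the warning text is
#   #    printed and the original per-collection hitsets are returned unchanged.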
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
"""Return a box similar to ``Not satisfied...'' one by proposing
author searches for similar names. Namely, take AUTHOR_NAME
and the first initial of the firstame (after comma) and look
into author index whether authors with e.g. middle names exist.
Useful mainly for CERN Library that sometimes contains name
forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
same person. The box isn't proposed if no similarly named
authors are found to exist.
"""
# return nothing if not configured:
if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
return ""
# return empty box if there is no initial:
if re.match(r'[^ ,]+, [^ ]', author_name) is None:
return ""
# firstly find name comma initial:
author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
# secondly search for similar name forms:
similar_author_names = {}
for name in author_name_to_search, strip_accents(author_name_to_search):
for tag in get_field_tags("author"):
# deduce into which bibxxx table we will search:
digit1, digit2 = int(tag[0]), int(tag[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(tag) != 6 or tag[-1:]=='%':
# only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
(name + "%", tag + "%"))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
(name + "%", tag))
for row in res:
similar_author_names[row[0]] = 1
# remove the original name and sort the list:
try:
del similar_author_names[author_name]
except KeyError:
pass
# thirdly print the box:
out = ""
if similar_author_names:
out_authors = similar_author_names.keys()
out_authors.sort()
tmp_authors = []
for out_author in out_authors:
nbhits = get_nbhits_in_bibxxx(out_author, "author")
if nbhits:
tmp_authors.append((out_author, nbhits))
out += websearch_templates.tmpl_similar_author_names(
authors=tmp_authors, ln=ln)
return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
"""Return text box containing list of 'n' nearest terms above/below 'p'
for the field 'f' for matching type 't' (words/phrases) in
language 'ln'.
Propose new searches according to `urlargd' with the new words.
If `intro_text_p' is true, then display the introductory message,
otherwise print only the nearest terms in the box content.
"""
# load the right message language
_ = gettext_set_language(ln)
if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
return _("Your search did not match any records. Please try again.")
nearest_terms = []
if not p: # sanity check
p = "."
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
index_id = get_index_id_from_field(f)
if f == 'fulltext':
if CFG_SOLR_URL:
return _("No match found, please enter different search terms.")
else:
# FIXME: workaround for not having native phrase index yet
t = 'w'
# special indexes:
if f == 'refersto':
return _("There are no records referring to %s.") % cgi.escape(p)
if f == 'citedby':
return _("There are no records cited by %s.") % cgi.escape(p)
# look for nearest terms:
if t == 'w':
nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
if not nearest_terms:
return _("No word index is available for %s.") % \
('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
else:
nearest_terms = []
if index_id:
nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
if f == 'datecreated' or f == 'datemodified':
nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
if not nearest_terms:
nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
if not nearest_terms:
return _("No phrase index is available for %s.") % \
('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
terminfo = []
for term in nearest_terms:
if t == 'w':
hits = get_nbhits_in_bibwords(term, f)
else:
if index_id:
hits = get_nbhits_in_idxphrases(term, f)
elif f == 'datecreated' or f == 'datemodified':
hits = get_nbhits_in_bibrec(term, f)
else:
hits = get_nbhits_in_bibxxx(term, f)
argd = {}
argd.update(urlargd)
# check which fields contained the requested parameter, and replace it.
for (px, fx) in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
if px in argd:
argd_px = argd[px]
if t == 'w':
# p was stripped of accents, so do the same here:
argd_px = strip_accents(argd_px)
#argd[px] = string.replace(argd_px, p, term, 1)
#we need something similar, but case insensitive
pattern_index = string.find(argd_px.lower(), p.lower())
if pattern_index > -1:
argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
break
#this is doing exactly the same as:
#argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
#but is ~4x faster (2us vs. 8.25us)
terminfo.append((term, hits, argd))
intro = ""
if intro_text_p: # add full leading introductory text
if f:
intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
{'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
else:
intro = _("Search term %s did not match any record. Nearest terms in any collection are:") % \
("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>")
return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
intro=intro)
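# Illustrative call (hedged; the URL arguments and values are examples only):
#
#   create_nearest_terms_box({'p': 'qvantum', 'f': 'title'}, 'qvantum', 'title')
#   # -> browses the word index around 'qvantum', counts the hits of each
#   #    neighbouring term and renders them as alternative searches via
#   #    tmpl_nearest_term_box (provided CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS is set).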
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
"""Return list of +n -n nearest terms to word `p' in index for field `f'."""
nearest_words = [] # will hold the (sorted) list of nearest words to return
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return nearest_words
# firstly try to get `n' closest words above `p':
res = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
(p, n_above))
for row in res:
nearest_words.append(row[0])
nearest_words.reverse()
# secondly insert given word `p':
nearest_words.append(p)
# finally try to get `n' closest words below `p':
res = run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
(p, n_below))
for row in res:
nearest_words.append(row[0])
return nearest_words
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
regardless of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
return [p,]
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))
res_above = map(lambda x: x[0], res_above)
res_above.reverse()
res_below = run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))
res_below = map(lambda x: x[0], res_below)
return res_above + res_below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
considering the collection (intbitset).
Return list of [(phrase1, hitset), (phrase2, hitset), ... , (phrase_n, hitset)]."""
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3))
res_above = [(term, intbitset(hitlist) & collection) for term, hitlist in res_above]
res_above = [(term, len(hitlist)) for term, hitlist in res_above if hitlist]
res_below = run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3))
res_below = [(term, intbitset(hitlist) & collection) for term, hitlist in res_below]
res_below = [(term, len(hitlist)) for term, hitlist in res_below if hitlist]
res_above.reverse()
return res_above[-n_above:] + res_below[:n_below]
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field f, regardless
of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nearest_terms_in_bibwords(p, f, n_below, n_above)
## We are going to take max(n_below, n_above) as the number of
## values to fetch from bibXXx. This is needed to work around
## MySQL UTF-8 sorting troubles in 4.0.x. Proper solution is to
## use MySQL 4.1.x or our own idxPHRASE in the future.
index_id = get_index_id_from_field(f)
if index_id:
return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
n_fetch = 2*max(n_below, n_above)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
## start browsing to fetch list of hits:
browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
# always add self to the results set:
browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# firstly try to get `n' closest phrases above `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag LIKE %%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag=%%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# secondly try to get `n' closest phrases equal to or below `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag LIKE %%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag=%%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# select only the first n phrases: (this is needed because we searched
# in many different tables, so we cannot be sure that the n phrases we
# have are the right ones; this will of course not be needed once there
# is a single ACC table per given field):
phrases_out = browsed_phrases.keys()
phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
string.lower(strip_accents(y))))
# find position of self:
try:
idx_p = phrases_out.index(p)
except:
idx_p = len(phrases_out)/2
# return n_above and n_below:
return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
"""Return list of nearest terms and counts from bibrec table.
p is usually a date, and f either datecreated or datemodified.
Note: the below/above counts are approximate and not strictly respected.
"""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s < %%s
ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
(p, n_above))
res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s > %%s
ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
(p, n_below))
out = set([])
for row in res_above:
out.add(row[0])
for row in res_below:
out.add(row[0])
out_list = list(out)
out_list.sort()
return list(out_list)
def get_nbhits_in_bibrec(term, f):
"""Return number of hits in bibrec table. term is usually a date,
and f is either 'datecreated' or 'datemodified'."""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
(term + '%',))
return res[0][0]
def get_nbhits_in_bibwords(word, f):
"""Return number of hits for word 'word' inside words index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_idxphrases(word, f):
"""Return number of hits for word 'word' inside phrase index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % idxphraseX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_bibxxx(p, f):
"""Return number of hits for word 'word' inside words index for field 'f'."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nbhits_in_bibwords(p, f)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
# start searching:
recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore)
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag LIKE %%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t + "%"))
else:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag=%%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t))
for row in res:
recIDs[row[0]] = 1
return len(recIDs)
def get_mysql_recid_from_aleph_sysno(sysno):
"""Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER").
Returns None in case of failure."""
out = None
res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
(sysno,))
if res:
out = res[0][0]
return out
def guess_primary_collection_of_a_record(recID):
"""Return primary collection name a record recid belongs to, by
testing 980 identifier.
May lead to bad guesses when a collection is defined dynamically
via dbquery.
In that case, return CFG_SITE_NAME."""
out = CFG_SITE_NAME
dbcollids = get_fieldvalues(recID, "980__a")
for dbcollid in dbcollids:
variants = ("collection:" + dbcollid,
'collection:"' + dbcollid + '"',
"980__a:" + dbcollid,
'980__a:"' + dbcollid + '"',
'980:' + dbcollid ,
'980:"' + dbcollid + '"')
res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
if res:
out = res[0][0]
break
if CFG_CERN_SITE:
# dirty hack for ATLAS collections at CERN:
if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
for alternative_collection in ('ATLAS Communications Physics',
'ATLAS Communications General',
'ATLAS Internal Notes Physics',
'ATLAS Internal Notes General',):
if recID in get_collection_reclist(alternative_collection):
return alternative_collection
# dirty hack for FP
FP_collections = {'DO': 'Current Price Enquiries',
'IT': 'Current Invitation for Tenders',
'MS': 'Current Market Surveys'}
fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
for coll in fp_coll_ids:
if recID in get_collection_reclist(FP_collections[coll]):
return FP_collections[coll]
return out
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
"""Return collection name a record recid belongs to, by first testing
the referer URL if provided and otherwise returning the
primary collection."""
if referer:
dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
#requests can come from different invenio installations, with different collections
if CFG_SITE_URL.find(hostname) < 0:
return guess_primary_collection_of_a_record(recID)
g = _re_collection_url.match(path)
if g:
name = urllib.unquote_plus(g.group(1))
#check if this collection actually exist (also normalize the name if case-insensitive)
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name):
return name
elif path.startswith('/search'):
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
query = cgi.parse_qs(query)
for name in query.get('cc', []) + query.get('c', []):
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return name
return guess_primary_collection_of_a_record(recID)
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
"""Return True if the record belongs to at least one collection. This is a
good, although not perfect, indicator to guess if webcoll has already run
after this record has been entered into the system.
"""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return True
return False
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
"""Return all the collection names a record belongs to.
Note this function is O(n_collections)."""
ret = []
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
ret.append(name)
return ret
def get_tag_name(tag_value, prolog="", epilog=""):
"""Return tag name from the known tag value, by looking up the 'tag' table.
Return empty string in case of failure.
Example: input='100__%', output='first author'."""
out = ""
res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
if res:
out = prolog + res[0][0] + epilog
return out
def get_fieldcodes():
"""Returns a list of field codes that may have been passed as 'search options' in URL.
Example: output=['subject','division']."""
out = []
res = run_sql("SELECT DISTINCT(code) FROM field")
for row in res:
out.append(row[0])
return out
def get_field_name(code):
"""Return the corresponding field_name given the field code.
e.g. reportnumber -> report number."""
res = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
if res:
return res[0][0]
else:
return ""
def get_field_tags(field):
"""Returns a list of MARC tags for the field code 'field'.
Returns empty list in case of error.
Example: field='author', output=['100__%','700__%']."""
out = []
query = """SELECT t.value FROM tag AS t, field_tag AS ft, field AS f
WHERE f.code=%s AND ft.id_field=f.id AND t.id=ft.id_tag
ORDER BY ft.score DESC"""
res = run_sql(query, (field, ))
for val in res:
out.append(val[0])
return out
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
"""Return buffer of ALEPH sequential-like textual format with fields found
in the list TAGS_IN for record RECID.
If can_see_hidden is True, just print everything. Otherwise hide fields
from CFG_BIBFORMAT_HIDDEN_TAGS.
"""
out = ""
if type(tags_in) is not list:
tags_in = [tags_in,]
if len(tags_in) == 1 and len(tags_in[0]) == 6:
## case A: one concrete subfield asked, so print its value if found
## (use with care: can mislead if field has multiple occurrences)
out += string.join(get_fieldvalues(recID, tags_in[0]),"\n")
else:
## case B: print our "text MARC" format; works safely all the time
# find out which tags to output:
dict_of_tags_out = {}
if not tags_in:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
else:
for tag in tags_in:
if len(tag) == 0:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
elif len(tag) == 1:
for j in range(0, 10):
dict_of_tags_out["%s%d%%" % (tag, j)] = 1
elif len(tag) < 5:
dict_of_tags_out["%s%%" % tag] = 1
else:
dict_of_tags_out[tag[0:5]] = 1
tags_out = dict_of_tags_out.keys()
tags_out.sort()
# search all bibXXx tables as needed:
for tag in tags_out:
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
continue
if tag.startswith("001") or tag.startswith("00%"):
if out:
out += "\n"
out += "%09d %s %d" % (recID, "001__", recID)
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(tag)+'%'))
# go through fields:
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
printme = True
#check the stuff in hiddenfields
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if ind1 == "_":
ind1 = ""
if ind2 == "_":
ind2 = ""
# print field tag
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if out:
out += "\n"
out += "%09d %s " % (recID, field[:5])
field_number_old = field_number
field_old = field
# print subfield value
if field[0:2] == "00" and field[-1:] == "_":
out += value
else:
out += "$$%s%s" % (field[-1:], value)
return out
def get_merged_recid(recID):
""" Return the record ID of the record with
which the given record has been merged.
@param recID: deleted record recID
@type recID: int
@return: merged record recID
@rtype: int or None
"""
merged_recid = None
for val in get_fieldvalues(recID, "970__d"):
try:
merged_recid = int(val)
break
except ValueError:
pass
return merged_recid
def record_exists(recID):
"""Return 1 if record RECID exists.
Return 0 if it doesn't exist.
Return -1 if it exists but is marked as deleted.
"""
out = 0
res = run_sql("SELECT id FROM bibrec WHERE id=%s", (recID,), 1)
if res:
try: # if recid is '123foo', mysql will return id=123, and we don't want that
recID = int(recID)
except ValueError:
return 0
# record exists; now check whether it isn't marked as deleted:
dbcollids = get_fieldvalues(recID, "980__%")
if ("DELETED" in dbcollids) or (CFG_CERN_SITE and "DUMMY" in dbcollids):
out = -1 # exists, but marked as deleted
else:
out = 1 # exists fine
return out
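# Illustrative return values of record_exists() (hedged; recIDs are examples):
#
#   record_exists(1)        # -> 1  if record 1 exists and is not marked as deleted
#   record_exists(999999)   # -> 0  if there is no such row in bibrec
#   record_exists(42)       # -> -1 if the record carries a DELETED 980__ marker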
def record_empty(recID):
"""
Is this record empty, e.g. has only 001, waiting for integration?
@param recID: the record identifier.
@type recID: int
@return: 1 if the record is empty, 0 otherwise.
@rtype: int
"""
record = get_record(recID)
if record is None or len(record) < 2:
return 1
else:
return 0
def record_public_p(recID, recreate_cache_if_needed=True):
"""Return 1 if the record is public, i.e. if it can be found in the Home collection.
Return 0 otherwise.
"""
return recID in get_collection_reclist(CFG_SITE_NAME, recreate_cache_if_needed=recreate_cache_if_needed)
def get_creation_date(recID, fmt="%Y-%m-%d"):
"Returns the creation date of the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def get_modification_date(recID, fmt="%Y-%m-%d"):
"Returns the date of last modification for the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def print_warning(req, msg, msg_type='', prologue='<br />', epilogue='<br />'):
"Prints warning message and flushes output."
if req and msg:
req.write(websearch_templates.tmpl_print_warning(
msg = msg,
type = msg_type,
prologue = prologue,
epilogue = epilogue,
))
return
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=10,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc.) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=10,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc.) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
out = ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_hosted_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=[], hosted_colls_potential_results_p=False):
"""Prints results overview box with links to particular collections below."""
out = ""
new_colls = []
for coll in colls:
new_colls.append({
'id': get_colID(coll),
'code': coll,
'name': get_coll_i18nname(coll, ln, False),
})
return websearch_templates.tmpl_print_results_overview(
ln = ln,
results_final_nb_total = results_final_nb_total,
results_final_nb = results_final_nb,
cpu_time = cpu_time,
colls = new_colls,
ec = ec,
hosted_colls_potential_results_p = hosted_colls_potential_results_p,
)
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
"""Prints the full results of a hosted collection"""
if of.startswith("h"):
if no_records_found:
return "<br />No results found."
if search_timed_out:
return "<br />The search engine did not respond in time."
return websearch_templates.tmpl_print_hosted_results(
url_and_engine=url_and_engine,
ln=ln,
of=of,
req=req,
limit=limit
)
class BibSortDataCacher(DataCacher):
"""
Cache holding all structures created by bibsort
( _data, data_dict).
"""
def __init__(self, method_name):
self.method_name = method_name
self.method_id = 0
try:
res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
except:
self.method_id = 0
res = None  # make sure 'res' is defined even if the lookup failed
if res and res[0]:
self.method_id = res[0][0]
else:
self.method_id = 0
def cache_filler():
method_id = self.method_id
alldicts = {}
if self.method_id == 0:
return {}
try:
res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
where id_bsrMETHOD = %s""", (method_id,))
res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
where id_bsrMETHOD = %s""", (method_id,))
except Exception:
# database problems, return empty cache
return {}
try:
data_dict_ordered = deserialize_via_marshal(res_data[0][0])
except:
data_dict_ordered= {}
alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
if not res_buckets:
alldicts['bucket_data'] = {}
return alldicts
for row in res_buckets:
bucket_no = row[0]
try:
bucket_data = intbitset(row[1])
except:
bucket_data = intbitset([])
alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
return alldicts
def timestamp_verifier():
method_id = self.method_id
res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_methoddata = str(res[0][0])
except IndexError:
update_time_methoddata = '1970-01-01 00:00:00'
res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_buckets = str(res[0][0])
except IndexError:
update_time_buckets = '1970-01-01 00:00:00'
return max(update_time_methoddata, update_time_buckets)
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def get_sorting_methods():
if not CFG_BIBSORT_BUCKETS: # we do not want to use buckets
return {}
try: # make sure the method has some data
res = run_sql("""SELECT m.name, m.definition FROM bsrMETHOD m, bsrMETHODDATA md WHERE m.id = md.id_bsrMETHOD""")
except:
return {}
return dict(res)
sorting_methods = get_sorting_methods()
cache_sorted_data = {}
for sorting_method in sorting_methods:
try:
cache_sorted_data[sorting_method].is_ok_p
except Exception:
cache_sorted_data[sorting_method] = BibSortDataCacher(sorting_method)
def get_tags_form_sort_fields(sort_fields):
"""Given a list of sort_fields, return the tags associated with it and
also the name of the field that has no tags associated, to be able to
display a message to the user."""
tags = []
if not sort_fields:
return [], ''
for sort_field in sort_fields:
if sort_field and str(sort_field[0:2]).isdigit():
# sort_field starts by two digits, so this is probably a MARC tag already
tags.append(sort_field)
else:
# let us check the 'field' table
field_tags = get_field_tags(sort_field)
if field_tags:
tags.extend(field_tags)
else:
return [], sort_field
return tags, ''
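# Illustrative mapping performed by get_tags_form_sort_fields() (hedged;
# assumes the usual author tag configuration):
#
#   get_tags_form_sort_fields(['author', '773__p'])
#   # -> (['100__%', '700__%', '773__p'], '')
#   get_tags_form_sort_fields(['nosuchfield'])
#   # -> ([], 'nosuchfield')   # the unknown field is reported back to the caller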
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""Initial entry point for ranking records, acts like a dispatcher.
(i) if rank_method_code is in bsrMETHOD, the bibsort buckets can be used;
(ii) if rank_method_code is not in bsrMETHOD, fall back to bibrank.
"""
if CFG_BIBSORT_BUCKETS and sorting_methods:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('RNK') and \
definition.replace('RNK:','').strip().lower() == string.lower(rank_method_code):
(solution_recs, solution_scores) = sort_records_bibsort(req, hitset_global, sort_method, '', sort_order, verbose, of, ln, rg, jrec, 'r')
#return (solution_recs, solution_scores, '', '', '')
comment = ''
if verbose > 0:
comment = 'find_citations retlist %s' %[[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
return (solution_recs, solution_scores, '(', ')', comment)
return rank_records_bibrank(rank_method_code, rank_limit_relevance, hitset_global, pattern, verbose)
def sort_records(req, recIDs, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""Initial entry point for sorting records, acts like a dispatcher.
(i) if sort_field is in bsrMETHOD, then BibSort has already sorted the data for this field, so we can use the cache;
(ii) if sort_field is not in bsrMETHOD, then the cache does not contain any information regarding this sorting method"""
_ = gettext_set_language(ln)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
#calculate the min index on the reverted list
index_min = max(len(recIDs) - irec_max, 0) #just to be sure that the min index is not negative
#bibsort does not handle sort_pattern for now, use bibxxx
if sort_pattern:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
use_sorting_buckets = True
if not CFG_BIBSORT_BUCKETS or not sorting_methods: #ignore the use of buckets, use old fashion sorting
use_sorting_buckets = False
if not sort_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
return recIDs[index_min:]
sort_fields = string.split(sort_field, ",")
if len(sort_fields) == 1:
# we have only one sorting_field, check if it is treated by BibSort
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if use_sorting_buckets and \
((definition.startswith('FIELD') and \
definition.replace('FIELD:','').strip().lower() == string.lower(sort_fields[0])) or \
sort_method == sort_fields[0]):
#use BibSort
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#deduce sorting MARC tag out of the 'sort_field' argument:
tags, error_field = get_tags_form_sort_fields(sort_fields)
if error_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
if of.startswith('h'):
print_warning(req, _("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error")
return recIDs[index_min:]
if tags:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('MARC') \
and definition.replace('MARC:','').strip().split(',') == tags \
and use_sorting_buckets:
#this list of tags have a designated method in BibSort, so use it
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#we do not have this sort_field in the BibSort tables -> do the old-fashioned sorting
return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
return recIDs[index_min:]
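# Illustrative dispatch of sort_records() (hedged sketch; parameters are examples):
#
#   sort_records(req, recIDs, sort_field='title', sort_order='a')
#   # -> if a BibSort method covers 'title' and buckets are enabled, the cached
#   #    bucket data is used via sort_records_bibsort(); otherwise the MARC tags
#   #    for 'title' are deduced and sort_records_bibxxx() does the sorting.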
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sort_or_rank = 's'):
"""This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed"""
_ = gettext_set_language(ln)
#sanity check
if sort_method not in sorting_methods:
if sort_or_rank == 'r':
return rank_records_bibrank(sort_method, 0, recIDs, None, verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if verbose >= 3 and of.startswith('h'):
print_warning(req, "Sorting (using BibSort cache) by method %s (definition %s)." \
% (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))))
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
solution = intbitset([])
input_recids = intbitset(recIDs)
cache_sorted_data[sort_method].recreate_cache_if_needed()
sort_cache = cache_sorted_data[sort_method].cache
bucket_numbers = sort_cache['bucket_data'].keys()
#check if all buckets have been constructed
if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
if verbose > 3 and of.startswith('h'):
print_warning(req, "Not all buckets have been constructed.. switching to old fashion sorting.")
if sort_or_rank == 'r':
return rank_records_bibrank(sort_method, 0, recIDs, None, verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if sort_order == 'd':
bucket_numbers.reverse()
for bucket_no in bucket_numbers:
solution.union_update(input_recids & sort_cache['bucket_data'][bucket_no])
if len(solution) >= irec_max:
break
dict_solution = {}
missing_records = []
for recid in solution:
try:
dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
except KeyError:
#recid is in buckets, but not in the bsrMETHODDATA,
#maybe because the value has been deleted, but the change has not yet been propagated to the buckets
missing_records.append(recid)
#check if there are recids that are not in any bucket -> to be added at the end/top, ordered by insertion date
if len(solution) < irec_max:
#some records have not been yet inserted in the bibsort structures
#or, some records have no value for the sort_method
missing_records = sorted(missing_records + list(input_recids.difference(solution)))
#the records need to be sorted in reverse order for the print record function
#the return statement should be equivalent to the following statements
#(these are clearer, but less efficient, since they reverse the same list twice)
#sorted_solution = (missing_records + sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='d'))[:irec_max]
#sorted_solution.reverse()
#return sorted_solution
if sort_method.strip().lower().startswith('latest') and sort_order == 'd':
# if we want to sort the records on their insertion date, add the missing records at the top
solution = sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='a') + missing_records
else:
solution = missing_records + sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='a')
#calculate the min index on the reversed list
index_min = max(len(solution) - irec_max, 0) #just to be sure that the min index is not negative
#return all the records up to irec_max, but on the reversed list
if sort_or_rank == 'r':
# we need the recids, with values
return (solution[index_min:], [dict_solution.get(record, 0) for record in solution[index_min:]])
else:
return solution[index_min:]
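# Illustrative sketch (hypothetical values, derived from the logic above): with
# bucket_data = {1: {10, 11}, 2: {12, 13}} and data_dict_ordered = {10: 1,
# 11: 2, 12: 3, 13: 4}, a descending sort walks the buckets in reverse order
# (2, then 1), stops collecting as soon as irec_max recids are gathered, and
# then orders the collected recids by their weights in data_dict_ordered;
# recids present in a bucket but absent from data_dict_ordered are kept as
# "missing records" and added at the top or at the end of the solution,
# depending on the sorting method.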
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by
'sort pattern', for example "sort by report number that starts by CERN-PS".
Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
_ = gettext_set_language(ln)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
#calculate the min index on the reversed list
index_min = max(len(recIDs) - irec_max, 0) #just to be sure that the min index is not negative
## check arguments:
if not sort_field:
return recIDs[index_min:]
if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
if of.startswith('h'):
print_warning(req, _("Sorry, sorting is allowed on sets of up to %d records only. Using default sort order.") % CFG_WEBSEARCH_NB_RECORDS_TO_SORT, "Warning")
return recIDs[index_min:]
recIDs_dict = {}
recIDs_out = []
if not tags:
# tags have not been computed yet
sort_fields = string.split(sort_field, ",")
tags, error_field = get_tags_form_sort_fields(sort_fields)
if error_field:
if of.startswith('h'):
print_warning(req, _("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error")
return recIDs[index_min:]
if verbose >= 3 and of.startswith('h'):
print_warning(req, "Sorting by tags %s." % cgi.escape(repr(tags)))
if sort_pattern:
print_warning(req, "Sorting preferentially by %s." % cgi.escape(sort_pattern))
## check if we have sorting tag defined:
if tags:
# fetch the necessary field values:
for recID in recIDs:
val = "" # will hold value for recID according to which sort
vals = [] # will hold all values found in sorting tag for recID
for tag in tags:
if CFG_CERN_SITE and tag == '773__c':
# CERN hack: journal sorting
# 773__c contains page numbers, e.g. 3-13, and we want to sort by 3, and numerically:
vals.extend(["%050s" % x.split("-",1)[0] for x in get_fieldvalues(recID, tag)])
else:
vals.extend(get_fieldvalues(recID, tag))
if sort_pattern:
# try to pick that tag value that corresponds to sort pattern
bingo = 0
for v in vals:
if v.lower().startswith(sort_pattern.lower()): # bingo!
bingo = 1
val = v
break
if not bingo: # sort_pattern value not found, so prepend the pattern to the joined values
val = sort_pattern + " " + string.join(vals)
else:
# no sort pattern defined, so join them all together
val = string.join(vals)
val = strip_accents(val.lower()) # sort values regardless of accents and case
if recIDs_dict.has_key(val):
recIDs_dict[val].append(recID)
else:
recIDs_dict[val] = [recID]
# sort them:
recIDs_dict_keys = recIDs_dict.keys()
recIDs_dict_keys.sort()
# now that keys are sorted, create output array:
for k in recIDs_dict_keys:
for s in recIDs_dict[k]:
recIDs_out.append(s)
# ascending or descending?
if sort_order == 'a':
recIDs_out.reverse()
# okay, we are done
# return only up to the maximum that we need to sort
if len(recIDs_out) != len(recIDs):
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs_out), jrec, rg)
index_min = max(len(recIDs_out) - irec_max, 0) #just to be sure that the min index is not negative
return recIDs_out[index_min:]
else:
# good, no sort needed
return recIDs[index_min:]
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
"""calculates in which interval should the sorted records be
a value of 'rg=-9999' means to print all records: to be used with care."""
if not jrec:
jrec = 1
if not rg:
#return all
return jrec-1, nb_found
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will sort records from irec_min to irec_max excluded
irec_min = jrec - 1
irec_max = irec_min + rg
if irec_min < 0:
irec_min = 0
if irec_max > nb_found:
irec_max = nb_found
return irec_min, irec_max
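# Worked example (derived from the function above): with nb_found=100, jrec=11
# and rg=10 the function returns (10, 20), i.e. records with indexes 10..19 are
# to be sorted; rg=None yields (jrec-1, nb_found) and rg=-9999 selects all
# nb_found records.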
def print_records(req, recIDs, jrec=1, rg=10, format='hb', ot='', ln=CFG_SITE_LANG, relevances=[], relevances_prologue="(", relevances_epilogue="%%)", decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True, print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='', rm=''):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
'print_records_prologue_p' and/or 'print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if len(recIDs):
nb_found = len(recIDs)
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
# print records
recIDs_to_print = [recIDs[x] for x in range(irec_max, irec_min, -1)]
format_records(recIDs_to_print,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for irec in range(irec_max, irec_min, -1):
x = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format == 'excel':
recIDs_to_print = [recIDs[x] for x in range(irec_max, irec_min, -1)]
create_excel(recIDs=recIDs_to_print, req=req, ln=ln, ot=ot)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for irec in range(irec_max, irec_min, -1):
req.write(print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(
ln = ln))
for irec in range(irec_max, irec_min, -1):
row_number = jrec+irec_max-irec
recid = recIDs[irec]
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln = ln,
recid = recid,
row_number = row_number,
relevance = relevance,
record = record,
relevances_prologue = relevances_prologue,
relevances_epilogue = relevances_epilogue,
display_add_to_basket = display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln = ln,
display_add_to_basket = display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
for irec in range(irec_max, irec_min, -1):
if record_exists(recIDs[irec]) == -1:
print_warning(req, _("The record has been deleted."))
merged_recid = get_merged_recid(recIDs[irec])
if merged_recid:
print_warning(req, _("The record %d replaces it." % merged_recid))
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(recIDs[irec])),
recIDs[irec], ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
ordered_tabs_id.sort(lambda x,y: cmp(x[1],y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid = recIDs[irec]
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'], \
'%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln), \
tab_id == tab,
unordered_tabs[tab_id]['enabled']) \
for (tab_id, order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] == True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recIDs[irec], "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recIDs[irec], ln)
r = calculate_reading_similarity_list(recIDs[irec], "pageviews")
viewsimilarity = None
if r: viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recIDs[irec],
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
elif tab == 'citations':
recid = recIDs[irec]
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
selfcited = get_self_cited_by(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
print_warning(req, "Citation graph debug: " + \
str(len(citationhistory)))
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(recid, ln, citationhistory))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recIDs[irec], 'HDREF', ln=ln, user_info=user_info, verbose=verbose))
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
elif tab == 'keywords':
import bibclassify_webinterface
recid = recIDs[irec]
bibclassify_webinterface.main_page(req, recid, tabs, ln, webstyle_templates)
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recIDs[irec],
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum, referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
if record_exists(recIDs[irec]) == 1:
creationdate = get_creation_date(recIDs[irec])
modificationdate = get_modification_date(recIDs[irec])
content = print_record(recIDs[irec], format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID = recIDs[irec],
ln = ln,
format = format,
creationdate = creationdate,
modificationdate = modificationdate,
content = content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recIDs[irec],
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.webcomment import get_mini_reviews
reviews = get_mini_reviews(recid = recIDs[irec], ln=ln)
else:
reviews = ''
actions = format_record(recIDs[irec], 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recIDs[irec], 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recIDs[irec],
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for irec in range(irec_max, irec_min, -1):
req.write(print_record(recIDs[irec], format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
print_warning(req, _("Use different search terms."))
def print_records_prologue(req, format, cc=None):
"""
Print the appropriate prologue for list of records in the given
format.
"""
prologue = "" # no prologue needed for HTML or Text formats
if format.startswith('xm'):
prologue = websearch_templates.tmpl_xml_marc_prologue()
elif format.startswith('xn'):
prologue = websearch_templates.tmpl_xml_nlm_prologue()
elif format.startswith('xw'):
prologue = websearch_templates.tmpl_xml_refworks_prologue()
elif format.startswith('xr'):
prologue = websearch_templates.tmpl_xml_rss_prologue(cc=cc)
elif format.startswith('xe'):
prologue = websearch_templates.tmpl_xml_endnote_prologue()
elif format.startswith('xo'):
prologue = websearch_templates.tmpl_xml_mods_prologue()
elif format.startswith('xp'):
prologue = websearch_templates.tmpl_xml_podcast_prologue(cc=cc)
elif format.startswith('x'):
prologue = websearch_templates.tmpl_xml_default_prologue()
req.write(prologue)
def print_records_epilogue(req, format):
"""
Print the appropriate epilogue for list of records in the given
format.
"""
epilogue = "" # no epilogue needed for HTML or Text formats
if format.startswith('xm'):
epilogue = websearch_templates.tmpl_xml_marc_epilogue()
elif format.startswith('xn'):
epilogue = websearch_templates.tmpl_xml_nlm_epilogue()
elif format.startswith('xw'):
epilogue = websearch_templates.tmpl_xml_refworks_epilogue()
elif format.startswith('xr'):
epilogue = websearch_templates.tmpl_xml_rss_epilogue()
elif format.startswith('xe'):
epilogue = websearch_templates.tmpl_xml_endnote_epilogue()
elif format.startswith('xo'):
epilogue = websearch_templates.tmpl_xml_mods_epilogue()
elif format.startswith('xp'):
epilogue = websearch_templates.tmpl_xml_podcast_epilogue()
elif format.startswith('x'):
epilogue = websearch_templates.tmpl_xml_default_epilogue()
req.write(epilogue)
def get_record(recid):
"""Directly the record object corresponding to the recid."""
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
value = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
if value:
try:
return deserialize_via_marshal(value[0][0])
except:
### In case of corruption, let's rebuild it!
pass
return create_record(print_record(recid, 'xm'))[0]
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
search_pattern=None, user_info=None, verbose=0, sf='', so='d', sp='', rm=''):
"""
Prints record 'recID' formatted according to 'format'.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if format == 'recstruct':
return get_record(recID)
_ = gettext_set_language(ln)
display_claim_this_paper = False
try:
display_claim_this_paper = user_info["precached_viewclaimlink"]
except (KeyError, TypeError):
display_claim_this_paper = False
#check from user information if the user has the right to see hidden fields/tags in the
#records as well
can_see_hidden = (acc_authorize_action(user_info, 'runbibedit')[0] == 0)
out = ""
# sanity check:
record_exist_p = record_exists(recID)
if record_exist_p == 0: # doesn't exist
return out
# New Python BibFormat procedure for formatting
# Old procedure follows further below
# We must still check some special formats, but these
# should disappear when BibFormat improves.
if not (CFG_BIBFORMAT_USE_OLD_BIBFORMAT \
or format.lower().startswith('t') \
or format.lower().startswith('hm') \
or str(format[0:3]).isdigit() \
or ot):
# Unspecified format is hd
if format == '':
format = 'hd'
if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
# HTML output displays a default value for deleted records.
# Other formats have to deal with it.
out += _("The record has been deleted.")
# was record deleted-but-merged ?
merged_recid = get_merged_recid(recID)
if merged_recid:
out += ' ' + _("The record %d replaces it." % merged_recid)
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format.lower().startswith('hb') and \
format.lower() != 'hb_p':
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper)
return out
# Old PHP BibFormat procedure for formatting
# print record opening tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " <record>\n"
out += " <header>\n"
for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
out += " <identifier>%s</identifier>\n" % oai_id
out += " <datestamp>%s</datestamp>\n" % get_modification_date(recID)
out += " </header>\n"
out += " <metadata>\n"
if format.startswith("xm") or format == "marcxml":
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res and record_exist_p == 1:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format' -- it is not in the "bibfmt" table; so fetch all the data from "bibXXx" tables:
if format == "marcxml":
out += """ <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
elif format.startswith("xm"):
out += """ <record>\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
if record_exist_p == -1:
# deleted record, so display only OAI ID and 980:
oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
if oai_ids:
out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
(CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
else:
# controlfields
query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
"WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
"ORDER BY bb.field_number, b.tag ASC"
res = run_sql(query, (recID, ))
for row in res:
field, value = row[0], row[1]
value = encode_for_xml(value)
out += """ <controlfield tag="%s" >%s</controlfield>\n""" % \
(encode_for_xml(field[0:3]), value)
# datafields
i = 1 # Do not process bib00x and bibrec_bib00x, as
# they are controlfields. So start at bib01x and
# bibrec_bib01x (and set i = 0 at the end of
# first loop)
for digit1 in range(0, 10):
for digit2 in range(i, 10):
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
if ind1 == "_" or ind1 == "":
ind1 = " "
if ind2 == "_" or ind2 == "":
ind2 = " "
# print field tag, unless hidden
printme = True
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if field_number_old != -999:
out += """ </datafield>\n"""
out += """ <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
(encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
field_number_old = field_number
field_old = field
# print subfield value
value = encode_for_xml(value)
out += """ <subfield code="%s">%s</subfield>\n""" % \
(encode_for_xml(field[-1:]), value)
# all fields/subfields printed in this run, so close the tag:
if field_number_old != -999:
out += """ </datafield>\n"""
i = 0 # From the next outer loop on, also process the bibX0x and bibrec_bibX0x tables
# we are at the end of printing the record:
out += " </record>\n"
elif format == "xd" or format == "oai_dc":
# XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
out += """ <dc xmlns="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://purl.org/dc/elements/1.1/
http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
if record_exist_p == -1:
out += ""
else:
for f in get_fieldvalues(recID, "041__a"):
out += " <language>%s</language>\n" % f
for f in get_fieldvalues(recID, "100__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "700__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "245__a"):
out += " <title>%s</title>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "65017a"):
out += " <subject>%s</subject>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "8564_u"):
if f.split('.')[-1] == 'png': # skip links to PNG images
continue
out += " <identifier>%s</identifier>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "520__a"):
out += " <description>%s</description>\n" % encode_for_xml(f)
out += " <date>%s</date>\n" % get_creation_date(recID)
out += " </dc>\n"
elif len(format) == 6 and str(format[0:3]).isdigit():
# user has asked to print some fields only
if format == "001":
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
else:
vals = get_fieldvalues(recID, format)
for val in vals:
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)
elif format.startswith('t'):
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
else:
out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)
elif format == "hm":
if record_exist_p == -1:
out += "\n<pre>" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
else:
out += "\n<pre>" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"
elif format.startswith("h") and ot:
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
else:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"
elif format == "hd":
# HTML detailed format
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_detailed(
ln = ln,
recID = recID,
)
elif format.startswith("hb_") or format.startswith("hd_"):
# underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hx"):
# BibTeX format, called on the fly:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hs"):
# for citation/download similarity navigation links:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
# firstly, title:
titles = get_fieldvalues(recID, "245__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# just print record ID:
out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
out += "</a>"
# secondly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
if authors:
out += " - %s" % authors[0]
if len(authors) > 1:
out += " <em>et al</em>"
# thirdly publication info:
publinfos = get_fieldvalues(recID, "773__s")
if not publinfos:
publinfos = get_fieldvalues(recID, "909C4s")
if not publinfos:
publinfos = get_fieldvalues(recID, "037__a")
if not publinfos:
publinfos = get_fieldvalues(recID, "088__a")
if publinfos:
out += " - %s" % publinfos[0]
else:
# fourthly publication year (if not publication info):
years = get_fieldvalues(recID, "773__y")
if not years:
years = get_fieldvalues(recID, "909C4y")
if not years:
years = get_fieldvalues(recID, "260__c")
if years:
out += " (%s)" % years[0]
else:
# HTML brief format by default
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format))
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
if CFG_WEBSEARCH_CALL_BIBFORMAT:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
pass # do nothing for portfolio and on-the-fly formats
else:
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper)
# print record closing tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " </metadata>\n"
out += " </record>\n"
return out
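# Summary note (not part of the original code): the format codes handled by
# print_record() above include 'xm'/'marcxml' (MARCXML), 'xd'/'oai_dc' (Dublin
# Core), 'hm' (ALEPH-sequential-like MARC inside an HTML <pre> block), 'hx'
# (BibTeX via BibFormat), 'hd'/'hd_*' (HTML detailed), 'hs' (short link used
# for similarity navigation) and the default HTML brief ('hb') output.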
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
"""
Calls BibFormat and returns formatted record.
BibFormat will decide by itself if old or new BibFormat must be used.
"""
from invenio.bibformat_utils import get_pdf_snippets
keywords = []
if search_pattern is not None:
units = create_basic_search_units(None, str(search_pattern), None)
keywords = [unit[1] for unit in units if (unit[0] != '-' and unit[2] in [None, 'fulltext'])]
out = format_record(recID,
of=format,
ln=ln,
search_pattern=keywords,
user_info=user_info,
verbose=verbose)
if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
'fulltext' in user_info['uri']:
# check snippets only if URL contains fulltext
# FIXME: make it work for CLI too, via new function arg
if keywords:
snippets = get_pdf_snippets(recID, keywords)
if snippets:
out += snippets
return out
def log_query(hostname, query_args, uid=-1):
"""
Log query into the query and user_query tables.
Return id_query or None in case of problems.
"""
id_query = None
if uid >= 0:
# log the query only if uid is reasonable
res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
try:
id_query = res[0][0]
except:
id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
if id_query:
run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
(uid, id_query, hostname,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
return id_query
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
"""Write some info to the log file for later analysis."""
try:
log = open(CFG_LOGDIR + "/search.log", "a")
log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
log.write(action+"#")
log.write(p+"#")
log.write(f+"#")
for coll in colls[:-1]:
log.write("%s," % coll)
log.write("%s#" % colls[-1])
log.write("%d" % nb_records_found_total)
log.write("\n")
log.close()
except:
pass
return
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="", wl=0):
"""Perform search or browse request, without checking for
authentication. Return list of recIDs found, if of=id.
Otherwise create web page.
The arguments are as follows:
req - mod_python Request class instance.
cc - current collection (e.g. "ATLAS"). The collection the
user started to search/browse from.
c - collection list (e.g. ["Theses", "Books"]). The
collections user may have selected/deselected when
starting to search from 'cc'.
p - pattern to search for (e.g. "ellis and muon or kaon").
f - field to search within (e.g. "author").
rg - records in groups of (e.g. "10"). Defines how many hits
per collection in the search results page are
displayed.
sf - sort field (e.g. "title").
so - sort order ("a"=ascending, "d"=descending).
sp - sort pattern (e.g. "CERN-") -- in case there are more
values in a sort field, this argument tells which one
to prefer
rm - ranking method (e.g. "jif"). Defines whether results
should be ranked by some known ranking method.
of - output format (e.g. "hb"). Usually starting "h" means
HTML output (and "hb" for HTML brief, "hd" for HTML
detailed), "x" means XML output, "t" means plain text
output, "id" means no output at all but to return list
of recIDs found. (Suitable for high-level API.)
ot - output only these MARC tags (e.g. "100,700,909C0b").
Useful if only some fields are to be shown in the
output, e.g. for library to control some fields.
aas - advanced search ("0" means no, "1" means yes). Whether
search was called from within the advanced search
interface.
p1 - first pattern to search for in the advanced search
interface. Much like 'p'.
f1 - first field to search within in the advanced search
interface. Much like 'f'.
m1 - first matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op1 - first operator, to join the first and the second unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p2 - second pattern to search for in the advanced search
interface. Much like 'p'.
f2 - second field to search within in the advanced search
interface. Much like 'f'.
m2 - second matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op2 - second operator, to join the second and the third unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p3 - third pattern to search for in the advanced search
interface. Much like 'p'.
f3 - third field to search within in the advanced search
interface. Much like 'f'.
m3 - third matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
sc - split by collection ("0" no, "1" yes). Governs whether
we want to present the results in a single huge list,
or split by collection.
jrec - jump to record (e.g. "234"). Used for navigation
inside the search results.
recid - display record ID (e.g. "20000"). Do not
search/browse but go straight away to the Detailed
record page for the given recID.
recidb - display record ID bis (e.g. "20010"). If greater than
'recid', then display records from recid to recidb.
Useful for example for dumping records from the
database for reformatting.
sysno - display old system SYS number (e.g. ""). If you
migrate to Invenio from another system, and store your
old SYS call numbers, you can use them instead of recid
if you wish.
id - the same as recid, in case recid is not set. For
backwards compatibility.
idb - the same as recid, in case recidb is not set. For
backwards compatibility.
sysnb - the same as sysno, in case sysno is not set. For
backwards compatibility.
action - action to do. "SEARCH" for searching, "Browse" for
browsing. Default is to search.
d1 - first datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-08-23 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd1' takes precedence over d1y, d1m,
d1d if these are defined.
d1y - first date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d1m - first date's month (e.g. "08"). Useful for search
limits on creation/modification date.
d1d - first date's day (e.g. "23"). Useful for search
limits on creation/modification date.
d2 - second datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-09-02 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd2' takes precedence over d2y, d2m,
d2d if these are defined.
d2y - second date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d2m - second date's month (e.g. "09"). Useful for search
limits on creation/modification date.
d2d - second date's day (e.g. "02"). Useful for search
limits on creation/modification date.
dt - first and second date's type (e.g. "c"). Specifies
whether to search in creation dates ("c") or in
modification dates ("m"). When dt is not set and d1*
and d2* are set, the default is "c".
verbose - verbose level (0=min, 9=max). Useful to print some
internal information on the searching process in case
something goes wrong.
ap - alternative patterns (0=no, 1=yes). In case no exact
match is found, the search engine can try alternative
patterns e.g. to replace non-alphanumeric characters by
a boolean query. ap defines if this is wanted.
ln - language of the search interface (e.g. "en"). Useful
for internationalization.
ec - list of external search engines to search as well
(e.g. "SPIRES HEP").
wl - wildcard limit (e.g. 100): wildcard queries will be
limited to at most this many results
"""
selected_external_collections_infos = None
# wash output format:
of = wash_output_format(of)
# raise an exception when trying to print out html from the cli
if of.startswith("h"):
assert req
# for every search engine request asking for an HTML output, we
# first regenerate cache of collection and field I18N names if
# needed; so that later we won't bother checking timestamps for
# I18N names at all:
if of.startswith("h"):
collection_i18nname_cache.recreate_cache_if_needed()
field_i18nname_cache.recreate_cache_if_needed()
# wash all arguments requiring special care
try:
(cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug) = wash_colls(cc, c, sc, verbose) # which colls to search and to display?
except InvenioWebSearchUnknownCollectionError, exc:
colname = exc.colname
if of.startswith("h"):
page_start(req, of, cc, aas, ln, getUid(req),
websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
return page_end(req, of, ln)
elif of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
else:
return page_end(req, of, ln)
p = wash_pattern(p)
f = wash_field(f)
p1 = wash_pattern(p1)
f1 = wash_field(f1)
p2 = wash_pattern(p2)
f2 = wash_field(f2)
p3 = wash_pattern(p3)
f3 = wash_field(f3)
datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
# wash ranking method:
if not is_method_valid(None, rm):
rm = ""
_ = gettext_set_language(ln)
# backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
if sysnb != "" and sysno == "":
sysno = sysnb
if id > 0 and recid == -1:
recid = id
if idb > 0 and recidb == -1:
recidb = idb
# TODO deduce passed search limiting criteria (if applicable)
pl, pl_in_url = "", "" # no limits by default
if action != "browse" and req and not isinstance(req, cStringIO.OutputType) \
and req.args: # we do not want to add options while browsing or while calling via command-line
fieldargs = cgi.parse_qs(req.args)
for fieldcode in get_fieldcodes():
if fieldargs.has_key(fieldcode):
for val in fieldargs[fieldcode]:
pl += "+%s:\"%s\" " % (fieldcode, val)
pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
# deduce recid from sysno argument (if applicable):
if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
recid = get_mysql_recid_from_aleph_sysno(sysno)
if recid is None:
recid = 0 # use recid 0 to indicate that this sysno does not exist
# deduce collection we are in (if applicable):
if recid > 0:
referer = None
if req:
referer = req.headers_in.get('Referer')
cc = guess_collection_of_a_record(recid, referer)
# deduce user id (if applicable):
try:
uid = getUid(req)
except:
uid = 0
## 0 - start output
if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
## 1 - detailed record display
title, description, keywords = \
websearch_templates.tmpl_record_page_header_content(req, recid, ln)
if req is not None and not req.header_only:
page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab)
# Default format is hb but we are in detailed -> change 'of'
if of == "hb":
of = "hd"
if record_exists(recid):
if recidb <= recid: # sanity check
recidb = recid + 1
if of == "id":
return [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
else:
print_records(req, range(recid, recidb), -1, -9999, of, ot, ln, search_pattern=p, verbose=verbose, tab=tab, sf=sf, so=so, sp=sp, rm=rm)
if req and of.startswith("h"): # register detailed record page view event
client_ip_address = str(req.remote_ip)
register_page_view_event(recid, uid, client_ip_address)
else: # record does not exist
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
else:
print_warning(req, _("Requested record does not seem to exist."))
elif action == "browse":
## 2 - browse needed
of = 'hb'
page_start(req, of, cc, aas, ln, uid, _("Browse"), p=create_page_title_search_pattern_info(p, p1, p2, p3))
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action))
try:
if aas == 1 or (p1 or p2 or p3):
browse_pattern(req, colls_to_search, p1, f1, rg, ln)
browse_pattern(req, colls_to_search, p2, f2, rg, ln)
browse_pattern(req, colls_to_search, p3, f3, rg, ln)
else:
browse_pattern(req, colls_to_search, p, f, rg, ln)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
elif rm and p.startswith("recid:"):
## 3-ter - similarity search (or old-style citation search) needed
if req and not req.header_only:
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3))
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action))
if record_exists(p[6:]) != 1:
# record does not exist
if of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
else:
print_warning(req, _("Requested record does not seem to exist."))
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# the record does exist, so find records similar to it
t1 = os.times()[4]
results_similar_recIDs, results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, results_similar_comments = \
rank_records_bibrank(rm, 0, get_collection_reclist(cc), string.split(p), verbose)
if results_similar_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
print_warning(req, results_similar_comments)
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
elif of=="id":
return results_similar_recIDs
elif of.startswith("x"):
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
else:
# rank_records failed and returned some error message to display:
if of.startswith("h"):
print_warning(req, results_similar_relevances_prologue)
print_warning(req, results_similar_relevances_epilogue)
print_warning(req, results_similar_comments)
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
## 3-terter - cited by search needed
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3))
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action))
recID = p[12:]
if record_exists(recID) != 1:
# record does not exist
if of.startswith("h"):
print_warning(req, _("Requested record does not seem to exist."))
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# the record does exist, so find co-cited ones:
t1 = os.times()[4]
results_cocited_recIDs = map(lambda x: x[0], calculate_co_cited_with_list(int(recID)))
if results_cocited_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln, search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
elif of=="id":
return results_cocited_recIDs
elif of.startswith("x"):
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln, search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
else:
# cited rank_records failed and returned some error message to display:
if of.startswith("h"):
print_warning(req, "nothing found")
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
## 3 - common search needed
query_in_cache = False
query_representation_in_cache = repr((p,f,colls_to_search, wl))
page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3))
if of.startswith("h") and verbose and wash_colls_debug:
print_warning(req, "wash_colls debugging info : %s" % wash_colls_debug)
# search into the hosted collections only if the output format is html or xml
if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
# hosted_colls_results : the hosted collections' searches that did not timeout
# hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
(hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
# successful searches
if hosted_colls_results:
hosted_colls_true_results = []
for result in hosted_colls_results:
# if the number of results is None or 0 (or False) then just do nothing
if result[1] == None or result[1] == False:
# these are the searches that returned no or zero results
if verbose:
print_warning(req, "Hosted collections (perform_search_request): %s returned no results" % result[0][1].name)
else:
# these are the searches that actually returned results on time
hosted_colls_true_results.append(result)
if verbose:
print_warning(req, "Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]))
else:
if verbose:
print_warning(req, "Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time")
if hosted_colls_timeouts:
if verbose:
for timeout in hosted_colls_timeouts:
print_warning(req, "Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name)
# we need to know for later use if there were any hosted collections to be searched even if they weren't in the end
elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
(hosted_colls_results, hosted_colls_timeouts) = (None, None)
else:
if verbose:
print_warning(req, "Hosted collections (perform_search_request): there were no hosted collections to be searched")
## let's define some useful boolean variables:
# True means there are actual or potential hosted collections results to be printed
hosted_colls_actual_or_potential_results_p = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
# True means there are hosted collections timeouts to take care of later
# (useful for more accurate printing of results later)
hosted_colls_potential_results_p = not (not hosted_colls or not hosted_colls_timeouts)
# True means we only have hosted collections to deal with
only_hosted_colls_actual_or_potential_results_p = not colls_to_search and hosted_colls_actual_or_potential_results_p
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action))
t1 = os.times()[4]
results_in_any_collection = intbitset()
if aas == 1 or (p1 or p2 or p3):
## 3A - advanced search
try:
results_in_any_collection = search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
if len(results_in_any_collection) == 0:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
if p2:
results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
if op1 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op1 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op1 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
print_warning(req, "Invalid set operation %s." % cgi.escape(op1), "Error")
if len(results_in_any_collection) == 0:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
if p3:
results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
if op2 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op2 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op2 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
print_warning(req, "Invalid set operation %s." % cgi.escape(op2), "Error")
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
else:
## 3B - simple search
if search_results_cache.cache.has_key(query_representation_in_cache):
# query is already in the cache, so reuse the cached results:
query_in_cache = True
results_in_any_collection = search_results_cache.cache[query_representation_in_cache]
if verbose and of.startswith("h"):
print_warning(req, "Search stage 0: query found in cache, reusing cached results.")
else:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know we only have actual or potential hosted collections results
if not only_hosted_colls_actual_or_potential_results_p:
results_in_any_collection = search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln, display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p, wl=wl)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
if len(results_in_any_collection) == 0 and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
# store this search query results into search results cache if needed:
if CFG_WEBSEARCH_SEARCH_CACHE_SIZE and not query_in_cache:
if len(search_results_cache.cache) > CFG_WEBSEARCH_SEARCH_CACHE_SIZE:
search_results_cache.clear()
search_results_cache.cache[query_representation_in_cache] = results_in_any_collection
if verbose and of.startswith("h"):
print_warning(req, "Search stage 3: storing query results in cache.")
# search stage 4: intersection with collection universe:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
            # recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know since the last stage that we have no results in any collection
if len(results_in_any_collection) != 0:
results_final = intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, ap, of, verbose, ln, display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p)
else:
results_final = {}
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
# search stage 5: apply search option limits and restrictions:
if datetext1 != "" and results_final != {}:
if verbose and of.startswith("h"):
print_warning(req, "Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2))
try:
results_final = intersect_results_with_hitset(req,
results_final,
search_unit_in_bibrec(datetext1, datetext2, dt),
ap,
aptext= _("No match within your time limits, "
"discarding this condition..."),
of=of)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
#if of.startswith("x"):
# # Print empty, but valid XML
# print_records_prologue(req, of)
# print_records_epilogue(req, of)
return page_end(req, of, ln)
if pl and results_final != {}:
pl = wash_pattern(pl)
if verbose and of.startswith("h"):
print_warning(req, "Search stage 5: applying search pattern limit %s..." % cgi.escape(pl))
try:
results_final = intersect_results_with_hitset(req,
results_final,
search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
ap,
aptext=_("No match within your search limits, "
"discarding this condition..."),
of=of)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
t2 = os.times()[4]
cpu_time = t2 - t1
## search stage 6: display results:
results_final_nb_total = 0
results_final_nb = {} # will hold number of records found in each collection
# (in simple dict to display overview more easily)
for coll in results_final.keys():
results_final_nb[coll] = len(results_final[coll])
#results_final_nb_total += results_final_nb[coll]
# Now let us calculate results_final_nb_total more precisely,
# in order to get the total number of "distinct" hits across
# searched collections; this is useful because a record might
# have been attributed to more than one primary collection; so
# we have to avoid counting it multiple times. The price to
# pay for this accuracy of results_final_nb_total is somewhat
# increased CPU time.
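        # Illustrative sketch (editor's note, hypothetical record IDs): if record
        # 7 belongs to both "Articles" and "Preprints", summing the per-collection
        # counts would count it twice, whereas unioning the per-collection hitsets
        # into a single intbitset, as the code below does, counts it exactly once.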
        if len(results_final) == 1:
# only one collection; no need to union them
results_final_for_all_selected_colls = results_final.values()[0]
results_final_nb_total = results_final_nb.values()[0]
else:
# okay, some work ahead to union hits across collections:
results_final_for_all_selected_colls = intbitset()
for coll in results_final.keys():
results_final_for_all_selected_colls.union_update(results_final[coll])
results_final_nb_total = len(results_final_for_all_selected_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
for result in hosted_colls_true_results:
colls_to_search.append(result[0][1].name)
results_final_nb[result[0][1].name] = result[1]
results_final_nb_total += result[1]
cpu_time += result[2]
if hosted_colls_timeouts:
for timeout in hosted_colls_timeouts:
colls_to_search.append(timeout[1].name)
# use -963 as a special number to identify the collections that timed out
results_final_nb[timeout[1].name] = -963
# we continue past this point only if there is a hosted collection that has timed out and might offer potential results
if results_final_nb_total ==0 and not hosted_colls_potential_results_p:
if of.startswith("h"):
print_warning(req, "No match found, please enter different search terms.")
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# print results overview:
if of == "id":
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec)
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_BUCKETS and sorting_methods): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
return recIDs
elif of.startswith("h"):
if of not in ['hcs']:
# added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time, ln, ec, hosted_colls_potential_results_p=hosted_colls_potential_results_p))
selected_external_collections_infos = print_external_results_overview(req, cc, [p, p1, p2, p3], f, ec, verbose, ln)
# print number of hits found for XML outputs:
if of.startswith("x"):
req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % results_final_nb_total)
# print records:
if of in ['hcs']:
# feed the current search to be summarized:
from invenio.search_engine_summarizer import summarize_records
search_p = p
search_f = f
if not p and (aas == 1 or p1 or p2 or p3):
op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
triples_len = len(triples)
for i in range(triples_len):
fi, pi, oi = triples[i] # e.g.:
if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
triples[i+1][0] = '' # f2 must be too
oi = '' # and o1
if ' ' in pi:
pi = '"'+pi+'"'
if fi:
fi = fi + ':'
search_p += fi + pi + op_d[oi]
search_f = ''
summarize_records(results_final_for_all_selected_colls, 'hcs', ln, search_p, search_f, req)
else:
if len(colls_to_search)>1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if results_final.has_key(coll) and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
results_final_recIDs = list(results_final[coll])
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec)
if of.startswith("h"):
print_warning(req, results_final_comments)
if results_final_recIDs_ranked:
results_final_recIDs = results_final_recIDs_ranked
else:
# rank_records failed and returned some error message to display:
print_warning(req, results_final_relevances_prologue)
print_warning(req, results_final_relevances_epilogue)
elif sf or (CFG_BIBSORT_BUCKETS and sorting_methods): # do we have to sort?
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] == None or result[1] == False:
                                    ## these are the searches that returned no or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
# log query:
try:
id_query = log_query(req.remote_host, req.args, uid)
if of.startswith("h") and id_query:
if not of in ['hcs']:
# display alert/RSS teaser for non-summary formats:
user_info = collect_user_info(req)
display_email_alert_part = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_email_alert_part = False
else:
if not user_info['precached_usealerts']:
display_email_alert_part = False
req.write(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query, \
ln=ln, display_email_alert_part=display_email_alert_part))
except:
# do not log query if req is None (used by CLI interface)
pass
log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
# External searches
if of.startswith("h"):
if not of in ['hcs']:
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
def perform_request_cache(req, action="show"):
"""Manipulates the search engine cache."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
out = ""
out += "<h1>Search Cache</h1>"
# clear cache if requested:
if action == "clear":
search_results_cache.clear()
req.write(out)
# show collection reclist cache:
out = "<h3>Collection reclist cache</h3>"
out += "- collection table last updated: %s" % get_table_update_time('collection')
out += "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp
out += "<br />- reclist cache contents:"
out += "<blockquote>"
for coll in collection_reclist_cache.cache.keys():
if collection_reclist_cache.cache[coll]:
out += "%s (%d)<br />" % (coll, len(collection_reclist_cache.cache[coll]))
out += "</blockquote>"
req.write(out)
# show search results cache:
out = "<h3>Search Cache</h3>"
out += "- search cache usage: %d queries cached (max. ~%d)" % \
(len(search_results_cache.cache), CFG_WEBSEARCH_SEARCH_CACHE_SIZE)
if len(search_results_cache.cache):
out += "<br />- search cache contents:"
out += "<blockquote>"
for query, hitset in search_results_cache.cache.items():
out += "<br />%s ... %s" % (query, hitset)
out += """<p><a href="%s/search/cache?action=clear">clear search results cache</a>""" % CFG_SITE_URL
out += "</blockquote>"
req.write(out)
# show field i18nname cache:
out = "<h3>Field I18N names cache</h3>"
out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
out += "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for field in field_i18nname_cache.cache.keys():
for ln in field_i18nname_cache.cache[field].keys():
out += "%s, %s = %s<br />" % (field, ln, field_i18nname_cache.cache[field][ln])
out += "</blockquote>"
req.write(out)
# show collection i18nname cache:
out = "<h3>Collection I18N names cache</h3>"
out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
out += "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for coll in collection_i18nname_cache.cache.keys():
for ln in collection_i18nname_cache.cache[coll].keys():
out += "%s, %s = %s<br />" % (coll, ln, collection_i18nname_cache.cache[coll][ln])
out += "</blockquote>"
req.write(out)
req.write("</html>")
return "\n"
def perform_request_log(req, date=""):
"""Display search log information for given date."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
req.write("<h1>Search Log</h1>")
if date: # case A: display stats for a day
yyyymmdd = string.atoi(date)
req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
# read file:
p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
lines = p.readlines()
p.close()
# process lines:
i = 0
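        # Illustrative line layout assumed by the split below (editor's note;
        # the field values are hypothetical):
        #   "20140801183138#0#higgs boson#title#CERN Articles#42"
        # i.e. YYYYMMDDHHMMSS#aas#pattern#field#collection#nbhits, '#'-separated.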
for line in lines:
try:
datetime, dummy_aas, p, f, c, nbhits = string.split(line,"#")
i += 1
req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" \
% (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
except:
pass # ignore eventual wrong log lines
req.write("</table>")
else: # case B: display summary stats per day
yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
for day in range(yyyymm01, yyyymmdd + 1):
p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
for line in p.readlines():
req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" % \
(day, CFG_SITE_URL, day, line))
p.close()
req.write("</table>")
req.write("</html>")
return "\n"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
    table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
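# Illustrative usage (editor's sketch; the tag and returned values are hypothetical):
# get_all_field_values('980__a') reads table bib98x and might return
# ['ARTICLE', 'PREPRINT', 'THESIS'].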
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True):
"""
Analyze RECIDS and look for TAGS and return most popular values
and the frequency with which they occur sorted according to
descending frequency.
If a value is found in EXCLUDE_VALUES, then do not count it.
If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
of value in the tags. If False, then we count the value only once
regardless of the number of times it may appear in a record.
(But, if the same value occurs in another record, we count it, of
course.)
Example:
>>> get_most_popular_field_values(range(11,20), '980__a')
(('PREPRINT', 10), ('THESIS', 7), ...)
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
(('Ellis, J', 10), ('Ellis, N', 7), ...)
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
(('Ellis, N', 7), ...)
"""
def _get_most_popular_field_values_helper_sorter(val1, val2):
"Compare VAL1 and VAL2 according to, firstly, frequency, then secondly, alphabetically."
compared_via_frequencies = cmp(valuefreqdict[val2], valuefreqdict[val1])
if compared_via_frequencies == 0:
return cmp(val1.lower(), val2.lower())
else:
return compared_via_frequencies
valuefreqdict = {}
## sanity check:
if not exclude_values:
exclude_values = []
if isinstance(tags, str):
tags = (tags,)
## find values to count:
vals_to_count = []
displaytmp = {}
if count_repetitive_values:
# counting technique A: can look up many records at once: (very fast)
for tag in tags:
vals_to_count.extend(get_fieldvalues(recids, tag, sort=False))
else:
# counting technique B: must count record-by-record: (slow)
for recid in recids:
vals_in_rec = []
for tag in tags:
for val in get_fieldvalues(recid, tag, False):
vals_in_rec.append(val)
# do not count repetitive values within this record
# (even across various tags, so need to unify again):
dtmp = {}
for val in vals_in_rec:
dtmp[val.lower()] = 1
displaytmp[val.lower()] = val
vals_in_rec = dtmp.keys()
vals_to_count.extend(vals_in_rec)
## are we to exclude some of found values?
for val in vals_to_count:
if val not in exclude_values:
if valuefreqdict.has_key(val):
valuefreqdict[val] += 1
else:
valuefreqdict[val] = 1
## sort by descending frequency of values:
out = ()
vals = valuefreqdict.keys()
vals.sort(_get_most_popular_field_values_helper_sorter)
for val in vals:
tmpdisplv = ''
if displaytmp.has_key(val):
tmpdisplv = displaytmp[val]
else:
tmpdisplv = val
out += (tmpdisplv, valuefreqdict[val]),
return out
def profile(p="", f="", c=CFG_SITE_NAME):
"""Profile search time."""
import profile
import pstats
profile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile")
p = pstats.Stats("perform_request_search_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
return 0
| jrbl/invenio | modules/websearch/lib/search_engine.py | Python | gpl-2.0 | 270,395 |
import hashlib
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.patches import Rectangle
import os
import shutil
import tempfile
from sar_parser import SarParser
# If there are more than 50 plots in a graph we move the legend to the
# bottom
LEGEND_THRESHOLD = 50
def ascii_date(d):
return "%s" % (d.strftime("%Y-%m-%d %H:%M"))
class SarGrapher(object):
def __init__(self, filenames, starttime=None, endtime=None):
"""Initializes the class, creates a SarParser class
        given a list of files, and also parses the files"""
# Temporary dir where images are stored (one per graph)
# NB: This is done to keep the memory usage constant
# in spite of being a bit slower (before this change
# we could use > 12GB RAM for a simple sar file -
# matplotlib is simply inefficient in this area)
self._tempdir = tempfile.mkdtemp(prefix='sargrapher')
self.sar_parser = SarParser(filenames, starttime, endtime)
self.sar_parser.parse()
duplicate_timestamps = self.sar_parser._duplicate_timestamps
if duplicate_timestamps:
print("There are {0} lines with duplicate timestamps. First 10"
"line numbers at {1}".format(
len(duplicate_timestamps.keys()),
sorted(list(duplicate_timestamps.keys()))[:10]))
def _graph_filename(self, graph, extension='.png'):
"""Creates a unique constant file name given a graph or graph list"""
if isinstance(graph, list):
temp = "_".join(graph)
else:
temp = graph
temp = temp.replace('%', '_')
temp = temp.replace('/', '_')
digest = hashlib.sha1()
digest.update(temp.encode('utf-8'))
fname = os.path.join(self._tempdir, digest.hexdigest() + extension)
return fname
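    # Illustrative behaviour (editor's note, hypothetical dataset names): for
    # _graph_filename(['%usr', '%sys']) the list is joined to "%usr_%sys", the
    # '%' and '/' characters are replaced to give "_usr__sys", and the SHA1 hex
    # digest of that string plus the extension becomes the constant file name
    # inside self._tempdir.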
def datasets(self):
"""Returns a list of all the available datasets"""
return self.sar_parser.available_data_types()
def timestamps(self):
"""Returns a list of all the available datasets"""
return sorted(self.sar_parser.available_timestamps())
def plot_datasets(self, data, fname, extra_labels, showreboots=False,
output='pdf'):
""" Plot timeseries data (of type dataname). The data can be either
simple (one or no datapoint at any point in time, or indexed (by
indextype). dataname is assumed to be in the form of [title, [label1,
label2, ...], [data1, data2, ...]] extra_labels is a list of tuples
[(datetime, 'label'), ...] """
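        # Illustrative call (editor's sketch, hypothetical dataset names):
        # plot_datasets((['CPU usage', '%', ['cpu0#%usr', 'cpu1#%usr']],
        #                ['cpu0#%usr', 'cpu1#%usr']), 'cpu.png', [])
        # i.e. data[0] = [title, unit, axis_labels] and data[1] = dataset keys.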
sar_parser = self.sar_parser
title = data[0][0]
unit = data[0][1]
axis_labels = data[0][2]
datanames = data[1]
if not isinstance(datanames, list):
raise Exception("plottimeseries expects a list of datanames: %s" %
data)
fig = plt.figure(figsize=(10.5, 6.5))
axes = fig.add_subplot(111)
axes.set_title('{0} time series'.format(title), fontsize=12)
axes.set_xlabel('Time')
axes.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
# Twenty minutes. Could probably make it a parameter
axes.xaxis.set_minor_locator(mdates.MinuteLocator(interval=20))
fig.autofmt_xdate()
ylabel = title
if unit:
ylabel += " - " + unit
axes.set_ylabel(ylabel)
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes.yaxis.set_major_formatter(y_formatter)
axes.yaxis.get_major_formatter().set_scientific(False)
color_norm = colors.Normalize(vmin=0, vmax=len(datanames) - 1)
scalar_map = cm.ScalarMappable(norm=color_norm,
cmap=plt.get_cmap('Set1'))
timestamps = self.timestamps()
counter = 0
for i in datanames:
try:
dataset = [sar_parser._data[d][i] for d in timestamps]
except:
print("Key {0} does not exist in this graph".format(i))
raise
axes.plot(timestamps, dataset, 'o:', label=axis_labels[counter],
color=scalar_map.to_rgba(counter))
counter += 1
# Draw extra_labels
if extra_labels:
for extra in extra_labels:
axes.annotate(extra[1], xy=(mdates.date2num(extra[0]),
sar_parser.find_max(extra[0], datanames)),
xycoords='data', xytext=(30, 30),
textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=.2"))
# If we have a sosreport draw the reboots
if showreboots and sar_parser.sosreport is not None and \
sar_parser.sosreport.reboots is not None:
reboots = sar_parser.sosreport.reboots
for reboot in reboots.keys():
reboot_date = reboots[reboot]['date']
rboot_x = mdates.date2num(reboot_date)
(xmin, xmax) = plt.xlim()
(ymin, ymax) = plt.ylim()
if rboot_x < xmin or rboot_x > xmax:
continue
axes.annotate('', xy=(mdates.date2num(reboot_date), ymin),
xycoords='data', xytext=(-30, -30),
textcoords='offset points',
arrowprops=dict(arrowstyle="->", color='blue',
connectionstyle="arc3,rad=-0.1"))
# Show any data collection gaps in the graph
gaps = sar_parser.find_data_gaps()
if len(gaps) > 0:
for i in gaps:
(g1, g2) = i
x1 = mdates.date2num(g1)
x2 = mdates.date2num(g2)
(ymin, ymax) = plt.ylim()
axes.add_patch(Rectangle((x1, ymin), x2 - x1,
ymax - ymin, facecolor="lightgrey"))
# Add a grid to the graph to ease visualization
axes.grid(True)
lgd = None
# Draw the legend only when needed
if len(datanames) > 1 or \
(len(datanames) == 1 and len(datanames[0].split('#')) > 1):
# We want the legends box roughly square shaped
# and not take up too much room
props = matplotlib.font_manager.FontProperties(size='xx-small')
if len(datanames) < LEGEND_THRESHOLD:
cols = int((len(datanames) ** 0.5))
lgd = axes.legend(loc=1, ncol=cols, shadow=True, prop=props)
else:
cols = int(len(datanames) ** 0.6)
lgd = axes.legend(loc=9, ncol=cols,
bbox_to_anchor=(0.5, -0.29),
shadow=True, prop=props)
if len(datanames) == 0:
return None
try:
if lgd:
plt.savefig(fname, bbox_extra_artists=(lgd,),
bbox_inches='tight')
else:
plt.savefig(fname, bbox_inches='tight')
except:
import traceback
print(traceback.format_exc())
import sys
sys.exit(-1)
plt.cla()
plt.clf()
plt.close('all')
def plot_svg(self, graphs, output, labels):
"""Given a list of graphs, output an svg file per graph.
Input is a list of strings. A graph with multiple datasets
is a string with datasets separated by comma"""
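        # Illustrative input (editor's sketch, hypothetical dataset names):
        # graphs = ['ldavg-1', 'kbmemfree,kbmemused'] produces graph1.svg with a
        # single dataset and graph2.svg with the two memory datasets plotted
        # together (assuming the default 'graph' output prefix).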
if output == 'out.pdf':
output = 'graph'
counter = 1
fnames = []
for i in graphs:
subgraphs = i.split(',')
fname = self._graph_filename(subgraphs, '.svg')
fnames.append(fname)
self.plot_datasets((['', None, subgraphs], subgraphs), fname,
labels)
dest = os.path.join(os.getcwd(), "{0}{1}.svg".format(
output, counter))
shutil.move(fname, dest)
print("Created: {0}".format(dest))
counter += 1
# removes all temporary files and directories
self.close()
def plot_ascii(self, graphs, def_columns=80, def_rows=25):
"""Displays a single graph in ASCII form on the terminal"""
import subprocess
sar_parser = self.sar_parser
timestamps = self.timestamps()
try:
            rows, columns = [int(x) for x in os.popen('stty size', 'r').read().split()]
except:
columns = def_columns
rows = def_rows
if columns > def_columns:
columns = def_columns
for graph in graphs:
try:
gnuplot = subprocess.Popen(["/usr/bin/gnuplot"],
stdin=subprocess.PIPE)
except Exception as e:
raise("Error launching gnuplot: {0}".format(e))
gnuplot.stdin.write("set term dumb {0} {1}\n".format(
columns, rows))
gnuplot.stdin.write("set xdata time\n")
gnuplot.stdin.write('set xlabel "Time"\n')
gnuplot.stdin.write('set timefmt \"%Y-%m-%d %H:%M\"\n')
gnuplot.stdin.write('set xrange [\"%s\":\"%s\"]\n' %
(ascii_date(timestamps[0]),
ascii_date(timestamps[-1])))
gnuplot.stdin.write('set ylabel "%s"\n' % (graph))
gnuplot.stdin.write('set datafile separator ","\n')
gnuplot.stdin.write('set autoscale y\n')
gnuplot.stdin.write('set title "%s - %s"\n' %
(graph, " ".join(sar_parser._files)))
# FIXME: do it through a method
try:
dataset = [sar_parser._data[d][graph] for d in timestamps]
except KeyError:
print("Key '{0}' could not be found")
return
txt = "plot '-' using 1:2 title '{0}' with linespoints \n".format(
graph)
gnuplot.stdin.write(txt)
for i, j in zip(timestamps, dataset):
s = '\"%s\",%f\n' % (ascii_date(i), j)
gnuplot.stdin.write(s)
gnuplot.stdin.write("e\n")
gnuplot.stdin.write("exit\n")
gnuplot.stdin.flush()
def export_csv(self):
return
def close(self):
"""Removes temporary directory and files"""
if os.path.isdir(self._tempdir):
shutil.rmtree(self._tempdir)
| mbaldessari/sarstats | sar_grapher.py | Python | gpl-2.0 | 10,858 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.501688
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/gettags.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class gettags(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(gettags, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_91099948 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2tags>
''')
for tag in VFFSL(SL,"tags",True): # generated from line 4, col 2
write(u'''\t\t<e2tag>''')
_v = VFFSL(SL,"tag",True) # u'$tag' on line 5, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$tag')) # from line 5, col 10.
write(u'''</e2tag>
''')
write(u'''</e2tags>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_91099948
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_gettags= 'respond'
## END CLASS DEFINITION
if not hasattr(gettags, '_initCheetahAttributes'):
templateAPIClass = getattr(gettags, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(gettags)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=gettags()).run()
| MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/web/gettags.py | Python | gpl-2.0 | 5,093 |
"""
.. module:: editor_subscribe_label_deleted
The **Editor Subscribe Label Deleted** Model.
PostgreSQL Definition
---------------------
The :code:`editor_subscribe_label_deleted` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE editor_subscribe_label_deleted
(
editor INTEGER NOT NULL, -- PK, references editor.id
gid UUID NOT NULL, -- PK, references deleted_entity.gid
deleted_by INTEGER NOT NULL -- references edit.id
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class editor_subscribe_label_deleted(models.Model):
"""
Not all parameters are listed here, only those that present some interest
in their Django implementation.
:param editor: references :class:`.editor`
:param gid: references :class:`.deleted_entity`
:param deleted_by: references :class:`.edit`
"""
editor = models.OneToOneField('editor', primary_key=True)
gid = models.OneToOneField('deleted_entity')
deleted_by = models.ForeignKey('edit')
def __str__(self):
return 'Editor Subscribe Label Deleted'
class Meta:
db_table = 'editor_subscribe_label_deleted'
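# Illustrative ORM usage (editor's sketch; variable names are hypothetical):
# editor_subscribe_label_deleted.objects.filter(deleted_by=some_edit) lists the
# label subscriptions that were removed by a given edit.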
| marios-zindilis/musicbrainz-django-models | musicbrainz_django_models/models/editor_subscribe_label_deleted.py | Python | gpl-2.0 | 1,251 |
#!/usr/bin/env python
####################################################################################################
####################################################################################################
## ##
## Hove's Raspberry Pi Python Quadcopter Flight Controller. Open Source @ GitHub ##
## PiStuffing/Quadcopter under GPL for non-commercial application. Any code derived from ##
## this should retain this copyright comment. ##
## ##
## Copyright 2012 - 2018 Andy Baker (Hove) - [email protected] ##
## ##
####################################################################################################
####################################################################################################
from __future__ import division
from __future__ import with_statement
import signal
import socket
import time
import sys
import getopt
import math
from array import *
import smbus
import select
import os
import io
import logging
import csv
from RPIO import PWM
import RPi.GPIO as GPIO
import subprocess
import ctypes
from ctypes.util import find_library
import picamera
import struct
import gps
import serial
MIN_SATS = 7
EARTH_RADIUS = 6371000 # meters
GRAV_ACCEL = 9.80665 # meters per second per second
RC_PASSIVE = 0
RC_TAKEOFF = 1
RC_FLYING = 2
RC_LANDING = 3
RC_DONE = 4
RC_ABORT = 5
rc_status_name = ["PASSIVE", "TAKEOFF", "FLYING", "LANDING", "DONE", "ABORT"]
FULL_FIFO_BATCHES = 20 # << int(512 / 12)
####################################################################################################
#
# Adafruit I2C interface enhanced with performance and error handling
#
####################################################################################################
class I2C:
def __init__(self, address, bus=smbus.SMBus(1)):
self.address = address
self.bus = bus
self.misses = 0
def writeByte(self, value):
self.bus.write_byte(self.address, value)
def write8(self, reg, value):
self.bus.write_byte_data(self.address, reg, value)
def writeList(self, reg, list):
self.bus.write_i2c_block_data(self.address, reg, list)
def readU8(self, reg):
result = self.bus.read_byte_data(self.address, reg)
return result
def readS8(self, reg):
result = self.bus.read_byte_data(self.address, reg)
result = result - 256 if result > 127 else result
return result
def readU16(self, reg):
hibyte = self.bus.read_byte_data(self.address, reg)
result = (hibyte << 8) + self.bus.read_byte_data(self.address, reg+1)
return result
def readS16(self, reg):
hibyte = self.bus.read_byte_data(self.address, reg)
hibyte = hibyte - 256 if hibyte > 127 else hibyte
result = (hibyte << 8) + self.bus.read_byte_data(self.address, reg+1)
return result
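    # Worked example (editor's note, hypothetical register contents): a high
    # byte of 0xFE (-2 after sign correction) and a low byte of 0x70 (112)
    # make readS16 return (-2 << 8) + 112 = -400.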
def readList(self, reg, length):
"Reads a byte array value from the I2C device. The content depends on the device. The "
"FIFO read return sequential values from the same register. For all other, sequestial"
"regester values are returned"
result = self.bus.read_i2c_block_data(self.address, reg, length)
return result
####################################################################################################
#
# Gyroscope / Accelerometer class for reading position / movement. Works with the Invensense IMUs:
#
# - MPU-6050
# - MPU-9150
# - MPU-9250
#
####################################################################################################
class MPU6050:
i2c = None
# Registers/etc.
__MPU6050_RA_SELF_TEST_XG = 0x00
__MPU6050_RA_SELF_TEST_YG = 0x01
__MPU6050_RA_SELF_TEST_ZG = 0x02
__MPU6050_RA_SELF_TEST_XA = 0x0D
__MPU6050_RA_SELF_TEST_YA = 0x0E
__MPU6050_RA_SELF_TEST_ZA = 0x0F
__MPU6050_RA_XG_OFFS_USRH = 0x13
__MPU6050_RA_XG_OFFS_USRL = 0x14
__MPU6050_RA_YG_OFFS_USRH = 0x15
__MPU6050_RA_YG_OFFS_USRL = 0x16
__MPU6050_RA_ZG_OFFS_USRH = 0x17
__MPU6050_RA_ZG_OFFS_USRL = 0x18
__MPU6050_RA_SMPLRT_DIV = 0x19
__MPU6050_RA_CONFIG = 0x1A
__MPU6050_RA_GYRO_CONFIG = 0x1B
__MPU6050_RA_ACCEL_CONFIG = 0x1C
__MPU9250_RA_ACCEL_CFG_2 = 0x1D
__MPU6050_RA_FF_THR = 0x1D
__MPU6050_RA_FF_DUR = 0x1E
__MPU6050_RA_MOT_THR = 0x1F
__MPU6050_RA_MOT_DUR = 0x20
__MPU6050_RA_ZRMOT_THR = 0x21
__MPU6050_RA_ZRMOT_DUR = 0x22
__MPU6050_RA_FIFO_EN = 0x23
__MPU6050_RA_I2C_MST_CTRL = 0x24
__MPU6050_RA_I2C_SLV0_ADDR = 0x25
__MPU6050_RA_I2C_SLV0_REG = 0x26
__MPU6050_RA_I2C_SLV0_CTRL = 0x27
__MPU6050_RA_I2C_SLV1_ADDR = 0x28
__MPU6050_RA_I2C_SLV1_REG = 0x29
__MPU6050_RA_I2C_SLV1_CTRL = 0x2A
__MPU6050_RA_I2C_SLV2_ADDR = 0x2B
__MPU6050_RA_I2C_SLV2_REG = 0x2C
__MPU6050_RA_I2C_SLV2_CTRL = 0x2D
__MPU6050_RA_I2C_SLV3_ADDR = 0x2E
__MPU6050_RA_I2C_SLV3_REG = 0x2F
__MPU6050_RA_I2C_SLV3_CTRL = 0x30
__MPU6050_RA_I2C_SLV4_ADDR = 0x31
__MPU6050_RA_I2C_SLV4_REG = 0x32
__MPU6050_RA_I2C_SLV4_DO = 0x33
__MPU6050_RA_I2C_SLV4_CTRL = 0x34
__MPU6050_RA_I2C_SLV4_DI = 0x35
__MPU6050_RA_I2C_MST_STATUS = 0x36
__MPU6050_RA_INT_PIN_CFG = 0x37
__MPU6050_RA_INT_ENABLE = 0x38
__MPU6050_RA_DMP_INT_STATUS = 0x39
__MPU6050_RA_INT_STATUS = 0x3A
__MPU6050_RA_ACCEL_XOUT_H = 0x3B
__MPU6050_RA_ACCEL_XOUT_L = 0x3C
__MPU6050_RA_ACCEL_YOUT_H = 0x3D
__MPU6050_RA_ACCEL_YOUT_L = 0x3E
__MPU6050_RA_ACCEL_ZOUT_H = 0x3F
__MPU6050_RA_ACCEL_ZOUT_L = 0x40
__MPU6050_RA_TEMP_OUT_H = 0x41
__MPU6050_RA_TEMP_OUT_L = 0x42
__MPU6050_RA_GYRO_XOUT_H = 0x43
__MPU6050_RA_GYRO_XOUT_L = 0x44
__MPU6050_RA_GYRO_YOUT_H = 0x45
__MPU6050_RA_GYRO_YOUT_L = 0x46
__MPU6050_RA_GYRO_ZOUT_H = 0x47
__MPU6050_RA_GYRO_ZOUT_L = 0x48
__MPU6050_RA_EXT_SENS_DATA_00 = 0x49
__MPU6050_RA_EXT_SENS_DATA_01 = 0x4A
__MPU6050_RA_EXT_SENS_DATA_02 = 0x4B
__MPU6050_RA_EXT_SENS_DATA_03 = 0x4C
__MPU6050_RA_EXT_SENS_DATA_04 = 0x4D
__MPU6050_RA_EXT_SENS_DATA_05 = 0x4E
__MPU6050_RA_EXT_SENS_DATA_06 = 0x4F
__MPU6050_RA_EXT_SENS_DATA_07 = 0x50
__MPU6050_RA_EXT_SENS_DATA_08 = 0x51
__MPU6050_RA_EXT_SENS_DATA_09 = 0x52
__MPU6050_RA_EXT_SENS_DATA_10 = 0x53
__MPU6050_RA_EXT_SENS_DATA_11 = 0x54
__MPU6050_RA_EXT_SENS_DATA_12 = 0x55
__MPU6050_RA_EXT_SENS_DATA_13 = 0x56
__MPU6050_RA_EXT_SENS_DATA_14 = 0x57
__MPU6050_RA_EXT_SENS_DATA_15 = 0x58
__MPU6050_RA_EXT_SENS_DATA_16 = 0x59
__MPU6050_RA_EXT_SENS_DATA_17 = 0x5A
__MPU6050_RA_EXT_SENS_DATA_18 = 0x5B
__MPU6050_RA_EXT_SENS_DATA_19 = 0x5C
__MPU6050_RA_EXT_SENS_DATA_20 = 0x5D
__MPU6050_RA_EXT_SENS_DATA_21 = 0x5E
__MPU6050_RA_EXT_SENS_DATA_22 = 0x5F
__MPU6050_RA_EXT_SENS_DATA_23 = 0x60
__MPU6050_RA_MOT_DETECT_STATUS = 0x61
__MPU6050_RA_I2C_SLV0_DO = 0x63
__MPU6050_RA_I2C_SLV1_DO = 0x64
__MPU6050_RA_I2C_SLV2_DO = 0x65
__MPU6050_RA_I2C_SLV3_DO = 0x66
__MPU6050_RA_I2C_MST_DELAY_CTRL = 0x67
__MPU6050_RA_SIGNAL_PATH_RESET = 0x68
__MPU6050_RA_MOT_DETECT_CTRL = 0x69
__MPU6050_RA_USER_CTRL = 0x6A
__MPU6050_RA_PWR_MGMT_1 = 0x6B
__MPU6050_RA_PWR_MGMT_2 = 0x6C
__MPU6050_RA_BANK_SEL = 0x6D
__MPU6050_RA_MEM_START_ADDR = 0x6E
__MPU6050_RA_MEM_R_W = 0x6F
__MPU6050_RA_DMP_CFG_1 = 0x70
__MPU6050_RA_DMP_CFG_2 = 0x71
__MPU6050_RA_FIFO_COUNTH = 0x72
__MPU6050_RA_FIFO_COUNTL = 0x73
__MPU6050_RA_FIFO_R_W = 0x74
__MPU6050_RA_WHO_AM_I = 0x75
#-----------------------------------------------------------------------------------------------
# Compass output registers when using the I2C master / slave
#-----------------------------------------------------------------------------------------------
__MPU9250_RA_MAG_XOUT_L = 0x4A
__MPU9250_RA_MAG_XOUT_H = 0x4B
__MPU9250_RA_MAG_YOUT_L = 0x4C
__MPU9250_RA_MAG_YOUT_H = 0x4D
__MPU9250_RA_MAG_ZOUT_L = 0x4E
__MPU9250_RA_MAG_ZOUT_H = 0x4F
#-----------------------------------------------------------------------------------------------
# Compass output registers when directly accessing via IMU bypass
#-----------------------------------------------------------------------------------------------
__AK893_RA_WIA = 0x00
__AK893_RA_INFO = 0x01
__AK893_RA_ST1 = 0x00
__AK893_RA_X_LO = 0x03
__AK893_RA_X_HI = 0x04
__AK893_RA_Y_LO = 0x05
__AK893_RA_Y_HI = 0x06
__AK893_RA_Z_LO = 0x07
__AK893_RA_Z_HI = 0x08
__AK893_RA_ST2 = 0x09
__AK893_RA_CNTL1 = 0x0A
__AK893_RA_RSV = 0x0B
__AK893_RA_ASTC = 0x0C
__AK893_RA_TS1 = 0x0D
__AK893_RA_TS2 = 0x0E
__AK893_RA_I2CDIS = 0x0F
__AK893_RA_ASAX = 0x10
__AK893_RA_ASAY = 0x11
__AK893_RA_ASAZ = 0x12
__RANGE_ACCEL = 8 #AB: +/- 8g
__RANGE_GYRO = 250 #AB: +/- 250o/s
__SCALE_GYRO = math.radians(2 * __RANGE_GYRO / 65536)
__SCALE_ACCEL = 2 * __RANGE_ACCEL / 65536
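    # Worked example (editor's note): with a +/- 250 degrees/s gyro range the
    # signed 16-bit reading spans 65536 counts, so one LSB is
    # 2 * 250 / 65536 ~= 0.0076 degrees/s ~= 0.000133 rad/s; likewise one
    # accelerometer LSB is 2 * 8 / 65536 ~= 0.000244 g.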
def __init__(self, address=0x68, alpf=2, glpf=1):
self.i2c = I2C(address)
self.address = address
self.min_az = 0.0
self.max_az = 0.0
self.min_gx = 0.0
self.max_gx = 0.0
self.min_gy = 0.0
self.max_gy = 0.0
self.min_gz = 0.0
self.max_gz = 0.0
self.ax_offset = 0.0
self.ay_offset = 0.0
self.az_offset = 0.0
self.gx_offset = 0.0
self.gy_offset = 0.0
self.gz_offset = 0.0
        logger.info('Resetting MPU-6050')
#-------------------------------------------------------------------------------------------
# Reset all registers
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_PWR_MGMT_1, 0x80)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Sets sample rate to 1kHz/(1+0) = 1kHz or 1ms (note 1kHz assumes dlpf is on - setting
# dlpf to 0 or 7 changes 1kHz to 8kHz and therefore will require sample rate divider
        # to be changed to 7 to obtain the same 1kHz sample rate.)
#-------------------------------------------------------------------------------------------
sample_rate_divisor = int(round(adc_frequency / sampling_rate))
logger.warning("SRD:, %d", sample_rate_divisor)
self.i2c.write8(self.__MPU6050_RA_SMPLRT_DIV, sample_rate_divisor - 1)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Sets clock source to gyro reference w/ PLL
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_PWR_MGMT_1, 0x01)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Gyro DLPF => 1kHz sample frequency used above divided by the sample divide factor.
#
# 0x00 = 250Hz @ 8kHz sampling - DO NOT USE, THE ACCELEROMETER STILL SAMPLES AT 1kHz WHICH PRODUCES EXPECTED BUT NOT CODED FOR TIMING AND FIFO CONTENT PROBLEMS
# 0x01 = 184Hz
# 0x02 = 92Hz
# 0x03 = 41Hz
# 0x04 = 20Hz
# 0x05 = 10Hz
# 0x06 = 5Hz
# 0x07 = 3600Hz @ 8kHz
#
# 0x0* FIFO overflow overwrites oldest FIFO contents
# 0x4* FIFO overflow does not overwrite full FIFO contents
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_CONFIG, 0x40 | glpf)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Disable gyro self tests, scale of +/- 250 degrees/s
#
# 0x00 = +/- 250 degrees/s
# 0x08 = +/- 500 degrees/s
# 0x10 = +/- 1000 degrees/s
# 0x18 = +/- 2000 degrees/s
# See SCALE_GYRO for conversion from raw data to units of radians per second
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_GYRO_CONFIG, int(round(math.log(self.__RANGE_GYRO / 250, 2))) << 3)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Accel DLPF => 1kHz sample frequency used above divided by the sample divide factor.
#
# 0x00 = 460Hz
# 0x01 = 184Hz
# 0x02 = 92Hz
# 0x03 = 41Hz
# 0x04 = 20Hz
# 0x05 = 10Hz
# 0x06 = 5Hz
# 0x07 = 460Hz
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU9250_RA_ACCEL_CFG_2, alpf)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Disable accel self tests, scale of +/-8g
#
# 0x00 = +/- 2g
# 0x08 = +/- 4g
# 0x10 = +/- 8g
# 0x18 = +/- 16g
        # See SCALE_ACCEL for conversion from raw data to units of meters per second squared
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_ACCEL_CONFIG, int(round(math.log(self.__RANGE_ACCEL / 2, 2))) << 3)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Set INT pin to push/pull, latch 'til read, any read to clear
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_INT_PIN_CFG, 0x30)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Initialize the FIFO overflow interrupt 0x10 (turned off at startup).
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_INT_ENABLE, 0x00)
time.sleep(0.1)
#-------------------------------------------------------------------------------------------
# Enabled the FIFO.
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_USER_CTRL, 0x40)
#-------------------------------------------------------------------------------------------
# Accelerometer / gyro goes into FIFO later on - see flushFIFO()
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_FIFO_EN, 0x00)
#-------------------------------------------------------------------------------------------
# Read ambient temperature
#-------------------------------------------------------------------------------------------
temp = self.readTemperature()
logger.critical("IMU core temp (boot): ,%f", temp / 333.86 + 21.0)
def readTemperature(self):
temp = self.i2c.readS16(self.__MPU6050_RA_TEMP_OUT_H)
return temp
def enableFIFOOverflowISR(self):
#-------------------------------------------------------------------------------------------
# Clear the interrupt status register and enable the FIFO overflow interrupt 0x10
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_INT_ENABLE, 0x10)
self.i2c.readU8(self.__MPU6050_RA_INT_STATUS)
def disableFIFOOverflowISR(self):
#-------------------------------------------------------------------------------------------
# Disable the FIFO overflow interrupt.
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_INT_ENABLE, 0x00)
def numFIFOBatches(self):
#-------------------------------------------------------------------------------------------
# The FIFO is 512 bytes long, and we're storing 6 signed shorts (ax, ay, az, gx, gy, gz) i.e.
# 12 bytes per batch of sensor readings
#-------------------------------------------------------------------------------------------
fifo_bytes = self.i2c.readU16(self.__MPU6050_RA_FIFO_COUNTH)
fifo_batches = int(fifo_bytes / 12) # This rounds down
return fifo_batches
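    # Worked example (editor's note): the FIFO is 512 bytes and each batch of
    # ax, ay, az, gx, gy, gz is 6 x 2 = 12 bytes, so a full FIFO holds
    # int(512 / 12) = 42 complete batches; FULL_FIFO_BATCHES = 20 therefore
    # leaves a healthy margin before overflow.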
def readFIFO(self, fifo_batches):
#-------------------------------------------------------------------------------------------
        # Read n x 12 bytes of FIFO data, average them, and return the averaged values and the inferred time
# based upon the sampling rate and the number of samples.
#-------------------------------------------------------------------------------------------
ax = 0
ay = 0
az = 0
gx = 0
gy = 0
gz = 0
for ii in range(fifo_batches):
sensor_data = []
fifo_batch = self.i2c.readList(self.__MPU6050_RA_FIFO_R_W, 12)
for jj in range(0, 12, 2):
hibyte = fifo_batch[jj]
hibyte = hibyte - 256 if hibyte > 127 else hibyte
lobyte = fifo_batch[jj + 1]
sensor_data.append((hibyte << 8) + lobyte)
ax += sensor_data[0]
ay += sensor_data[1]
az += sensor_data[2]
gx += sensor_data[3]
gy += sensor_data[4]
gz += sensor_data[5]
'''
self.max_az = self.max_az if sensor_data[2] < self.max_az else sensor_data[2]
self.min_az = self.min_az if sensor_data[2] > self.min_az else sensor_data[2]
self.max_gx = self.max_gx if sensor_data[3] < self.max_gx else sensor_data[3]
self.min_gx = self.min_gx if sensor_data[3] > self.min_gx else sensor_data[3]
self.max_gy = self.max_gy if sensor_data[4] < self.max_gy else sensor_data[4]
self.min_gy = self.min_gy if sensor_data[4] > self.min_gy else sensor_data[4]
self.max_gz = self.max_gz if sensor_data[5] < self.max_gz else sensor_data[5]
self.min_gz = self.min_gz if sensor_data[5] > self.min_gz else sensor_data[5]
'''
return ax / fifo_batches, ay / fifo_batches, az / fifo_batches, gx / fifo_batches, gy / fifo_batches, gz / fifo_batches, fifo_batches / sampling_rate
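    # Worked example (editor's note, assuming sampling_rate = 1000Hz): reading
    # 20 batches returns the per-axis averages together with
    # dt = 20 / 1000 = 0.02s, the period the averaged readings span.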
def flushFIFO(self):
#-------------------------------------------------------------------------------------------
# First shut off the feed in the FIFO.
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_FIFO_EN, 0x00)
#-------------------------------------------------------------------------------------------
# Empty the FIFO by reading whatever is there
#-------------------------------------------------------------------------------------------
SMBUS_MAX_BUF_SIZE = 32
fifo_bytes = self.i2c.readU16(self.__MPU6050_RA_FIFO_COUNTH)
for ii in range(int(fifo_bytes / SMBUS_MAX_BUF_SIZE)):
self.i2c.readList(self.__MPU6050_RA_FIFO_R_W, SMBUS_MAX_BUF_SIZE)
fifo_bytes = self.i2c.readU16(self.__MPU6050_RA_FIFO_COUNTH)
for ii in range(fifo_bytes):
self.i2c.readU8(self.__MPU6050_RA_FIFO_R_W)
#-------------------------------------------------------------------------------------------
# Finally start feeding the FIFO with sensor data again
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__MPU6050_RA_FIFO_EN, 0x78)
def setGyroOffsets(self, gx, gy, gz):
self.gx_offset = gx
self.gy_offset = gy
self.gz_offset = gz
def scaleSensors(self, ax, ay, az, gx, gy, gz):
qax = (ax - self.ax_offset) * self.__SCALE_ACCEL
qay = (ay - self.ay_offset) * self.__SCALE_ACCEL
qaz = (az - self.az_offset) * self.__SCALE_ACCEL
qrx = (gx - self.gx_offset) * self.__SCALE_GYRO
qry = (gy - self.gy_offset) * self.__SCALE_GYRO
qrz = (gz - self.gz_offset) * self.__SCALE_GYRO
return qax, qay, qaz, qrx, qry, qrz
def initCompass(self):
#-------------------------------------------------------------------------------------------
# Set up the I2C master pass through.
#-------------------------------------------------------------------------------------------
int_bypass = self.i2c.readU8(self.__MPU6050_RA_INT_PIN_CFG)
self.i2c.write8(self.__MPU6050_RA_INT_PIN_CFG, int_bypass | 0x02)
#-------------------------------------------------------------------------------------------
        # Connect directly to the bypassed magnetometer, and configure it for 16 bit continuous data
#-------------------------------------------------------------------------------------------
self.i2c_compass = I2C(0x0C)
self.i2c_compass.write8(self.__AK893_RA_CNTL1, 0x16);
def readCompass(self):
compass_bytes = self.i2c_compass.readList(self.__AK893_RA_X_LO, 7)
#-------------------------------------------------------------------------------------------
# Convert the array of 6 bytes to 3 shorts - 7th byte kicks off another read.
# Note compass X, Y, Z are aligned with GPS not IMU i.e. X = 0, Y = 1 => 0 degrees North
#-------------------------------------------------------------------------------------------
compass_data = []
for ii in range(0, 6, 2):
lobyte = compass_bytes[ii]
hibyte = compass_bytes[ii + 1]
hibyte = hibyte - 256 if hibyte > 127 else hibyte
compass_data.append((hibyte << 8) + lobyte)
[mgx, mgy, mgz] = compass_data
mgx = (mgx - self.mgx_offset) * self.mgx_gain
mgy = (mgy - self.mgy_offset) * self.mgy_gain
mgz = (mgz - self.mgz_offset) * self.mgz_gain
return mgx, mgy, mgz
def compassCheckCalibrate(self):
rc = True
while True:
coc = raw_input("'check' or 'calibrate'? ")
if coc == "check":
self.checkCompass()
break
elif coc == "calibrate":
rc = self.calibrateCompass()
break
return rc
def checkCompass(self):
print "Pop me on the ground pointing in a known direction based on another compass."
raw_input("Press enter when that's done, and I'll tell you which way I think I'm pointing")
self.loadCompassCalibration()
mgx, mgy, mgz = self.readCompass()
#-------------------------------------------------------------------------------
# Convert compass vector into N, S, E, W variants. Get the compass angle in the
# range of 0 - 359.99.
#-------------------------------------------------------------------------------
compass_angle = (math.degrees(math.atan2(mgx, mgy)) + 360) % 360
#-------------------------------------------------------------------------------
# There are 16 possible compass directions when you include things like NNE at
# 22.5 degrees.
#-------------------------------------------------------------------------------
compass_points = ("N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW")
num_compass_points = len(compass_points)
for ii in range(num_compass_points):
angle_range_min = (360 * (ii - 0.5) / num_compass_points)
angle_range_max = (360 * (ii + 0.5) / num_compass_points)
if compass_angle > angle_range_min and compass_angle <= angle_range_max:
break
else:
            ii = 0 # Special case: pointing close to due north, where the sector range wraps past 360.
print "I think I'm pointing %s?" % compass_points[ii]
def calibrateCompass(self):
self.mgx_offset = 0.0
self.mgy_offset = 0.0
self.mgz_offset = 0.0
self.mgx_gain = 1.0
self.mgy_gain = 1.0
self.mgz_gain = 1.0
offs_rc = False
#-------------------------------------------------------------------------------------------
        # First we need gyro offset calibration.  Flush the FIFO, collect roughly half a FIFO full
        # of samples and feed them back in as the gyro offsets.
#-------------------------------------------------------------------------------------------
raw_input("First, put me on a stable surface, and press enter.")
        self.flushFIFO()
        time.sleep(FULL_FIFO_BATCHES / sampling_rate)
        nfb = self.numFIFOBatches()
        qax, qay, qaz, qrx, qry, qrz, dt = self.readFIFO(nfb)
        self.setGyroOffsets(qrx, qry, qrz)
print "OK, thanks. That's the gyro calibrated."
#-------------------------------------------------------------------------------------------
# Open the offset file for this run
#-------------------------------------------------------------------------------------------
try:
with open('CompassOffsets', 'ab') as offs_file:
mgx, mgy, mgz = self.readCompass()
max_mgx = mgx
min_mgx = mgx
max_mgy = mgy
min_mgy = mgy
max_mgz = mgz
min_mgz = mgz
                #-------------------------------------------------------------------------------
                # Collect compass X, Y values
                #-------------------------------------------------------------------------------
GPIO.output(GPIO_BUZZER, GPIO.LOW)
print "Now, pick me up and rotate me horizontally twice until the buzzing stop."
raw_input("Press enter when you're ready to go.")
self.flushFIFO()
yaw = 0.0
total_dt = 0.0
print "ROTATION: ",
number_len = 0
#-------------------------------------------------------------------------------
                # While integrated Z axis gyro < 4 pi i.e. 720 degrees, keep toggling the buzzer
#-------------------------------------------------------------------------------
while abs(yaw) < 4 * math.pi:
time.sleep(10 / sampling_rate)
                    nfb = self.numFIFOBatches()
ax, ay, az, gx, gy, gz, dt = self.readFIFO(nfb)
ax, ay, az, gx, gy, gz = self.scaleSensors(ax, ay, az, gx, gy, gz)
yaw += gz * dt
total_dt += dt
mgx, mgy, mgz = self.readCompass()
max_mgx = mgx if mgx > max_mgx else max_mgx
max_mgy = mgy if mgy > max_mgy else max_mgy
min_mgx = mgx if mgx < min_mgx else min_mgx
min_mgy = mgy if mgy < min_mgy else min_mgy
if total_dt > 0.2:
total_dt %= 0.2
number_text = str(abs(int(math.degrees(yaw))))
if len(number_text) == 2:
number_text = " " + number_text
elif len(number_text) == 1:
number_text = " " + number_text
print "\b\b\b\b%s" % number_text,
sys.stdout.flush()
GPIO.output(GPIO_BUZZER, not GPIO.input(GPIO_BUZZER))
print
#-------------------------------------------------------------------------------
# Collect compass Z values
#-------------------------------------------------------------------------------
GPIO.output(GPIO_BUZZER, GPIO.LOW)
print "\nGreat! Now do the same but with my nose down."
raw_input("Press enter when you're ready to go.")
self.flushFIFO()
rotation = 0.0
total_dt = 0.0
print "ROTATION: ",
number_len = 0
#-------------------------------------------------------------------------------
                # While integrated X+Y axis gyro < 4 pi i.e. 720 degrees, keep toggling the buzzer
#-------------------------------------------------------------------------------
while abs(rotation) < 4 * math.pi:
time.sleep(10 / sampling_rate)
nfb = self.numFIFOBatches()
ax, ay, az, gx, gy, gz, dt = self.readFIFO(nfb)
ax, ay, az, gx, gy, gz = self.scaleSensors(ax, ay, az, gx, gy, gz)
rotation += math.pow(math.pow(gx, 2) + math.pow(gy, 2), 0.5) * dt
total_dt += dt
mgx, mgy, mgz = self.readCompass()
max_mgz = mgz if mgz > max_mgz else max_mgz
min_mgz = mgz if mgz < min_mgz else min_mgz
if total_dt > 0.2:
total_dt %= 0.2
number_text = str(abs(int(math.degrees(rotation))))
if len(number_text) == 2:
number_text = " " + number_text
elif len(number_text) == 1:
number_text = " " + number_text
print "\b\b\b\b%s" % number_text,
sys.stdout.flush()
GPIO.output(GPIO_BUZZER, not GPIO.input(GPIO_BUZZER))
print
#-------------------------------------------------------------------------------
                # Turn the buzzer off regardless of the result
#-------------------------------------------------------------------------------
GPIO.output(GPIO_BUZZER, GPIO.LOW)
#-------------------------------------------------------------------------------
# Write the good output to file.
#-------------------------------------------------------------------------------
mgx_offset = (max_mgx + min_mgx) / 2
mgy_offset = (max_mgy + min_mgy) / 2
mgz_offset = (max_mgz + min_mgz) / 2
mgx_gain = 1 / (max_mgx - min_mgx)
mgy_gain = 1 / (max_mgy - min_mgy)
mgz_gain = 1 / (max_mgz - min_mgz)
offs_file.write("%f %f %f %f %f %f\n" % (mgx_offset, mgy_offset, mgz_offset, mgx_gain, mgy_gain, mgz_gain))
#-------------------------------------------------------------------------------
# Sanity check.
#-------------------------------------------------------------------------------
print "\nLooking good, just one last check to confirm all's well."
self.checkCompass()
print "All done - ready to go!"
offs_rc = True
except EnvironmentError as e:
print "Environment Error: '%s'" % e
return offs_rc
def loadCompassCalibration(self):
self.mgx_offset = 0.0
self.mgy_offset = 0.0
self.mgz_offset = 0.0
self.mgx_gain = 1.0
self.mgy_gain = 1.0
self.mgz_gain = 1.0
offs_rc = False
try:
with open('CompassOffsets', 'rb') as offs_file:
mgx_offset = 0.0
mgy_offset = 0.0
mgz_offset = 0.0
mgx_gain = 1.0
mgy_gain = 1.0
mgz_gain = 1.0
for line in offs_file:
mgx_offset, mgy_offset, mgz_offset, mgx_gain, mgy_gain, mgz_gain = line.split()
self.mgx_offset = float(mgx_offset)
self.mgy_offset = float(mgy_offset)
self.mgz_offset = float(mgz_offset)
self.mgx_gain = float(mgx_gain)
self.mgy_gain = float(mgy_gain)
self.mgz_gain = float(mgz_gain)
except EnvironmentError:
#---------------------------------------------------------------------------------------
            # Compass calibration is essential to compensate for hard and soft iron distortion from
            # local metal; enforce a recalibration if no calibration file is found.
#---------------------------------------------------------------------------------------
print "Oops, something went wrong reading the compass offsets file 'CompassOffsets'"
print "Have you calibrated it (--cc)?"
offs_rc = False
else:
#---------------------------------------------------------------------------------------
# Calibration results were successful.
#---------------------------------------------------------------------------------------
offs_rc = True
finally:
pass
logger.warning("Compass Offsets:, %f, %f, %f, Compass Gains:, %f, %f, %f", self.mgx_offset,
self.mgy_offset,
self.mgz_offset,
self.mgx_gain,
self.mgy_gain,
self.mgz_gain)
return offs_rc
def calibrate0g(self):
ax_offset = 0.0
ay_offset = 0.0
az_offset = 0.0
offs_rc = False
#-------------------------------------------------------------------------------------------
        # Open the offset file for this run
#-------------------------------------------------------------------------------------------
try:
with open('0gOffsets', 'ab') as offs_file:
raw_input("Rest me on my props and press enter.")
self.flushFIFO()
time.sleep(FULL_FIFO_BATCHES / sampling_rate)
fifo_batches = self.numFIFOBatches()
ax, ay, az, gx, gy, gz, dt = self.readFIFO(fifo_batches)
offs_file.write("%f %f %f\n" % (ax, ay, az))
except EnvironmentError:
pass
else:
offs_rc = True
return offs_rc
def load0gCalibration(self):
offs_rc = False
try:
with open('0gOffsets', 'rb') as offs_file:
for line in offs_file:
ax_offset, ay_offset, az_offset = line.split()
self.ax_offset = float(ax_offset)
self.ay_offset = float(ay_offset)
self.az_offset = float(az_offset)
except EnvironmentError:
pass
else:
pass
finally:
#---------------------------------------------------------------------------------------
# For a while, I thought 0g calibration might help, but actually, it doesn't due to
# temperature dependency, so it always returns default values now.
#---------------------------------------------------------------------------------------
self.ax_offset = 0.0
self.ay_offset = 0.0
self.az_offset = 0.0
offs_rc = True
logger.warning("0g Offsets:, %f, %f, %f", self.ax_offset, self.ay_offset, self.az_offset)
return offs_rc
def getStats(self):
return (self.max_az * self.__SCALE_ACCEL,
self.min_az * self.__SCALE_ACCEL,
self.max_gx * self.__SCALE_GYRO,
self.min_gx * self.__SCALE_GYRO,
self.max_gy * self.__SCALE_GYRO,
self.min_gy * self.__SCALE_GYRO,
self.max_gz * self.__SCALE_GYRO,
self.min_gz * self.__SCALE_GYRO)
####################################################################################################
#
# Garmin LiDAR-Lite v3 range finder
#
####################################################################################################
class GLLv3:
i2c = None
__GLL_ACQ_COMMAND = 0x00
__GLL_STATUS = 0x01
__GLL_SIG_COUNT_VAL = 0x02
__GLL_ACQ_CONFIG_REG = 0x04
__GLL_VELOCITY = 0x09
__GLL_PEAK_CORR = 0x0C
__GLL_NOISE_PEAK = 0x0D
__GLL_SIGNAL_STRENGTH = 0x0E
__GLL_FULL_DELAY_HIGH = 0x0F
__GLL_FULL_DELAY_LOW = 0x10
__GLL_OUTER_LOOP_COUNT = 0x11
__GLL_REF_COUNT_VAL = 0x12
__GLL_LAST_DELAY_HIGH = 0x14
__GLL_LAST_DELAY_LOW = 0x15
__GLL_UNIT_ID_HIGH = 0x16
__GLL_UNIT_ID_LOW = 0x17
__GLL_I2C_ID_HIGHT = 0x18
__GLL_I2C_ID_LOW = 0x19
__GLL_I2C_SEC_ADDR = 0x1A
__GLL_THRESHOLD_BYPASS = 0x1C
__GLL_I2C_CONFIG = 0x1E
__GLL_COMMAND = 0x40
__GLL_MEASURE_DELAY = 0x45
__GLL_PEAK_BCK = 0x4C
__GLL_CORR_DATA = 0x52
__GLL_CORR_DATA_SIGN = 0x53
__GLL_ACQ_SETTINGS = 0x5D
__GLL_POWER_CONTROL = 0x65
def __init__(self, address=0x62, rate=10):
self.i2c = I2C(address)
self.rate = rate
#-------------------------------------------------------------------------------------------
# Set to continuous sampling after initial read.
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__GLL_OUTER_LOOP_COUNT, 0xFF)
#-------------------------------------------------------------------------------------------
# Set the sampling frequency as 2000 / Hz:
# 10Hz = 0xc8
# 20Hz = 0x64
# 100Hz = 0x14
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__GLL_MEASURE_DELAY, int(2000 / rate))
#-------------------------------------------------------------------------------------------
# Include receiver bias correction 0x04
#AB: 0x04 | 0x01 should cause (falling edge?) GPIO_GLL_DR_INTERRUPT. Test GPIO handle this?
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__GLL_ACQ_COMMAND, 0x04 | 0x01)
#-------------------------------------------------------------------------------------------
# Acquisition config register:
# 0x01 Data ready interrupt
# 0x20 Take sampling rate from MEASURE_DELAY
#-------------------------------------------------------------------------------------------
self.i2c.write8(self.__GLL_ACQ_CONFIG_REG, 0x21)
def read(self):
#-------------------------------------------------------------------------------------------
# Distance is in cm hence the 100s to convert to meters.
# Velocity is in cm between consecutive reads; sampling rate converts these to a velocity.
# Reading the list from 0x8F seems to get the previous reading, probably cached for the sake
# of calculating the velocity next time round.
#-------------------------------------------------------------------------------------------
distance = self.i2c.readU16(self.__GLL_FULL_DELAY_HIGH)
if distance == 1:
raise ValueError("GLL out of range")
return distance / 100
####################################################################################################
#
# Garmin LiDAR-Lite v3HP range finder
#
####################################################################################################
class GLLv3HP:
i2c = None
__GLL_ACQ_COMMAND = 0x00
__GLL_STATUS = 0x01
__GLL_SIG_COUNT_VAL = 0x02
__GLL_ACQ_CONFIG_REG = 0x04
__GLL_LEGACY_RESET_EN = 0x06
__GLL_SIGNAL_STRENGTH = 0x0E
__GLL_FULL_DELAY_HIGH = 0x0F
__GLL_FULL_DELAY_LOW = 0x10
__GLL_REF_COUNT_VAL = 0x12
__GLL_UNIT_ID_HIGH = 0x16
__GLL_UNIT_ID_LOW = 0x17
__GLL_I2C_ID_HIGHT = 0x18
__GLL_I2C_ID_LOW = 0x19
__GLL_I2C_SEC_ADDR = 0x1A
__GLL_THRESHOLD_BYPASS = 0x1C
__GLL_I2C_CONFIG = 0x1E
__GLL_PEAK_STACK_HIGH = 0x26
__GLL_PEAK_STACK_LOW = 0x27
__GLL_COMMAND = 0x40
__GLL_HEALTHY_STATUS = 0x48
__GLL_CORR_DATA = 0x52
__GLL_CORR_DATA_SIGN = 0x53
__GLL_POWER_CONTROL = 0x65
def __init__(self, address=0x62):
self.i2c = I2C(address)
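        #-------------------------------------------------------------------------------------------
        # Acquisition configuration.  These values appear to match the default / "balanced
        # performance" preset in Garmin's v3HP documentation: maximum acquisition count (0x80),
        # default acquisition mode (0x08), reference averaging (0x05) and the default detection
        # threshold (0x00).
        #-------------------------------------------------------------------------------------------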
self.i2c.write8(self.__GLL_SIG_COUNT_VAL, 0x80)
self.i2c.write8(self.__GLL_ACQ_CONFIG_REG, 0x08)
self.i2c.write8(self.__GLL_REF_COUNT_VAL, 0x05)
self.i2c.write8(self.__GLL_THRESHOLD_BYPASS, 0x00)
def read(self):
acquired = False
# Trigger acquisition
self.i2c.write8(self.__GLL_ACQ_COMMAND, 0x01)
# Poll acquired?
while not acquired:
acquired = not (self.i2c.readU8(self.__GLL_STATUS) & 0x01)
else:
distance = self.i2c.readU16(self.__GLL_FULL_DELAY_HIGH)
if distance == 0:
raise ValueError("GLL out of range")
return distance / 100
####################################################################################################
#
# PID algorithm to take input sensor readings, and target requirements, and output an arbitrary
# corrective value.
#
####################################################################################################
class PID:
def __init__(self, p_gain, i_gain, d_gain):
self.last_error = 0.0
self.p_gain = p_gain
self.i_gain = i_gain
self.d_gain = d_gain
self.i_error = 0.0
def Error(self, input, target):
return (target - input)
def Compute(self, input, target, dt):
#-------------------------------------------------------------------------------------------
        # Error is what the PID algorithm acts upon to derive the output
#-------------------------------------------------------------------------------------------
error = self.Error(input, target)
#-------------------------------------------------------------------------------------------
        # The proportional term takes the distance between current input and target
        # and uses this proportionally (based on Kp) to control the ESC pulse width
#-------------------------------------------------------------------------------------------
p_error = error
#-------------------------------------------------------------------------------------------
# The integral term sums the errors across many compute calls to allow for
# external factors like wind speed and friction
#-------------------------------------------------------------------------------------------
self.i_error += (error + self.last_error) * dt
i_error = self.i_error
#-------------------------------------------------------------------------------------------
# The differential term accounts for the fact that as error approaches 0,
# the output needs to be reduced proportionally to ensure factors such as
# momentum do not cause overshoot.
#-------------------------------------------------------------------------------------------
d_error = (error - self.last_error) / dt
#-------------------------------------------------------------------------------------------
        # The overall output is the sum of the (P)roportional, (I)ntegral and (D)ifferential terms
#-------------------------------------------------------------------------------------------
p_output = self.p_gain * p_error
i_output = self.i_gain * i_error
d_output = self.d_gain * d_error
#-------------------------------------------------------------------------------------------
# Store off last error for integral and differential processing next time.
#-------------------------------------------------------------------------------------------
self.last_error = error
#-------------------------------------------------------------------------------------------
# Return the output, which has been tuned to be the increment / decrement in ESC PWM
#-------------------------------------------------------------------------------------------
return p_output, i_output, d_output
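    #-----------------------------------------------------------------------------------------------
    # A minimal usage sketch (the assumed caller pattern, with illustrative names - not part of this
    # class): each loop the three terms are summed and applied as an increment to the hover ESC
    # pulse width, e.g.
    #
    #   p_out, i_out, d_out = vvpid.Compute(velocity, target_velocity, dt)
    #   pulse_width = hover_pwm + p_out + i_out + d_out
    #-----------------------------------------------------------------------------------------------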
####################################################################################################
#
# PID algorithm subclass to cope with the yaw angle error calculation, wrapping round the shortest way.
#
####################################################################################################
class YAW_PID(PID):
def Error(self, input, target):
#-------------------------------------------------------------------------------------------
        # target and input are in the +/- pi range; this is asserted.  The result is wrapped back
        # into the +/- pi range to make sure we spin the shortest way.
#-------------------------------------------------------------------------------------------
assert (abs(input) <= math.pi), "yaw input out of range %f" % math.degrees(input)
assert (abs(target) <= math.pi), "yaw target out of range %f" % math.degrees(target)
error = ((target - input) + math.pi) % (2 * math.pi) - math.pi
return error
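        #-------------------------------------------------------------------------------------------
        # Worked example: target = +170 degrees, input = -170 degrees gives a raw difference of
        # +340 degrees, but the wrap-around above returns -20 degrees - the shorter way round.
        #-------------------------------------------------------------------------------------------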
####################################################################################################
#
# Class for managing each blade + motor configuration via its ESC
#
####################################################################################################
class ESC:
def __init__(self, pin, location, rotation, name):
#-------------------------------------------------------------------------------------------
# The GPIO BCM numbered pin providing PWM signal for this ESC
#-------------------------------------------------------------------------------------------
self.bcm_pin = pin
#-------------------------------------------------------------------------------------------
# Physical parameters of the ESC / motors / propellers
#-------------------------------------------------------------------------------------------
self.motor_location = location
self.motor_rotation = rotation
#-------------------------------------------------------------------------------------------
# Name - for logging purposes only
#-------------------------------------------------------------------------------------------
self.name = name
#-------------------------------------------------------------------------------------------
# Pulse width - for logging purposes only
#-------------------------------------------------------------------------------------------
self.pulse_width = 0
#-------------------------------------------------------------------------------------------
# Initialize the RPIO DMA PWM for this ESC.
#-------------------------------------------------------------------------------------------
self.set(1000)
def set(self, pulse_width):
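        #-------------------------------------------------------------------------------------------
        # Clamp the requested pulse width into the 1000 - 1999us range accepted by the ESCs before
        # handing it to the DMA PWM channel.
        #-------------------------------------------------------------------------------------------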
pulse_width = pulse_width if pulse_width >= 1000 else 1000
pulse_width = pulse_width if pulse_width <= 1999 else 1999
self.pulse_width = pulse_width
PWM.add_channel_pulse(RPIO_DMA_CHANNEL, self.bcm_pin, 0, pulse_width)
####################################################################################################
#
# Get the rotation angles of pitch, roll and yaw from the fixed point of earth reference frame
# gravity + lateral orientation (ultimately compass derived, but currently just the take-off
# orientation) of 0, 0, 1 compared to where gravity is distrubuted across the X, Y and Z axes of the
# accelerometer all based upon the right hand rule.
#
####################################################################################################
def GetRotationAngles(ax, ay, az):
#-----------------------------------------------------------------------------------------------
# What's the angle in the x and y plane from horizontal in radians?
#-----------------------------------------------------------------------------------------------
pitch = math.atan2(-ax, math.pow(math.pow(ay, 2) + math.pow(az, 2), 0.5))
roll = math.atan2(ay, az)
return pitch, roll
####################################################################################################
#
# Absolute angles of tilt compared to the earth gravity reference frame.
#
####################################################################################################
def GetAbsoluteAngles(ax, ay, az):
pitch = math.atan2(-ax, az)
roll = math.atan2(ay, az)
return pitch, roll
####################################################################################################
#
# Convert body frame rotation rates to Euler frame rotation rates
#
####################################################################################################
def Body2EulerRates(qry, qrx, qrz, pa, ra):
#===============================================================================================
# Axes: Convert a set of gyro body frame rotation rates into Euler frames
#
# Matrix
# ---------
# |err| | 1 , sin(ra) * tan(pa) , cos(ra) * tan(pa) | |qrx|
# |epr| = | 0 , cos(ra) , -sin(ra) | |qry|
# |eyr| | 0 , sin(ra) / cos(pa) , cos(ra) / cos(pa) | |qrz|
#
#===============================================================================================
c_pa = math.cos(pa)
t_pa = math.tan(pa)
c_ra = math.cos(ra)
s_ra = math.sin(ra)
err = qrx + qry * s_ra * t_pa + qrz * c_ra * t_pa
epr = qry * c_ra - qrz * s_ra
eyr = qry * s_ra / c_pa + qrz * c_ra / c_pa
return epr, err, eyr
####################################################################################################
#
# Rotate a vector using Euler angles wrt Earth frame co-ordinate system, for example to take the
# earth frame target flight plan vectors, and move it to the quad frame orientations vectors.
#
####################################################################################################
def RotateVector(evx, evy, evz, pa, ra, ya):
#===============================================================================================
# Axes: Convert a vector from earth- to quadcopter frame
#
# Matrix
# ---------
# |qvx| | cos(pa) * cos(ya), cos(pa) * sin(ya), -sin(pa) | |evx|
# |qvy| = | sin(ra) * sin(pa) * cos(ya) - cos(ra) * sin(ya), sin(ra) * sin(pa) * sin(ya) + cos(ra) * cos(ya), sin(ra) * cos(pa)| |evy|
# |qvz| | cos(ra) * sin(pa) * cos(ya) + sin(ra) * sin(ya), cos(ra) * sin(pa) * sin(ya) - sin(ra) * cos(ya), cos(pa) * cos(ra)| |evz|
#
#===============================================================================================
c_pa = math.cos(pa)
s_pa = math.sin(pa)
c_ra = math.cos(ra)
s_ra = math.sin(ra)
c_ya = math.cos(ya)
s_ya = math.sin(ya)
qvx = evx * c_pa * c_ya + evy * c_pa * s_ya - evz * s_pa
qvy = evx * (s_ra * s_pa * c_ya - c_ra * s_ya) + evy * (s_ra * s_pa * s_ya + c_ra * c_ya) + evz * s_ra * c_pa
qvz = evx * (c_ra * s_pa * c_ya + s_ra * s_ya) + evy * (c_ra * s_pa * s_ya - s_ra * c_ya) + evz * c_pa * c_ra
return qvx, qvy, qvz
####################################################################################################
#
# Butterworth IIR filter calculator and actor - this is carried out in the earth frame as we track
# gravity drift over time from 0, 0, 1 (the primer values for egx, egy and egz)
#
# Code is derived from http://www.exstrom.com/journal/sigproc/bwlpf.c
#
####################################################################################################
class BUTTERWORTH:
def __init__(self, sampling, cutoff, order, primer):
self.n = int(round(order / 2))
self.A = []
self.d1 = []
self.d2 = []
self.w0 = []
self.w1 = []
self.w2 = []
a = math.tan(math.pi * cutoff / sampling)
a2 = math.pow(a, 2.0)
for ii in range(0, self.n):
r = math.sin(math.pi * (2.0 * ii + 1.0) / (4.0 * self.n))
s = a2 + 2.0 * a * r + 1.0
self.A.append(a2 / s)
self.d1.append(2.0 * (1 - a2) / s)
self.d2.append(-(a2 - 2.0 * a * r + 1.0) / s)
self.w0.append(primer / (self.A[ii] * 4))
self.w1.append(primer / (self.A[ii] * 4))
self.w2.append(primer / (self.A[ii] * 4))
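            #---------------------------------------------------------------------------------------
            # Prime the delay-line states so the filter starts at steady state: with all three
            # states equal to primer / (4 * A), the first output is A * (w + 2w + w) = primer.
            #---------------------------------------------------------------------------------------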
def filter(self, input):
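        #-------------------------------------------------------------------------------------------
        # Each pass is one Direct Form II second-order section: w0 is the new state and the output
        # combines the states through the (1, 2, 1) numerator scaled by A.  For a second order
        # filter (n == 1) a single section runs per sample.
        #-------------------------------------------------------------------------------------------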
for ii in range(0, self.n):
self.w0[ii] = self.d1[ii] * self.w1[ii] + self.d2[ii] * self.w2[ii] + input
output = self.A[ii] * (self.w0[ii] + 2.0 * self.w1[ii] + self.w2[ii])
self.w2[ii] = self.w1[ii]
self.w1[ii] = self.w0[ii]
return output
####################################################################################################
#
# Initialize hardware PWM
#
####################################################################################################
RPIO_DMA_CHANNEL = 1
def PWMInit():
#-----------------------------------------------------------------------------------------------
# Set up the globally shared single PWM channel
#-----------------------------------------------------------------------------------------------
PWM.set_loglevel(PWM.LOG_LEVEL_ERRORS)
PWM.setup(1) # 1us resolution pulses
PWM.init_channel(RPIO_DMA_CHANNEL, 3000) # pulse every 3ms
####################################################################################################
#
# Cleanup hardware PWM
#
####################################################################################################
def PWMTerm():
PWM.cleanup()
####################################################################################################
#
# GPIO pins initialization for MPU6050 FIFO overflow interrupt
#
####################################################################################################
def GPIOInit(FIFOOverflowISR):
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_FIFO_OVERFLOW_INTERRUPT, GPIO.IN, GPIO.PUD_OFF)
GPIO.add_event_detect(GPIO_FIFO_OVERFLOW_INTERRUPT, GPIO.RISING) #, FIFOOverflowISR)
#AB: GPIO.setup(GPIO_POWER_BROWN_OUT_INTERRUPT, GPIO.IN, GPIO.PUD_OFF)
#AB: GPIO.add_event_detect(GPIO_POWER_BROWN_OUT_INTERRUPT, GPIO.FALLING)
'''
    #AB! Regardless of the (UP, OFF, DOWN) * (RISING, FALLING), none of these options raises a DR interrupt
#AB! in v3. v3HP seems to work at a glance.
'''
GPIO.setup(GPIO_GLL_DR_INTERRUPT, GPIO.IN, GPIO.PUD_DOWN)
GPIO.add_event_detect(GPIO_GLL_DR_INTERRUPT, GPIO.FALLING)
GPIO.setup(GPIO_BUZZER, GPIO.OUT)
GPIO.output(GPIO_BUZZER, GPIO.LOW)
####################################################################################################
#
# GPIO pins cleanup for MPU6050 FIFO overflow interrupt
#
####################################################################################################
def GPIOTerm():
#AB: GPIO.remove_event_detect(GPIO_FIFO_OVERFLOW_INTERRUPT)
GPIO.remove_event_detect(GPIO_GLL_DR_INTERRUPT)
GPIO.cleanup()
####################################################################################################
#
# Check CLI validity, set calibrate_sensors / fly or sys.exit(1)
#
####################################################################################################
def CheckCLI(argv):
cli_fly = False
cli_hover_pwm = 1000
#-----------------------------------------------------------------------------------------------
# Other configuration defaults
#-----------------------------------------------------------------------------------------------
cli_test_case = 0
cli_diagnostics = False
cli_tau = 7.5
cli_calibrate_0g = False
cli_fp_filename = ''
cli_cc_compass = False
cli_file_control = False
cli_yaw_control = False
cli_gps_control = False
cli_add_waypoint = False
cli_clear_waypoints = False
cli_rc_control = False
hover_pwm_defaulted = True
#-----------------------------------------------------------------------------------------------
# Defaults for vertical distance PIDs
#-----------------------------------------------------------------------------------------------
cli_vdp_gain = 1.0
cli_vdi_gain = 0.0
cli_vdd_gain = 0.0
#-----------------------------------------------------------------------------------------------
# Defaults for horizontal distance PIDs
#-----------------------------------------------------------------------------------------------
cli_hdp_gain = 1.0
cli_hdi_gain = 0.0
cli_hdd_gain = 0.0
#-----------------------------------------------------------------------------------------------
# Defaults for horizontal velocity PIDs
#-----------------------------------------------------------------------------------------------
cli_hvp_gain = 1.5
cli_hvi_gain = 0.0
cli_hvd_gain = 0.0
#-----------------------------------------------------------------------------------------------
    # Per frame specific values.  This is the only place where PID integrals are used to compensate
    # for stable forces such as gravity and weight imbalance in the frame.  Yaw is included here to
    # account for the frame-unique momentum required for rotation; however, this does not need an
    # integral as there should not be a constant force that needs to be counteracted.
#-----------------------------------------------------------------------------------------------
if i_am_hermione or i_am_penelope:
#-------------------------------------------------------------------------------------------
# Hermione's PID configuration due to using her frame / ESCs / motors / props
#-------------------------------------------------------------------------------------------
cli_hover_pwm = 1600
#-------------------------------------------------------------------------------------------
# Defaults for vertical velocity PIDs.
#-------------------------------------------------------------------------------------------
cli_vvp_gain = 360.0
cli_vvi_gain = 180.0
cli_vvd_gain = 0.0
#-------------------------------------------------------------------------------------------
# Defaults for pitch rotation rate PIDs
#-------------------------------------------------------------------------------------------
cli_prp_gain = 100.0
cli_pri_gain = 1.0
cli_prd_gain = 0.0
#-------------------------------------------------------------------------------------------
# Defaults for roll rotation rate PIDs
#-------------------------------------------------------------------------------------------
cli_rrp_gain = 100.0
cli_rri_gain = 1.0
cli_rrd_gain = 0.0
#-------------------------------------------------------------------------------------------
# Defaults for yaw rotation rate PIDs
#-------------------------------------------------------------------------------------------
cli_yrp_gain = 180.0
cli_yri_gain = 1.8
cli_yrd_gain = 0.0
elif i_am_zoe:
#-------------------------------------------------------------------------------------------
# Zoe's PID configuration due to using her ESCs / motors / props
#-------------------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------------------
# T-motor antiGravity 9030 CF white props
#-------------------------------------------------------------------------------------------
cli_hover_pwm = 1300
#-------------------------------------------------------------------------------------------
# Defaults for vertical velocity PIDs
#-------------------------------------------------------------------------------------------
cli_vvp_gain = 300.0
cli_vvi_gain = 150.0
cli_vvd_gain = 0.0
#-------------------------------------------------------------------------------------------
# Defaults for pitch angle PIDs
#-------------------------------------------------------------------------------------------
cli_prp_gain = 16.0
cli_pri_gain = 0.16
cli_prd_gain = 0.0
#-------------------------------------------------------------------------------------------
# Defaults for roll angle PIDs
#-------------------------------------------------------------------------------------------
cli_rrp_gain = 15.0
cli_rri_gain = 0.15
cli_rrd_gain = 0.0
#-------------------------------------------------------------------------------------------
# Defaults for yaw angle PIDs
#-------------------------------------------------------------------------------------------
cli_yrp_gain = 40.0
cli_yri_gain = 0.4
cli_yrd_gain = 0.0
'''
#-------------------------------------------------------------------------------------------
# GEMFAN 6040BC 3 Blade Nylon white props
#-------------------------------------------------------------------------------------------
cli_hover_pwm = 1450
#-------------------------------------------------------------------------------------------
# Defaults for vertical velocity PIDs
#-------------------------------------------------------------------------------------------
cli_vvp_gain = 300.0
cli_vvi_gain = 150.0
cli_vvd_gain = 0.0
#-------------------------------------------------------------------------------------------
        # Defaults for pitch rotation rate PIDs
#-------------------------------------------------------------------------------------------
cli_prp_gain = 35.0
cli_pri_gain = 0.35
cli_prd_gain = 0.0
#-------------------------------------------------------------------------------------------
        # Defaults for roll rotation rate PIDs
#-------------------------------------------------------------------------------------------
cli_rrp_gain = 25.0
cli_rri_gain = 0.25
cli_rrd_gain = 0.0
#-------------------------------------------------------------------------------------------
        # Defaults for yaw rotation rate PIDs
#-------------------------------------------------------------------------------------------
cli_yrp_gain = 50.0
cli_yri_gain = 5.0
cli_yrd_gain = 0.0
#-----------------------------------------------------------------------------------------------
# Right, let's get on with reading the command line and checking consistency
#-----------------------------------------------------------------------------------------------
try:
        opts, args = getopt.getopt(argv,'df:gh:y', ['cc', 'rc', 'tc=', 'awp', 'cwp', 'gps', 'tau=', 'vdp=', 'vdi=', 'vdd=', 'vvp=', 'vvi=', 'vvd=', 'hdp=', 'hdi=', 'hdd=', 'hvp=', 'hvi=', 'hvd=', 'prp=', 'pri=', 'prd=', 'rrp=', 'rri=', 'rrd=', 'yrp=', 'yri=', 'yrd='])
except getopt.GetoptError:
logger.critical('Must specify one of -f, --gps, --awp, --cwp, --cc or --tc')
logger.critical(' sudo python ./qc.py')
logger.critical(' -f set the flight plan CSV file')
logger.critical(' -h set the hover PWM pulse width - default: %dus', cli_hover_pwm)
logger.critical(' -d enable diagnostics')
logger.critical(' -g calibrate X, Y axis 0g - futile, ignore!')
logger.critical(' -y use yaw to control the direction of flight')
logger.critical(' --cc check or calibrate compass')
logger.critical(' --rc use the human control RC')
logger.critical(' --tc select which testcase to run')
logger.critical(' --awp add GPS waypoint to flight plan')
logger.critical(' --cwp clear GPS waypoints from flight plan')
logger.critical(' --gps use the GPS waypoint flight plan')
logger.critical(' --tau set the angle CF -3dB point - default: %fs', cli_tau)
        logger.critical('  --vdp set vertical distance PID P gain - default: %f', cli_vdp_gain)
        logger.critical('  --vdi set vertical distance PID I gain - default: %f', cli_vdi_gain)
        logger.critical('  --vdd set vertical distance PID D gain - default: %f', cli_vdd_gain)
logger.critical(' --vvp set vertical speed PID P gain - default: %f', cli_vvp_gain)
logger.critical(' --vvi set vertical speed PID I gain - default: %f', cli_vvi_gain)
logger.critical(' --vvd set vertical speed PID D gain - default: %f', cli_vvd_gain)
        logger.critical('  --hdp set horizontal distance PID P gain - default: %f', cli_hdp_gain)
        logger.critical('  --hdi set horizontal distance PID I gain - default: %f', cli_hdi_gain)
        logger.critical('  --hdd set horizontal distance PID D gain - default: %f', cli_hdd_gain)
logger.critical(' --hvp set horizontal speed PID P gain - default: %f', cli_hvp_gain)
logger.critical(' --hvi set horizontal speed PID I gain - default: %f', cli_hvi_gain)
logger.critical(' --hvd set horizontal speed PID D gain - default: %f', cli_hvd_gain)
logger.critical(' --prp set pitch rotation rate PID P gain - default: %f', cli_prp_gain)
logger.critical(' --pri set pitch rotation rate PID I gain - default: %f', cli_pri_gain)
logger.critical(' --prd set pitch rotation rate PID D gain - default: %f', cli_prd_gain)
logger.critical(' --rrp set roll rotation rate PID P gain - default: %f', cli_rrp_gain)
logger.critical(' --rri set roll rotation rate PID I gain - default: %f', cli_rri_gain)
logger.critical(' --rrd set roll rotation rate PID D gain - default: %f', cli_rrd_gain)
logger.critical(' --yrp set yaw rotation rate PID P gain - default: %f', cli_yrp_gain)
logger.critical(' --yri set yaw rotation rate PID I gain - default: %f', cli_yri_gain)
logger.critical(' --yrd set yaw rotation rate PID D gain - default: %f', cli_yrd_gain)
raise ValueError("Invalid command line")
for opt, arg in opts:
if opt == '-f':
cli_fly = True
cli_file_control = True
cli_fp_filename = arg
elif opt in '-h':
cli_hover_pwm = int(arg)
hover_pwm_defaulted = False
elif opt in '-d':
cli_diagnostics = True
elif opt in '-g':
cli_calibrate_0g = True
elif opt in '-y':
cli_yaw_control = True
elif opt in '--cc':
cli_cc_compass = True
elif opt in '--rc':
cli_fly = True
cli_rc_control = True
elif opt in '--tc':
cli_test_case = int(arg)
elif opt in '--awp':
cli_add_waypoint = True
elif opt in '--cwp':
cli_clear_waypoints = True
elif opt in '--gps':
cli_fly = True
cli_gps_control = True
cli_fp_filename = "GPSWaypoints.csv"
elif opt in '--tau':
cli_tau = float(arg)
elif opt in '--vdp':
cli_vdp_gain = float(arg)
elif opt in '--vdi':
cli_vdi_gain = float(arg)
elif opt in '--vdd':
cli_vdd_gain = float(arg)
elif opt in '--vvp':
cli_vvp_gain = float(arg)
elif opt in '--vvi':
cli_vvi_gain = float(arg)
elif opt in '--vvd':
cli_vvd_gain = float(arg)
elif opt in '--hdp':
cli_hdp_gain = float(arg)
elif opt in '--hdi':
cli_hdi_gain = float(arg)
elif opt in '--hdd':
cli_hdd_gain = float(arg)
elif opt in '--hvp':
cli_hvp_gain = float(arg)
elif opt in '--hvi':
cli_hvi_gain = float(arg)
elif opt in '--hvd':
cli_hvd_gain = float(arg)
elif opt in '--prp':
cli_prp_gain = float(arg)
elif opt in '--pri':
cli_pri_gain = float(arg)
elif opt in '--prd':
cli_prd_gain = float(arg)
elif opt in '--rrp':
cli_rrp_gain = float(arg)
elif opt in '--rri':
cli_rri_gain = float(arg)
elif opt in '--rrd':
cli_rrd_gain = float(arg)
elif opt in '--yrp':
cli_yrp_gain = float(arg)
elif opt in '--yri':
cli_yri_gain = float(arg)
elif opt in '--yrd':
cli_yrd_gain = float(arg)
if not cli_fly and cli_test_case == 0 and not cli_calibrate_0g and not cli_cc_compass and not cli_add_waypoint and not cli_clear_waypoints:
raise ValueError('Must specify one of -f, --awp, --cwp, --gps, --tc or --cc')
elif cli_hover_pwm < 1000 or cli_hover_pwm > 1999:
raise ValueError('Hover speed must lie in the following range: 1000 <= hover pwm < 2000')
elif cli_test_case == 0 and cli_fly:
        if (cli_file_control + cli_gps_control + cli_rc_control) != 1:
raise ValueError('Only one of file, gps or rc control may be chosen')
elif cli_file_control and not os.path.isfile(cli_fp_filename):
raise ValueError('The flight plan file "%s" does not exist.' % cli_fp_filename)
elif cli_gps_control and not os.path.isfile("GPSWaypoints.csv"):
raise ValueError('We need at least the target waypoint set for GPS flight control')
print 'Pre-flight checks passed, enjoy your flight, sir!'
elif cli_test_case == 0 and cli_calibrate_0g:
print 'Proceeding with 0g calibration'
elif cli_test_case == 0 and cli_cc_compass:
print "Proceeding with compass calibration"
elif cli_test_case == 0 and cli_add_waypoint:
print "Proceeding with GPS waypoint acquisition"
elif cli_test_case == 0 and cli_clear_waypoints:
print "Proceeding with GPS waypoint clearance"
elif cli_test_case != 1 and cli_test_case != 2:
raise ValueError('Only 1 or 2 are valid testcases')
elif cli_test_case == 1 and hover_pwm_defaulted:
raise ValueError('You must choose a specific hover speed (-h) for test case 1 - try 1150')
return cli_fp_filename, cli_calibrate_0g, cli_cc_compass, cli_yaw_control, cli_file_control, cli_rc_control, cli_gps_control, cli_add_waypoint, cli_clear_waypoints, cli_hover_pwm, cli_vdp_gain, cli_vdi_gain, cli_vdd_gain, cli_vvp_gain, cli_vvi_gain, cli_vvd_gain, cli_hdp_gain, cli_hdi_gain, cli_hdd_gain, cli_hvp_gain, cli_hvi_gain, cli_hvd_gain, cli_prp_gain, cli_pri_gain, cli_prd_gain, cli_rrp_gain, cli_rri_gain, cli_rrd_gain, cli_yrp_gain, cli_yri_gain, cli_yrd_gain, cli_test_case, cli_tau, cli_diagnostics
####################################################################################################
#
# Flight plan management.  Only used by a Pi0W as it only has a single CPU.
#
####################################################################################################
class FlightPlan():
X = 0
Y = 1
Z = 2
PERIOD = 3
NAME = 4
def __init__(self, quadcopter, fp_filename):
self.quadcopter = quadcopter
self.fp_prev_index = 0
self.elapsed_time = 0.0
self.fp = []
self.fp.append((0.0, 0.0, 0.0, 0.0, "RTF"))
self.fp.append((0.0, 0.0, 0.5, 2.0, "TAKEOFF"))
self.fp.append((0.0, 0.0, 0.0, 0.5, "HOVER"))
self.edx_target = 0.0
self.edy_target = 0.0
self.edz_target = 0.0
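        #-------------------------------------------------------------------------------------------
        # Each non-comment row of the CSV supplies earth-frame X, Y, Z velocity targets (m/s), a
        # phase duration in seconds, and a phase name - for example an illustrative row might read
        # "0.0, 0.0, 0.3, 3.0, ASCENT".
        #-------------------------------------------------------------------------------------------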
with open(fp_filename, 'rb') as fp_csv:
fp_reader = csv.reader(fp_csv)
for fp_row in fp_reader:
if len(fp_row) == 0 or (fp_row[0] != '' and fp_row[0][0] == '#'):
continue
if len(fp_row) != 5:
break
self.fp.append((float(fp_row[self.X]),
float(fp_row[self.Y]),
float(fp_row[self.Z]),
float(fp_row[self.PERIOD]),
fp_row[self.NAME].strip()))
else:
                self.fp.append((0.0, 0.0, -0.25, 5.0, "LANDING")) # Extended landing for safety
self.fp.append((0.0, 0.0, 0.0, 0.0, "STOP"))
return
raise ValueError("Error in CSV file; '%s'" % fp_row)
def getTargets(self, delta_time):
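        #-------------------------------------------------------------------------------------------
        # Walk the flight plan to find the phase covering the elapsed time; return that phase's
        # velocity targets and integrate them into the accumulated distance targets.  Once the plan
        # is exhausted, tell the quadcopter to stop looping.
        #-------------------------------------------------------------------------------------------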
self.elapsed_time += delta_time
fp_total_time = 0.0
for fp_index in range(len(self.fp)):
fp_total_time += self.fp[fp_index][self.PERIOD]
if self.elapsed_time < fp_total_time:
break
else:
self.quadcopter.keep_looping = False
if fp_index != self.fp_prev_index:
logger.critical("%s", self.fp[fp_index][self.NAME])
self.fp_prev_index = fp_index
evx_target = self.fp[fp_index][self.X]
evy_target = self.fp[fp_index][self.Y]
evz_target = self.fp[fp_index][self.Z]
self.edx_target += evx_target * delta_time
self.edy_target += evy_target * delta_time
self.edz_target += evz_target * delta_time
return evx_target, evy_target, evz_target, self.edx_target, self.edy_target, self.edz_target
####################################################################################################
#
# Functions to lock memory to prevent paging, and move child processes in different process groups
# such that a Ctrl-C / SIGINT to one isn't distributed automatically to all children.
#
####################################################################################################
MCL_CURRENT = 1
MCL_FUTURE = 2
def mlockall(flags = MCL_CURRENT| MCL_FUTURE):
libc_name = ctypes.util.find_library("c")
libc = ctypes.CDLL(libc_name, use_errno=True)
result = libc.mlockall(flags)
if result != 0:
raise Exception("cannot lock memory, errno=%s" % ctypes.get_errno())
def munlockall():
libc_name = ctypes.util.find_library("c")
libc = ctypes.CDLL(libc_name, use_errno=True)
result = libc.munlockall()
    if result != 0:
        raise Exception("cannot unlock memory, errno=%s" % ctypes.get_errno())
def Daemonize():
#-----------------------------------------------------------------------------------------------
    # Disconnect child processes so ctrl-C doesn't kill them.
    # Lower the priority (raise niceness by 5) so that Motion is -10, Autopilot and Video are -5, and Sweep and GPS are 0.
#-----------------------------------------------------------------------------------------------
os.setpgrp()
os.nice(5)
'''
#AB: ###########################################################################################
    #AB: # Consider here munlockall() to allow paging for lower priority processes i.e. all but main and video
#AB: ###########################################################################################
'''
####################################################################################################
#
# Start the Scanse Sweep reading process.
#
####################################################################################################
def SweepProcessor():
    SWEEP_IGNORE_BOUNDARY = 0.5 # 50cm between the Sweep centre and the prop tips.
SWEEP_CRITICAL_BOUNDARY = 1.0 # 50cm or less beyond the ignore zone: Hermione's personal space encroached.
SWEEP_WARNING_BOUNDARY = 1.5 # 50cm or less beyond the critical zone: Pause for thought what to do next.
sent_critical = False
warning_distance = 0.0
previous_degrees = 360.0
start_time = time.time()
loops = 0
samples = 0
distance = 0.0
direction = 0.0
warning_distance = SWEEP_WARNING_BOUNDARY
warning_radians = 0.0
with serial.Serial("/dev/ttySWEEP",
baudrate = 115200,
parity = serial.PARITY_NONE,
bytesize = serial.EIGHTBITS,
stopbits = serial.STOPBITS_ONE,
xonxoff = False,
rtscts = False,
dsrdtr = False) as sweep:
try:
sweep.write("ID\n")
resp = sweep.readline()
sweep.write("DS\n")
resp = sweep.readline()
assert (len(resp) == 6), "SWEEP: Bad data"
status = resp[2:4]
assert status == "00", "SWEEP: Failed %s" % status
with io.open("/dev/shm/sweep_stream", mode = "wb", buffering = 0) as sweep_fifo:
log = open("sweep.csv", "wb")
log.write("angle, distance, x, y\n")
unpack_format = '=' + 'B' * 7
unpack_size = struct.calcsize(unpack_format)
pack_format = '=??ff'
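                #-------------------------------------------------------------------------------
                # Each 7 byte Sweep data packet is assumed (per the Scanse Sweep serial protocol)
                # to be: sync / error flags, azimuth low / high, distance low / high, signal
                # strength and checksum; only the azimuth and distance bytes are used below.
                #-------------------------------------------------------------------------------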
while True:
raw = sweep.read(unpack_size)
assert (len(raw) == unpack_size), "Bad data read: %d" % len(raw)
#-------------------------------------------------------------------------------
# Sweep is spinning at 5Hz sampling at 600Hz. For large object detection within
# SWEEP_CRITICAL range, we can discard 80% of all samples, hopefully providing
# more efficient processing and limiting what's sent to the Autopilot.
                    #AB: 600 samples in 1 second at 5 circles per second = resolution of 3 degrees.
#AB: Hence 5 below = 15 degrees checking
#-------------------------------------------------------------------------------
samples += 1
if samples % 5 != 0:
continue
formatted = struct.unpack(unpack_format, raw)
assert (len(formatted) == 7), "Bad data type conversion: %d" % len(formatted)
#-------------------------------------------------------------------------------
# Read the azimuth and convert to degrees.
#-------------------------------------------------------------------------------
azimuth_lo = formatted[1]
azimuth_hi = formatted[2]
angle_int = (azimuth_hi << 8) + azimuth_lo
degrees = (angle_int >> 4) + (angle_int & 15) / 16
'''
#AB: ###########################################################################
#AB: # SIX SERIAL REFLECTION FROM THE WIFI ANTENNA TAKES ITS 15CM DISTANCE TO 90CM
#AB: # SMACK BANG IN THE CRITICAL ZONE!!! HENCE WE IGNORE THE RANGE OF ANGLES IT
#AB: # IS SEEN IN!!
#AB: ###########################################################################
'''
if degrees > 95 and degrees < 97:
continue
#-------------------------------------------------------------------------------
# We only send one warning and critical per loop (~0.2s); warnings happen at the start
# of a new loop, criticals immediately.
#-------------------------------------------------------------------------------
if degrees < previous_degrees:
loops += 1
output = None
#---------------------------------------------------------------------------
# Did we get a proximity warning last loop? Send it if so.
#---------------------------------------------------------------------------
if warning_distance < SWEEP_WARNING_BOUNDARY:
output = struct.pack(pack_format, False, True, warning_distance, warning_radians)
log_string = "WARNING: %fm @ %f degrees.\n" % (warning_distance , math.degrees(warning_radians) % 360)
#---------------------------------------------------------------------------
# Have we already sent a critical proximity? No? Then all's clear.
#AB: This could be improved; there's only a need to send a NONE if the previous loop
#AB: sent a WARNING previously, and no WARNING this time round.
#---------------------------------------------------------------------------
elif not sent_critical:
output = struct.pack(pack_format, False, False, 0.0, 0.0)
log_string = "PROXIMITY: %fm @ %f degrees.\n" % (distance, degrees % 360)
if output != None:
sweep_fifo.write(output)
log.write(log_string)
warning_distance = SWEEP_WARNING_BOUNDARY
sent_critical = False
previous_degrees = degrees
#-------------------------------------------------------------------------------
# Sweep rotates ACW = - 360, which when slung underneath equates to CW in the piDrone
# frame POV. Convert to radians and set range to +/- pi radians.
#-------------------------------------------------------------------------------
radians = -((math.radians(degrees) + math.pi) % (2 * math.pi) - math.pi)
#-------------------------------------------------------------------------------
# Read the distance and convert to meters.
#-------------------------------------------------------------------------------
distance_lo = formatted[3]
distance_hi = formatted[4]
distance = ((distance_hi << 8) + distance_lo) / 100
'''
#-------------------------------------------------------------------------------
# Convert the results to a vector aligned with quad frame.
#-------------------------------------------------------------------------------
x = distance * math.cos(radians)
y = distance * math.sin(radians)
log.write("%f, %f, %f, %f\n" % (degrees, distance, x, y))
'''
#-------------------------------------------------------------------------------
# If a reported distance lies inside the danger zone, pass it over to the autopilot
# to react to.
#-------------------------------------------------------------------------------
if distance < SWEEP_IGNORE_BOUNDARY:
pass
elif distance < SWEEP_CRITICAL_BOUNDARY and not sent_critical:
output = struct.pack(pack_format, True, False, distance, radians)
sweep_fifo.write(output)
log.write("CRITICAL: %fm @ %f degrees.\n" % (distance, degrees % 360))
sent_critical = True
elif distance < SWEEP_WARNING_BOUNDARY and warning_distance > distance:
warning_distance = distance
warning_radians = radians
#-------------------------------------------------------------------------------------------
# Catch Ctrl-C - the 'with' wrapped around the FIFO should have closed that by here. Has it?
#-------------------------------------------------------------------------------------------
except KeyboardInterrupt as e:
if not sweep_fifo.closed:
print "Sweep FIFO not closed! WTF!"
#-------------------------------------------------------------------------------------------
# Catch incorrect assumption bugs
#-------------------------------------------------------------------------------------------
except AssertionError as e:
print e
#-------------------------------------------------------------------------------------------
        # Clean up regardless, otherwise the next run picks up data from this one
#-------------------------------------------------------------------------------------------
finally:
sweep.write("DX\n")
resp = sweep.read()
log.write("Sweep loops: %d\n" % loops)
log.write("Time taken: %f\n" % (time.time() - start_time))
log.write("Samples: %d\n" % samples)
log.close()
####################################################################################################
#
# Process the Scanse Sweep data.
#
####################################################################################################
class SweepManager():
def __init__(self):
#-------------------------------------------------------------------------------------------
# Setup a shared memory based data stream for the Sweep output
#-------------------------------------------------------------------------------------------
os.mkfifo("/dev/shm/sweep_stream")
self.sweep_process = subprocess.Popen(["python", __file__, "SWEEP"], preexec_fn = Daemonize)
while True:
try:
self.sweep_fifo = io.open("/dev/shm/sweep_stream", mode="rb")
except:
continue
else:
break
self.unpack_format = "=??ff"
self.unpack_size = struct.calcsize(self.unpack_format)
def flush(self):
#-------------------------------------------------------------------------------------------
# Read what should be the backlog of reads, and return how many there are.
#-------------------------------------------------------------------------------------------
raw_bytes = self.sweep_fifo.read(self.unpack_size)
assert (len(raw_bytes) % self.unpack_size == 0), "Incomplete Sweep data received"
return int(len(raw_bytes) / self.unpack_size)
def read(self):
raw_bytes = self.sweep_fifo.read(self.unpack_size)
assert (len(raw_bytes) == self.unpack_size), "Incomplete data received from Sweep reader"
critical, warning, distance, direction = struct.unpack(self.unpack_format, raw_bytes)
return critical, warning, distance, direction
def cleanup(self):
#-------------------------------------------------------------------------------------------
# Stop the Sweep process if it's still running, and clean up the FIFO.
#-------------------------------------------------------------------------------------------
try:
if self.sweep_process.poll() == None:
self.sweep_process.send_signal(signal.SIGINT)
self.sweep_process.wait()
except KeyboardInterrupt as e:
pass
self.sweep_fifo.close()
os.unlink("/dev/shm/sweep_stream")
####################################################################################################
#
# Start the GPS reading process.
#
####################################################################################################
def GPSProcessor():
session = gps.gps()
session.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)
num_sats = 0
num_used_sats = 0
latitude = 0.0
altitude = 0.0
longitude = 0.0
'''
time = ""
epx = 0.0
epy = 0.0
epv = 0.0
ept = 0.0
eps = 0.0
climb = 0.0
speed = 0.0
direction = 0.0
'''
new_lat = False
new_lon = False
pack_format = '=dddb' # latitude, longitude, altitude, num satellites
with io.open("/dev/shm/gps_stream", mode = "wb", buffering = 0) as gps_fifo:
log = open("gps.csv", "wb")
log.write("latitude, longitude, altitude, satellites, epx, epy\n")
while True:
try:
report = session.next()
if report['class'] == 'TPV':
if hasattr(report, 'lon'): # Longitude in degrees
longitude = report.lon
new_lon = True
if hasattr(report, 'lat'): # Latitude in degrees
latitude = report.lat
new_lat = True
if hasattr(report, 'alt'): # Altitude - meters
altitude = report.alt
'''
if hasattr(report, 'epx'): # Estimated longitude error - meters
epx = report.epx
if hasattr(report, 'time'): # Time
time = report.time
if hasattr(report, 'ept'): # Estimated timestamp error - seconds
ept = report.ept
if hasattr(report, 'epy'): # Estimated latitude error - meters
epy = report.epy
if hasattr(report, 'epv'): # Estimated altitude error - meters
epv = report.epv
if hasattr(report, 'track'): # Direction - degrees from true north
direction = report.track
if hasattr(report, 'epd'): # Estimated direction error - degrees
epd = report.epd
if hasattr(report, 'climb'): # Climb velocity - meters per second
climb = report.climb
if hasattr(report, 'epc'): # Estimated climb error - meters per seconds
epc = report.epc
if hasattr(report, 'speed'): # Speed over ground - meters per second
speed = report.speed
if hasattr(report, 'eps'): # Estimated speed error - meters per second
eps = report.eps
'''
if report['class'] == 'SKY':
if hasattr(report, 'satellites'):
num_sats = 0
num_used_sats = 0
for satellite in report.satellites:
num_sats += 1
if hasattr(satellite, 'used') and satellite.used:
num_used_sats += 1
#-----------------------------------------------------------------------------
# Send the new batch.
#-----------------------------------------------------------------------------
if new_lon and new_lat:
log.write("%.10f, %.10f, %.10f, %d, %d\n" % (latitude, longitude, altitude, num_sats, num_used_sats))
new_lon = False
new_lat = False
output = struct.pack(pack_format,
latitude,
longitude,
altitude,
num_used_sats)
gps_fifo.write(output)
except KeyError:
pass
except KeyboardInterrupt:
break
except StopIteration:
session = None
break
finally:
pass
log.close()
####################################################################################################
#
# Process the GPS data.
#
####################################################################################################
class GPSManager():
def __init__(self):
#-------------------------------------------------------------------------------------------
# Setup a shared memory based data stream for the GPS output
#-------------------------------------------------------------------------------------------
os.mkfifo("/dev/shm/gps_stream")
self.gps_process = subprocess.Popen(["python", __file__, "GPS"], preexec_fn = Daemonize)
while True:
try:
self.gps_fifo = io.open("/dev/shm/gps_stream", mode="rb")
except:
continue
else:
break
self.waypoints = []
self.unpack_format = '=dddb' # latitude, longitude, altitude, num satellites
self.unpack_size = struct.calcsize(self.unpack_format)
def flush(self):
#-------------------------------------------------------------------------------------------
# Read what should be the backlog of reads, and return how many there are.
#-------------------------------------------------------------------------------------------
raw_bytes = self.gps_fifo.read(self.unpack_size)
assert (len(raw_bytes) % self.unpack_size == 0), "Incomplete GPS data received"
return int(len(raw_bytes) / self.unpack_size)
def cleanup(self):
#-------------------------------------------------------------------------------------------
# Stop the GPS process if it's still running, and clean up the FIFO.
#-------------------------------------------------------------------------------------------
try:
            if self.gps_process.poll() is None:
self.gps_process.send_signal(signal.SIGINT)
self.gps_process.wait()
except KeyboardInterrupt as e:
pass
self.gps_fifo.close()
os.unlink("/dev/shm/gps_stream")
def acquireSatellites(self, num_sats = MIN_SATS):
gps_lat = 0.0
gps_lon = 0.0
gps_alt = 0.0
gps_sats = 0
start_time = time.time()
print "Gimme up to a minutes to acquire satellites... 0",
sys.stdout.flush()
while time.time() - start_time < 60:
gps_lat, gps_lon, gps_alt, gps_sats = self.read()
print "\b\b%d" % gps_sats,
sys.stdout.flush()
#---------------------------------------------------------------------------------------
            # If we've got enough satellites, we're done.
#---------------------------------------------------------------------------------------
if gps_sats >= num_sats:
print
break
else:
#---------------------------------------------------------------------------------------
# We ran out of time trying to get the minimum number of satellites. Is what we did get
# enough?
#---------------------------------------------------------------------------------------
print
rsp = raw_input("I only got %d. Good enough? " % gps_sats)
if len(rsp) != 0 and rsp[0] != "y" and rsp[0] != "Y":
raise EnvironmentError("I can't see enough satellites, I give up!")
return gps_lat, gps_lon, gps_alt, gps_sats
def read(self):
raw_bytes = self.gps_fifo.read(self.unpack_size)
assert (len(raw_bytes) == self.unpack_size), "Invalid data block received from GPS reader"
latitude, longitude, altitude, satellites = struct.unpack(self.unpack_format, raw_bytes)
return latitude, longitude, altitude, satellites
####################################################################################################
#
# Start the Autopilot reading process.  Invoke the Infinite Improbability Drive with a strong cup of tea!
#
####################################################################################################
def AutopilotProcessor(sweep_installed, gps_installed, compass_installed, initial_orientation, file_control = False, gps_control = False, fp_filename = ""):
edx_target = 0.0
edy_target = 0.0
edz_target = 0.0
#-----------------------------------------------------------------------------------------------
# Create our poll object
#-----------------------------------------------------------------------------------------------
poll = select.poll()
#-----------------------------------------------------------------------------------------------
# Define the flight plan tuple indices.
#-----------------------------------------------------------------------------------------------
X = 0
Y = 1
Z = 2
PERIOD = 3
NAME = 4
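    #-----------------------------------------------------------------------------------------------
    # For illustration, each flight plan phase is a tuple of (X, Y, Z, PERIOD, NAME): for example,
    # (0.0, 0.0, 0.5, 3.0, "TAKEOFF") below climbs at 0.5 m/s for 3 seconds.
    #-----------------------------------------------------------------------------------------------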
#-----------------------------------------------------------------------------------------------
# Set up the various flight plans.
#-----------------------------------------------------------------------------------------------
takeoff_fp = []
landing_fp = []
abort_fp = []
file_fp = []
gps_locating_fp = []
gps_tracking_fp = []
gps_waypoints = []
sats_search_start = 0.0
#-----------------------------------------------------------------------------------------------
    # Build the standard takeoff and landing flight plans.  Takeoff is twice the speed of landing:
    # takeoff needs to clear the ground promptly, avoiding obstacles;
    # landing needs to hit the ground gently to avoid impact damage.
#-----------------------------------------------------------------------------------------------
takeoff_fp.append((0.0, 0.0, 0.0, 0.0, "RTF"))
takeoff_fp.append((0.0, 0.0, 0.5, 3.0, "TAKEOFF"))
takeoff_fp.append((0.0, 0.0, 0.0, 0.5, "HOVER"))
    landing_fp.append((0.0, 0.0, -0.25, 7.0, "LANDING")) # Extended landing for safety
#-----------------------------------------------------------------------------------------------
    # Build the initial post-takeoff GPS flight plan as an extended hover pending GPS satellite acquisition.
    # If acquisition fails within the timeout, it drops automatically into landing.
#-----------------------------------------------------------------------------------------------
gps_locating_fp.append((0.0, 0.0, 0.0, 360, "GPS: WHERE AM I?"))
gps_tracking_fp.append((0.0, 0.0, 0.0, 60, "GPS TRACKING: 0"))
#-----------------------------------------------------------------------------------------------
    # Non-existent object-avoidance flight plan initially.
#-----------------------------------------------------------------------------------------------
oa_fp = None
#-----------------------------------------------------------------------------------------------
# Build the file-based flight plan if that's what we're using.
#-----------------------------------------------------------------------------------------------
if file_control:
with open(fp_filename, 'rb') as fp_csv:
fp_reader = csv.reader(fp_csv)
for fp_row in fp_reader:
if len(fp_row) == 0 or (fp_row[0] != '' and fp_row[0][0] == '#'):
continue
if len(fp_row) != 5:
break
file_fp.append((float(fp_row[0]),
float(fp_row[1]),
float(fp_row[2]),
float(fp_row[3]),
fp_row[4].strip()))
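            #---------------------------------------------------------------------------------------
            # Illustrative only: the flight plan file parsed above is a CSV of five fields per row -
            # X, Y, Z velocities (m/s), period (s) and a phase name - e.g. a hypothetical row of
            # "0.0, 0.0, 0.5, 3.0, TAKEOFF" would climb at 0.5 m/s for 3 seconds.
            #---------------------------------------------------------------------------------------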
#-----------------------------------------------------------------------------------------------
# Build the GPS waypoint flight plan if that's what we're using.
#-----------------------------------------------------------------------------------------------
elif gps_control:
with open(fp_filename, 'rb') as fp_csv:
fp_reader = csv.reader(fp_csv)
for fp_row in fp_reader:
if len(fp_row) == 0 or (fp_row[0] != '' and fp_row[0][0] == '#'):
continue
if len(fp_row) != 4:
break
gps_waypoints.append((float(fp_row[0]),
float(fp_row[1]),
float(fp_row[2]),
int(fp_row[3])))
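            #---------------------------------------------------------------------------------------
            # Illustrative only: each GPS waypoint row above carries three floats and an integer,
            # matching the (latitude, longitude, altitude, satellites) shape of the target_gps tuples
            # used by the GPS tracking flight plan below.
            #---------------------------------------------------------------------------------------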
else:
#-------------------------------------------------------------------------------------------
# Without file or GPS control, just a standard takeoff and landing happens.
#-------------------------------------------------------------------------------------------
pass
#-----------------------------------------------------------------------------------------------
# Start up the Sweep and GPS processes if installed
#-----------------------------------------------------------------------------------------------
running = True
try:
sweep_started = False
gps_started = False
#-------------------------------------------------------------------------------------------
# Kick off sweep if necessary
#-------------------------------------------------------------------------------------------
if sweep_installed:
sweepp = SweepManager()
sweep_fd = sweepp.sweep_fifo.fileno()
poll.register(sweep_fd, select.POLLIN | select.POLLPRI)
sweep_started = True
#-------------------------------------------------------------------------------------------
# Kick off GPS if necessary
#-------------------------------------------------------------------------------------------
if gps_installed:
gpsp = GPSManager()
gps_fd = gpsp.gps_fifo.fileno()
poll.register(gps_fd, select.POLLIN | select.POLLPRI)
gps_started = True
except:
#-------------------------------------------------------------------------------------------
        # By setting this, we drop straight through the main while loop running the flight plans, and
        # immediately send a FINISHED message to the motion processor.
#-------------------------------------------------------------------------------------------
running = False
#-----------------------------------------------------------------------------------------------
# Loop for the period of the flight defined by the flight plan contents
#-----------------------------------------------------------------------------------------------
    pack_format = '=3f20s?' # evx, evy and evz float velocity targets, string state name, bool running
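    #-----------------------------------------------------------------------------------------------
    # For reference, struct.calcsize('=3f20s?') is 33 bytes per record: 3 x 4-byte floats, a 20-byte
    # padded state-name string and a single boolean byte.
    #-----------------------------------------------------------------------------------------------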
log = open("autopilot.log", "wb")
#-------------------------------------------------------------------------------------------
# Off we go!
#-------------------------------------------------------------------------------------------
with io.open("/dev/shm/autopilot_stream", mode = "wb", buffering = 0) as autopilot_fifo:
try:
phase = []
prev_phase = []
#--------------------------------------------------------------------------------------
            # Do not 'break' out of this loop; doing so would skip the 'else' at the end that does the
            # post-successful-flight cleanup.
#--------------------------------------------------------------------------------------
active_fp = takeoff_fp
afp_changed = True
start_time = time.time()
update_time = 0.1
elapsed_time = 0.0
while running:
#-----------------------------------------------------------------------------------
# How long is it since we were last here? Based on that, how long should we sleep (if
# at all) before working out the next step in the flight plan.
#-----------------------------------------------------------------------------------
delta_time = time.time() - start_time - elapsed_time
elapsed_time += delta_time
sleep_time = update_time - delta_time if delta_time < update_time else 0.0
paused_time = 0.0
results = poll.poll(sleep_time * 1000)
#----------------------------------------------------------------------------------
# Check whether there's input from Sweep or GPS to trigger a flight plan change
#----------------------------------------------------------------------------------
for fd, event in results:
if sweep_installed and fd == sweep_fd:
try:
sweep_critical, sweep_warning, sweep_distance, sweep_direction = sweepp.read()
if active_fp == takeoff_fp or active_fp == landing_fp:
#-------------------------------------------------------------------
# Ignore sweep objects on takeoff and landing
#-------------------------------------------------------------------
continue
elif sweep_critical:
#-------------------------------------------------------------------
# What target height has the flight achieved so far? Use this to
# determine how long the descent must be at fixed velocity of 0.3m/s.
# Add another second to make sure this really definitely lands!
#-------------------------------------------------------------------
descent_time = edz_target / 0.3 + 1.0
#-------------------------------------------------------------------
                                #-------------------------------------------------------------------
                                # Override the standard landing_fp with this custom one.
#-------------------------------------------------------------------
landing_fp = [(0.0, 0.0, -0.3, descent_time, "PROXIMITY CRITICAL %.2fm" % sweep_distance),]
#==================================================================#
# FLIGHT PLAN CHANGE #
#==================================================================#
log.write("AP: PROXIMITY LANDING %.2f METERS\n" % edz_target)
active_fp = landing_fp
afp_changed = True
#===================================================================
# FLIGHT PLAN CHANGE #
#===================================================================
elif sweep_warning:
#-------------------------------------------------------------------
# If we're just hovering, there's nothing to do here.
#-------------------------------------------------------------------
if math.pow(evx_target, 2) + math.pow(evy_target, 2) == 0:
continue
#-------------------------------------------------------------------
# Find the direction the frame should be going under the standard flight plan.
#-------------------------------------------------------------------
if active_fp != oa_fp:
paused_direction = math.atan2(evy_target, evx_target)
#-------------------------------------------------------------------
# If the obstacle is behind the direction of travel, ignore it
#-------------------------------------------------------------------
if abs((paused_direction - sweep_direction + math.pi) % (math.pi * 2) - math.pi) > math.pi / 2:
continue
#-------------------------------------------------------------------
# We've spotted an obstruction worth avoiding; if the oa_fp is not already
# running, then save off the current flight plan.
#-------------------------------------------------------------------
if active_fp != oa_fp:
paused_fp = active_fp
paused_time = elapsed_time
#-------------------------------------------------------------------
# We're to move +/- 90 degrees parallel to the obstruction direction;
# find out which is 'forwards' wrt the paused flight direction.
#--------------------------------------------------------------------
if abs((sweep_direction - paused_direction + 3 * math.pi / 2) % (math.pi * 2) - math.pi) < math.pi / 2:
oa_direction = (sweep_direction + 3 * math.pi / 2) % (math.pi * 2) - math.pi
else:
oa_direction = (sweep_direction + math.pi / 2) % (math.pi * 2) - math.pi
#-------------------------------------------------------------------
# Set up the object avoidance flight plan - slow down to 0.3m/s
#-------------------------------------------------------------------
oax_target = 0.3 * math.cos(oa_direction)
oay_target = 0.3 * math.sin(oa_direction)
oa_fp = [(oax_target, oay_target, 0.0, 10.0, "AVOID @ %d DEGREES" % int(math.degrees(sweep_direction))),]
#==================================================================#
# FLIGHT PLAN CHANGE #
#==================================================================#
log.write("AP: AVOIDING OBSTACLE @ %d DEGREES.\n" % int(math.degrees(sweep_direction)))
active_fp = oa_fp
afp_changed = True
#===================================================================
# FLIGHT PLAN CHANGE #
#===================================================================
else:
#-------------------------------------------------------------------
                                # Neither critical nor warning proximity; if we're currently using the oa_fp,
                                # reinstate the paused flight plan stored when the obstacle was detected.
#-------------------------------------------------------------------
if active_fp == oa_fp:
#==================================================================#
# FLIGHT PLAN CHANGE #
#==================================================================#
log.write("AP: OBSTACLE AVOIDED, RESUME PAUSED\n")
active_fp = paused_fp
afp_changed = True
#===================================================================
# FLIGHT PLAN CHANGE #
#===================================================================
paused_fp = None
oa_fp = None
except AssertionError as e:
'''
#GPS: Would it be better to set up landing, or is FINISHED / STOP better
#GPS: as something is seriously wrong with object detection? We MUST
#GPS: do either a landing or abort here; FINISHED will hover if not
#GPS: landed first.
'''
running = False
continue
if gps_installed and fd == gps_fd:
#---------------------------------------------------------------------------
# Run the GPS Processor, and convert response to X, Y coordinate in earth NSEW
# frame.
#---------------------------------------------------------------------------
current_gps = gpsp.read()
current_lat, current_lon, current_alt, current_sats = current_gps
#---------------------------------------------------------------------------
                        # If we aren't using the GPS flight plan, then move on to the next
                        # poll.poll() results list entry (if any) - note that the read above is
                        # nonetheless necessary to flush the FIFO.
#---------------------------------------------------------------------------
if not gps_control:
continue
#---------------------------------------------------------------------------
# If we're currently not using a GPS flightplan, keep GPS processing
# out of it.
#---------------------------------------------------------------------------
if active_fp != gps_locating_fp and active_fp != gps_tracking_fp:
continue
#---------------------------------------------------------------------------
# First, make sure the new data comes from enough satellites
#---------------------------------------------------------------------------
if current_sats < MIN_SATS and active_fp != gps_locating_fp:
#======================================================================#
# FLIGHT PLAN CHANGE #
#======================================================================#
log.write("AP: GPS TOO FEW SATS, LANDING...\n")
active_fp = landing_fp
afp_changed = True
#======================================================================#
# FLIGHT PLAN CHANGE #
#======================================================================#
continue
#---------------------------------------------------------------------------
# If the active_fp is the gps_locating_fp, then get on with satellite
# acquisition.
#---------------------------------------------------------------------------
if active_fp == gps_locating_fp:
if current_sats >= MIN_SATS:
#-------------------------------------------------------------------
                                # Setting the target to our current position here will trigger an
                                # update from the waypoint list lower down.
#-------------------------------------------------------------------
target_gps = current_gps
#==================================================================#
# FLIGHT PLAN CHANGE #
#==================================================================#
log.write("AP: GPS TRACKING\n")
active_fp = gps_tracking_fp
afp_changed = True
#==================================================================#
# FLIGHT PLAN CHANGE #
#==================================================================#
elif time.time() - sats_search_start > 60.0:
#==================================================================#
# FLIGHT PLAN CHANGE #
#==================================================================#
log.write("AP: GPS SATS SHORTAGE, LANDING...\n")
active_fp = landing_fp
afp_changed = True
#==================================================================#
# FLIGHT PLAN CHANGE #
#==================================================================#
#---------------------------------------------------------------------------
                        # First best effort to determine our current orientation based on the
                        # initial forward flight.
#---------------------------------------------------------------------------
if active_fp == gps_tracking_fp:
#-----------------------------------------------------------------------
# Latitude = North (+) / South (-) - 0.0 running E/W around the equator;
# range is +/- 90 degrees
# Longitude = East (+) / West (-) - 0.0 running N/S through Greenwich;
# range is +/- 180 degrees
#
# With a base level longitude and latitude in degrees, we can calculate the
# current X and Y coordinates in meters using equirectangular approximation:
#
# ns = movement North / South - movement in a northerly direction is positive
# ew = movement East / West - movement in an easterly direction is positive
# R = average radius of earth in meters = 6,371,000 meters
#
# ns = (lat2 - lat1) * R meters
# ew = (long2 - long1) * cos ((lat1 + lat2) / 2) * R meters
#
# Note longitude / latitude are in degrees and need to be converted into
# radians i.e degrees * pi / 180 both for the cos and also the EARTH_RADIUS scale
#
# More at http://www.movable-type.co.uk/scripts/latlong.html
#
#-----------------------------------------------------------------------
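                            #-----------------------------------------------------------------------
                            # Worked example (illustrative): a 0.0001 degree change in latitude is
                            # roughly 0.0001 * pi / 180 * 6371000 = ~11.1 meters of northerly movement.
                            #-----------------------------------------------------------------------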
#-----------------------------------------------------------------------
# Have we reached our destination?
#-----------------------------------------------------------------------
wibble = True
while wibble:
#-------------------------------------------------------------------
# Now get the direction from the current location to the target
#-------------------------------------------------------------------
target_lat, target_lon, target_alt, target_sats = target_gps
target_ns = math.radians(target_lat - current_lat) * EARTH_RADIUS
target_ew = math.radians((target_lon - current_lon) * math.cos(math.radians((target_lat + current_lat) / 2))) * EARTH_RADIUS
target_direction = math.atan2(target_ew, target_ns)
#-------------------------------------------------------------------
# Are we near the target?
#-------------------------------------------------------------------
target_distance = math.pow((math.pow(target_ns, 2) + math.pow(target_ew, 2)), 0.5)
if target_distance < 1.0: # meters
#---------------------------------------------------------------
# We're within one meter of the target, dig out the new waypoint
# if there is one, otherwise land.
#---------------------------------------------------------------
if len(gps_waypoints) > 0:
#-----------------------------------------------------------
# Move to the next waypoint target, and loop back to reprocess
# the new target_gps
#-----------------------------------------------------------
gps_waypoint = gps_waypoints.pop(0)
log.write("AP: GPS NEW WAYPOINT\n")
target_gps = gps_waypoint
continue
else:
#==========================================================#
# FLIGHT PLAN CHANGE #
#==========================================================#
log.write("AP: GPS @ TARGET, LANDING...\n")
active_fp = landing_fp
afp_changed = True
#==========================================================#
# FLIGHT PLAN CHANGE #
#==========================================================#
break
else:
#---------------------------------------------------------------
# We're not at the target yet, keep processing.
#---------------------------------------------------------------
wibble = False
else:
#-------------------------------------------------------------------
# If we're still tracking, sort out the processing.
#-------------------------------------------------------------------
if active_fp == gps_tracking_fp:
#---------------------------------------------------------------
# Yaw target based on quad IMU not GPS POV hence...
#---------------------------------------------------------------
yaw_target = (initial_orientation - target_direction + math.pi) % (2 * math.pi) - math.pi
s_yaw = math.sin(yaw_target)
c_yaw = math.cos(yaw_target)
#---------------------------------------------------------------
# Because our max speed is 1m/s and we receive GPS updates at 1Hz
# and each target is 'reached' when it's less than 1m away, we slow
# down near the destination.
#---------------------------------------------------------------
speed = 1.0 if target_distance > 5.0 else target_distance / 5.0
x = c_yaw * speed # m/s evx_target
y = s_yaw * speed # m/s evy_target
#---------------------------------------------------------------
                                # Pause for thought for 0.5s (i.e. stop), then head off in the new direction.
#---------------------------------------------------------------
gps_tracking_fp = [(x, y, 0.0, 3600, "GPS TARGET %dm %do" % (int(round(target_distance)), int(round(math.degrees(yaw_target)))))]
#==============================================================#
# FLIGHT PLAN CHANGE #
#==============================================================#
log.write("AP: GPS TRACKING UPDATE\n")
active_fp = gps_tracking_fp
afp_changed = True
#==============================================================#
# FLIGHT PLAN CHANGE #
#==============================================================#
else:
#-------------------------------------------------------------------------------
                    # Finished the poll.poll() results processing; has the world changed beneath our
                    # feet?  If so, reset the timings for the new flight plan.
#-------------------------------------------------------------------------------
if afp_changed:
afp_changed = False
elapsed_time = paused_time
start_time = time.time() - elapsed_time
#-----------------------------------------------------------------------------------
# Based on the elapsed time since the flight plan started, find which of the flight
# plan phases we are in.
#-----------------------------------------------------------------------------------
phase_time = 0.0
for phase in active_fp:
phase_time += phase[PERIOD]
if elapsed_time <= phase_time:
break
else:
#-------------------------------------------------------------------------------
# We've fallen out the end of one flight plan - change active_fp to the next in
# line.
#-------------------------------------------------------------------------------
if active_fp == takeoff_fp:
if gps_installed and gps_control:
#-----------------------------------------------------------------------
# Take a timestamp of this transition; it's used later to see whether we've
# been unable to find enough satellites in 60s
#-----------------------------------------------------------------------
sats_search_start = time.time()
#======================================================================#
# FLIGHT PLAN CHANGE #
#======================================================================#
log.write("AP: # SATS: ...\n")
active_fp = gps_locating_fp
#======================================================================#
# FLIGHT PLAN CHANGE #
#======================================================================#
elif file_control:
#======================================================================#
# FLIGHT PLAN CHANGE #
#======================================================================#
log.write("AP: FILE FLIGHT PLAN\n")
active_fp = file_fp
#======================================================================#
# FLIGHT PLAN CHANGE #
#======================================================================#
else:
#======================================================================#
# FLIGHT PLAN CHANGE #
#======================================================================#
log.write("AP: LANDING...\n")
active_fp = landing_fp
#======================================================================#
# FLIGHT PLAN CHANGE #
#======================================================================#
elif active_fp == gps_locating_fp:
#---------------------------------------------------------------------------
# We've dropped off the end of the satellite acquisition flight plan i.e. it's
# timed out without a good result. Swap to landing.
#---------------------------------------------------------------------------
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
log.write("AP: GPS SATS TIMEOUT, LANDING...\n")
active_fp = landing_fp
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
elif active_fp == gps_tracking_fp:
#---------------------------------------------------------------------------
                            # We're not going to get here as the tracking fp is set to 1 hour, and swaps
                            # fp above when it reaches its GPS target.  Nevertheless, let's include it.
# Swap to landing.
#---------------------------------------------------------------------------
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
log.write("AP: GPS TRACKING TIMEOUT, LANDING...\n")
active_fp = landing_fp
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
elif active_fp == file_fp:
#---------------------------------------------------------------------------
# We've finished the hard coded file flight plan, time to land.
#---------------------------------------------------------------------------
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
log.write("AP: FILE COMPLETE, LANDING...\n")
active_fp = landing_fp
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
elif active_fp == oa_fp:
#---------------------------------------------------------------------------
# Object avoidance has run out of time, land.
#---------------------------------------------------------------------------
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
log.write("AP: OA TIMEOUT, LANDING...\n")
active_fp = landing_fp
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
elif active_fp != landing_fp:
#---------------------------------------------------------------------------
# This shouldn't ever get hit; finished flight plans all have next steps
# above, but may as well cover it.
#---------------------------------------------------------------------------
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
log.write("AP: UNEXPLAINED, LANDING...\n")
active_fp = landing_fp
#==========================================================================#
# FLIGHT PLAN CHANGE #
#==========================================================================#
elif active_fp == landing_fp:
#---------------------------------------------------------------------------
# If we've finished the landing flight plan, the autopilot's job is done.
#---------------------------------------------------------------------------
log.write("AP: LANDING COMPLETE\n")
running = False
#-------------------------------------------------------------------------------
                        # The flight plan has completed and moved on to the next; update the timing accordingly.
#-------------------------------------------------------------------------------
start_time = time.time()
elapsed_time = 0.0
#-----------------------------------------------------------------------------------
# Have we crossed into a new phase of the flight plan? Log it if so.
#-----------------------------------------------------------------------------------
phase_name = phase[NAME]
phase_changed = False
if phase != prev_phase:
phase_changed = True
prev_phase = phase
#-----------------------------------------------------------------------------------
# Get the velocity targets for this phase, and integrate to get distance. Distance
# is used in the abort fp generation.
#-----------------------------------------------------------------------------------
evx_target = phase[X]
evy_target = phase[Y]
evz_target = phase[Z]
edx_target += evx_target * delta_time
edy_target += evy_target * delta_time
edz_target += evz_target * delta_time
#-----------------------------------------------------------------------------------
# No point updating the main processor velocities if nothing has changed.
#-----------------------------------------------------------------------------------
if not phase_changed:
continue
log.write("AP: PHASE CHANGE: %s\n" % phase_name)
output = struct.pack(pack_format,
evx_target,
evy_target,
evz_target,
phase_name,
running)
autopilot_fifo.write(output)
else:
#-----------------------------------------------------------------------------------
# We've dropped out of the end of all flight plans - let the motion processor know we
# are done.
#-----------------------------------------------------------------------------------
log.write("AP: FINISHED\n")
output = struct.pack(pack_format,
0.0,
0.0,
0.0,
"FINISHED",
False)
autopilot_fifo.write(output)
except KeyboardInterrupt as e:
#---------------------------------------------------------------------------------------
            # The motion processor is finished with us; we should finish too, and we have done so by
            # breaking out of the 'with'.
#---------------------------------------------------------------------------------------
if not autopilot_fifo.closed:
print "Autopilot FIFO not closed! WTF!"
except Exception as e:
log.write("AP: UNIDENTIFIED EXCEPTION: %s\n" % e)
finally:
#---------------------------------------------------------------------------------------
# Cleanup Sweep if installed.
#---------------------------------------------------------------------------------------
if sweep_installed and sweep_started:
print "Stopping Sweep... ",
sweepp.cleanup()
poll.unregister(sweep_fd)
print "stopped."
#---------------------------------------------------------------------------------------
# Cleanup GPS if installed.
#---------------------------------------------------------------------------------------
if gps_installed and gps_started:
print "Stopping GPS... ",
gpsp.cleanup()
poll.unregister(gps_fd)
print "stopped."
log.close()
####################################################################################################
#
# Process the Autopilot data.
#
####################################################################################################
class AutopilotManager():
def __init__(self, sweep_installed, gps_installed, compass_installed, initial_orientation, file_control, gps_control, fp_filename):
#-------------------------------------------------------------------------------------------
        # Setup a shared memory based data stream for the Autopilot output
#-------------------------------------------------------------------------------------------
os.mkfifo("/dev/shm/autopilot_stream")
self.autopilot_process = subprocess.Popen(["python", __file__, "AUTOPILOT", "%s" % sweep_installed, "%s" % gps_installed, "%s" % compass_installed, "%f" % initial_orientation, "%s" % file_control, "%s" % gps_control, fp_filename], preexec_fn = Daemonize)
while True:
try:
self.autopilot_fifo = io.open("/dev/shm/autopilot_stream", mode="rb")
except:
continue
else:
break
self.unpack_format = "=3f20s?"
self.unpack_size = struct.calcsize(self.unpack_format)
def flush(self):
#-------------------------------------------------------------------------------------------
# Read what should be the backlog of reads, and return how many there are.
#-------------------------------------------------------------------------------------------
raw_bytes = self.autopilot_fifo.read(self.unpack_size)
assert (len(raw_bytes) % self.unpack_size == 0), "Incomplete Autopilot data received"
return int(len(raw_bytes) / self.unpack_size)
def read(self):
raw_bytes = self.autopilot_fifo.read(self.unpack_size)
assert (len(raw_bytes) == self.unpack_size), "Incomplete data received from Autopilot reader"
evx_target, evy_target, evz_target, state_name, keep_looping = struct.unpack(self.unpack_format, raw_bytes)
return evx_target, evy_target, evz_target, state_name, keep_looping
def cleanup(self):
#-------------------------------------------------------------------------------------------
# Stop the Autopilot process if it's still running, and cleanup the FIFO.
#-------------------------------------------------------------------------------------------
try:
            if self.autopilot_process.poll() is None:
self.autopilot_process.send_signal(signal.SIGINT)
self.autopilot_process.wait()
except KeyboardInterrupt as e:
pass
self.autopilot_fifo.close()
os.unlink("/dev/shm/autopilot_stream")
####################################################################################################
#
# Remote control manager
#
####################################################################################################
class RCManager():
def __init__(self):
self.server = socket.socket()
addr = "192.168.42.1"
port = 31415
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((addr, port))
self.server.listen(5)
def connect(self):
pack_format = "=?"
self.connection, addr = self.server.accept()
connection_fd = self.connection.fileno()
output = struct.pack(pack_format, True)
self.connection.send(output)
return connection_fd
def send(self):
pass
def read(self):
unpack_format = "=ffffb?"
unpack_size = struct.calcsize(unpack_format)
raw = self.connection.recv(unpack_size)
assert (len(raw) == unpack_size), "Invalid data"
#-----------------------------------------------------------------------------------
        # React to the action
#-----------------------------------------------------------------------------------
formatted = struct.unpack(unpack_format, raw)
assert (len(formatted) == 6), "Bad formatted size"
evz_target = formatted[0]
yr_target = formatted[1]
evx_target = formatted[2]
evy_target = formatted[3]
state = formatted[4]
beep = formatted[5]
return evx_target, evy_target, evz_target, yr_target, state, beep
def disconnect(self):
pack_format = "=?"
output = struct.pack(pack_format, False)
self.connection.send(output)
self.connection.close()
def close(self):
self.server.shutdown(socket.SHUT_RDWR)
self.server.close()
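####################################################################################################
#
# Illustrative only: a minimal sketch of the remote-control client side of the RCManager protocol
# above, assuming the same '=?' handshake and '=ffffb?' record layout.  The function name and its
# defaults are hypothetical; it is not called anywhere in this file.
#
####################################################################################################
def ExampleRCClient(evx = 0.0, evy = 0.0, evz = 0.0, yr = 0.0, state = 0, beep = False):
    import socket
    import struct
    client = socket.socket()
    client.connect(("192.168.42.1", 31415))
    #-----------------------------------------------------------------------------------------------
    # Wait for the server's single boolean handshake before sending anything.
    #-----------------------------------------------------------------------------------------------
    (connected,) = struct.unpack("=?", client.recv(struct.calcsize("=?")))
    if connected:
        #-------------------------------------------------------------------------------------------
        # One control record: vertical velocity, yaw rate, horizontal velocities, state and beep,
        # matching the unpack order in RCManager.read().
        #-------------------------------------------------------------------------------------------
        client.send(struct.pack("=ffffb?", evz, yr, evx, evy, state, beep))
    client.close()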
####################################################################################################
#
# Video at 10fps. Each frame is 320 x 320 pixels. Each macro-block is 16 x 16 pixels. Due to an
# extra column of macro-blocks (dunno why), that means each frame breaks down into 21 columns by
# 20 rows = 420 macro-blocks, each of which is 4 bytes - 1 signed byte X, 1 signed byte Y and 2 unsigned
# bytes SAD (sum of absolute differences).
#
####################################################################################################
def VideoProcessor(frame_width, frame_height, frame_rate):
with picamera.PiCamera() as camera:
camera.resolution = (frame_width, frame_height)
camera.framerate = frame_rate
#-------------------------------------------------------------------------------------------
        # A contrast of 42 seems to work well - completely arbitrary.
#-------------------------------------------------------------------------------------------
camera.contrast = 42
with io.open("/dev/shm/video_stream", mode = "wb", buffering = 0) as vofi:
camera.start_recording('/dev/null', format='h264', motion_output=vofi, quality=42)
try:
while True:
camera.wait_recording(10)
except KeyboardInterrupt:
pass
finally:
camera.stop_recording()
####################################################################################################
#
# Class to process video frame macro-block motion tracking.
#
####################################################################################################
class VideoManager:
def __init__(self, video_fifo, yaw_increment):
self.video_fifo = video_fifo
self.yaw_increment = yaw_increment
self.phase = 0
mb_size = 16 # 16 x 16 pixels are combined to make a macro-block
bytes_per_mb = 4 # Each macro-block is 4 bytes, 1 X, 1 Y and 2 SAD
self.mbs_per_frame = int(round((frame_width / mb_size + 1) * (frame_height / mb_size)))
self.bytes_per_frame = self.mbs_per_frame * bytes_per_mb
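        #-------------------------------------------------------------------------------------------
        # Worked example (illustrative): for a 320 x 320 frame, that's (320 / 16 + 1) * (320 / 16) =
        # 21 * 20 = 420 macro-blocks of 4 bytes each, i.e. 1680 bytes per frame.
        #-------------------------------------------------------------------------------------------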
def flush(self):
#-------------------------------------------------------------------------------------------
# Read what should be the backlog of frames, and return how many there are.
#-------------------------------------------------------------------------------------------
frame_bytes = self.video_fifo.read(self.bytes_per_frame)
assert (len(frame_bytes) % self.bytes_per_frame == 0), "Incomplete video frames received: %f" % (len(frame_bytes) / self.bytes_per_frame)
return (len(frame_bytes) / self.bytes_per_frame)
def phase0(self):
#-------------------------------------------------------------------------------------------
# Read the video stream and parse into a list of macro-block vectors
#-------------------------------------------------------------------------------------------
self.vector_dict = {}
self.vector_list = []
self.c_yaw = math.cos(self.yaw_increment)
self.s_yaw = math.sin(self.yaw_increment)
sign = 1 # Was '-1 if i_am_chloe else 1' as Chloe had the camera twisted by 180 degrees
frames = self.video_fifo.read(self.bytes_per_frame)
assert (len(frames) != 0), "Shouldn't be here, no bytes to read"
assert (len(frames) % self.bytes_per_frame == 0), "Incomplete frame bytes read"
num_frames = int(len(frames) / self.bytes_per_frame)
assert (num_frames == 1), "Read more than one frame somehow?"
#-------------------------------------------------------------------------------------------
# Convert the data to byte, byte, ushort of x, y, sad structure and process them. The
# exception here happens when a macro-block is filled with zeros, indicating either a full
# reset or no movement, and hence no processing required.
#-------------------------------------------------------------------------------------------
format = '=' + 'bbH' * self.mbs_per_frame * num_frames
iframe = struct.unpack(format, frames)
assert (len(iframe) % 3 == 0), "iFrame size error"
self.mbs_per_iframe = int(round(len(iframe) / 3 / num_frames))
assert (self.mbs_per_iframe == self.mbs_per_frame), "iframe mb count different to frame mb count"
#---------------------------------------------------------------------------------------
        # Split the iframe into a list of macro-block vectors.  The mapping from each iframe
        # entry to idx, idy depends on how the camera is orientated WRT the frame.
        # This must be checked / calibrated.
#
# Note: all macro-block vectors are even integers, so we divide them by 2 here for use
# walking the vector dictionary for neighbours; we reinstate this at the end.
#
#---------------------------------------------------------------------------------------
for ii in range(self.mbs_per_iframe):
idx = iframe[3 * ii + 1]
idy = iframe[3 * ii]
assert (idx % 2 == 0 and idy % 2 == 0), "Odd (not even) MB vector"
idx = int(round(sign * idx / 2))
idy = int(round(sign * idy / 2))
if idx == 0 and idy == 0:
continue
self.vector_list.append((idx, idy))
#-------------------------------------------------------------------------------------------
        # If the vector list is empty, this indicates a frame with no movement; this is not strictly
        # an error, more of a reset to tell the outer world about.
#-------------------------------------------------------------------------------------------
if len(self.vector_list) == 0:
raise ValueError("Empty Video Frame Object")
def phase1(self):
#-------------------------------------------------------------------------------------------
# Unyaw the list of vectors, overwriting the yaw list
#-------------------------------------------------------------------------------------------
unyawed_vectors = []
#-------------------------------------------------------------------------------------------
        # Undo the yaw increment for better macro-block matching.  Also, multiple iterations of this
        # keep the distance / direction in the earth frame as is required for fusion.
#-------------------------------------------------------------------------------------------
for vector in self.vector_list:
idx, idy = vector
uvx = self.c_yaw * idx - self.s_yaw * idy
uvy = self.s_yaw * idx + self.c_yaw * idy
unyawed_vectors.append((int(round(uvx)), int(round(uvy))))
self.vector_list = unyawed_vectors
def phase2(self):
#-------------------------------------------------------------------------------------------
# Build the dictionary of unyawed vectors; they score 2 because of the next phase
#-------------------------------------------------------------------------------------------
for (idx, idy) in self.vector_list:
if (idx, idy) in self.vector_dict:
self.vector_dict[(idx, idy)] += 2
else:
self.vector_dict[(idx, idy)] = 2
def phase3(self):
#-------------------------------------------------------------------------------------------
# Pass again through the dictionary of vectors, building up clusters based on neighbours.
#-------------------------------------------------------------------------------------------
best_score = 0
self.best_vectors = []
for vector in self.vector_dict.keys():
vector_score = self.vector_dict[vector]
for ii in range(-1, 2):
for jj in range(-1, 2):
if ii == 0 and jj == 0:
continue
vector_x, vector_y = vector
neighbour = (vector_x + ii, vector_y + jj)
if neighbour in self.vector_dict:
vector_score += self.vector_dict[neighbour]
if vector_score > best_score:
best_score = vector_score
self.best_vectors = [(vector, vector_score)]
elif vector_score == best_score:
self.best_vectors.append((vector, vector_score))
def phase4(self):
#-------------------------------------------------------------------------------------------
# Now we've collected the clusters of the best score vectors in the frame, average and reyaw
# it before returning the result.
#-------------------------------------------------------------------------------------------
sum_score = 0
sum_x = 0
sum_y = 0
for (vector_x, vector_y), vector_score in self.best_vectors:
sum_x += vector_x * vector_score
sum_y += vector_y * vector_score
sum_score += vector_score
idx = self.c_yaw * sum_x + self.s_yaw * sum_y
idy = -self.s_yaw * sum_x + self.c_yaw * sum_y
return 2 * idx / sum_score, 2 * idy / sum_score
def process(self):
assert(self.phase < 5), "Phase shift in motion vector processing"
#-------------------------------------------------------------------------------------------
# Phase 0 - load the data and convert into a macro-block vector list
#-------------------------------------------------------------------------------------------
if self.phase == 0:
self.phase0()
rv = None
#-------------------------------------------------------------------------------------------
# Phase 1 - take the list of macro blocks and undo yaw
#-------------------------------------------------------------------------------------------
elif self.phase == 1:
self.phase1()
rv = None
#-------------------------------------------------------------------------------------------
# Phase 2 - build the dictionary of int rounded unyawed vectors
#-------------------------------------------------------------------------------------------
elif self.phase == 2:
self.phase2()
rv = None
#-------------------------------------------------------------------------------------------
# Phase 3 - walk the dictionary, looking for neighbouring clusters and score them
#-------------------------------------------------------------------------------------------
elif self.phase == 3:
self.phase3()
rv = None
#-------------------------------------------------------------------------------------------
# Phase 4 - average highest peak clusters, redo yaw, and return result
#-------------------------------------------------------------------------------------------
elif self.phase == 4:
idx, idy = self.phase4()
rv = idx, idy
self.phase += 1
return rv
####################################################################################################
#
# Class to split initialisation, flight startup and flight control
#
####################################################################################################
class Quadcopter:
MOTOR_LOCATION_FRONT = 0b00000001
MOTOR_LOCATION_BACK = 0b00000010
MOTOR_LOCATION_LEFT = 0b00000100
MOTOR_LOCATION_RIGHT = 0b00001000
MOTOR_ROTATION_CW = 1
MOTOR_ROTATION_ACW = 2
keep_looping = False
#===============================================================================================
# One-off initialization
#===============================================================================================
def __init__(self):
#-------------------------------------------------------------------------------------------
# Who am I?
#-------------------------------------------------------------------------------------------
global i_am_zoe
global i_am_hermione
global i_am_penelope
i_am_zoe = False
i_am_hermione = False
i_am_penelope = False
my_name = os.uname()[1]
if my_name == "zoe.local" or my_name == "zoe":
print "Hi, I'm Zoe. Nice to meet you!"
i_am_zoe = True
elif my_name == "hermione.local" or my_name == "hermione":
print "Hi, I'm Hermione. Nice to meet you!"
i_am_hermione = True
elif my_name == "penelope.local" or my_name == "penelope":
print "Hi, I'm Penelope. Nice to meet you!"
i_am_penelope = True
else:
print "Sorry, I'm not qualified to fly this piDrone."
return
#-------------------------------------------------------------------------------------------
# Set up the global poll object
#-------------------------------------------------------------------------------------------
global poll
poll = select.poll()
#-------------------------------------------------------------------------------------------
        # Set up extra sensors based on quad identity.
        # - Zoe is a Pi0W with a single CPU; many features are turned off to avoid spawning multiple
        #   processes within that single CPU.  She runs one and a bit processes - the bit is the Video
        #   processing which is mostly handled by the GPU.
        # - Hermione is a B3 with 4 CPUs.  As a result she can run the four and a bit processes required
        #   for all features to be enabled.
        # - Penelope is a B3+ with 4 CPUs.  As a result she can run the four and a bit processes required
        #   for all features to be enabled.
#-------------------------------------------------------------------------------------------
X8 = False
if i_am_zoe:
self.compass_installed = False
self.camera_installed = True
self.gll_installed = True
self.gps_installed = False
self.sweep_installed = False
self.autopilot_installed = False
self.rc_installed = True
elif i_am_hermione:
self.compass_installed = True
self.camera_installed = True
self.gll_installed = True
self.gps_installed = True
self.sweep_installed = True
self.autopilot_installed = True
self.rc_installed = False
X8 = True
elif i_am_penelope:
self.compass_installed = True
self.camera_installed = True
self.gll_installed = True
self.gps_installed = True
self.sweep_installed = False
self.autopilot_installed = True
self.rc_installed = False
X8 = True
assert (self.autopilot_installed ^ self.rc_installed), "Autopilot or RC but not both nor neither"
'''
#AB! Swap the above to autonomous_control and manual_control
'''
#-------------------------------------------------------------------------------------------
# Lock code permanently in memory - no swapping to disk
#-------------------------------------------------------------------------------------------
mlockall()
#-------------------------------------------------------------------------------------------
# Set up the base logging
#-------------------------------------------------------------------------------------------
global logger
logger = logging.getLogger('QC logger')
logger.setLevel(logging.INFO)
#-------------------------------------------------------------------------------------------
# Create file and console logger handlers - the file is written into shared memory and only
# dumped to disk / SD card at the end of a flight for performance reasons
#-------------------------------------------------------------------------------------------
global file_handler
file_handler = logging.FileHandler("qcstats.csv", 'w')
file_handler.setLevel(logging.WARNING)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.CRITICAL)
#-------------------------------------------------------------------------------------------
# Create a formatter and add it to both handlers
#-------------------------------------------------------------------------------------------
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
file_formatter = logging.Formatter('[%(levelname)s] (%(threadName)-10s) %(funcName)s %(lineno)d, %(message)s')
file_handler.setFormatter(file_formatter)
#-------------------------------------------------------------------------------------------
# Add both handlers to the logger
#-------------------------------------------------------------------------------------------
logger.addHandler(console_handler)
logger.addHandler(file_handler)
#-------------------------------------------------------------------------------------------
        # First log: who's flying and under what configuration.
#-------------------------------------------------------------------------------------------
logger.warning("%s is flying.", "Zoe" if i_am_zoe else "Hermione" if i_am_hermione else "Penelope")
#-------------------------------------------------------------------------------------------
        # Set the BCM pins assigned to the brown-out, FIFO overflow and GLL data-ready interrupts, plus the buzzer.
#-------------------------------------------------------------------------------------------
global GPIO_POWER_BROWN_OUT_INTERRUPT
GPIO_POWER_BROWN_OUT_INTERRUPT = 35
global GPIO_FIFO_OVERFLOW_INTERRUPT
GPIO_FIFO_OVERFLOW_INTERRUPT = 24 if X8 else 22
global GPIO_GLL_DR_INTERRUPT
GPIO_GLL_DR_INTERRUPT = 5
global GPIO_BUZZER
GPIO_BUZZER = 25
#-------------------------------------------------------------------------------------------
        # Enable RPIO for ESC PWM.  This must be set up prior to adding the SignalHandler below, or it
        # will override what we set, thus killing the "Kill Switch".
#-------------------------------------------------------------------------------------------
PWMInit()
#-------------------------------------------------------------------------------------------
# Enable GPIO for the FIFO overflow interrupt.
#-------------------------------------------------------------------------------------------
GPIOInit(self.fifoOverflowISR)
#-------------------------------------------------------------------------------------------
# Set the signal handler here so the core processing loop can be stopped (or not started) by
# Ctrl-C.
#-------------------------------------------------------------------------------------------
signal.signal(signal.SIGINT, self.shutdownSignalHandler)
#-------------------------------------------------------------------------------------------
# Zoe is a Quad, Hermione and Penelope are X8
#-------------------------------------------------------------------------------------------
ESC_BCM_FLT = 0
ESC_BCM_FRT = 0
ESC_BCM_BLT = 0
ESC_BCM_BRT = 0
ESC_BCM_FLU = 0
ESC_BCM_FRU = 0
ESC_BCM_BLU = 0
ESC_BCM_BRU = 0
if not X8:
ESC_BCM_FLT = 27
ESC_BCM_FRT = 17
ESC_BCM_BLT = 26
ESC_BCM_BRT = 19
else:
ESC_BCM_FLT = 27
ESC_BCM_FRT = 17
ESC_BCM_BLT = 26
ESC_BCM_BRT = 20
ESC_BCM_FLU = 22
ESC_BCM_FRU = 23
ESC_BCM_BLU = 16
ESC_BCM_BRU = 19
pin_list = [ESC_BCM_FLT,
ESC_BCM_FRT,
ESC_BCM_BLT,
ESC_BCM_BRT,
ESC_BCM_FLU,
ESC_BCM_FRU,
ESC_BCM_BLU,
ESC_BCM_BRU]
location_list = [self.MOTOR_LOCATION_FRONT | self.MOTOR_LOCATION_LEFT,
self.MOTOR_LOCATION_FRONT | self.MOTOR_LOCATION_RIGHT,
self.MOTOR_LOCATION_BACK | self.MOTOR_LOCATION_LEFT,
self.MOTOR_LOCATION_BACK | self.MOTOR_LOCATION_RIGHT,
self.MOTOR_LOCATION_FRONT | self.MOTOR_LOCATION_LEFT,
self.MOTOR_LOCATION_FRONT | self.MOTOR_LOCATION_RIGHT,
self.MOTOR_LOCATION_BACK | self.MOTOR_LOCATION_LEFT,
self.MOTOR_LOCATION_BACK | self.MOTOR_LOCATION_RIGHT]
rotation_list = [self.MOTOR_ROTATION_ACW,
self.MOTOR_ROTATION_CW,
self.MOTOR_ROTATION_CW,
self.MOTOR_ROTATION_ACW,
self.MOTOR_ROTATION_CW,
self.MOTOR_ROTATION_ACW,
self.MOTOR_ROTATION_ACW,
self.MOTOR_ROTATION_CW]
name_list = ['front left topside',
'front right topside',
'back left topside',
'back right topside',
'front left underside',
'front right underside',
'back left underside',
'back right underside']
#-------------------------------------------------------------------------------------------
# Prime the ESCs to stop their annoying beeping! All 3 of P, H & Z use the T-motor ESCs
# with the same ESC firmware so have the same spin_pwm
#-------------------------------------------------------------------------------------------
global stfu_pwm
global spin_pwm
stfu_pwm = 1000
spin_pwm = 0
if i_am_zoe:
spin_pwm = 1150
elif i_am_hermione:
spin_pwm = 1150
elif i_am_penelope:
spin_pwm = 1150
self.esc_list = []
for esc_index in range(8 if X8 else 4):
esc = ESC(pin_list[esc_index], location_list[esc_index], rotation_list[esc_index], name_list[esc_index])
self.esc_list.append(esc)
#===========================================================================================
# Globals for the IMU setup
# adc_frequency - the sampling rate of the ADC
        # sampling_rate - the data sampling rate and thus the data ready interrupt rate driving
        #                 motion processing.
# motion_rate - the target frequency motion processing occurs under perfect conditions.
# fusion_rate - the sampling rate of the GLL and the video frame rate
# alpf - the accelerometer low pass filter
# glpf - the gyrometer low pass filter
#===========================================================================================
global adc_frequency
global sampling_rate
global motion_rate
global fusion_rate
adc_frequency = 1000 # defined by dlpf >= 1; DO NOT USE ZERO => 8000 adc_frequency
fusion_rate = 10
if self.camera_installed or self.gll_installed:
if i_am_hermione:
sampling_rate = 500 # Hz
motion_rate = 75 # Hz
elif i_am_penelope:
sampling_rate = 500 # Hz
motion_rate = 75 # Hz
elif i_am_zoe:
sampling_rate = 333 # Hz
motion_rate = 66 # Hz
else:
sampling_rate = 500 # Hz - thought 1000 should work, but it doesn't
motion_rate = 75 # Hz - thought 100 should work, but it doesn't
glpf = 1 # 184Hz
#-------------------------------------------------------------------------------------------
# This is not for antialiasing: the accelerometer low pass filter happens between the ADC
# rate and our IMU sampling rate. The ADC rate is 1kHz in this case. However, I've seen poor
# behaviour in double integration when the IMU sampling rate is 500Hz and alpf = 460Hz.
#-------------------------------------------------------------------------------------------
if sampling_rate == 1000: # SRD = 0 (1kHz)
alpf = 0 # alpf = 460Hz
elif sampling_rate == 500: # SRD = 1 (500Hz)
alpf = 1 # alpf = 184Hz
elif sampling_rate >= 200: # SRD = 2, 3, 4 (333, 250, 200Hz)
alpf = 2 # alpf = 92Hz
elif sampling_rate >= 100: # SRD = 5, 6, 7, 8, 9 (166, 143, 125, 111, 100Hz)
alpf = 3 # alpf = 41Hz
else:
#--------------------------------------------------------------------------------------
# There's no point going less than 100Hz IMU sampling; we need about 100Hz motion
# processing for a reasonable level of stability.
#--------------------------------------------------------------------------------------
print "SRD + alpf useless: forget it!"
return
global mpu6050
mpu6050 = MPU6050(0x68, alpf, glpf)
#-------------------------------------------------------------------------------------------
# Scheduling parameters defining standard and critical FIFO block counts
#
# FIFO_MINIMUM - The smallest number of batches to collect and average before running them
# through the motion processor.
# FIFO_MAXIMUM - The largest number of batches allowed through the motion processor; any
# more risks FIFO overflow.
#
# 512/12 is the maximum number of batches in the IMU FIFO
#
#-------------------------------------------------------------------------------------------
self.FIFO_MINIMUM = int(round(sampling_rate / motion_rate))
self.FIFO_MAXIMUM = int(round(512 / 12)) - self.FIFO_MINIMUM
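#-------------------------------------------------------------------------------------------
# Worked example (my arithmetic, assuming true division is in force for this module): with
# the Hermione settings above, sampling_rate = 500 and motion_rate = 75, so FIFO_MINIMUM =
# round(500 / 75) = 7 batches, and FIFO_MAXIMUM = round(512 / 12) - 7 = 43 - 7 = 36 batches,
# i.e. motion processing runs on anything between 7 and 36 batches of IMU FIFO data.
#-------------------------------------------------------------------------------------------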
#-------------------------------------------------------------------------------------------
# Initialize the compass object.
#-------------------------------------------------------------------------------------------
if self.compass_installed:
mpu6050.initCompass()
#-------------------------------------------------------------------------------------------
# Initialize the Garmin LiDAR-Lite V3 at 10Hz - this is also used for the camera frame rate.
#AB? The only time I tried 20 on a dimly lit lawn, it leapt up and crashed down.
#-------------------------------------------------------------------------------------------
if self.gll_installed:
global gll
if i_am_penelope or i_am_zoe:
gll = GLLv3HP()
else:
gll = GLLv3(rate = fusion_rate)
#===============================================================================================
# Keyboard / command line input between flights for CLI update etc
#===============================================================================================
def go(self):
cli_argv = ""
if self.rc_installed:
self.rc = RCManager()
self.rc_status = RC_DONE
shutdown = False
while not shutdown:
print "============================================"
cli_argv = raw_input("Wassup? ")
print "============================================"
if len(cli_argv) != 0 and (cli_argv == 'exit' or cli_argv == 'quit'):
shutdown = True
continue
self.argv = sys.argv[1:] + cli_argv.split()
#---------------------------------------------------------------------------------------
# Check the command line for calibration or flight parameters
#---------------------------------------------------------------------------------------
print "Just checking a few details. Gimme a few seconds..."
try:
cli_parms = CheckCLI(self.argv)
except ValueError, err:
print "Command line error: %s" % err
continue
self.fly(cli_parms)
else:
if self.rc_installed:
self.rc.close()
self.shutdown()
#===============================================================================================
# Per-flight configuration, initializations and flight control itself
#===============================================================================================
def fly(self, cli_parms):
print "Just checking a few details. Gimme a few seconds..."
#-------------------------------------------------------------------------------------------
# Check the command line for calibration or flight parameters
#-------------------------------------------------------------------------------------------
fp_filename, calibrate_0g, cc_compass, yaw_control, file_control, rc_control, gps_control, add_waypoint, clear_waypoints, hover_pwm, vdp_gain, vdi_gain, vdd_gain, vvp_gain, vvi_gain, vvd_gain, hdp_gain, hdi_gain, hdd_gain, hvp_gain, hvi_gain, hvd_gain, prp_gain, pri_gain, prd_gain, rrp_gain, rri_gain, rrd_gain, yrp_gain, yri_gain, yrd_gain, test_case, atau, diagnostics = cli_parms
logger.warning("fp_filename = %s, calibrate_0g = %d, check / calibrate compass = %s, yaw_control = %s, file_control = %s, gps_control = %s, add_waypoint = %s, clear_waypoints = %s, hover_pwm = %d, vdp_gain = %f, vdi_gain = %f, vdd_gain= %f, vvp_gain = %f, vvi_gain = %f, vvd_gain= %f, hdp_gain = %f, hdi_gain = %f, hdd_gain = %f, hvp_gain = %f, hvi_gain = %f, hvd_gain = %f, prp_gain = %f, pri_gain = %f, prd_gain = %f, rrp_gain = %f, rri_gain = %f, rrd_gain = %f, yrp_gain = %f, yri_gain = %f, yrd_gain = %f, test_case = %d, atau = %f, diagnostics = %s",
fp_filename, calibrate_0g, cc_compass, yaw_control, file_control, gps_control, add_waypoint, clear_waypoints, hover_pwm, vdp_gain, vdi_gain, vdd_gain, vvp_gain, vvi_gain, vvd_gain, hdp_gain, hdi_gain, hdd_gain, hvp_gain, hvi_gain, hvd_gain, prp_gain, pri_gain, prd_gain, rrp_gain, rri_gain, rrd_gain, yrp_gain, yri_gain, yrd_gain, test_case, atau, diagnostics)
#-------------------------------------------------------------------------------------------
# Calibrate gravity or use previous settings
#-------------------------------------------------------------------------------------------
if calibrate_0g:
if not mpu6050.calibrate0g():
print "0g calibration error, abort"
return
elif not mpu6050.load0gCalibration():
print "0g calibration data not found."
return
#-------------------------------------------------------------------------------------------
# Calibrate compass.
#-------------------------------------------------------------------------------------------
if self.compass_installed:
if cc_compass:
if not mpu6050.compassCheckCalibrate():
print "Compass check / calibration error, abort"
return
elif not mpu6050.loadCompassCalibration():
print "Compass calibration data not found"
return
elif cc_compass:
print "Compass not installed, check / calibration not possible."
return
#-------------------------------------------------------------------------------------------
# Sanity check that if we are using RC flight plan, then RC needs to have been installed.
#-------------------------------------------------------------------------------------------
if rc_control and not self.rc_installed:
print "Can't do RC processing without RC installed!"
return
#-------------------------------------------------------------------------------------------
# Sanity check that if we are using a GPS flight plan, then GPS needs to have been installed.
#-------------------------------------------------------------------------------------------
if (gps_control or add_waypoint) and not self.gps_installed:
print "Can't do GPS processing without GPS installed!"
return
#-------------------------------------------------------------------------------------------
# Add GPS waypoint.
#-------------------------------------------------------------------------------------------
if add_waypoint:
gpsp = GPSManager()
try:
lat, lon, alt, sats = gpsp.acquireSatellites()
except EnvironmentError as e:
print e
else:
with open("GPSWaypoints.csv", "ab") as gps_waypoints:
gps_waypoints.write("%.10f, %.10f, %.10f, %d\n" % (lat, lon, alt, sats))
finally:
gpsp.cleanup()
gpsp = None
return
#-------------------------------------------------------------------------------------------
# Clear GPS waypoints.
#-------------------------------------------------------------------------------------------
if clear_waypoints:
try:
os.remove("GPSWaypoints.csv")
except OSError:
pass
return
#-------------------------------------------------------------------------------------------
# START TESTCASE 1 CODE: spin up each blade individually for 5s each and check they all turn
# the right way. At the same time, log X, Y and Z accelerometer readings
# to measure noise from the motors and props due to possible prop and motor
# damage.
#-------------------------------------------------------------------------------------------
if test_case == 1:
print "TESTCASE 1: Check props are spinning as expected"
for esc in self.esc_list:
print "%s prop should rotate %s." % (esc.name, "anti-clockwise" if esc.motor_rotation == self.MOTOR_ROTATION_ACW else "clockwise")
#-----------------------------------------------------------------------------------
# Get the prop up to the configured spin rate. Sleep for 5s then stop and move
# on to the next prop.
#-----------------------------------------------------------------------------------
esc.set(hover_pwm)
time.sleep(5)
esc.set(stfu_pwm)
return
#-------------------------------------------------------------------------------------------
# END TESTCASE 1 CODE: spin up each blade individually for 5s each and check they all turn
# the right way
#-------------------------------------------------------------------------------------------
#===========================================================================================
# OK, we're in flight mode, better get on with it
#===========================================================================================
self.keep_looping = True
edx_target = 0.0
edy_target = 0.0
edz_target = 0.0
evx_target = 0.0
evy_target = 0.0
evz_target = 0.0
eyr_target = 0.0
ya_target = 0.0
qdx_input = 0.0
qdy_input = 0.0
qdz_input = 0.0
qvx_input = 0.0
qvy_input = 0.0
qvz_input = 0.0
edx_fuse = 0.0
edy_fuse = 0.0
edz_fuse = 0.0
evx_fuse = 0.0
evy_fuse = 0.0
evz_fuse = 0.0
qdx_fuse = 0.0
qdy_fuse = 0.0
qdz_fuse = 0.0
qvx_fuse = 0.0
qvy_fuse = 0.0
qvz_fuse = 0.0
#===========================================================================================
# Tuning: Set up the PID gains - some are hard coded mathematical approximations, some come
# from the CLI parameters to allow for tuning - 12 in all!
# - Quad X axis distance
# - Quad Y axis distance
# - Quad Z axis distance
# - Quad X axis velocity
# - Quad Y axis velocity
# - Quad Z axis velocity
# - Pitch angle
# - Pitch rotation rate
# - Roll angle
# - Roll rotation rate
# - Yaw angle
# - Yaw rotation rate
#===========================================================================================
#-------------------------------------------------------------------------------------------
# The quad X axis PID controls fore / aft distance
#-------------------------------------------------------------------------------------------
PID_QDX_P_GAIN = hdp_gain
PID_QDX_I_GAIN = hdi_gain
PID_QDX_D_GAIN = hdd_gain
#-------------------------------------------------------------------------------------------
# The quad Y axis PID controls left / right distance
#-------------------------------------------------------------------------------------------
PID_QDY_P_GAIN = hdp_gain
PID_QDY_I_GAIN = hdi_gain
PID_QDY_D_GAIN = hdd_gain
#-------------------------------------------------------------------------------------------
# The quad Z axis PID controls up / down distance
#-------------------------------------------------------------------------------------------
PID_QDZ_P_GAIN = vdp_gain
PID_QDZ_I_GAIN = vdi_gain
PID_QDZ_D_GAIN = vdd_gain
#-------------------------------------------------------------------------------------------
# The quad X axis speed PID controls fore / aft speed
#-------------------------------------------------------------------------------------------
PID_QVX_P_GAIN = hvp_gain
PID_QVX_I_GAIN = hvi_gain
PID_QVX_D_GAIN = hvd_gain
#-------------------------------------------------------------------------------------------
# The quad Y axis speed PID controls left / right speed
#-------------------------------------------------------------------------------------------
PID_QVY_P_GAIN = hvp_gain
PID_QVY_I_GAIN = hvi_gain
PID_QVY_D_GAIN = hvd_gain
#-------------------------------------------------------------------------------------------
# The quad Z axis speed PID controls up / down speed
#-------------------------------------------------------------------------------------------
PID_QVZ_P_GAIN = vvp_gain
PID_QVZ_I_GAIN = vvi_gain
PID_QVZ_D_GAIN = vvd_gain
#-------------------------------------------------------------------------------------------
# The pitch angle PID controls stable angles around the Y-axis
#-------------------------------------------------------------------------------------------
PID_PA_P_GAIN = 2.0 # pap_gain
PID_PA_I_GAIN = 0.0 # pai_gain
PID_PA_D_GAIN = 0.0 # pad_gain
#-------------------------------------------------------------------------------------------
# The pitch rate PID controls stable rotation rate around the Y-axis
#-------------------------------------------------------------------------------------------
PID_PR_P_GAIN = prp_gain
PID_PR_I_GAIN = pri_gain
PID_PR_D_GAIN = prd_gain
#-------------------------------------------------------------------------------------------
# The roll angle PID controls stable angles around the X-axis
#-------------------------------------------------------------------------------------------
PID_RA_P_GAIN = 2.0 # rap_gain
PID_RA_I_GAIN = 0.0 # rai_gain
PID_RA_D_GAIN = 0.0 # rad_gain
#-------------------------------------------------------------------------------------------
# The roll rate PID controls stable rotation rate around the X-axis
#-------------------------------------------------------------------------------------------
PID_RR_P_GAIN = rrp_gain
PID_RR_I_GAIN = rri_gain
PID_RR_D_GAIN = rrd_gain
#-------------------------------------------------------------------------------------------
# The yaw angle PID controls stable angles around the Z-axis
#-------------------------------------------------------------------------------------------
PID_YA_P_GAIN = 8.0 # yap_gain
PID_YA_I_GAIN = 0.0 # yai_gain
PID_YA_D_GAIN = 0.0 # yad_gain
#-------------------------------------------------------------------------------------------
# The yaw rate PID controls stable rotation speed around the Z-axis
#-------------------------------------------------------------------------------------------
PID_YR_P_GAIN = yrp_gain
PID_YR_I_GAIN = yri_gain
PID_YR_D_GAIN = yrd_gain
#-------------------------------------------------------------------------------------------
# Start the X, Y (horizontal) and Z (vertical) distance PID
#-------------------------------------------------------------------------------------------
qdx_pid = PID(PID_QDX_P_GAIN, PID_QDX_I_GAIN, PID_QDX_D_GAIN)
qdy_pid = PID(PID_QDY_P_GAIN, PID_QDY_I_GAIN, PID_QDY_D_GAIN)
qdz_pid = PID(PID_QDZ_P_GAIN, PID_QDZ_I_GAIN, PID_QDZ_D_GAIN)
#-------------------------------------------------------------------------------------------
# Start the X, Y (horizontal) and Z (vertical) velocity PIDs
#-------------------------------------------------------------------------------------------
qvx_pid = PID(PID_QVX_P_GAIN, PID_QVX_I_GAIN, PID_QVX_D_GAIN)
qvy_pid = PID(PID_QVY_P_GAIN, PID_QVY_I_GAIN, PID_QVY_D_GAIN)
qvz_pid = PID(PID_QVZ_P_GAIN, PID_QVZ_I_GAIN, PID_QVZ_D_GAIN)
#-------------------------------------------------------------------------------------------
# Start the pitch, roll and yaw angle PIDs - note the different PID class for yaw.
#-------------------------------------------------------------------------------------------
pa_pid = PID(PID_PA_P_GAIN, PID_PA_I_GAIN, PID_PA_D_GAIN)
ra_pid = PID(PID_RA_P_GAIN, PID_RA_I_GAIN, PID_RA_D_GAIN)
ya_pid = YAW_PID(PID_YA_P_GAIN, PID_YA_I_GAIN, PID_YA_D_GAIN)
#-------------------------------------------------------------------------------------------
# Start the pitch, roll and yaw rotation rate PIDs
#-------------------------------------------------------------------------------------------
pr_pid = PID(PID_PR_P_GAIN, PID_PR_I_GAIN, PID_PR_D_GAIN)
rr_pid = PID(PID_RR_P_GAIN, PID_RR_I_GAIN, PID_RR_D_GAIN)
yr_pid = PID(PID_YR_P_GAIN, PID_YR_I_GAIN, PID_YR_D_GAIN)
#-------------------------------------------------------------------------------------------
# Set up the constants for motion fusion used if we have lateral and vertical distance / velocity
# sensors.
# - vvf, hvf, vdf, hdf flags set true for fusion to be triggered
# - fusion_tau used for the fusion complementary filter
#-------------------------------------------------------------------------------------------
vvf = False
hvf = False
vdf = False
hdf = False
fusion_tau = 10 / fusion_rate
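#-------------------------------------------------------------------------------------------
# With fusion_rate = 10, fusion_tau works out at 1 second. Worked example (my numbers): when
# a full set of fused data arrives roughly every 0.1s, fusion_fraction = 1 / (1 + 0.1) = 0.91
# in the complementary filter further below, so about 9% of each fused GLL / video estimate
# is blended into the integrated IMU values per fusion pass.
#-------------------------------------------------------------------------------------------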
#------------------------------------------------------------------------------------------
# Set the props spinning at their base rate to ensure the initial kick-start doesn't get
# spotted by the sensors and mess up the flight thereafter. spin_pwm is determined by running
# testcase 1 multiple times, incrementing -h slowly until a level of PWM is found where all
# props just spin. This depends on the firmware in the ESCs.
#------------------------------------------------------------------------------------------
print "Starting up the motors..."
for esc in self.esc_list:
esc.set(spin_pwm)
#-------------------------------------------------------------------------------------------
# Initialize the base setting of earth frame take-off height - i.e. the vertical distance from
# the height sensor or the take-off platform / leg height if no sensor is available.
#-------------------------------------------------------------------------------------------
eftoh = 0.0
#-------------------------------------------------------------------------------------------
# Get an initial take-off height
#-------------------------------------------------------------------------------------------
g_dist = 0.0
if self.gll_installed:
print "Couple of seconds to let the LiDAR settle..."
for ii in range(2 * fusion_rate):
time.sleep(1 / fusion_rate)
try:
g_dist = gll.read()
except ValueError as e:
break
eftoh += g_dist
eftoh /= (2 * fusion_rate)
#-------------------------------------------------------------------------------------------
# The distance from the ground to the GLLv3 can't be measured accurately; hard code it.
#-------------------------------------------------------------------------------------------
if i_am_zoe:
eftoh = 0.04 # meters
elif i_am_penelope:
eftoh = 0.18 # meters
else:
assert i_am_hermione, "Hey, I'm not supported"
eftoh = 0.23 # meters
#-------------------------------------------------------------------------------------------
# Set up the GLL base values for the very rare case that g_* don't get set up (as they always
# should) by gll.read() down in the core.
#-------------------------------------------------------------------------------------------
g_distance = eftoh
g_velocity = 0.0
#-------------------------------------------------------------------------------------------
# Set up the video macro-block parameters
# Video is supported up to 1080p @ 30Hz but is restricted by the speed of macro-block processing.
#-------------------------------------------------------------------------------------------
vmp = None
vmpt = 0.0
pvmpt = 0.0
if self.camera_installed:
print "Couple of seconds to let the video settle..."
global frame_width
global frame_height
if i_am_penelope: # RPi 3B+
frame_width = 400 # an exact multiple of mb_size (16)
elif i_am_hermione: # RPi 3B
frame_width = 320 # an exact multiple of mb_size (16)
elif i_am_zoe: # RPi 0W
frame_width = 240 # an exact multiple of mb_size (16)
frame_height = frame_width
frame_rate = fusion_rate
video_update = False
#------------------------------------------------------------------------------------------
# Scale is the conversion from macro-blocks to meters at a given height.
# - V1 camera angle of view (aov): 54 x 41 degrees
# - V2 camera angle of view (aov): 62.2 x 48.8 degrees.
# Because we're shooting a 320 x 320 video with a V2 camera, this means a macro-block is
# 2 x height (h) x tan ( aov / 2) / 320 meters:
#
# ^
# /|\
# / | \
# / | \
# / h \
# / | \
# / | \
# / | \
# /_______v_______\
#
# \______/V\______/
#
# aov = 48.8 degrees
#
# The macro-block vector is the movement in pixels between frames. This is deduced from the
# fact each vector can only be between +/- 128 in X and Y, which allows for shifts of up to
# +/- 2048 pixels in a frame - which seems reasonable given the h.264 compression.
#
# Testing has proven this true - all errors are just a percent or so - well within the
# scope of the "nut behind the wheel" error.
#
# scale just needs to be multiplied by (macro-block shift x height) to produce the increment of
# horizontal movement in meters.
#------------------------------------------------------------------------------------------
camera_version = 2
aov = math.radians(48.8 if camera_version == 2 else 41)
scale = 2 * math.tan(aov / 2) / frame_width
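#------------------------------------------------------------------------------------------
# Worked example (my arithmetic, not from the original): with the V2 camera (aov = 48.8
# degrees) and frame_width = 320, scale = 2 * tan(24.4 degrees) / 320 = 0.0028, so at 1m
# height a one pixel macro-block shift corresponds to roughly 2.8mm of lateral movement.
#------------------------------------------------------------------------------------------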
#---------------------------------------------------------------------------------------
# Setup a shared memory based data stream for the PiCamera video motion output
#---------------------------------------------------------------------------------------
os.mkfifo("/dev/shm/video_stream")
video_process = subprocess.Popen(["python", __file__, "VIDEO", str(frame_width), str(frame_height), str(frame_rate)], preexec_fn = Daemonize)
while True:
try:
video_fifo = io.open("/dev/shm/video_stream", mode="rb")
except:
continue
else:
break
#---------------------------------------------------------------------------------------
# Register fd for polling
#---------------------------------------------------------------------------------------
video_fd = video_fifo.fileno()
poll.register(video_fd, select.POLLIN | select.POLLPRI)
logger.warning("Video @, %d, %d, pixels, %d, fps", frame_width, frame_height, frame_rate)
#--------------------------------------------------------------------------------------------
# Last chance to change your mind about the flight if all's ok so far
#--------------------------------------------------------------------------------------------
rtg = "" if rc_control else raw_input("Ready when you are!")
if len(rtg) != 0:
print "OK, I'll skip at the next possible opportunity."
self.keep_looping = False
print ""
print "################################################################################"
print "# #"
print "# Thunderbirds are go! #"
print "# #"
print "################################################################################"
print ""
################################### INITIAL IMU READINGS ###################################
#-------------------------------------------------------------------------------------------
# Get IMU takeoff info.
# Note the use of qr? as gyrometer results (i.e. rotation); qg? is gravity.
#-------------------------------------------------------------------------------------------
mpu6050.flushFIFO()
qax = 0.0
qay = 0.0
qaz = 0.0
qrx = 0.0
qry = 0.0
qrz = 0.0
sigma_dt = 0.0
loops = 0
while sigma_dt < 1.0: # seconds
time.sleep(FULL_FIFO_BATCHES / sampling_rate)
nfb = mpu6050.numFIFOBatches()
ax, ay, az, rx, ry, rz, dt = mpu6050.readFIFO(nfb)
loops += 1
sigma_dt += dt
qax += ax
qay += ay
qaz += az
qrx += rx
qry += ry
qrz += rz
qax /= loops
qay /= loops
qaz /= loops
qrx /= loops
qry /= loops
qrz /= loops
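#-------------------------------------------------------------------------------------------
# Log the IMU die temperature. As far as I can tell, raw / 333.86 + 21 follows the MPU-9250
# register map formula for converting the raw reading to degrees C; the constants are taken
# as given.
#-------------------------------------------------------------------------------------------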
temp = mpu6050.readTemperature()
logger.critical("IMU core temp (start): ,%f", temp / 333.86 + 21.0)
#-------------------------------------------------------------------------------------------
# Feed back the gyro offset calibration
#-------------------------------------------------------------------------------------------
mpu6050.setGyroOffsets(qrx, qry, qrz)
#-------------------------------------------------------------------------------------------
# Read the IMU acceleration to obtain angles and gravity.
#-------------------------------------------------------------------------------------------
qax, qay, qaz, qrx, qry, qrz = mpu6050.scaleSensors(qax, qay, qaz, qrx, qry, qrz)
#-------------------------------------------------------------------------------------------
# Calculate the angles - ideally takeoff should be on a horizontal surface but a few degrees
# here or there won't matter.
#-------------------------------------------------------------------------------------------
pa, ra = GetRotationAngles(qax, qay, qaz)
ya = 0.0
apa, ara = GetAbsoluteAngles(qax, qay, qaz)
aya = 0.0
aya_fused = 0.0 # used for compass fusion
apa_increment = 0.0
ara_increment = 0.0
aya_increment = 0.0
#-------------------------------------------------------------------------------------------
# Get the value for gravity.
#-------------------------------------------------------------------------------------------
egx, egy, egz = RotateVector(qax, qay, qaz, -pa, -ra, -ya)
eax = egx
eay = egy
eaz = egz
#-------------------------------------------------------------------------------------------
# Set up and prime the Butterworth filters - 0.1Hz 8th order, primed with the stable gravity values measured above.
#-------------------------------------------------------------------------------------------
bfx = BUTTERWORTH(motion_rate, 0.1, 8, egx)
bfy = BUTTERWORTH(motion_rate, 0.1, 8, egy)
bfz = BUTTERWORTH(motion_rate, 0.1, 8, egz)
#-------------------------------------------------------------------------------------------
# The tilt ratio is used to compensate sensor height (and thus velocity) for the fact the
# sensors are leaning.
#
# The tilt ratio is derived from cos(tilt angle):
# - tilt angle a = arctan(t) where t = sqrt(x*x + y*y) / z
# - cos(a) = cos(arctan(t)) = 1 / sqrt(1 + t*t)
# This all collapses down to the following. 0 <= tilt ratio <= 1
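#
# Worked example (my numbers): at a 10 degree tilt the ratio is cos(10 degrees) = 0.985, so a
# 1.00m range reading corresponds to a true vertical height of roughly 0.985m.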
#-------------------------------------------------------------------------------------------
tilt_ratio = qaz / egz
eftoh *= tilt_ratio
#-------------------------------------------------------------------------------------------
# Log the critical parameters from this warm-up: the take-off surface tilt, and gravity.
# Note that some of the variables used above are used in the main processing loop. Messing
# with the above code can have very unexpected effects in flight.
#-------------------------------------------------------------------------------------------
logger.warning("pitch, %f, roll, %f", math.degrees(pa), math.degrees(ra))
logger.warning("egx, %f, egy, %f, egz %f", egx, egy, egz)
logger.warning("based upon %d samples", sigma_dt * sampling_rate)
logger.warning("EFTOH:, %f", eftoh)
#-------------------------------------------------------------------------------------------
# Prime the direction vector of the earth's magnetic core to provide long term yaw stability.
#-------------------------------------------------------------------------------------------
mgx = 0.0
mgy = 0.0
mgz = 0.0
cya = 0.0
cya_base = 0.0
initial_orientation = 0.0
if self.compass_installed:
#---------------------------------------------------------------------------------------
# Take 100 samples at the sampling rate
#---------------------------------------------------------------------------------------
mgx_ave = 0.0
mgy_ave = 0.0
mgz_ave = 0.0
for ii in range(100):
mgx, mgy, mgz = mpu6050.readCompass()
mgx_ave += mgx
mgy_ave += mgy
mgz_ave += mgz
time.sleep(1 / sampling_rate)
mgx = mgx_ave / 100
mgy = mgy_ave / 100
mgz = mgz_ave / 100
#---------------------------------------------------------------------------------------
# Rotate compass readings back to the earth plane and wrap to +/- pi radians.
# Local magnetic declination is -1° 5'. Declination is the angle between true and magnetic
# north i.e. true + declination = magnetic
#---------------------------------------------------------------------------------------
cay, cax, caz = RotateVector(mgy, -mgx, -mgz, -pa, -ra, 0.0)
initial_orientation = (-math.atan2(cax, cay) + math.radians(1 + 5/60) + math.pi) % (2 * math.pi) - math.pi
cya_base = math.atan2(cax, cay)
logger.critical("Initial GPS orientation:, %f" % math.degrees(initial_orientation))
logger.critical("Initial yaw:, %f." % (math.degrees(cya_base)))
######################################### GO GO GO! ########################################
if self.autopilot_installed:
#---------------------------------------------------------------------------------------
# Start the autopilot - use the compass angle plus the magnetic declination angle (1° 5') to pass
# through the take-off orientation angle wrt GPS / true north
#---------------------------------------------------------------------------------------
app = AutopilotManager(self.sweep_installed, self.gps_installed, self.compass_installed, initial_orientation, file_control, gps_control, fp_filename)
autopilot_fifo = app.autopilot_fifo
autopilot_fd = autopilot_fifo.fileno()
elif not rc_control:
#---------------------------------------------------------------------------------------
# Register the flight plan with the authorities
#---------------------------------------------------------------------------------------
try:
fp = FlightPlan(self, fp_filename)
except Exception, err:
print "%s error: %s" % (fp_filename, err)
return
#-------------------------------------------------------------------------------------------
# Set up the various timing constants and stats.
#-------------------------------------------------------------------------------------------
start_flight = time.time()
motion_dt = 0.0
fusion_dt = 0.0
gll_dt = 0.0
rc_dt = 0.0
sampling_loops = 0
motion_loops = 0
fusion_loops = 0
gll_loops = 0
video_loops = 0
autopilot_loops = 0
gll_dr_interrupts = 0
gll_misses = 0
#-------------------------------------------------------------------------------------------
# Diagnostic log header
#-------------------------------------------------------------------------------------------
if diagnostics:
pwm_header = "FL PWM, FR PWM, BL PWM, BR PWM" if i_am_zoe else "FLT PWM, FRT PWM, BLT PWM, BRT PWM, FLB PWM, FRB PWM, BLB PWM, BRB PWM"
logger.warning("time, dt, loops, " +
"temperature, " +
"mgx, mgy, mgz, cya, " +
"edx_fuse, edy_fuse, edz_fuse, " +
"evx_fuse, evy_fuse, evz_fuse, " +
"edx_target, edy_target, edz_target, " +
"evx_target, evy_target, evz_target, " +
"qrx, qry, qrz, " +
"qax, qay, qaz, " +
"eax, eay, eaz, " +
"qgx, qgy, qgz, " +
"egx, egy, egz, " +
"pitch, roll, yaw, cya, ya_fused, " +
# "qdx_input, qdy_input, qdz_input, " +
# "qdx_target, qdy_target, qdz_target' " +
# "qvx_input, qvy_input, qvz_input, " +
# "qvx_target, qvy_target, qvz_target, " +
# "qvz_out, " +
# "pa_input, ra_input, ya_input, " +
# "pa_target, ra_target, ya_target, " +
# "pr_input, rr_input, yr_input, " +
# "pr_target, rr_target, yr_target, " +
# "pr_out, rr_out, yr_out, " +
# "qdx_input, qdx_target, qvx_input, qvx_target, pa_input, pa_target, pr_input, pr_target, pr_out, " +
# "qdy_input, qdy_target, qvy_input, qvy_target, ra_input, ra_target, rr_input, rr_target, rr_out, " +
# "qdz_input, qdz_target, qvz_input, qvz_target, qvz_out, " +
"ya_input, ya_target, yr_input, yr_target, yr_out, " +
pwm_header)
#-------------------------------------------------------------------------------------------
# Flush the video motion FIFO - historically sweep and GPS fed into here too, hence the OTT
# way of emptying what's now just video
#-------------------------------------------------------------------------------------------
if self.camera_installed:
vmp = VideoManager(video_fifo, 0)
video_flush = 0
flushing = True
while flushing:
results = poll.poll(0.0)
for fd, event in results:
if fd == video_fd:
video_flush += vmp.flush()
else:
if len(results) == 0:
flushing = False
else:
print "Video Flush: %d" % video_flush
vmp = None
#-------------------------------------------------------------------------------------------
# Only once the video FIFO has been flushed can the autopilot / rc fd be added to the polling.
#-------------------------------------------------------------------------------------------
if self.autopilot_installed:
poll.register(autopilot_fd, select.POLLIN | select.POLLPRI)
elif rc_control:
#---------------------------------------------------------------------------------------
# Accept RC connection and send go-go-go
#---------------------------------------------------------------------------------------
rc_fd = self.rc.connect()
poll.register(rc_fd, select.POLLIN | select.POLLPRI)
#-------------------------------------------------------------------------------------------
# Flush the IMU FIFO and enable the FIFO overflow interrupt
#-------------------------------------------------------------------------------------------
GPIO.event_detected(GPIO_FIFO_OVERFLOW_INTERRUPT)
mpu6050.flushFIFO()
mpu6050.enableFIFOOverflowISR()
#===========================================================================================
#
# Motion and PID processing loop naming conventions
#
# qd* = quad frame distance
# qv* = quad frame velocity
# qa? = quad frame acceleration
# qg? = quad frame gravity
# qr? = quad frame rotation
# ea? = earth frame acceleration
# eg? = earth frame gravity
# ua? = euler angles between frames
# ur? = euler rotation between frames
# a?a = absolute angles between frames
#
#===========================================================================================
while self.keep_looping:
############################### SENSOR INPUT SCHEDULING ################################
#---------------------------------------------------------------------------------------
# Check on the number of IMU batches already stashed in the FIFO, and if not enough,
# check autopilot and video, and ultimately sleep.
#---------------------------------------------------------------------------------------
nfb = mpu6050.numFIFOBatches()
if nfb >= self.FIFO_MAXIMUM:
logger.critical("ABORT: FIFO too full risking overflow: %d.", nfb)
if vmp != None:
logger.critical(" Next VFP phase: %d", vmp.phase)
break
if nfb < self.FIFO_MINIMUM:
#-----------------------------------------------------------------------------------
# Assume that initially we have time to wait for external sensors
#-----------------------------------------------------------------------------------
timeout = (self.FIFO_MINIMUM - nfb) / sampling_rate
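#-----------------------------------------------------------------------------------
# For example (my numbers, using the 500Hz / FIFO_MINIMUM = 7 figures above): with an
# empty FIFO the poll timeout below is 7 / 500 = 14ms, the time until enough batches
# have accumulated for the next motion processing pass.
#-----------------------------------------------------------------------------------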
#-----------------------------------------------------------------------------------
# We have some spare time before we need to run the next motion processing; see if
# there's any processing we can do. First, have we already got a video frame we can
# continue processing?
#-----------------------------------------------------------------------------------
if vmp != None:
result = vmp.process()
if result != None:
vvx, vvy = result
vvx *= scale
vvy *= scale
video_update = True
vmp = None
#-------------------------------------------------------------------------------
# If we've done a video loop, still check the other sensors, but don't sleep polling.
# Previously, this was a 'continue'; this gives better prioritization of these
# inputs but risks completely starving the inputs below if video processing takes too long.
#-------------------------------------------------------------------------------
if True:
timeout = 0.0
else:
continue
#-----------------------------------------------------------------------------------
# Check for other external data sources with lower priority or performance impact.
#-----------------------------------------------------------------------------------
try:
results = poll.poll(timeout * 1000)
except:
logger.critical("ABORT: poll error")
break
for fd, event in results:
if self.autopilot_installed and fd == autopilot_fd:
#---------------------------------------------------------------------------
# Run the Autopilot Processor to get the latest stage of the flight plan.
#---------------------------------------------------------------------------
autopilot_loops += 1
evx_target, evy_target, evz_target, state_name, self.keep_looping = app.read()
logger.critical(state_name)
if "PROXIMITY" in state_name:
if not GPIO.input(GPIO_BUZZER):
GPIO.output(GPIO_BUZZER, GPIO.HIGH)
elif GPIO.input(GPIO_BUZZER):
GPIO.output(GPIO_BUZZER, GPIO.LOW)
if rc_control and fd == rc_fd:
#---------------------------------------------------------------------------
# Get the WiFi targets etc e.g. make sure keep_looping from rc is triggered
# somehow between flights
#---------------------------------------------------------------------------
evx_target, evy_target, evz_target, eyr_target, rc_status, rc_beep = self.rc.read()
if rc_status != self.rc_status:
self.rc_status = rc_status
logger.critical(rc_status_name[rc_status])
if self.rc_status == RC_DONE or self.rc_status == RC_ABORT:
self.keep_looping = False
rc_dt = 0.0
if self.camera_installed and fd == video_fd and vmp == None and not video_update:
#---------------------------------------------------------------------------
# Run the Video Motion Processor.
#---------------------------------------------------------------------------
vmp_dt = vmpt - pvmpt
pvmpt = vmpt
apa_fusion = apa_increment
ara_fusion = ara_increment
aya_fusion = aya_increment
apa_increment = 0.0
ara_increment = 0.0
aya_increment = 0.0
try:
vmp = VideoManager(video_fifo, aya_fusion)
'''
#----------------------------------------------------------------------
# Annoyingly, despite having flushed the video stream just above prior to
# takeoff, it only seems to flush the last 32 MBs max meaning there are
# several backlogged at this point. vmp_dt == 0.0 signifies backlog which
# all gets cleared prior to any significant processing below. I don't
# like this, but can't see how to stop it.
#----------------------------------------------------------------------
'''
if vmp_dt == 0.0:
vmp.flush()
vmp = None
else:
video_loops += 1
vmp.process()
except ValueError as e:
#-----------------------------------------------------------------------
# First pass of the video frame shows no movement detected, and thus no
# further processing.
#-----------------------------------------------------------------------
vmp = None
#-----------------------------------------------------------------------------------
# We had free time, do we still? Better check.
#-----------------------------------------------------------------------------------
continue
####################################### IMU FIFO #######################################
#---------------------------------------------------------------------------------------
# Before proceeding further, check the FIFO overflow interrupt to ensure we didn't sleep
# too long
#---------------------------------------------------------------------------------------
'''
if GPIO.event_detected(GPIO_FIFO_OVERFLOW_INTERRUPT):
logger.critical("ABORT: FIFO overflow.")
break
'''
#---------------------------------------------------------------------------------------
# Power brownout check - doesn't work on 3B onwards
#---------------------------------------------------------------------------------------
'''
if GPIO.event_detected(GPIO_POWER_BROWN_OUT_INTERRUPT):
logger.critical("ABORT: Brown-out.")
break
'''
#---------------------------------------------------------------------------------------
# Now get the batch of averaged data from the FIFO.
#---------------------------------------------------------------------------------------
try:
qax, qay, qaz, qrx, qry, qrz, motion_dt = mpu6050.readFIFO(nfb)
except IOError as err:
logger.critical("ABORT: IMU problem.")
for arg in err.args:
logger.critical(" %s", arg)
break
#---------------------------------------------------------------------------------------
# Sort out units and calibration for the incoming data
#---------------------------------------------------------------------------------------
qax, qay, qaz, qrx, qry, qrz = mpu6050.scaleSensors(qax,
qay,
qaz,
qrx,
qry,
qrz)
#---------------------------------------------------------------------------------------
# Track the number of motion loops and sampling loops. motion_dt, on which they are based,
# is the core timing provided by the IMU and is used for all timing events later such
# as integration and the PID Integral and Differential factors.
#---------------------------------------------------------------------------------------
motion_loops += 1
sampling_loops += motion_dt * sampling_rate
fusion_dt += motion_dt
gll_dt += motion_dt
rc_dt += motion_dt
vmpt += motion_dt
#---------------------------------------------------------------------------------------
# If we're on RC control, and we've heard nothing from it in 0.5 seconds, abort. The RC
# sends requests at 5Hz.
#AB: Ideally this should be an ordered landing by permanently overriding the flight targets
#AB: but this'll do for now.
#---------------------------------------------------------------------------------------
if rc_control and rc_dt > 0.5: # seconds
logger.critical("ABORT: RC lost")
break
################################## ANGLES PROCESSING ###################################
#---------------------------------------------------------------------------------------
# Euler angle fusion: Merge the 'integral' of the previous Euler rotation rates with
# the noisy accelerometer current values. Keep yaw within +/- pi radians
#---------------------------------------------------------------------------------------
urp, urr, ury = Body2EulerRates(qry, qrx, qrz, pa, ra)
pa += urp * motion_dt
ra += urr * motion_dt
ya += ury * motion_dt
ya = (ya + math.pi) % (2 * math.pi) - math.pi
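#---------------------------------------------------------------------------------------
# (x + pi) % (2 * pi) - pi maps any angle into the range [-pi, pi); e.g. 190 degrees
# (3.32 rad) wraps around to -170 degrees (-2.97 rad). The same idiom is used wherever
# yaw is accumulated below.
#---------------------------------------------------------------------------------------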
upa, ura = GetRotationAngles(qax, qay, qaz)
atau_fraction = atau / (atau + motion_dt)
pa = atau_fraction * pa + (1 - atau_fraction) * upa
ra = atau_fraction * ra + (1 - atau_fraction) * ura
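#---------------------------------------------------------------------------------------
# Worked example (my numbers - atau comes from the CLI): with atau = 0.5s and motion_dt of
# roughly 1/75s, atau_fraction = 0.5 / 0.513 = 0.974, so about 2.6% of the accelerometer
# derived angle is blended into the gyro-integrated angle each motion loop.
#---------------------------------------------------------------------------------------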
#---------------------------------------------------------------------------------------
# Absolute angle fusion: Merge the 'integral' of the gyro rotation rates with
# the noisy accelerometer current values. Keep yaw within +/- pi radians
#---------------------------------------------------------------------------------------
apa += qry * motion_dt
ara += qrx * motion_dt
aya += qrz * motion_dt
aya = (aya + math.pi) % (2 * math.pi) - math.pi
upa, ura = GetAbsoluteAngles(qax, qay, qaz)
atau_fraction = atau / (atau + motion_dt)
apa = atau_fraction * apa + (1 - atau_fraction) * upa
ara = atau_fraction * ara + (1 - atau_fraction) * ura
apa_increment += qry * motion_dt
ara_increment += qrx * motion_dt
aya_increment += qrz * motion_dt
'''
#AB! If apa or ara > 90 degrees, abort? The problem here is she then falls on her side which
#AB! may be worse than flipping. Is it better to have a combination of acceleration and gll_installed
#AB! both suggesting upside down?
'''
############################### IMU VELOCITY / DISTANCE ################################
#---------------------------------------------------------------------------------------
# Low pass Butterworth filter to account for long term drift in the IMU due to temperature
# changes - this happens significantly in a cold environment.
# Note the butterworth can be disabled by deleting one surrounding pair of '''.
#---------------------------------------------------------------------------------------
'''
eax, eay, eaz = RotateVector(qax, qay, qaz, -pa, -ra, -ya)
egx = bfx.filter(eax)
egy = bfy.filter(eay)
egz = bfz.filter(eaz)
'''
qgx, qgy, qgz = RotateVector(egx, egy, egz, pa, ra, ya)
#---------------------------------------------------------------------------------------
# The tilt ratio is the ratio of gravity measured in the quad-frame Z axis and total gravity.
# It's used to compensate for LiDAR height (and thus velocity) for the fact the laser may
# not be pointing directly vertically down.
#
# - tilt angle a = arctan(t) where t = sqrt(x*x + y*y) / z
# - compensated height ratio = cos(a) = cos(arctan(t)) = 1 / sqrt(1 + t*t)
#
# http://www.rapidtables.com/math/trigonometry/arctan/cos-of-arctan.htm
#
#---------------------------------------------------------------------------------------
tilt_ratio = qgz / egz
#==================== Velocity and Distance Increment processing =======================
#---------------------------------------------------------------------------------------
# Subtract reoriented gravity from the raw accelerometer readings and integrate over time
# to get velocity, all in the quad frame.
#---------------------------------------------------------------------------------------
qvx_increment = (qax - qgx) * GRAV_ACCEL * motion_dt
qvy_increment = (qay - qgy) * GRAV_ACCEL * motion_dt
qvz_increment = (qaz - qgz) * GRAV_ACCEL * motion_dt
qvx_input += qvx_increment
qvy_input += qvy_increment
qvz_input += qvz_increment
#---------------------------------------------------------------------------------------
# Integrate the velocities again to get distance.
#---------------------------------------------------------------------------------------
qdx_increment = qvx_input * motion_dt
qdy_increment = qvy_input * motion_dt
qdz_increment = qvz_input * motion_dt
qdx_input += qdx_increment
qdy_input += qdy_increment
qdz_input += qdz_increment
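#---------------------------------------------------------------------------------------
# Units note (my reading of the code): the scaled qa? values are in g, so multiplying the
# gravity-free acceleration by GRAV_ACCEL gives m/s^2, each velocity increment above is
# therefore in m/s, and the second integration yields distance in meters.
#---------------------------------------------------------------------------------------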
######################## ABSOLUTE DISTANCE / ORIENTATION SENSORS #######################
#---------------------------------------------------------------------------------------
# Read the compass to determine yaw and orientation.
#---------------------------------------------------------------------------------------
if self.compass_installed:
mgx, mgy, mgz = mpu6050.readCompass()
#-----------------------------------------------------------------------------------
# Rotate compass readings back to earth plane and align with gyro rotation direction.
#-----------------------------------------------------------------------------------
cay, cax, caz = RotateVector(mgy, -mgx, -mgz, -pa, -ra, 0.0)
cya = math.atan2(cax, cay)
cya = ((cya - cya_base) + math.pi) % (2 * math.pi) - math.pi
ya_tau = 1.0
ya_fraction = ya_tau / (ya_tau + motion_dt)
aya_fused = ya_fraction * (aya_fused + qrz * motion_dt) + (1 - ya_fraction) * cya
aya_fused = (aya_fused + math.pi) % (2 * math.pi) - math.pi
'''
#AB!--------------------------------------------------------------------------------
#AB! For the moment, yaw fusion of compass and integrated gyro is disabled.
#AB: 1. yaw_control with a setting of a 180 degree yaw change will cause an awful
#AB! mess due to the 'noise' causing the compass to flip backwards and forwards between
#AB! +/- 180.
#AB! 2. Hermione is too heavy, meaning an intentional yaw results in PWM
#AB! overflow - this is very bad as it actually stops the motors; lowering the yaw
#AB! and yaw rate PID gains to prevent this then reduced stability in !yaw_control.
#AB! 3. Worse than the above, the motors seem to produce variable magnetic fields
#AB! such that the compass values shift as the motor rates change. Even in a
#AB! zero-yaw target flight, this shifts the compass by 40 degrees from the
#AB! unpowered motor compass calibration values.
#AB!--------------------------------------------------------------------------------
aya = aya_fused
'''
#=======================================================================================
# Acquire vertical distance (height) first, prioritizing the best sensors,
# Garmin LiDAR-Lite first. We need to get this every motion processing loop so it's always
# up to date at the point we use it for camera lateral tracking.
#=======================================================================================
'''
#AB! Can we get a data ready interrupt working here? Failed so far. Better if so to reduce
#AB! motion processing and, as a result, perhaps get Zoe working. Until that's available,
#AB! the next best option is to only read the GLL when we have video data worth processing.
if GPIO.event_detected(GPIO_GLL_DR_INTERRUPT):
gll_dr_interrupts += 1
'''
if self.gll_installed and video_update:
gll_loops += 1
try:
g_distance = gll.read()
except ValueError as e:
#-------------------------------------------------------------------------------
# Too far or poor reflection (windy wobbles?) for the GLL to work.
#-------------------------------------------------------------------------------
gll_misses += 1
else:
pass
finally:
#-------------------------------------------------------------------------------
# We may have a new value, or may be using the previous one. Either way this is the best
# compromise available, and it is used below for video lateral tracking.
#-------------------------------------------------------------------------------
evz_fuse = ((g_distance * tilt_ratio - eftoh) - edz_fuse) / gll_dt
edz_fuse = g_distance * tilt_ratio - eftoh
gll_dt = 0.0
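#-------------------------------------------------------------------------------
# edz_fuse is the height above the take-off point: the LiDAR range corrected for
# tilt, less the take-off height offset eftoh; evz_fuse is its change since the
# last GLL read, divided by the elapsed gll_dt.
#-------------------------------------------------------------------------------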
#-------------------------------------------------------------------------------
# Set the flags for vertical velocity and distance fusion
#-------------------------------------------------------------------------------
vvf = True
vdf = True
gll_update = True
#=======================================================================================
# Acquire horizontal distance next, again with prioritization of accuracy
#=======================================================================================
#---------------------------------------------------------------------------------------
# If the camera is installed, and we have an absolute height measurement, get the horizontal
# distance and velocity.
#AB: gll_update is set above whenever video_update is set, so the test here is always true.
#---------------------------------------------------------------------------------------
if self.camera_installed and video_update and self.gll_installed and gll_update:
#-----------------------------------------------------------------------------------
# Take the increment of the scaled X and Y distance, and multiply by the height to
# get the absolute position, allowing for the tilt increment.
#-----------------------------------------------------------------------------------
edx_increment = g_distance * tilt_ratio * (vvx + apa_fusion)
edy_increment = g_distance * tilt_ratio * (vvy - ara_fusion)
#-----------------------------------------------------------------------------------
# Un-yaw the video results to get true earth frame direction increments.
#-----------------------------------------------------------------------------------
edx_increment, edy_increment, __ = RotateVector(edx_increment, edy_increment, 0.0, 0.0, 0.0, -ya)
#-----------------------------------------------------------------------------------
# Add the incremental distance to the total distance, and differentiate against time for
# velocity.
#-----------------------------------------------------------------------------------
edx_fuse += edx_increment
edy_fuse += edy_increment
evx_fuse = edx_increment / vmp_dt
evy_fuse = edy_increment / vmp_dt
#-----------------------------------------------------------------------------------
# Set the flags for horizontal distance and velocity fusion
#-----------------------------------------------------------------------------------
hdf = True
hvf = True
video_update = False
gll_update = False
######################################## FUSION ########################################
#---------------------------------------------------------------------------------------
# If we have new full set of data, fuse it.
#---------------------------------------------------------------------------------------
if vvf and vdf and hvf and hdf:
qdx_fuse, qdy_fuse, qdz_fuse = RotateVector(edx_fuse, edy_fuse, edz_fuse, pa, ra, ya)
qvx_fuse, qvy_fuse, qvz_fuse = RotateVector(evx_fuse, evy_fuse, evz_fuse, pa, ra, ya)
fusion_fraction = fusion_tau / (fusion_tau + fusion_dt)
qvx_input = fusion_fraction * qvx_input + (1 - fusion_fraction) * qvx_fuse
qdx_input = fusion_fraction * qdx_input + (1 - fusion_fraction) * qdx_fuse
qvy_input = fusion_fraction * qvy_input + (1 - fusion_fraction) * qvy_fuse
qdy_input = fusion_fraction * qdy_input + (1 - fusion_fraction) * qdy_fuse
qvz_input = fusion_fraction * qvz_input + (1 - fusion_fraction) * qvz_fuse
qdz_input = fusion_fraction * qdz_input + (1 - fusion_fraction) * qdz_fuse
fusion_loops += 1
fusion_dt = 0.0
#-----------------------------------------------------------------------------------
# Clear the flags for vertical distance and velocity fusion
#-----------------------------------------------------------------------------------
hdf = False
hvf = False
vdf = False
vvf = False
########################### VELOCITY / DISTANCE PID TARGETS ############################
if not self.autopilot_installed and not rc_control:
#-----------------------------------------------------------------------------------
# Check the flight plan for earth frame velocity and distance targets.
#-----------------------------------------------------------------------------------
evx_target, evy_target, evz_target, edx_target, edy_target, edz_target = fp.getTargets(motion_dt)
if edz_fuse > edz_target + 0.5:
logger.critical("ABORT: Height breach! %f target, %f actual", edz_target, edz_fuse)
break
#---------------------------------------------------------------------------------------
# Convert earth-frame distance targets to quadcopter frame.
#---------------------------------------------------------------------------------------
edx_target += evx_target * motion_dt
edy_target += evy_target * motion_dt
edz_target += evz_target * motion_dt
qdx_target, qdy_target, qdz_target = RotateVector(edx_target, edy_target, edz_target, pa, ra, ya)
#---------------------------------------------------------------------------------------
# If using RC, integrate the yaw rotation rate target over time to produce the yaw angle target.
#---------------------------------------------------------------------------------------
if rc_control:
qdx_target, qdy_target, qdz_target = RotateVector(edx_target, edy_target, edz_target, pa, ra, 0.0)
ya_target += eyr_target * motion_dt
ya_target = (ya_target + math.pi) % (2 * math.pi) - math.pi
########### QUAD FRAME VELOCITY / DISTANCE / ANGLE / ROTATION PID PROCESSING ###########
#=======================================================================================
# Distance PIDs
#=======================================================================================
[p_out, i_out, d_out] = qdx_pid.Compute(qdx_input, qdx_target, motion_dt)
qvx_target = p_out + i_out + d_out
[p_out, i_out, d_out] = qdy_pid.Compute(qdy_input, qdy_target, motion_dt)
qvy_target = p_out + i_out + d_out
[p_out, i_out, d_out] = qdz_pid.Compute(qdz_input, qdz_target, motion_dt)
qvz_target = p_out + i_out + d_out
if yaw_control and not (abs(evx_target) + abs(evy_target) == 0.0):
#-----------------------------------------------------------------------------------
# Under yaw control, the piDrone only moves forwards, and it's yaw which manages
# turning to face the right direction. Hence force qv?_input and targets such that they
# only do that.
#-----------------------------------------------------------------------------------
qvx_target = math.pow(math.pow(qvx_target, 2) + math.pow(qvy_target, 2), 0.5)
qvy_target = 0.0
'''
'''
#---------------------------------------------------------------------------------------
# Constrain the target velocity to 1.5m/s.
#---------------------------------------------------------------------------------------
MAX_VEL = 1.5
qvx_target = qvx_target if abs(qvx_target) < MAX_VEL else (qvx_target / abs(qvx_target) * MAX_VEL)
qvy_target = qvy_target if abs(qvy_target) < MAX_VEL else (qvy_target / abs(qvy_target) * MAX_VEL)
qvz_target = qvz_target if abs(qvz_target) < MAX_VEL else (qvz_target / abs(qvz_target) * MAX_VEL)
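#---------------------------------------------------------------------------------------
# The idiom x if abs(x) < MAX else (x / abs(x) * MAX) clamps x to +/- MAX while preserving
# its sign; e.g. a target of -2.3 m/s becomes -1.5 m/s.
#---------------------------------------------------------------------------------------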
'''
'''
#=======================================================================================
# Velocity PIDs
#=======================================================================================
[p_out, i_out, d_out] = qvx_pid.Compute(qvx_input, qvx_target, motion_dt)
qax_target = p_out + i_out + d_out
[p_out, i_out, d_out] = qvy_pid.Compute(qvy_input, qvy_target, motion_dt)
qay_target = p_out + i_out + d_out
[p_out, i_out, d_out] = qvz_pid.Compute(qvz_input, qvz_target, motion_dt)
qaz_out = p_out + i_out + d_out
#---------------------------------------------------------------------------------------
# We now need to convert desired acceleration to desired angles before running the angular
# PIDs. Via the right hand rule:
#
# A positive x-axis acceleration (fore) needs a nose-down lean which is a positive
# rotation around the y axis
# A positive y-axis acceleration (port) needs a port-down lean which is a negative
# rotation around the x axis
#
# If yaw control is enabled, the yaw angle target is set such that she's facing the way
# she should be travelling based upon the earth frame velocity targets. If these
# targets are zero, then no yaw happens.
#
# If yaw control is disabled, the yaw angle target is zero - she always points in the
# direction she took off in.
#
# Note this must use atan2 to safely handle division by 0.
#---------------------------------------------------------------------------------------
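            # (The atan2 note applies to the yaw target below, where both earth-frame velocity
            # targets may be zero; pitch / roll use plain atan since each takes a single
            # acceleration value - assumed here to be expressed as a ratio of g.)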
pa_target = math.atan(qax_target)
ra_target = -math.atan(qay_target)
ya_target = ya_target if not yaw_control else (ya_target if (abs(evx_target) + abs(evy_target)) == 0 else math.atan2(evy_target, evx_target))
'''
'''
#---------------------------------------------------------------------------------------
# Constrain the target pitch / roll angle to 30 degrees.
#---------------------------------------------------------------------------------------
MAX_ANGLE = math.pi / 6
pa_target = pa_target if abs(pa_target) < MAX_ANGLE else (pa_target / abs(pa_target) * MAX_ANGLE)
ra_target = ra_target if abs(ra_target) < MAX_ANGLE else (ra_target / abs(ra_target) * MAX_ANGLE)
'''
'''
#======================================================================================
# Angle PIDs
#======================================================================================
[p_out, i_out, d_out] = pa_pid.Compute(apa, pa_target, motion_dt)
pr_target = p_out + i_out + d_out
[p_out, i_out, d_out] = ra_pid.Compute(ara, ra_target, motion_dt)
rr_target = p_out + i_out + d_out
[p_out, i_out, d_out] = ya_pid.Compute(aya, ya_target, motion_dt)
yr_target = p_out + i_out + d_out
'''
'''
#---------------------------------------------------------------------------------------
# Constrain the target yaw rotation rate to 90 degrees / second.
#---------------------------------------------------------------------------------------
MAX_RATE = math.pi / 2 # per-second
yr_target = yr_target if abs(yr_target) < MAX_RATE else (yr_target / abs(yr_target) * MAX_RATE)
'''
'''
#=======================================================================================
# Rotation rate PIDs
#=======================================================================================
#---------------------------------------------------------------------------------------
# START TESTCASE 2 CODE: Override motion processing results; take-off from horizontal
# platform, tune the pr*_gain and rr*_gain PID gains for
# stability.
#---------------------------------------------------------------------------------------
if test_case == 2:
pr_target = 0.0
rr_target = 0.0
yr_target = 0.0
#---------------------------------------------------------------------------------------
# END TESTCASE 2 CODE: Override motion processing results; take-off from horizontal
# platform, tune the pr*_gain and rr*_gain PID gains for
# stability.
#---------------------------------------------------------------------------------------
[p_out, i_out, d_out] = pr_pid.Compute(qry, pr_target, motion_dt)
pr_out = p_out + i_out + d_out
[p_out, i_out, d_out] = rr_pid.Compute(qrx, rr_target, motion_dt)
rr_out = p_out + i_out + d_out
[p_out, i_out, d_out] = yr_pid.Compute(qrz, yr_target, motion_dt)
yr_out = p_out + i_out + d_out
################################## PID OUTPUT -> PWM CONVERSION ########################
#---------------------------------------------------------------------------------------
# Convert the vertical velocity PID output direct to ESC input PWM pulse width.
#---------------------------------------------------------------------------------------
vert_out = hover_pwm + qaz_out
#---------------------------------------------------------------------------------------
# Convert the rotation rate PID outputs direct to ESC input PWM pulse width
#---------------------------------------------------------------------------------------
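            # (The halving is presumably because each rotation rate output is applied twice
            # below - added to one pair of motors and subtracted from the opposing pair.)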
pr_out /= 2
rr_out /= 2
yr_out /= 2
#=======================================================================================
            # PID output distribution: Walk through the ESCs, and apply the PID outputs i.e. update
            # the PWM pulse widths according to where the ESC is sited on the frame
#=======================================================================================
for esc in self.esc_list:
#-----------------------------------------------------------------------------------
# Update all blades' power in accordance with the z error
#-----------------------------------------------------------------------------------
pulse_width = vert_out
#-----------------------------------------------------------------------------------
# For a left downwards roll, the x gyro goes negative, so the PID error is positive,
# meaning PID output is positive, meaning this needs to be added to the left blades
# and subtracted from the right.
#-----------------------------------------------------------------------------------
pulse_width -= (rr_out if esc.motor_location & self.MOTOR_LOCATION_RIGHT else -rr_out)
#-----------------------------------------------------------------------------------
                # For a forward downwards pitch, the y gyro goes positive. The PID error is negative as a
# result, meaning PID output is negative, meaning this needs to be subtracted from the
# front blades and added to the back.
#-----------------------------------------------------------------------------------
pulse_width += (pr_out if esc.motor_location & self.MOTOR_LOCATION_BACK else -pr_out)
#-----------------------------------------------------------------------------------
                # For CW yaw, the z gyro goes negative, so the PID error is positive, meaning PID
                # output is positive, meaning this needs to be added to the ACW (FL and BR) blades and
# subtracted from the CW (FR & BL) blades.
#-----------------------------------------------------------------------------------
pulse_width += (yr_out if esc.motor_rotation == self.MOTOR_ROTATION_CW else -yr_out)
#-----------------------------------------------------------------------------------
# Ensure the props don't stop in poorly tuned scenarios.
#-----------------------------------------------------------------------------------
if pulse_width < spin_pwm:
pulse_width = spin_pwm
'''
logger.critical("PWM BREACH!")
'''
#-----------------------------------------------------------------------------------
# Apply the blended outputs to the esc PWM signal
#-----------------------------------------------------------------------------------
esc.set(int(round(pulse_width)))
'''
esc.set(stfu_pwm)
'''
#---------------------------------------------------------------------------------------
# Diagnostic log - every motion loop
#---------------------------------------------------------------------------------------
if diagnostics:
temp = mpu6050.readTemperature()
pwm_data = "%d, %d, %d, %d" % (self.esc_list[0].pulse_width,
self.esc_list[1].pulse_width,
self.esc_list[2].pulse_width,
self.esc_list[3].pulse_width) if i_am_zoe else "%d, %d, %d, %d, %d, %d, %d, %d" % (self.esc_list[0].pulse_width,
self.esc_list[1].pulse_width,
self.esc_list[2].pulse_width,
self.esc_list[3].pulse_width,
self.esc_list[4].pulse_width,
self.esc_list[5].pulse_width,
self.esc_list[6].pulse_width,
self.esc_list[7].pulse_width)
logger.warning("%f, %f, %d, " % (sampling_loops / sampling_rate, motion_dt, sampling_loops) +
"%f, " % (temp / 333.86 + 21) +
"%f, %f, %f, %f, " % (mgx, mgy, mgz, math.degrees(cya)) +
"%f, %f, %f, " % (edx_fuse, edy_fuse, edz_fuse) +
"%f, %f, %f, " % (evx_fuse, evy_fuse, evz_fuse) +
"%f, %f, %f, " % (edx_target, edy_target, edz_target) +
"%f, %f, %f, " % (evx_target, evy_target, evz_target) +
"%f, %f, %f, " % (qrx, qry, qrz) +
"%f, %f, %f, " % (qax, qay, qaz) +
"%f, %f, %f, " % (eax, eay, eaz) +
"%f, %f, %f, " % (qgx, qgy, qgz) +
"%f, %f, %f, " % (egx, egy, egz) +
"%f, %f, %f, %f, %f, " % (math.degrees(pa), math.degrees(ra), math.degrees(ya), math.degrees(cya), math.degrees(aya_fused)) +
# "%f, %f, %f, " % (qdx_input, qdy_input, qdz_input) +
# "%f, %f, %f, " % (qdx_target, qdy_target, qdz_target) +
# "%f, %f, %f, " % (qvx_input, qvy_input, qvz_input) +
# "%f, %f, %f, " % (qvx_target, qvy_target, qvz_target) +
# "%f, " % qvz_out +
# "%f, %f, %f, " % (math.degrees(apa), math.degrees(ara), math.degrees(aya)) +
# "%f, %f, %f, " % (math.degrees(pa_target), math.degrees(ra_target), math.degrees(ya_target) +
# "%f, %f, %f, " % (math.degrees(qry), math.degrees(qrx), math.degrees(qrz)) +
# "%f, %f, %f, " % (math.degrees(pr_target), math.degrees(rr_target), math.degrees(yr_target)) +
# "%f, %f, %f, " % (math.degrees(pr_out), math.degrees(rr_out), math.degrees(yr_out)) +
# "%f, %f, %f, %f, %f, %f, %f, %f, %d, " % (qdx_input, qdx_target, qvx_input, qvx_target, math.degrees(apa), math.degrees(pa_target), math.degrees(qry), math.degrees(pr_target), pr_out) +
# "%f, %f, %f, %f, %f, %f, %f, %f, %d, " % (qdy_input, qdy_target, qvy_input, qvy_target, math.degrees(ara), math.degrees(ra_target), math.degrees(qrx), math.degrees(rr_target), rr_out) +
# "%f, %f, %f, %f, %d, " % (qdz_input, qdz_target, qvz_input, qvz_target, qaz_out) +
"%f, %f, %f, %f, %d, " % (math.degrees(aya), math.degrees(ya_target), math.degrees(qrz), math.degrees(yr_target), yr_out) +
pwm_data)
logger.critical("Flight time %f", time.time() - start_flight)
logger.critical("Sampling loops: %d", sampling_loops)
logger.critical("Motion processing loops: %d", motion_loops)
logger.critical("Fusion processing loops: %d", fusion_loops)
logger.critical("Autopilot processing loops: %d.", autopilot_loops)
logger.critical("GLL processing loops: %d", gll_loops)
logger.critical("GLL missed: %d", gll_misses)
logger.critical("GLL DR Interrupts: %d.", gll_dr_interrupts)
if sampling_loops != 0:
logger.critical("Video frame rate: %f", video_loops * sampling_rate / sampling_loops )
temp = mpu6050.readTemperature()
logger.critical("IMU core temp (end): ,%f", temp / 333.86 + 21.0)
max_az, min_az, max_gx, min_gx, max_gy, min_gy, max_gz, min_gz, = mpu6050.getStats()
logger.critical("Max Z acceleration: %f", max_az)
logger.critical("Min Z acceleration: %f", min_az)
logger.critical("Max X gyrometer: %f", max_gx)
logger.critical("Min X gyrometer: %f", min_gx)
logger.critical("Max Y gyrometer: %f", max_gy)
logger.critical("Min Y gyrometer: %f", min_gy)
logger.critical("Max Z gyrometer: %f", max_gz)
logger.critical("Min Z gyrometer: %f", min_gz)
#-------------------------------------------------------------------------------------------
# Stop the PWM and FIFO overflow interrupt between flights
#-------------------------------------------------------------------------------------------
for esc in self.esc_list:
esc.set(stfu_pwm)
mpu6050.disableFIFOOverflowISR()
#-------------------------------------------------------------------------------------------
# Stop the buzzer.
#-------------------------------------------------------------------------------------------
GPIO.output(GPIO_BUZZER, GPIO.LOW)
#-------------------------------------------------------------------------------------------
# Unregister poll registrars
#-------------------------------------------------------------------------------------------
if self.autopilot_installed:
poll.unregister(autopilot_fd)
if self.camera_installed:
poll.unregister(video_fd)
if rc_control:
poll.unregister(rc_fd)
self.rc.disconnect()
#-------------------------------------------------------------------------------------------
# Stop the Camera process if it's still running, and clean up the FIFO.
#-------------------------------------------------------------------------------------------
if self.camera_installed:
print "Stopping video... ",
try:
                if video_process.poll() is None:
video_process.send_signal(signal.SIGINT)
video_process.wait()
except KeyboardInterrupt as e:
pass
video_fifo.close()
os.unlink("/dev/shm/video_stream")
print "stopped."
#-------------------------------------------------------------------------------------------
# Stop the autopilot
#-------------------------------------------------------------------------------------------
if self.autopilot_installed:
print "Stopping autopilot... ",
app.cleanup()
print "stopped."
################################################################################################
#
# Shutdown triggered by early Ctrl-C or end of script
#
################################################################################################
def shutdown(self):
#-------------------------------------------------------------------------------------------
# Stop the signal handler
#-------------------------------------------------------------------------------------------
signal.signal(signal.SIGINT, signal.SIG_IGN)
#-------------------------------------------------------------------------------------------
# Stop the blades spinning
#-------------------------------------------------------------------------------------------
for esc in self.esc_list:
esc.set(stfu_pwm)
#-------------------------------------------------------------------------------------------
# Close stats logging file.
#-------------------------------------------------------------------------------------------
file_handler.close()
#-------------------------------------------------------------------------------------------
# Unlock memory we've used from RAM
#-------------------------------------------------------------------------------------------
munlockall()
#-------------------------------------------------------------------------------------------
# Clean up PWM / GPIO, but pause beforehand to give the ESCs time to stop properly
#-------------------------------------------------------------------------------------------
time.sleep(1.0)
PWMTerm()
#-------------------------------------------------------------------------------------------
# Clean up the GPIO FIFO Overflow ISR
#-------------------------------------------------------------------------------------------
GPIOTerm()
#-------------------------------------------------------------------------------------------
# Reset the signal handler to default
#-------------------------------------------------------------------------------------------
signal.signal(signal.SIGINT, signal.SIG_DFL)
sys.exit(0)
####################################################################################################
#
# Signal handler for Ctrl-C => abort cleanly; should really be just a "try: except KeyboardInterrupt:"
#
####################################################################################################
def shutdownSignalHandler(self, signal, frame):
if not self.keep_looping:
self.shutdown()
self.keep_looping = False
####################################################################################################
#
# Interrupt Service Routine for FIFO overflow => abort flight cleanly - RETIRED, JUST POLL NOW
#
####################################################################################################
def fifoOverflowISR(self, pin):
if self.keep_looping:
print "FIFO OVERFLOW, ABORT"
self.keep_looping = False
####################################################################################################
# If we've been called directly, this is the spawned video, GPS, Sweep or autopilot process or a
# misinformed user trying to start the code.
####################################################################################################
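# For example (assumed parent invocation): "python Quadcopter.py VIDEO 320 240 10" would start
# the video macro-block recorder at 320x240 pixels, 10 frames per second.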
if __name__ == '__main__':
if len(sys.argv) >= 2:
#-------------------------------------------------------------------------------------------
# Start the process recording video macro-blocks
#-------------------------------------------------------------------------------------------
if sys.argv[1] == "VIDEO":
assert (len(sys.argv) == 5), "Bad parameters for Video"
frame_width = int(sys.argv[2])
frame_height = int(sys.argv[3])
frame_rate = int(sys.argv[4])
VideoProcessor(frame_width, frame_height, frame_rate)
#-------------------------------------------------------------------------------------------
# Start the process recording GPS
#-------------------------------------------------------------------------------------------
elif sys.argv[1] == "GPS":
assert (len(sys.argv) == 2), "Bad parameters for GPS"
GPSProcessor()
#-------------------------------------------------------------------------------------------
# Start the process recording Sweep
#-------------------------------------------------------------------------------------------
elif sys.argv[1] == "SWEEP":
assert (len(sys.argv) == 2), "Bad parameters for Sweep"
SweepProcessor()
#-------------------------------------------------------------------------------------------
# Start the process recording Autopilot
#-------------------------------------------------------------------------------------------
elif sys.argv[1] == "AUTOPILOT":
assert (len(sys.argv) == 9), "Bad parameters for Autopilot"
sweep_installed = True if (sys.argv[2] == "True") else False
gps_installed = True if (sys.argv[3] == "True") else False
compass_installed = True if (sys.argv[4] == "True") else False
initial_orientation = float(sys.argv[5])
file_control = True if (sys.argv[6] == "True") else False
gps_control = True if (sys.argv[7] == "True") else False
fp_filename = sys.argv[8]
AutopilotProcessor(sweep_installed, gps_installed, compass_installed, initial_orientation, file_control, gps_control, fp_filename)
else:
assert (False), "Invalid process request."
else:
print "If you're trying to run me, use 'sudo python ./qc.py'"
| PiStuffing/Quadcopter | Quadcopter.py | Python | gpl-2.0 | 267,801 |
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
class MassMailController(http.Controller):
@http.route('/mail/track/<int:mail_id>/blank.gif', type='http', auth='none')
def track_mail_open(self, mail_id, **post):
""" Email tracking. """
mail_mail_stats = request.registry.get('mail.mail.statistics')
mail_mail_stats.set_opened(request.cr, SUPERUSER_ID, mail_mail_ids=[mail_id])
response = werkzeug.wrappers.Response()
response.mimetype = 'image/gif'
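        # The payload is a base64-encoded 1x1 transparent GIF served as the tracking pixel.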
response.data = 'R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='.decode('base64')
return response
@http.route(['/mail/mailing/<int:mailing_id>/unsubscribe'], type='http', auth='none')
def mailing(self, mailing_id, email=None, res_id=None, **post):
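        # Unsubscribe handler: find the recipient matching res_id / email on the mailing's
        # model and set its opt_out flag, returning 'OK' on success ('KO' if the mailing
        # no longer exists).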
cr, uid, context = request.cr, request.uid, request.context
MassMailing = request.registry['mail.mass_mailing']
mailing_ids = MassMailing.exists(cr, SUPERUSER_ID, [mailing_id], context=context)
if not mailing_ids:
return 'KO'
mailing = MassMailing.browse(cr, SUPERUSER_ID, mailing_ids[0], context=context)
if mailing.mailing_model == 'mail.mass_mailing.contact':
list_ids = [l.id for l in mailing.contact_list_ids]
record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('list_id', 'in', list_ids), ('id', '=', res_id), ('email', 'ilike', email)], context=context)
request.registry[mailing.mailing_model].write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
else:
email_fname = None
if 'email_from' in request.registry[mailing.mailing_model]._all_columns:
email_fname = 'email_from'
elif 'email' in request.registry[mailing.mailing_model]._all_columns:
email_fname = 'email'
if email_fname:
record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('id', '=', res_id), (email_fname, 'ilike', email)], context=context)
if 'opt_out' in request.registry[mailing.mailing_model]._all_columns:
request.registry[mailing.mailing_model].write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
return 'OK'
| 3dfxsoftware/cbss-addons | mass_mailing/controllers/main.py | Python | gpl-2.0 | 2,342 |
# -*- coding: utf-8 -*-
default_app_config = 'escolar.apps.EscolarConfig' | kaajavi/ninformes | escolar/__init__.py | Python | gpl-2.0 | 73 |
#
# Copyright 2006-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""XLIFF classes specifically suited for handling the PO representation in
XLIFF.
This way the API supports plurals as if it was a PO file, for example.
"""
import re
from lxml import etree
from translate.misc.multistring import multistring
from translate.misc.xml_helpers import setXMLspace
from translate.storage import base, lisa, poheader, xliff
from translate.storage.placeables import general
def hasplurals(thing):
if not isinstance(thing, multistring):
return False
return len(thing.strings) > 1
class PoXliffUnit(xliff.xliffunit):
"""A class to specifically handle the plural units created from a po file."""
rich_parsers = general.parsers
def __init__(self, source=None, empty=False, **kwargs):
self._rich_source = None
self._rich_target = None
self._state_n = 0
self.units = []
if empty:
return
if not hasplurals(source):
super().__init__(source)
return
self.xmlelement = etree.Element(self.namespaced("group"))
self.xmlelement.set("restype", "x-gettext-plurals")
self.source = source
def __eq__(self, other):
if isinstance(other, PoXliffUnit):
if len(self.units) != len(other.units):
return False
if not super().__eq__(other):
return False
for i in range(len(self.units) - 1):
if not self.units[i + 1] == other.units[i + 1]:
return False
return True
if len(self.units) <= 1:
if isinstance(other, lisa.LISAunit):
return super().__eq__(other)
else:
return self.source == other.source and self.target == other.target
return False
# XXX: We don't return language nodes correctly at the moment
# def getlanguageNodes(self):
# if not self.hasplural():
# return super().getlanguageNodes()
# else:
# return self.units[0].getlanguageNodes()
@property
def source(self):
if not self.hasplural():
return super().source
return multistring([unit.source for unit in self.units])
@source.setter
def source(self, source):
self.setsource(source, sourcelang="en")
def setsource(self, source, sourcelang="en"):
# TODO: consider changing from plural to singular, etc.
self._rich_source = None
if not hasplurals(source):
super().setsource(source, sourcelang)
else:
target = self.target
for unit in self.units:
try:
self.xmlelement.remove(unit.xmlelement)
except ValueError:
pass
self.units = []
for s in source.strings:
newunit = xliff.xliffunit(s)
# newunit.namespace = self.namespace #XXX?necessary?
self.units.append(newunit)
self.xmlelement.append(newunit.xmlelement)
self.target = target
# We don't support any rich strings yet
multistring_to_rich = base.TranslationUnit.multistring_to_rich
rich_to_multistring = base.TranslationUnit.rich_to_multistring
rich_source = base.TranslationUnit.rich_source
rich_target = base.TranslationUnit.rich_target
def gettarget(self, lang=None):
if self.hasplural():
strings = [unit.target for unit in self.units]
if strings:
return multistring(strings)
else:
return None
else:
return super().gettarget(lang)
def settarget(self, target, lang="xx", append=False):
self._rich_target = None
if self.target == target:
return
if not self.hasplural():
super().settarget(target, lang, append)
return
if not isinstance(target, multistring):
target = multistring(target)
source = self.source
sourcel = len(source.strings)
targetl = len(target.strings)
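        # Pad whichever of source / target has fewer plural forms so the two lists line up
        # one-to-one before the targets are copied onto the per-plural units below.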
if sourcel < targetl:
sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
targets = target.strings
id = self.getid()
self.source = multistring(sources)
self.setid(id)
elif targetl < sourcel:
targets = target.strings + [""] * (sourcel - targetl)
else:
targets = target.strings
for i in range(len(self.units)):
self.units[i].target = targets[i]
def addnote(self, text, origin=None, position="append"):
"""Add a note specifically in a "note" tag"""
note = etree.SubElement(self.xmlelement, self.namespaced("note"))
note.text = text
if origin:
note.set("from", origin)
for unit in self.units[1:]:
unit.addnote(text, origin)
def getnotes(self, origin=None):
# NOTE: We support both <context> and <note> tags in xliff files for comments
if origin == "translator":
notes = super().getnotes("translator")
trancomments = self.gettranslatorcomments()
if notes == trancomments or trancomments.find(notes) >= 0:
notes = ""
elif notes.find(trancomments) >= 0:
trancomments = notes
notes = ""
return trancomments + notes
elif origin in ["programmer", "developer", "source code"]:
devcomments = super().getnotes("developer")
autocomments = self.getautomaticcomments()
if devcomments == autocomments or autocomments.find(devcomments) >= 0:
devcomments = ""
elif devcomments.find(autocomments) >= 0:
autocomments = devcomments
devcomments = ""
return autocomments
else:
return super().getnotes(origin)
def markfuzzy(self, value=True):
super().markfuzzy(value)
for unit in self.units[1:]:
unit.markfuzzy(value)
def marktranslated(self):
super().marktranslated()
for unit in self.units[1:]:
unit.marktranslated()
def setid(self, id):
super().setid(id)
if len(self.units) > 1:
for i in range(len(self.units)):
self.units[i].setid("%s[%d]" % (id, i))
def getlocations(self):
"""Returns all the references (source locations)"""
groups = self.getcontextgroups("po-reference")
references = []
for group in groups:
sourcefile = ""
linenumber = ""
for (type, text) in group:
if type == "sourcefile":
sourcefile = text
elif type == "linenumber":
linenumber = text
assert sourcefile
if linenumber:
sourcefile = sourcefile + ":" + linenumber
references.append(sourcefile)
return references
def getautomaticcomments(self):
"""Returns the automatic comments (x-po-autocomment), which corresponds
to the #. style po comments.
"""
def hasautocomment(grp):
return grp[0] == "x-po-autocomment"
groups = self.getcontextgroups("po-entry")
comments = []
for group in groups:
commentpairs = filter(hasautocomment, group)
for (type, text) in commentpairs:
comments.append(text)
return "\n".join(comments)
def gettranslatorcomments(self):
"""Returns the translator comments (x-po-trancomment), which
corresponds to the # style po comments.
"""
def hastrancomment(grp):
return grp[0] == "x-po-trancomment"
groups = self.getcontextgroups("po-entry")
comments = []
for group in groups:
commentpairs = filter(hastrancomment, group)
for (type, text) in commentpairs:
comments.append(text)
return "\n".join(comments)
def isheader(self):
return "gettext-domain-header" in (self.getrestype() or "")
def istranslatable(self):
return super().istranslatable() and not self.isheader()
@classmethod
def createfromxmlElement(cls, element, namespace=None):
if element.tag.endswith("trans-unit"):
object = cls(None, empty=True)
object.xmlelement = element
object.namespace = namespace
return object
assert element.tag.endswith("group")
group = cls(None, empty=True)
group.xmlelement = element
group.namespace = namespace
units = list(element.iterdescendants(group.namespaced("trans-unit")))
for unit in units:
subunit = xliff.xliffunit.createfromxmlElement(unit)
subunit.namespace = namespace
group.units.append(subunit)
return group
def hasplural(self):
return self.xmlelement.tag == self.namespaced("group")
class PoXliffFile(xliff.xlifffile, poheader.poheader):
"""a file for the po variant of Xliff files"""
UnitClass = PoXliffUnit
def __init__(self, *args, **kwargs):
if "sourcelanguage" not in kwargs:
kwargs["sourcelanguage"] = "en-US"
xliff.xlifffile.__init__(self, *args, **kwargs)
def createfilenode(self, filename, sourcelanguage="en-US", datatype="po"):
# Let's ignore the sourcelanguage parameter opting for the internal
# one. PO files will probably be one language
return super().createfilenode(
filename, sourcelanguage=self.sourcelanguage, datatype="po"
)
def _insert_header(self, header):
header.xmlelement.set("restype", "x-gettext-domain-header")
header.xmlelement.set("approved", "no")
setXMLspace(header.xmlelement, "preserve")
self.addunit(header)
def addheaderunit(self, target, filename):
unit = self.addsourceunit(target, filename, True)
unit.target = target
unit.xmlelement.set("restype", "x-gettext-domain-header")
unit.xmlelement.set("approved", "no")
setXMLspace(unit.xmlelement, "preserve")
return unit
def addplural(self, source, target, filename, createifmissing=False):
"""This method should now be unnecessary, but is left for reference"""
assert isinstance(source, multistring)
if not isinstance(target, multistring):
target = multistring(target)
sourcel = len(source.strings)
targetl = len(target.strings)
if sourcel < targetl:
            sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
targets = target.strings
else:
sources = source.strings
targets = target.strings
self._messagenum += 1
pluralnum = 0
group = self.creategroup(filename, True, restype="x-gettext-plural")
for (src, tgt) in zip(sources, targets):
unit = self.UnitClass(src)
unit.target = tgt
unit.setid("%d[%d]" % (self._messagenum, pluralnum))
pluralnum += 1
group.append(unit.xmlelement)
self.units.append(unit)
if pluralnum < sourcel:
for string in sources[pluralnum:]:
                unit = self.UnitClass(string)
unit.xmlelement.set("translate", "no")
unit.setid("%d[%d]" % (self._messagenum, pluralnum))
pluralnum += 1
group.append(unit.xmlelement)
self.units.append(unit)
return self.units[-pluralnum]
def parse(self, xml):
"""Populates this object from the given xml string"""
# TODO: Make more robust
def ispluralgroup(node):
"""determines whether the xml node refers to a getttext plural"""
return node.get("restype") == "x-gettext-plurals"
def isnonpluralunit(node):
"""determindes whether the xml node contains a plural like id.
We want to filter out all the plural nodes, except the very first
one in each group.
"""
return re.match(r".+\[[123456]\]$", node.get("id") or "") is None
def pluralunits(pluralgroups):
for pluralgroup in pluralgroups:
yield self.UnitClass.createfromxmlElement(
pluralgroup, namespace=self.namespace
)
self.filename = getattr(xml, "name", "")
if hasattr(xml, "read"):
xml.seek(0)
xmlsrc = xml.read()
xml = xmlsrc
parser = etree.XMLParser(resolve_entities=False)
self.document = etree.fromstring(xml, parser).getroottree()
self.initbody()
root_node = self.document.getroot()
assert root_node.tag == self.namespaced(self.rootNode)
groups = root_node.iterdescendants(self.namespaced("group"))
pluralgroups = filter(ispluralgroup, groups)
termEntries = root_node.iterdescendants(
self.namespaced(self.UnitClass.rootNode)
)
singularunits = list(filter(isnonpluralunit, termEntries))
if len(singularunits) == 0:
return
pluralunit_iter = pluralunits(pluralgroups)
nextplural = next(pluralunit_iter, None)
for entry in singularunits:
term = self.UnitClass.createfromxmlElement(entry, namespace=self.namespace)
if nextplural and str(term.getid()) == ("%s[0]" % nextplural.getid()):
self.addunit(nextplural, new=False)
nextplural = next(pluralunit_iter, None)
else:
self.addunit(term, new=False)
| miurahr/translate | translate/storage/poxliff.py | Python | gpl-2.0 | 14,579 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017-2022 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import os
import sys
import unittest
from unittest import mock
import time
import logging
import tempfile
from os.path import join
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from gallery_dl import config, extractor # noqa E402
class TestCookiejar(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.path = tempfile.TemporaryDirectory()
cls.cookiefile = join(cls.path.name, "cookies.txt")
with open(cls.cookiefile, "w") as file:
file.write("""# HTTP Cookie File
.example.org\tTRUE\t/\tFALSE\t253402210800\tNAME\tVALUE
""")
cls.invalid_cookiefile = join(cls.path.name, "invalid.txt")
with open(cls.invalid_cookiefile, "w") as file:
file.write("""# asd
.example.org\tTRUE/FALSE\t253402210800\tNAME\tVALUE
""")
@classmethod
def tearDownClass(cls):
cls.path.cleanup()
config.clear()
def test_cookiefile(self):
config.set((), "cookies", self.cookiefile)
cookies = extractor.find("test:").session.cookies
self.assertEqual(len(cookies), 1)
cookie = next(iter(cookies))
self.assertEqual(cookie.domain, ".example.org")
self.assertEqual(cookie.path , "/")
self.assertEqual(cookie.name , "NAME")
self.assertEqual(cookie.value , "VALUE")
def test_invalid_cookiefile(self):
self._test_warning(self.invalid_cookiefile, ValueError)
def test_invalid_filename(self):
self._test_warning(join(self.path.name, "nothing"), FileNotFoundError)
def _test_warning(self, filename, exc):
config.set((), "cookies", filename)
log = logging.getLogger("test")
with mock.patch.object(log, "warning") as mock_warning:
cookies = extractor.find("test:").session.cookies
self.assertEqual(len(cookies), 0)
self.assertEqual(mock_warning.call_count, 1)
self.assertEqual(mock_warning.call_args[0][0], "cookies: %s")
self.assertIsInstance(mock_warning.call_args[0][1], exc)
class TestCookiedict(unittest.TestCase):
def setUp(self):
self.cdict = {"NAME1": "VALUE1", "NAME2": "VALUE2"}
config.set((), "cookies", self.cdict)
def tearDown(self):
config.clear()
def test_dict(self):
cookies = extractor.find("test:").session.cookies
self.assertEqual(len(cookies), len(self.cdict))
self.assertEqual(sorted(cookies.keys()), sorted(self.cdict.keys()))
self.assertEqual(sorted(cookies.values()), sorted(self.cdict.values()))
def test_domain(self):
for category in ["exhentai", "idolcomplex", "nijie", "seiga"]:
extr = _get_extractor(category)
cookies = extr.session.cookies
for key in self.cdict:
self.assertTrue(key in cookies)
for c in cookies:
self.assertEqual(c.domain, extr.cookiedomain)
class TestCookieLogin(unittest.TestCase):
def tearDown(self):
config.clear()
def test_cookie_login(self):
extr_cookies = {
"exhentai" : ("ipb_member_id", "ipb_pass_hash"),
"idolcomplex": ("login", "pass_hash"),
"nijie" : ("nemail", "nlogin"),
"seiga" : ("user_session",),
}
for category, cookienames in extr_cookies.items():
cookies = {name: "value" for name in cookienames}
config.set((), "cookies", cookies)
extr = _get_extractor(category)
with mock.patch.object(extr, "_login_impl") as mock_login:
extr.login()
mock_login.assert_not_called()
class TestCookieUtils(unittest.TestCase):
def test_check_cookies(self):
extr = extractor.find("test:")
self.assertFalse(extr._cookiejar, "empty")
self.assertFalse(extr.cookiedomain, "empty")
# always returns False when checking for empty cookie list
self.assertFalse(extr._check_cookies(()))
self.assertFalse(extr._check_cookies(("a",)))
self.assertFalse(extr._check_cookies(("a", "b")))
self.assertFalse(extr._check_cookies(("a", "b", "c")))
extr._cookiejar.set("a", "1")
self.assertTrue(extr._check_cookies(("a",)))
self.assertFalse(extr._check_cookies(("a", "b")))
self.assertFalse(extr._check_cookies(("a", "b", "c")))
extr._cookiejar.set("b", "2")
self.assertTrue(extr._check_cookies(("a",)))
self.assertTrue(extr._check_cookies(("a", "b")))
self.assertFalse(extr._check_cookies(("a", "b", "c")))
def test_check_cookies_domain(self):
extr = extractor.find("test:")
self.assertFalse(extr._cookiejar, "empty")
extr.cookiedomain = ".example.org"
self.assertFalse(extr._check_cookies(("a",)))
self.assertFalse(extr._check_cookies(("a", "b")))
extr._cookiejar.set("a", "1")
self.assertFalse(extr._check_cookies(("a",)))
extr._cookiejar.set("a", "1", domain=extr.cookiedomain)
self.assertTrue(extr._check_cookies(("a",)))
extr._cookiejar.set("a", "1", domain="www" + extr.cookiedomain)
self.assertEqual(len(extr._cookiejar), 3)
self.assertTrue(extr._check_cookies(("a",)))
extr._cookiejar.set("b", "2", domain=extr.cookiedomain)
extr._cookiejar.set("c", "3", domain=extr.cookiedomain)
self.assertTrue(extr._check_cookies(("a", "b", "c")))
def test_check_cookies_expires(self):
extr = extractor.find("test:")
self.assertFalse(extr._cookiejar, "empty")
self.assertFalse(extr.cookiedomain, "empty")
now = int(time.time())
log = logging.getLogger("test")
extr._cookiejar.set("a", "1", expires=now-100)
with mock.patch.object(log, "warning") as mw:
self.assertFalse(extr._check_cookies(("a",)))
self.assertEqual(mw.call_count, 1)
self.assertEqual(mw.call_args[0], ("Cookie '%s' has expired", "a"))
extr._cookiejar.set("a", "1", expires=now+100)
with mock.patch.object(log, "warning") as mw:
self.assertFalse(extr._check_cookies(("a",)))
self.assertEqual(mw.call_count, 1)
self.assertEqual(mw.call_args[0], (
"Cookie '%s' will expire in less than %s hour%s", "a", 1, ""))
extr._cookiejar.set("a", "1", expires=now+100+7200)
with mock.patch.object(log, "warning") as mw:
self.assertFalse(extr._check_cookies(("a",)))
self.assertEqual(mw.call_count, 1)
self.assertEqual(mw.call_args[0], (
"Cookie '%s' will expire in less than %s hour%s", "a", 3, "s"))
extr._cookiejar.set("a", "1", expires=now+100+24*3600)
with mock.patch.object(log, "warning") as mw:
self.assertTrue(extr._check_cookies(("a",)))
self.assertEqual(mw.call_count, 0)
def _get_extractor(category):
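    # Build an extractor instance for the given category from the URL of its first
    # registered test case; only extractors that implement _login_impl are considered.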
for extr in extractor.extractors():
if extr.category == category and hasattr(extr, "_login_impl"):
url = next(extr._get_tests())[0]
return extr.from_url(url)
if __name__ == "__main__":
unittest.main()
| mikf/gallery-dl | test/test_cookies.py | Python | gpl-2.0 | 7,492 |
from os import path
try:
from lib.settings_build import Configure
except ImportError:
import sys
from os.path import expanduser, join
sys.path.append(join(expanduser("~"), 'workspace/automation/launchy'))
from lib.settings_build import Configure
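# Per-environment deployment settings (assumption based on the settings_build import
# above): Default carries the shared project values, while Local / Production / Beta
# override hosts, domains and database credentials.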
class Default(Configure):
def __init__(self):
self.beta = False
self.local = False
self.project = 'nedcompost'
self.php = True
self.database_name = self.project
self.database_user = self.project
self.path_project_root = path.join('/mnt', self.project)
self.setDefaults()
if getattr(self, 'host', False):
self.setHost()
class Local(Default):
def __init__(self):
self.beta = True
self.local = True
self.database_root_password = 'password'
super(Local, self).__init__()
class Production(Default):
def __init__(self):
self.host = ['aws-php-3', ]
self.domain = 'nedcompost.org'
self.database_root_password = 'password'
# self.database_password = 'iNcJ%kx87[M>L:!6pkY$fXZIu'
self.database_password = 'zHR-mp)@ZZydJ=s9R}*S+4,!a'
super(Production, self).__init__()
class Beta(Default):
def __init__(self):
self.beta = True
self.host = ['aws-php-3', ]
self.domain = 'nedcompost.mitesdesign.com'
self.database_root_password = 'password'
self.database_password = 'zHR-mp)@ZZydJ=s9R}*S+4,!a'
super(Beta, self).__init__()
try:
from local_settings import *
except ImportError:
pass | amites/nedcompost_wordpress | fabsettings.py | Python | gpl-2.0 | 1,613 |
from numpy import *
import numpy as np
from matplotlib.pyplot import *
import scipy.constants as sc
import copy
import scipy.integrate as integ
# test sun/earth with hw5(1.989e30,5.972e24,149.6e6,0.0167,1000)
def hw5(m1, m2, a, e, tmax, tstep=0.001, tplot=0.025, method='leapfrog'):
if method != 'leapfrog' and method != 'odeint':
print("That's not a method")
return()
# initialize commonly used variables
period = sqrt((4*(pi**2)*(a**3)) / (sc.G*(m1 + m2)))
dt = period*tstep
# initialize objects at time 0
q = m1 / m2
r0 = (1-e)*a/(1+q)
v0 = (1/(1+q))*sqrt((1+e)/(1-e))*sqrt(sc.G*(m1+m2)/a)
rv = array([r0, 0, 0, v0, -q*r0, 0, 0, -q*v0])
# set up figure
figure(1)
gca().set_aspect('equal')
xlim([-2*a, 2*a])
ylim([-2*a, 2*a])
rv_list = []
if method == 'leapfrog':
timeCounter = 0
frameCounter = 0
while timeCounter < tmax:
# plot positions if tplot time has passed
if frameCounter >= tplot:
frameCounter = 0
rv_list.append(copy.deepcopy(rv))
# calc positions
rv[0] = rv[0] + rv[2]*dt
rv[1] = rv[1] + rv[3]*dt
rv[4] = rv[4] + rv[6]*dt
rv[5] = rv[5] + rv[7]*dt
# calc acceleration
r = array([rv[0] - rv[4], rv[1] - rv[5]])
force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
# calc velocity
rv[2] = rv[2] - (force[0]/m1)*dt
rv[3] = rv[3] - (force[1]/m1)*dt
rv[6] = rv[6] + (force[0]/m2)*dt
rv[7] = rv[7] + (force[1]/m2)*dt
# increment counters
timeCounter += tstep
frameCounter += tstep
# plot final position
rv_list.append(copy.deepcopy(rv))
rv_list_plot = rv_list
else:
# odeint
rv_list = integ.odeint(deriv, rv, arange(0, tmax*period, dt), (m1, m2))
        # We integrated at tstep resolution, but only want to plot every tplot interval,
        # so thin the results accordingly.
        t_interval = int(round(tplot / tstep))
        rv_list_plot = rv_list[::t_interval]
# plot
for i in range(len(rv_list_plot)):
plot(rv_list_plot[i][0],rv_list_plot[i][1],'bo')
plot(rv_list_plot[i][4],rv_list_plot[i][5],'go')
draw()
def deriv(rv, dt, m1, m2):
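    # Derivative function for odeint: position derivatives are the current velocities,
    # and velocity derivatives follow from Newtonian gravity between the two bodies
    # (equal and opposite force divided by each body's mass).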
# calc position deriv
rv_copy = zeros(8)
rv_copy[0] = rv[2]
rv_copy[1] = rv[3]
rv_copy[4] = rv[6]
rv_copy[5] = rv[7]
# calc velocity deriv
r = array([rv[0] - rv[4], rv[1] - rv[5]])
force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
rv_copy[2] = - (force[0]/m1)
rv_copy[3] = - (force[1]/m1)
rv_copy[6] = + (force[0]/m2)
rv_copy[7] = + (force[1]/m2)
return rv_copy
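# Minimal usage sketch, mirroring the sun/earth example in the comment at the top of this
# file but with tmax shortened so a direct run finishes quickly; show() is assumed to be
# wanted so the matplotlib figure is actually displayed.
if __name__ == '__main__':
    hw5(1.989e30, 5.972e24, 149.6e6, 0.0167, 2)
    show()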
| smorad/ast119 | hw5.py | Python | gpl-2.0 | 2,857 |
from django import forms
from getresults_aliquot.models import Aliquot
from .models import Order
class OrderForm(forms.ModelForm):
def clean_aliquot_identifier(self):
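        # Validate that the submitted identifier refers to an existing Aliquot record;
        # otherwise reject the field with a ValidationError.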
aliquot_identifier = self.cleaned_data.get('aliquot_identifier')
try:
Aliquot.objects.get(aliquot_identifier=aliquot_identifier)
except Aliquot.DoesNotExist:
raise forms.ValidationError('Invalid Aliquot Identifier. Got {}'.format(aliquot_identifier))
class Meta:
model = Order
| botswana-harvard/getresults-order | getresults_order/forms.py | Python | gpl-2.0 | 514 |
#
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import tempfile
import os
import storage.fileUtils as fileUtils
import testValidation
from testrunner import VdsmTestCase as TestCaseBase
class DirectFileTests(TestCaseBase):
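    # These tests exercise fileUtils.open_ex() with the "d" flag, which presumably requests
    # direct I/O (O_DIRECT), hence the reads and writes split around 512-byte boundaries.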
@classmethod
def getConfigTemplate(cls):
return {}
def testRead(self):
data = """Vestibulum. Libero leo nostra, pede nunc eu. Pellentesque
platea lacus morbi nisl montes ve. Ac. A, consectetuer erat, justo eu.
Elementum et, phasellus fames et rutrum donec magnis eu bibendum. Arcu,
ante aliquam ipsum ut facilisis ad."""
srcFd, srcPath = tempfile.mkstemp()
f = os.fdopen(srcFd, "wb")
f.write(data)
f.flush()
f.close()
with fileUtils.open_ex(srcPath, "dr") as f:
self.assertEquals(f.read(), data)
os.unlink(srcPath)
def testSeekRead(self):
data = """
Habitasse ipsum at fusce litora metus, placerat dui purus aenean ante,
ve. Pede hymenaeos ut primis cum, rhoncus, lectus, nunc. Vestibulum
curabitur vitae etiam magna auctor velit, mi tempus vivamus orci eros.
Pellentesque curabitur risus fermentum eget. Elementum curae, donec
nisl egestas ve, ut odio eu nunc elit felis primis id. Ridiculus metus
morbi nulla erat, amet nisi. Amet ligula nisi, id penatibus risus in.
Purus velit duis. Aenean eget, pellentesque eu rhoncus arcu et
consectetuer laoreet, augue nisi dictum lacinia urna. Fermentum
torquent. Ut interdum vivamus duis. Felis consequat nec pede. Orci
sollicitudin parturient orci felis. Enim, diam velit sapien
condimentum fames semper nibh. Integer at, egestas pede consectetuer
ac augue pharetra dolor non placerat quisque id cursus ultricies.
Ligula mi senectus sit. Habitasse. Integer sollicitudin dapibus cum
quam.
"""
self.assertTrue(len(data) > 512)
srcFd, srcPath = tempfile.mkstemp()
f = os.fdopen(srcFd, "wb")
f.write(data)
f.flush()
f.close()
with fileUtils.open_ex(srcPath, "dr") as f:
f.seek(512)
self.assertEquals(f.read(), data[512:])
os.unlink(srcPath)
def testWrite(self):
data = """In ut non platea egestas, quisque magnis nunc nostra ac etiam
suscipit nec integer sociosqu. Fermentum. Ante orci luctus, ipsum
ullamcorper enim arcu class neque inceptos class. Ut, sagittis
torquent, commodo facilisi."""
srcFd, srcPath = tempfile.mkstemp()
os.close(srcFd)
with fileUtils.open_ex(srcPath, "dw") as f:
f.write(data)
with fileUtils.open_ex(srcPath, "r") as f:
self.assertEquals(f.read(len(data)), data)
os.unlink(srcPath)
def testSmallWrites(self):
data = """
Aliquet habitasse tellus. Fringilla faucibus tortor parturient
consectetuer sodales, venenatis platea habitant. Hendrerit nostra nunc
odio. Primis porttitor consequat enim ridiculus. Taciti nascetur,
nibh, convallis sit, cum dis mi. Nonummy justo odio cursus, ac hac
curabitur nibh. Tellus. Montes, ut taciti orci ridiculus facilisis
nunc. Donec. Risus adipiscing habitant donec vehicula non vitae class,
porta vitae senectus. Nascetur felis laoreet integer, tortor ligula.
Pellentesque vestibulum cras nostra. Ut sollicitudin posuere, per
accumsan curabitur id, nisi fermentum vel, eget netus tristique per,
donec, curabitur senectus ut fusce. A. Mauris fringilla senectus et
eni facilisis magna inceptos eu, cursus habitant fringilla neque.
Nibh. Elit facilisis sed, elit, nostra ve torquent dictumst, aenean
sapien quam, habitasse in. Eu tempus aptent, diam, nisi risus
pharetra, ac, condimentum orci, consequat mollis. Cras lacus augue
ultrices proin fermentum nibh sed urna. Ve ipsum ultrices curae,
feugiat faucibus proin et elementum vivamus, lectus. Torquent. Tempus
facilisi. Cras suspendisse euismod consectetuer ornare nostra. Fusce
amet cum amet diam.
"""
self.assertTrue(len(data) > 512)
srcFd, srcPath = tempfile.mkstemp()
os.close(srcFd)
with fileUtils.open_ex(srcPath, "dw") as f:
f.write(data[:512])
f.write(data[512:])
with fileUtils.open_ex(srcPath, "r") as f:
self.assertEquals(f.read(len(data)), data)
os.unlink(srcPath)
def testUpdateRead(self):
data = """
Aliquet. Aliquam eni ac nullam iaculis cras ante, adipiscing. Enim
eget egestas pretium. Ultricies. Urna cubilia in, hac. Curabitur.
Nibh. Purus ridiculus natoque sed id. Feugiat lacus quam, arcu
maecenas nec egestas. Hendrerit duis nunc eget dis lacus porttitor per
sodales class diam condimentum quisque condimentum nisi ligula.
Dapibus blandit arcu nam non ac feugiat diam, dictumst. Ante eget
fames eu penatibus in, porta semper accumsan adipiscing tellus in
sagittis. Est parturient parturient mi fermentum commodo, per
fermentum. Quis duis velit at quam risus mi. Facilisi id fames.
Turpis, conubia rhoncus. Id. Elit eni tellus gravida, ut, erat morbi.
Euismod, enim a ante vestibulum nibh. Curae curae primis vulputate
adipiscing arcu ipsum suspendisse quam hymenaeos primis accumsan
vestibulum.
"""
self.assertTrue(len(data) > 512)
srcFd, srcPath = tempfile.mkstemp()
os.close(srcFd)
with fileUtils.open_ex(srcPath, "wd") as f:
f.write(data[:512])
with fileUtils.open_ex(srcPath, "r+d") as f:
f.seek(512)
f.write(data[512:])
with fileUtils.open_ex(srcPath, "r") as f:
self.assertEquals(f.read(len(data)), data)
os.unlink(srcPath)
class ChownTests(TestCaseBase):
@testValidation.ValidateRunningAsRoot
def test(self):
targetId = 666
srcFd, srcPath = tempfile.mkstemp()
os.close(srcFd)
fileUtils.chown(srcPath, targetId, targetId)
stat = os.stat(srcPath)
self.assertTrue(stat.st_uid == stat.st_gid == targetId)
os.unlink(srcPath)
@testValidation.ValidateRunningAsRoot
def testNames(self):
# I convert to some id because I have no
# idea what users are defined and what
        # their IDs are apart from root
tmpId = 666
srcFd, srcPath = tempfile.mkstemp()
os.close(srcFd)
fileUtils.chown(srcPath, tmpId, tmpId)
stat = os.stat(srcPath)
self.assertTrue(stat.st_uid == stat.st_gid == tmpId)
fileUtils.chown(srcPath, "root", "root")
stat = os.stat(srcPath)
self.assertTrue(stat.st_uid == stat.st_gid == 0)
class CopyUserModeToGroupTests(TestCaseBase):
MODE_MASK = 0777
# format: initialMode, expectedMode
modesList = [
(0770, 0770), (0700, 0770), (0750, 0770), (0650, 0660),
]
def testCopyUserModeToGroup(self):
fd, path = tempfile.mkstemp()
try:
os.close(fd)
for initialMode, expectedMode in self.modesList:
os.chmod(path, initialMode)
fileUtils.copyUserModeToGroup(path)
self.assertEquals(os.stat(path).st_mode & self.MODE_MASK,
expectedMode)
finally:
os.unlink(path)
| edwardbadboy/vdsm-ubuntu | tests/fileUtilTests.py | Python | gpl-2.0 | 8,296 |
try:
from django.conf.urls import url, patterns
except ImportError:
from django.conf.urls.defaults import url, patterns
from actstream import feeds
from actstream import views
from django.contrib.auth.decorators import login_required
urlpatterns = patterns('actstream.views',
# Syndication Feeds
url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/atom/$',
feeds.AtomObjectActivityFeed(), name='actstream_object_feed_atom'),
url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
feeds.ObjectActivityFeed(), name='actstream_object_feed'),
url(r'^feed/(?P<content_type_id>\d+)/atom/$',
feeds.AtomModelActivityFeed(), name='actstream_model_feed_atom'),
url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/as/$',
feeds.ActivityStreamsObjectActivityFeed(),
name='actstream_object_feed_as'),
url(r'^feed/(?P<content_type_id>\d+)/$',
feeds.ModelActivityFeed(), name='actstream_model_feed'),
url(r'^feed/$', feeds.UserActivityFeed(), name='actstream_feed'),
url(r'^feed/atom/$', feeds.AtomUserActivityFeed(),
name='actstream_feed_atom'),
# Follow/Unfollow API
url(r'^follow/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
'follow_unfollow', name='actstream_follow'),
url(r'^follow_all/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
'follow_unfollow', {'actor_only': False}, name='actstream_follow_all'),
url(r'^unfollow/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
'follow_unfollow', {'do_follow': False}, name='actstream_unfollow'),
# Follower and Actor lists
url(r'^followers/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
'followers', name='actstream_followers'),
url(r'^actors/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
'actor', name='actstream_actor'),
url(r'^actors/(?P<content_type_id>\d+)/$',
'model', name='actstream_model'),
    url(r'^new_wall_post/$', view=login_required(views.new_wall_post), name='new_wall_post'),
    url(r'^detail/(?P<action_id>\d+)/$', view=login_required(views.detail), name='actstream_detail'),
    url(r'^(?P<username>[-\w]+)/$', view=login_required(views.user), name='actstream_user'),
    url(r'^$', view=login_required(views.stream), name='actstream'),
    url(r'^new_group_post', view=login_required(views.new_group_post), name='new_group_post'),
) | meletakis/collato | esn/actstream/urls.py | Python | gpl-2.0 | 2,399 |
class Song(object):
def __init__(self, lyrics):
self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print line
happy_bday = Song(["Happy birthday to you",
"I don't want to get sued",
"So I'll stop right here"])
bulls_on_parade = Song(["They rally around tha family",
"With pockets full of shells"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
| mshcruz/LearnPythonTheHardWay | ex40.py | Python | gpl-2.0 | 484 |
from __future__ import absolute_import
import os
import sys
import shutil
import unittest
import xml.dom.minidom
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
from pcs_test_functions import pcs,ac
import rule
empty_cib = "empty.xml"
temp_cib = "temp.xml"
class DateValueTest(unittest.TestCase):
def testParse(self):
for value, item in enumerate(rule.DateCommonValue.allowed_items, 1):
self.assertEquals(
str(value),
rule.DateCommonValue("%s=%s" % (item, value)).parts[item]
)
value = rule.DateCommonValue(
"hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 weeks=6 "
"years=7 weekyears=8 moon=9"
)
self.assertEquals("1", value.parts["hours"])
self.assertEquals("2", value.parts["monthdays"])
self.assertEquals("3", value.parts["weekdays"])
self.assertEquals("4", value.parts["yeardays"])
self.assertEquals("5", value.parts["months"])
self.assertEquals("6", value.parts["weeks"])
self.assertEquals("7", value.parts["years"])
self.assertEquals("8", value.parts["weekyears"])
self.assertEquals("9", value.parts["moon"])
value = rule.DateCommonValue("hours=1 monthdays=2 hours=3")
self.assertEquals("2", value.parts["monthdays"])
self.assertEquals("3", value.parts["hours"])
value = rule.DateCommonValue(" hours=1 monthdays=2 hours=3 ")
self.assertEquals("2", value.parts["monthdays"])
self.assertEquals("3", value.parts["hours"])
self.assertSyntaxError(
"missing one of 'hours=', 'monthdays=', 'weekdays=', 'yeardays=', "
"'months=', 'weeks=', 'years=', 'weekyears=', 'moon=' in date-spec",
"",
rule.DateSpecValue
)
self.assertSyntaxError(
"missing value after 'hours=' in date-spec",
"hours=",
rule.DateSpecValue
)
self.assertSyntaxError(
"missing =value after 'hours' in date-spec",
"hours",
rule.DateSpecValue
)
self.assertSyntaxError(
"unexpected 'foo=bar' in date-spec",
"foo=bar",
rule.DateSpecValue
)
self.assertSyntaxError(
"unexpected 'foo=bar' in date-spec",
"hours=1 foo=bar",
rule.DateSpecValue
)
def testDurationValidate(self):
for value, item in enumerate(rule.DateCommonValue.allowed_items, 1):
self.assertEquals(
str(value),
rule.DateDurationValue("%s=%s" % (item, value)).parts[item]
)
for item in rule.DateCommonValue.allowed_items:
self.assertSyntaxError(
"invalid %s '%s' in 'duration'" % (item, "foo"),
"%s=foo" % item,
rule.DateDurationValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'duration'" % (item, "-1"),
"%s=-1" % item,
rule.DateDurationValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'duration'" % (item, "2foo"),
"%s=2foo" % item,
rule.DateDurationValue
)
def testDateSpecValidation(self):
for item in rule.DateCommonValue.allowed_items:
value = 1
self.assertEquals(
str(value),
rule.DateSpecValue("%s=%s" % (item, value)).parts[item]
)
self.assertEquals(
"%s-%s" % (value, value + 1),
rule.DateSpecValue(
"%s=%s-%s" % (item, value, value + 1)
).parts[item]
)
self.assertEquals(
"hours=9-16 weekdays=1-5",
str(rule.DateSpecValue("hours=9-16 weekdays=1-5"))
)
for item in rule.DateCommonValue.allowed_items:
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "foo"),
"%s=foo" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "1-foo"),
"%s=1-foo" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "foo-1"),
"%s=foo-1" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "1-2-3"),
"%s=1-2-3" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "2-1"),
"%s=2-1" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid hours '24' in 'date-spec'",
"hours=24",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid monthdays '32' in 'date-spec'",
"monthdays=32",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid weekdays '8' in 'date-spec'",
"weekdays=8",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid yeardays '367' in 'date-spec'",
"yeardays=367",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid months '13' in 'date-spec'",
"months=13",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid weeks '54' in 'date-spec'",
"weeks=54",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid weekyears '54' in 'date-spec'",
"weekyears=54",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid moon '8' in 'date-spec'",
"moon=8",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid hours '12-8' in 'date-spec'",
"hours=12-8",
rule.DateSpecValue
)
def assertSyntaxError(self, syntax_error, parts_string, value_class=None):
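        # Helper: assert that constructing value_class (DateCommonValue by default) from
        # parts_string raises rule.SyntaxError with exactly the given message.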
value_class = value_class if value_class else rule.DateCommonValue
self.assertRaises(rule.SyntaxError, value_class, parts_string)
try:
value_class(parts_string)
except rule.SyntaxError as e:
self.assertEquals(syntax_error, str(e))
class ParserTest(unittest.TestCase):
def setUp(self):
self.parser = rule.RuleParser()
def testEmptyInput(self):
self.assertRaises(rule.UnexpectedEndOfInput, self.parser.parse, [])
def testSingleLiteral(self):
self.assertSyntaxError(
"missing one of 'eq', 'ne', 'lt', 'gt', 'lte', 'gte', 'in_range', "
"'defined', 'not_defined', 'date-spec'",
["#uname"]
)
self.assertSyntaxError(
"missing one of 'eq', 'ne', 'lt', 'gt', 'lte', 'gte', 'in_range', "
"'defined', 'not_defined', 'date-spec'",
["string", "node1"]
)
def testSingleLiteralDatespec(self):
self.assertEquals(
"(date-spec (literal hours=1))",
str(self.parser.parse(["date-spec", "hours=1"]))
)
self.assertEquals(
"(date-spec (literal hours=1-14 months=1 monthdays=20-30))",
str(self.parser.parse([
"date-spec", "hours=1-14 months=1 monthdays=20-30"
]))
)
self.assertUnexpectedEndOfInput(["date-spec"])
def testSimpleExpression(self):
self.assertEquals(
"(eq (literal #uname) (literal node1))",
str(self.parser.parse(["#uname", "eq", "node1"]))
)
self.assertEquals(
"(ne (literal #uname) (literal node2))",
str(self.parser.parse(["#uname", "ne", "node2"]))
)
self.assertEquals(
"(gt (literal int) (literal 123))",
str(self.parser.parse(["int", "gt", "123"]))
)
self.assertEquals(
"(gte (literal int) (literal 123))",
str(self.parser.parse(["int", "gte", "123"]))
)
self.assertEquals(
"(lt (literal int) (literal 123))",
str(self.parser.parse(["int", "lt", "123"]))
)
self.assertEquals(
"(lte (literal int) (literal 123))",
str(self.parser.parse(["int", "lte", "123"]))
)
def testSimpleExpressionBad(self):
self.assertSyntaxError(
"unexpected 'eq'",
["eq"]
)
self.assertUnexpectedEndOfInput(["#uname", "eq"])
self.assertSyntaxError(
"unexpected 'node1'",
["#uname", "node1"]
)
self.assertSyntaxError(
"unexpected 'eq'",
["eq", "#uname"]
)
self.assertSyntaxError(
"unexpected 'eq'",
["eq", "lt"]
)
self.assertSyntaxError(
"unexpected 'string' before 'eq'",
["string", "#uname", "eq", "node1"]
)
self.assertSyntaxError(
"unexpected 'date-spec' before 'eq'",
["date-spec", "hours=1", "eq", "node1"]
)
self.assertSyntaxError(
"unexpected 'date-spec' after 'eq'",
["#uname", "eq", "date-spec", "hours=1"]
)
self.assertSyntaxError(
"unexpected 'duration' before 'eq'",
["duration", "hours=1", "eq", "node1"]
)
self.assertSyntaxError(
"unexpected 'duration' after 'eq'",
["#uname", "eq", "duration", "hours=1"]
)
def testDefinedExpression(self):
self.assertEquals(
"(defined (literal pingd))",
str(self.parser.parse(["defined", "pingd"]))
)
self.assertEquals(
"(not_defined (literal pingd))",
str(self.parser.parse(["not_defined", "pingd"]))
)
def testDefinedExpressionBad(self):
self.assertUnexpectedEndOfInput(["defined"])
self.assertUnexpectedEndOfInput(["not_defined"])
self.assertSyntaxError(
"unexpected 'eq'",
["defined", "eq"]
)
self.assertSyntaxError(
"unexpected 'and'",
["defined", "and"]
)
self.assertSyntaxError(
"unexpected 'string' after 'defined'",
["defined", "string", "pingd"]
)
self.assertSyntaxError(
"unexpected 'date-spec' after 'defined'",
["defined", "date-spec", "hours=1"]
)
self.assertSyntaxError(
"unexpected 'duration' after 'defined'",
["defined", "duration", "hours=1"]
)
def testTypeExpression(self):
self.assertEquals(
"(eq (literal #uname) (string (literal node1)))",
str(self.parser.parse(["#uname", "eq", "string", "node1"]))
)
self.assertEquals(
"(eq (literal #uname) (integer (literal 12345)))",
str(self.parser.parse(["#uname", "eq", "integer", "12345"]))
)
self.assertEquals(
"(eq (literal #uname) (integer (literal -12345)))",
str(self.parser.parse(["#uname", "eq", "integer", "-12345"]))
)
self.assertEquals(
"(eq (literal #uname) (version (literal 1)))",
str(self.parser.parse(["#uname", "eq", "version", "1"]))
)
self.assertEquals(
"(eq (literal #uname) (version (literal 1.2.3)))",
str(self.parser.parse(["#uname", "eq", "version", "1.2.3"]))
)
self.assertEquals(
"(eq (literal #uname) (string (literal string)))",
str(self.parser.parse(["#uname", "eq", "string", "string"]))
)
self.assertEquals(
"(eq (literal #uname) (string (literal and)))",
str(self.parser.parse(["#uname", "eq", "string", "and"]))
)
self.assertEquals(
"(and "
"(ne (literal #uname) (string (literal integer))) "
"(ne (literal #uname) (string (literal version)))"
")",
str(self.parser.parse([
"#uname", "ne", "string", "integer",
"and",
"#uname", "ne", "string", "version"
]))
)
def testTypeExpressionBad(self):
self.assertUnexpectedEndOfInput(["string"])
self.assertUnexpectedEndOfInput(["#uname", "eq", "string"])
self.assertSyntaxError(
"unexpected 'string' before 'eq'",
["string", "#uname", "eq", "node1"]
)
self.assertSyntaxError(
"invalid integer value 'node1'",
["#uname", "eq", "integer", "node1"]
)
self.assertSyntaxError(
"invalid version value 'node1'",
["#uname", "eq", "version", "node1"]
)
def testDateExpression(self):
self.assertEquals(
"(gt (literal date) (literal 2014-06-26))",
str(self.parser.parse(["date", "gt", "2014-06-26"]))
)
self.assertEquals(
"(lt (literal date) (literal 2014-06-26))",
str(self.parser.parse(["date", "lt", "2014-06-26"]))
)
self.assertEquals(
"(in_range "
"(literal date) (literal 2014-06-26) (literal 2014-07-26)"
")",
str(self.parser.parse([
"date", "in_range", "2014-06-26", "to", "2014-07-26"
]))
)
self.assertEquals(
"(in_range "
"(literal date) "
"(literal 2014-06-26) (duration (literal years=1))"
")",
str(self.parser.parse([
"date", "in_range", "2014-06-26", "to", "duration", "years=1"
]))
)
def testDateExpressionBad(self):
self.assertUnexpectedEndOfInput(
["date", "in_range"]
)
self.assertSyntaxError(
"missing 'to'",
["date", "in_range", '2014-06-26']
)
self.assertUnexpectedEndOfInput(
["date", "in_range", "2014-06-26", "to"]
)
self.assertSyntaxError(
"unexpected 'in_range'",
["in_range", '2014-06-26', "to", "2014-07-26"]
)
self.assertSyntaxError(
"expecting 'to', got 'eq'",
["date", "in_range", '#uname', "eq", "node1", "to", "2014-07-26"]
)
self.assertSyntaxError(
"invalid date '#uname' in 'in_range ... to'",
["date", "in_range", "2014-06-26", "to", '#uname', "eq", "node1"]
)
self.assertSyntaxError(
"unexpected 'defined' after 'in_range'",
["date", "in_range", "defined", "pingd", "to", "2014-07-26"]
)
self.assertSyntaxError(
"unexpected 'defined' after 'in_range ... to'",
["date", "in_range", "2014-06-26", "to", "defined", "pingd"]
)
self.assertSyntaxError(
"unexpected 'string' before 'in_range'",
["string", "date", "in_range", '2014-06-26', "to", "2014-07-26"]
)
self.assertSyntaxError(
"unexpected 'string' after 'in_range'",
["date", "in_range", "string", '2014-06-26', "to", "2014-07-26"]
)
self.assertSyntaxError(
"unexpected 'string' after 'in_range ... to'",
["date", "in_range", '2014-06-26', "to", "string", "2014-07-26"]
)
self.assertSyntaxError(
"unexpected 'string' after '2014-06-26'",
["date", "in_range", '2014-06-26', "string", "to", "2014-07-26"]
)
self.assertSyntaxError(
"unexpected '#uname' before 'in_range'",
["#uname", "in_range", '2014-06-26', "to", "2014-07-26"]
)
self.assertSyntaxError(
"invalid date '2014-13-26' in 'in_range ... to'",
["date", "in_range", '2014-13-26', "to", "2014-07-26"]
)
self.assertSyntaxError(
"invalid date '2014-13-26' in 'in_range ... to'",
["date", "in_range", '2014-06-26', "to", "2014-13-26"]
)
def testAndOrExpression(self):
self.assertEquals(
"(and "
"(ne (literal #uname) (literal node1)) "
"(ne (literal #uname) (literal node2))"
")",
str(self.parser.parse([
"#uname", "ne", "node1", "and", "#uname", "ne", "node2"
]))
)
self.assertEquals(
"(or "
"(eq (literal #uname) (literal node1)) "
"(eq (literal #uname) (literal node2))"
")",
str(self.parser.parse([
"#uname", "eq", "node1", "or", "#uname", "eq", "node2"
]))
)
self.assertEquals(
"(and "
"(and "
"(ne (literal #uname) (literal node1)) "
"(ne (literal #uname) (literal node2))"
") "
"(ne (literal #uname) (literal node3))"
")",
str(self.parser.parse([
"#uname", "ne", "node1",
"and", "#uname", "ne", "node2",
"and", "#uname", "ne", "node3"
]))
)
self.assertEquals(
"(or "
"(and "
"(ne (literal #uname) (literal node1)) "
"(ne (literal #uname) (literal node2))"
") "
"(eq (literal #uname) (literal node3))"
")",
str(self.parser.parse([
"#uname", "ne", "node1",
"and", "#uname", "ne", "node2",
"or", "#uname", "eq", "node3"
]))
)
self.assertEquals(
"(and "
"(or "
"(eq (literal #uname) (literal node1)) "
"(eq (literal #uname) (literal node2))"
") "
"(ne (literal #uname) (literal node3))"
")",
str(self.parser.parse([
"#uname", "eq", "node1",
"or", "#uname", "eq", "node2",
"and", "#uname", "ne", "node3"
]))
)
self.assertEquals(
"(and "
"(defined (literal pingd)) "
"(lte (literal pingd) (literal 1))"
")",
str(self.parser.parse([
"defined", "pingd", "and", "pingd", "lte", "1"
]))
)
self.assertEquals(
"(or "
"(gt (literal pingd) (literal 1)) "
"(not_defined (literal pingd))"
")",
str(self.parser.parse([
"pingd", "gt", "1", "or", "not_defined", "pingd"
]))
)
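# Note on the cases above: the expected trees show that 'and' and 'or' get equal
# precedence and are simply grouped left to right (neither operator binds tighter);
# explicit parentheses, exercised in testParenthesizedExpression below, are needed
# to override that grouping.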
def testAndOrExpressionDateSpec(self):
self.assertEquals(
"(and "
"(ne (literal #uname) (literal node1)) "
"(date-spec (literal hours=1-12))"
")",
str(self.parser.parse([
"#uname", "ne", "node1", "and", "date-spec", "hours=1-12"
]))
)
self.assertEquals(
"(or "
"(date-spec (literal monthdays=1-12)) "
"(ne (literal #uname) (literal node1))"
")",
str(self.parser.parse([
"date-spec", "monthdays=1-12", "or", "#uname", "ne", "node1"
]))
)
self.assertEquals(
"(or "
"(date-spec (literal monthdays=1-10)) "
"(date-spec (literal monthdays=11-20))"
")",
str(self.parser.parse([
"date-spec", "monthdays=1-10",
"or",
"date-spec", "monthdays=11-20"
]))
)
def testAndOrExpressionDate(self):
self.assertEquals(
"(and "
"(ne (literal #uname) (literal node1)) "
"(in_range "
"(literal date) (literal 2014-06-26) (literal 2014-07-26)"
")"
")",
str(self.parser.parse([
"#uname", "ne", "node1",
"and",
"date", "in_range", "2014-06-26", "to", "2014-07-26"
]))
)
self.assertEquals(
"(and "
"(in_range "
"(literal date) (literal 2014-06-26) (literal 2014-07-26)"
") "
"(ne (literal #uname) (literal node1))"
")",
str(self.parser.parse([
"date", "in_range", "2014-06-26", "to", "2014-07-26",
"and",
"#uname", "ne", "node1"
]))
)
def testAndOrExpressionBad(self):
self.assertSyntaxError(
"unexpected 'and'",
["and"]
)
self.assertSyntaxError(
"unexpected 'or'",
["or"]
)
self.assertSyntaxError(
"unexpected '#uname' before 'and'",
["#uname", "and", "node1"]
)
self.assertSyntaxError(
"unexpected '#uname' before 'or'",
["#uname", "or", "node1"]
)
self.assertSyntaxError(
"unexpected '#uname' before 'or'",
["#uname", "or", "eq"]
)
self.assertSyntaxError(
"unexpected 'node2' after 'and'",
["#uname", "eq", "node1", "and", "node2"]
)
self.assertUnexpectedEndOfInput(["#uname", "eq", "node1", "and"])
self.assertUnexpectedEndOfInput(
["#uname", "eq", "node1", "and", "#uname", "eq"]
)
self.assertSyntaxError(
"unexpected 'and'",
["and", "#uname", "eq", "node1"]
)
self.assertSyntaxError(
"unexpected 'duration' after 'and'",
["#uname", "ne", "node1", "and", "duration", "hours=1"]
)
self.assertSyntaxError(
"unexpected 'duration' before 'or'",
["duration", "monthdays=1", "or", "#uname", "ne", "node1"]
)
def testParenthesizedExpression(self):
self.assertSyntaxError(
"missing one of 'eq', 'ne', 'lt', 'gt', 'lte', 'gte', 'in_range', "
"'defined', 'not_defined', 'date-spec'",
["(", "#uname", ")"]
)
self.assertEquals(
"(date-spec (literal hours=1))",
str(self.parser.parse(["(", "date-spec", "hours=1", ")"]))
)
self.assertEquals(
"(eq (literal #uname) (literal node1))",
str(self.parser.parse(["(", "#uname", "eq", "node1", ")"]))
)
self.assertEquals(
"(defined (literal pingd))",
str(self.parser.parse(["(", "defined", "pingd", ")"]))
)
self.assertEquals(
"(and "
"(ne (literal #uname) (literal node1)) "
"(ne (literal #uname) (literal node2))"
")",
str(self.parser.parse([
"(",
"#uname", "ne", "node1", "and", "#uname", "ne", "node2",
")"
]))
)
self.assertEquals(
"(and "
"(ne (literal #uname) (literal node1)) "
"(ne (literal #uname) (literal node2))"
")",
str(self.parser.parse([
"(", "#uname", "ne", "node1", ")",
"and",
"(", "#uname", "ne", "node2", ")"
]))
)
self.assertEquals(
"(or "
"(and "
"(ne (literal #uname) (literal node1)) "
"(ne (literal #uname) (literal node2))"
") "
"(eq (literal #uname) (literal node3))"
")",
str(self.parser.parse([
"(",
"#uname", "ne", "node1", "and", "#uname", "ne", "node2",
")",
"or", "#uname", "eq", "node3"
]))
)
self.assertEquals(
"(and "
"(ne (literal #uname) (literal node1)) "
"(or "
"(ne (literal #uname) (literal node2)) "
"(eq (literal #uname) (literal node3))"
")"
")",
str(self.parser.parse([
"#uname", "ne", "node1",
"and",
"(", "#uname", "ne", "node2", "or", "#uname", "eq", "node3", ")"
]))
)
self.assertEquals(
"(and "
"(ne (literal #uname) (literal node1)) "
"(or "
"(ne (literal #uname) (literal node2)) "
"(eq (literal #uname) (literal node3))"
")"
")",
str(self.parser.parse([
"(", "(",
"(", "#uname", "ne", "node1", ")",
"and",
"(", "(",
"(", "#uname", "ne", "node2", ")",
"or",
"(", "#uname", "eq", "node3", ")",
")", ")",
")", ")"
]))
)
self.assertEquals(
"(in_range "
"(literal date) (literal 2014-06-26) (literal 2014-07-26)"
")",
str(self.parser.parse([
"(", "date", "in_range", "2014-06-26", "to", "2014-07-26", ")"
]))
)
def testParenthesizedExpressionBad(self):
self.assertUnexpectedEndOfInput(["("])
self.assertSyntaxError(
"unexpected ')'",
["(", ")"]
)
self.assertSyntaxError(
"missing ')'",
["(", "#uname"]
)
self.assertUnexpectedEndOfInput(["(", "#uname", "eq"])
self.assertSyntaxError(
"missing ')'",
["(", "#uname", "eq", "node1"]
)
def assertUnexpectedEndOfInput(self, program):
self.assertRaises(rule.UnexpectedEndOfInput, self.parser.parse, program)
def assertSyntaxError(self, syntax_error, program):
self.assertRaises(
rule.SyntaxError, self.parser.parse, program
)
try:
self.parser.parse(program)
except rule.SyntaxError as e:
self.assertEquals(syntax_error, str(e))
class CibBuilderTest(unittest.TestCase):
def setUp(self):
self.parser = rule.RuleParser()
self.builder = rule.CibBuilder()
def testSingleLiteralDatespec(self):
self.assertExpressionXml(
["date-spec", "hours=1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<date_expression id="location-dummy-rule-expr" operation="date_spec">
<date_spec hours="1" id="location-dummy-rule-expr-datespec"/>
</date_expression>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date-spec", "hours=1-14 monthdays=20-30 months=1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<date_expression id="location-dummy-rule-expr" operation="date_spec">
<date_spec hours="1-14" id="location-dummy-rule-expr-datespec" monthdays="20-30" months="1"/>
</date_expression>
</rule>
</rsc_location>
"""
)
def testSimpleExpression(self):
self.assertExpressionXml(
["#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["#uname", "ne", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["#uname", "gt", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="gt" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["#uname", "gte", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="gte" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["#uname", "lt", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="lt" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["#uname", "lte", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="lte" value="node1"/>
</rule>
</rsc_location>
"""
)
def testTypeExpression(self):
self.assertExpressionXml(
["#uname", "eq", "string", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" type="string" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["#uname", "eq", "integer", "12345"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" type="number" value="12345"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["#uname", "eq", "version", "1.2.3"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" type="version" value="1.2.3"/>
</rule>
</rsc_location>
"""
)
def testDefinedExpression(self):
self.assertExpressionXml(
["defined", "pingd"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="pingd" id="location-dummy-rule-expr" operation="defined"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["not_defined", "pingd"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="pingd" id="location-dummy-rule-expr" operation="not_defined"/>
</rule>
</rsc_location>
"""
)
def testDateExpression(self):
self.assertExpressionXml(
["date", "gt", "2014-06-26"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<date_expression id="location-dummy-rule-expr" operation="gt" start="2014-06-26"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date", "lt", "2014-06-26"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<date_expression end="2014-06-26" id="location-dummy-rule-expr" operation="lt"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date", "in_range", "2014-06-26", "to", "2014-07-26"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<date_expression end="2014-07-26" id="location-dummy-rule-expr" operation="in_range" start="2014-06-26"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date", "in_range", "2014-06-26", "to", "duration", "years=1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<date_expression id="location-dummy-rule-expr" operation="in_range" start="2014-06-26">
<duration id="location-dummy-rule-expr-duration" years="1"/>
</date_expression>
</rule>
</rsc_location>
"""
)
def testNotDateExpression(self):
self.assertExpressionXml(
["date", "eq", "2014-06-26"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="date" id="location-dummy-rule-expr" operation="eq" value="2014-06-26"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date", "gt", "string", "2014-06-26"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="date" id="location-dummy-rule-expr" operation="gt" type="string" value="2014-06-26"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date", "gt", "integer", "12345"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="date" id="location-dummy-rule-expr" operation="gt" type="number" value="12345"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date", "gt", "version", "1.2.3"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule">
<expression attribute="date" id="location-dummy-rule-expr" operation="gt" type="version" value="1.2.3"/>
</rule>
</rsc_location>
"""
)
def testAndOrExpression(self):
self.assertExpressionXml(
["#uname", "ne", "node1", "and", "#uname", "ne", "node2"],
"""
<rsc_location id="location-dummy">
<rule boolean-op="and" id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
<expression attribute="#uname" id="location-dummy-rule-expr-1" operation="ne" value="node2"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["#uname", "eq", "node1", "or", "#uname", "eq", "node2"],
"""
<rsc_location id="location-dummy">
<rule boolean-op="or" id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
<expression attribute="#uname" id="location-dummy-rule-expr-1" operation="eq" value="node2"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
[
"#uname", "ne", "node1",
"and", "#uname", "ne", "node2",
"and", "#uname", "ne", "node3"
],
"""
<rsc_location id="location-dummy">
<rule boolean-op="and" id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
<expression attribute="#uname" id="location-dummy-rule-expr-1" operation="ne" value="node2"/>
<expression attribute="#uname" id="location-dummy-rule-expr-2" operation="ne" value="node3"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
[
"#uname", "ne", "node1",
"and", "#uname", "ne", "node2",
"or", "#uname", "eq", "node3"
],
"""
<rsc_location id="location-dummy">
<rule boolean-op="or" id="location-dummy-rule">
<rule boolean-op="and" id="location-dummy-rule-rule">
<expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="ne" value="node1"/>
<expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="ne" value="node2"/>
</rule>
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node3"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
[
"#uname", "eq", "node1",
"or", "#uname", "eq", "node2",
"and", "#uname", "ne", "node3"
],
"""
<rsc_location id="location-dummy">
<rule boolean-op="and" id="location-dummy-rule">
<rule boolean-op="or" id="location-dummy-rule-rule">
<expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="eq" value="node1"/>
<expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="eq" value="node2"/>
</rule>
<expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node3"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["defined", "pingd", "and", "pingd", "lte", "1"],
"""
<rsc_location id="location-dummy">
<rule boolean-op="and" id="location-dummy-rule">
<expression attribute="pingd" id="location-dummy-rule-expr" operation="defined"/>
<expression attribute="pingd" id="location-dummy-rule-expr-1" operation="lte" value="1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["pingd", "gt", "1", "or", "not_defined", "pingd"],
"""
<rsc_location id="location-dummy">
<rule boolean-op="or" id="location-dummy-rule">
<expression attribute="pingd" id="location-dummy-rule-expr" operation="gt" value="1"/>
<expression attribute="pingd" id="location-dummy-rule-expr-1" operation="not_defined"/>
</rule>
</rsc_location>
"""
)
def testAndOrExpressionDateSpec(self):
self.assertExpressionXml(
["#uname", "ne", "node1", "and", "date-spec", "hours=1-12"],
"""
<rsc_location id="location-dummy">
<rule boolean-op="and" id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
<date_expression id="location-dummy-rule-expr-1" operation="date_spec">
<date_spec hours="1-12" id="location-dummy-rule-expr-1-datespec"/>
</date_expression>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date-spec", "monthdays=1-12", "or", "#uname", "ne", "node1"],
"""
<rsc_location id="location-dummy">
<rule boolean-op="or" id="location-dummy-rule">
<date_expression id="location-dummy-rule-expr" operation="date_spec">
<date_spec id="location-dummy-rule-expr-datespec" monthdays="1-12"/>
</date_expression>
<expression attribute="#uname" id="location-dummy-rule-expr-1" operation="ne" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["date-spec", "monthdays=1-10", "or", "date-spec", "monthdays=11-20"],
"""
<rsc_location id="location-dummy">
<rule boolean-op="or" id="location-dummy-rule">
<date_expression id="location-dummy-rule-expr" operation="date_spec">
<date_spec id="location-dummy-rule-expr-datespec" monthdays="1-10"/>
</date_expression>
<date_expression id="location-dummy-rule-expr-1" operation="date_spec">
<date_spec id="location-dummy-rule-expr-1-datespec" monthdays="11-20"/>
</date_expression>
</rule>
</rsc_location>
"""
)
def testParenthesizedExpression(self):
self.assertExpressionXml(
[
"(",
"#uname", "ne", "node1", "and", "#uname", "ne", "node2",
")",
"or", "#uname", "eq", "node3"
],
"""
<rsc_location id="location-dummy">
<rule boolean-op="or" id="location-dummy-rule">
<rule boolean-op="and" id="location-dummy-rule-rule">
<expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="ne" value="node1"/>
<expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="ne" value="node2"/>
</rule>
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node3"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
[
"#uname", "ne", "node1",
"and",
"(", "#uname", "ne", "node2", "or", "#uname", "eq", "node3", ")"
],
"""
<rsc_location id="location-dummy">
<rule boolean-op="and" id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
<rule boolean-op="or" id="location-dummy-rule-rule">
<expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="ne" value="node2"/>
<expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="eq" value="node3"/>
</rule>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
[
"(",
"#uname", "ne", "node1", "and", "#uname", "ne", "node2",
")",
"or",
"(",
"#uname", "ne", "node3", "and", "#uname", "ne", "node4",
")",
],
"""
<rsc_location id="location-dummy">
<rule boolean-op="or" id="location-dummy-rule">
<rule boolean-op="and" id="location-dummy-rule-rule">
<expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="ne" value="node1"/>
<expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="ne" value="node2"/>
</rule>
<rule boolean-op="and" id="location-dummy-rule-rule-1">
<expression attribute="#uname" id="location-dummy-rule-rule-1-expr" operation="ne" value="node3"/>
<expression attribute="#uname" id="location-dummy-rule-rule-1-expr-1" operation="ne" value="node4"/>
</rule>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
[
"(",
"#uname", "ne", "node1", "and", "#uname", "ne", "node2",
")",
"and",
"(",
"#uname", "ne", "node3", "and", "#uname", "ne", "node4",
")",
],
"""
<rsc_location id="location-dummy">
<rule boolean-op="and" id="location-dummy-rule">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
<expression attribute="#uname" id="location-dummy-rule-expr-1" operation="ne" value="node2"/>
<expression attribute="#uname" id="location-dummy-rule-expr-2" operation="ne" value="node3"/>
<expression attribute="#uname" id="location-dummy-rule-expr-3" operation="ne" value="node4"/>
</rule>
</rsc_location>
"""
)
def assertExpressionXml(self, rule_expression, rule_xml):
cib_dom = xml.dom.minidom.parse("empty.xml")
constraints = cib_dom.getElementsByTagName("constraints")[0]
constraint_el = constraints.appendChild(
cib_dom.createElement("rsc_location")
)
constraint_el.setAttribute("id", "location-dummy")
ac(
self.builder.build(
constraint_el,
self.parser.parse(rule_expression)
).parentNode.toprettyxml(indent=" "),
rule_xml.lstrip().rstrip(" ")
)
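# In short, the TokenPreprocessor tested below joins the key=value tokens that follow
# 'date-spec' or 'duration' into a single space-separated token (stopping at the first
# token that is not key=value), drops 'operation=date_spec' inside such a run, rewrites
# the legacy "date start=... end=... <op>" form into the current syntax when it is valid,
# and splits '(' and ')' characters out into separate tokens.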
class TokenPreprocessorTest(unittest.TestCase):
def setUp(self):
self.preprocessor = rule.TokenPreprocessor()
def testNoChanges(self):
self.assertEquals([], self.preprocessor.run([]))
self.assertEquals(
["#uname", "eq", "node1"],
self.preprocessor.run(["#uname", "eq", "node1"])
)
def testDateSpec(self):
self.assertEquals(
["date-spec"],
self.preprocessor.run(["date-spec"])
)
self.assertEquals(
["date-spec", "hours=14"],
self.preprocessor.run(["date-spec", "hours=14"])
)
self.assertEquals(
["date-spec", "hours weeks=6 months= moon=1"],
self.preprocessor.run(
["date-spec", "hours", "weeks=6", "months=", "moon=1"]
)
)
self.assertEquals(
["date-spec", "foo", "hours=14"],
self.preprocessor.run(["date-spec", "foo", "hours=14"])
)
self.assertEquals(
["date-spec", "hours=14", "foo", "hours=14"],
self.preprocessor.run(["date-spec", "hours=14", "foo", "hours=14"])
)
self.assertEquals(
[
"date-spec",
"hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 "
"weeks=6 years=7 weekyears=8 moon=9"
],
self.preprocessor.run([
"date-spec",
"hours=1", "monthdays=2", "weekdays=3", "yeardays=4",
"months=5","weeks=6", "years=7", "weekyears=8", "moon=9"
])
)
self.assertEquals(
["#uname", "eq", "node1", "or", "date-spec", "hours=14"],
self.preprocessor.run([
"#uname", "eq", "node1", "or", "date-spec", "hours=14"
])
)
self.assertEquals(
["date-spec", "hours=14", "or", "#uname", "eq", "node1"],
self.preprocessor.run([
"date-spec", "hours=14", "or", "#uname", "eq", "node1",
])
)
def testDuration(self):
self.assertEquals(
["duration"],
self.preprocessor.run(["duration"])
)
self.assertEquals(
["duration", "hours=14"],
self.preprocessor.run(["duration", "hours=14"])
)
self.assertEquals(
["duration", "hours weeks=6 months= moon=1"],
self.preprocessor.run(
["duration", "hours", "weeks=6", "months=", "moon=1"]
)
)
self.assertEquals(
["duration", "foo", "hours=14"],
self.preprocessor.run(["duration", "foo", "hours=14"])
)
self.assertEquals(
["duration", "hours=14", "foo", "hours=14"],
self.preprocessor.run(["duration", "hours=14", "foo", "hours=14"])
)
self.assertEquals(
[
"duration",
"hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 "
"weeks=6 years=7 weekyears=8 moon=9"
],
self.preprocessor.run([
"duration",
"hours=1", "monthdays=2", "weekdays=3", "yeardays=4",
"months=5","weeks=6", "years=7", "weekyears=8", "moon=9"
])
)
self.assertEquals(
["#uname", "eq", "node1", "or", "duration", "hours=14"],
self.preprocessor.run([
"#uname", "eq", "node1", "or", "duration", "hours=14"
])
)
self.assertEquals(
["duration", "hours=14", "or", "#uname", "eq", "node1"],
self.preprocessor.run([
"duration", "hours=14", "or", "#uname", "eq", "node1",
])
)
def testOperationDatespec(self):
self.assertEquals(
["date-spec", "weeks=6 moon=1"],
self.preprocessor.run(
["date-spec", "operation=date_spec", "weeks=6", "moon=1"]
)
)
self.assertEquals(
["date-spec", "weeks=6 moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "operation=date_spec", "moon=1"]
)
)
self.assertEquals(
["date-spec", "weeks=6", "foo", "moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "operation=date_spec", "foo", "moon=1"]
)
)
self.assertEquals(
["date-spec", "weeks=6", "foo", "operation=date_spec", "moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "foo", "operation=date_spec", "moon=1"]
)
)
self.assertEquals(
["date-spec", "weeks=6 moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "moon=1", "operation=date_spec"]
)
)
self.assertEquals(
["date-spec", "weeks=6 moon=1", "foo"],
self.preprocessor.run(
["date-spec", "weeks=6", "moon=1", "operation=date_spec", "foo"]
)
)
self.assertEquals(
["date-spec"],
self.preprocessor.run(
["date-spec", "operation=date_spec"]
)
)
self.assertEquals(
["date-spec", "weeks=6", "operation=foo", "moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "operation=foo", "moon=1"]
)
)
def testDateLegacySyntax(self):
# valid syntax
self.assertEquals(
["date", "gt", "2014-06-26"],
self.preprocessor.run([
"date", "start=2014-06-26", "gt"
])
)
self.assertEquals(
["date", "lt", "2014-06-26"],
self.preprocessor.run([
"date", "end=2014-06-26", "lt"
])
)
self.assertEquals(
["date", "in_range", "2014-06-26", "to", "2014-07-26"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26", "in_range"
])
)
self.assertEquals(
["date", "in_range", "2014-06-26", "to", "2014-07-26"],
self.preprocessor.run([
"date", "end=2014-07-26", "start=2014-06-26", "in_range"
])
)
self.assertEquals(
["date", "gt", "2014-06-26", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "gt", "foo"
])
)
self.assertEquals(
["date", "lt", "2014-06-26", "foo"],
self.preprocessor.run([
"date", "end=2014-06-26", "lt", "foo"
])
)
self.assertEquals(
["date", "in_range", "2014-06-26", "to", "2014-07-26", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26", "in_range", "foo"
])
)
self.assertEquals(
["date", "in_range", "2014-06-26", "to", "2014-07-26", "foo"],
self.preprocessor.run([
"date", "end=2014-07-26", "start=2014-06-26", "in_range", "foo"
])
)
# invalid syntax - no change
self.assertEquals(
["date"],
self.preprocessor.run([
"date"
])
)
self.assertEquals(
["date", "start=2014-06-26"],
self.preprocessor.run([
"date", "start=2014-06-26"
])
)
self.assertEquals(
["date", "end=2014-06-26"],
self.preprocessor.run([
"date", "end=2014-06-26"
])
)
self.assertEquals(
["date", "start=2014-06-26", "end=2014-07-26"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26"
])
)
self.assertEquals(
["date", "start=2014-06-26", "end=2014-07-26", "lt"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26", "lt"
])
)
self.assertEquals(
["date", "start=2014-06-26", "lt", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "lt", "foo"
])
)
self.assertEquals(
["date", "start=2014-06-26", "end=2014-07-26", "gt", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26", "gt", "foo"
])
)
self.assertEquals(
["date", "end=2014-06-26", "gt"],
self.preprocessor.run([
"date", "end=2014-06-26", "gt"
])
)
self.assertEquals(
["date", "start=2014-06-26", "in_range", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "in_range", "foo"
])
)
self.assertEquals(
["date", "end=2014-07-26", "in_range"],
self.preprocessor.run([
"date", "end=2014-07-26", "in_range"
])
)
self.assertEquals(
["foo", "start=2014-06-26", "gt"],
self.preprocessor.run([
"foo", "start=2014-06-26", "gt"
])
)
self.assertEquals(
["foo", "end=2014-06-26", "lt"],
self.preprocessor.run([
"foo", "end=2014-06-26", "lt"
])
)
def testParenthesis(self):
self.assertEquals(
["("],
self.preprocessor.run(["("])
)
self.assertEquals(
[")"],
self.preprocessor.run([")"])
)
self.assertEquals(
["(", "(", ")", ")"],
self.preprocessor.run(["(", "(", ")", ")"])
)
self.assertEquals(
["(", "(", ")", ")"],
self.preprocessor.run(["(())"])
)
self.assertEquals(
["a", "(", "b", ")", "c"],
self.preprocessor.run(["a", "(", "b", ")", "c"])
)
self.assertEquals(
["a", "(", "b", "c", ")", "d"],
self.preprocessor.run(["a", "(", "b", "c", ")", "d"])
)
self.assertEquals(
["a", ")", "b", "(", "c"],
self.preprocessor.run(["a", ")", "b", "(", "c"])
)
self.assertEquals(
["a", "(", "b", ")", "c"],
self.preprocessor.run(["a", "(b)", "c"])
)
self.assertEquals(
["a", "(", "b", ")", "c"],
self.preprocessor.run(["a(", "b", ")c"])
)
self.assertEquals(
["a", "(", "b", ")", "c"],
self.preprocessor.run(["a(b)c"])
)
self.assertEquals(
["aA", "(", "bB", ")", "cC"],
self.preprocessor.run(["aA(bB)cC"])
)
self.assertEquals(
["(", "aA", "(", "bB", ")", "cC", ")"],
self.preprocessor.run(["(aA(bB)cC)"])
)
self.assertEquals(
["(", "aA", "(", "(", "bB", ")", "cC", ")"],
self.preprocessor.run(["(aA(", "(bB)cC)"])
)
self.assertEquals(
["(", "aA", "(", "(", "(", "bB", ")", "cC", ")"],
self.preprocessor.run(["(aA(", "(", "(bB)cC)"])
)
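# ExportAsExpression, tested below, renders a rule XML element back into pcs rule
# expression text; with normalize=True the sub-expressions are reordered and implicit
# string types are written out explicitly, as the paired expected strings in
# test_success show.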
class ExportAsExpressionTest(unittest.TestCase):
def test_success(self):
self.assertXmlExport(
"""
<rule id="location-dummy-rule" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr"
operation="eq" value="node1"/>
</rule>
""",
"#uname eq node1",
"#uname eq string node1"
)
self.assertXmlExport(
"""
<rule id="location-dummy-rule" score="INFINITY">
<expression attribute="foo" id="location-dummy-rule-expr"
operation="gt" type="version" value="1.2.3"/>
</rule>
""",
"foo gt version 1.2.3",
"foo gt version 1.2.3"
)
self.assertXmlExport(
"""
<rule boolean-op="or" id="complexRule" score="INFINITY">
<rule boolean-op="and" id="complexRule-rule-1" score="0">
<date_expression id="complexRule-rule-1-expr" operation="date_spec">
<date_spec id="complexRule-rule-1-expr-datespec" weekdays="1-5" hours="12-23"/>
</date_expression>
<date_expression id="complexRule-rule-1-expr-1" operation="in_range" start="2014-07-26">
<duration id="complexRule-rule-1-expr-1-duration" months="1"/>
</date_expression>
</rule>
<rule boolean-op="and" id="complexRule-rule" score="0">
<expression attribute="foo" id="complexRule-rule-expr-1" operation="gt" type="version" value="1.2"/>
<expression attribute="#uname" id="complexRule-rule-expr" operation="eq" value="node3 4"/>
</rule>
</rule>
""",
"(date-spec hours=12-23 weekdays=1-5 and date in_range 2014-07-26 to duration months=1) or (foo gt version 1.2 and #uname eq \"node3 4\")",
"(#uname eq string \"node3 4\" and foo gt version 1.2) or (date in_range 2014-07-26 to duration months=1 and date-spec hours=12-23 weekdays=1-5)"
)
def assertXmlExport(self, rule_xml, export, export_normalized):
ac(
export + "\n",
rule.ExportAsExpression().get_string(
xml.dom.minidom.parseString(rule_xml).documentElement,
normalize=False
) + "\n"
)
ac(
export_normalized + "\n",
rule.ExportAsExpression().get_string(
xml.dom.minidom.parseString(rule_xml).documentElement,
normalize=True
) + "\n"
)
class DomRuleAddTest(unittest.TestCase):
def setUp(self):
shutil.copy(empty_cib, temp_cib)
output, returnVal = pcs(temp_cib, "resource create dummy1 Dummy")
assert returnVal == 0 and output == ""
def test_success_xml(self):
self.assertExpressionXml(
["#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["id=myRule", "#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="myRule" score="INFINITY">
<expression attribute="#uname" id="myRule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["score=INFINITY", "#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["score=100", "#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" score="100">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["score-attribute=pingd", "#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" score-attribute="pingd">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["role=master", "#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" role="master" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["role=slave", "#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" role="slave" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
self.assertExpressionXml(
["score=100", "id=myRule", "role=master", "#uname", "eq", "node1"],
"""
<rsc_location id="location-dummy">
<rule id="myRule" role="master" score="100">
<expression attribute="#uname" id="myRule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
)
def test_success(self):
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule #uname eq node1"
)
ac(output, "")
self.assertEquals(0, returnVal)
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule id=MyRule score=100 role=master #uname eq node2"
)
ac(output, "")
self.assertEquals(0, returnVal)
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule id=complexRule (#uname eq node3 and foo gt version 1.2) or (date-spec hours=12-23 weekdays=1-5 and date in_range 2014-07-26 to duration months=1)"
)
ac(output, "")
self.assertEquals(0, returnVal)
output, returnVal = pcs(temp_cib, "constraint location show --full")
ac(output, """\
Location Constraints:
Resource: dummy1
Constraint: location-dummy1
Rule: score=INFINITY (id:location-dummy1-rule)
Expression: #uname eq node1 (id:location-dummy1-rule-expr)
Constraint: location-dummy1-1
Rule: score=100 role=master (id:MyRule)
Expression: #uname eq node2 (id:MyRule-expr)
Constraint: location-dummy1-2
Rule: score=INFINITY boolean-op=or (id:complexRule)
Rule: score=0 boolean-op=and (id:complexRule-rule)
Expression: #uname eq node3 (id:complexRule-rule-expr)
Expression: foo gt version 1.2 (id:complexRule-rule-expr-1)
Rule: score=0 boolean-op=and (id:complexRule-rule-1)
Expression: (id:complexRule-rule-1-expr)
Date Spec: hours=12-23 weekdays=1-5 (id:complexRule-rule-1-expr-datespec)
Expression: date in_range 2014-07-26 to duration (id:complexRule-rule-1-expr-1)
Duration: months=1 (id:complexRule-rule-1-expr-1-duration)
""")
self.assertEquals(0, returnVal)
output, returnVal = pcs(temp_cib, "constraint location show")
ac(output, """\
Location Constraints:
Resource: dummy1
Constraint: location-dummy1
Rule: score=INFINITY
Expression: #uname eq node1
Constraint: location-dummy1-1
Rule: score=100 role=master
Expression: #uname eq node2
Constraint: location-dummy1-2
Rule: score=INFINITY boolean-op=or
Rule: score=0 boolean-op=and
Expression: #uname eq node3
Expression: foo gt version 1.2
Rule: score=0 boolean-op=and
Expression:
Date Spec: hours=12-23 weekdays=1-5
Expression: date in_range 2014-07-26 to duration
Duration: months=1
""")
self.assertEquals(0, returnVal)
def test_invalid_score(self):
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule score=pingd defined pingd"
)
ac(
output,
"Warning: invalid score 'pingd', setting score-attribute=pingd "
"instead\n"
)
self.assertEquals(0, returnVal)
output, returnVal = pcs(temp_cib, "constraint location show --full")
ac(output, """\
Location Constraints:
Resource: dummy1
Constraint: location-dummy1
Rule: score-attribute=pingd (id:location-dummy1-rule)
Expression: defined pingd (id:location-dummy1-rule-expr)
""")
self.assertEquals(0, returnVal)
def test_invalid_rule(self):
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule score=100"
)
ac(output, "Error: no rule expression was specified\n")
self.assertEquals(1, returnVal)
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule #uname eq"
)
ac(
output,
"Error: '#uname eq' is not a valid rule expression: unexpected end "
"of rule\n"
)
self.assertEquals(1, returnVal)
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule string #uname eq node1"
)
ac(
output,
"Error: 'string #uname eq node1' is not a valid rule expression: "
"unexpected 'string' before 'eq'\n"
)
self.assertEquals(1, returnVal)
def test_invalid_options(self):
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule role=foo #uname eq node1"
)
ac(output, "Error: invalid role 'foo', use 'master' or 'slave'\n")
self.assertEquals(1, returnVal)
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule score=100 score-attribute=pingd #uname eq node1"
)
ac(output, "Error: can not specify both score and score-attribute\n")
self.assertEquals(1, returnVal)
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule id=1foo #uname eq node1"
)
ac(
output,
"Error: invalid rule id '1foo', '1' is not a valid first character "
"for a rule id\n"
)
self.assertEquals(1, returnVal)
output, returnVal = pcs(temp_cib, "constraint location show --full")
ac(output, "Location Constraints:\n")
self.assertEquals(0, returnVal)
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule id=MyRule #uname eq node1"
)
ac(output, "")
self.assertEquals(0, returnVal)
output, returnVal = pcs(temp_cib, "constraint location show --full")
ac(output, """\
Location Constraints:
Resource: dummy1
Constraint: location-dummy1
Rule: score=INFINITY (id:MyRule)
Expression: #uname eq node1 (id:MyRule-expr)
""")
self.assertEquals(0, returnVal)
output, returnVal = pcs(
temp_cib,
"constraint location dummy1 rule id=MyRule #uname eq node1"
)
ac(
output,
"Error: id 'MyRule' is already in use, please specify another one\n"
)
self.assertEquals(1, returnVal)
def assertExpressionXml(self, rule_expression, rule_xml):
cib_dom = xml.dom.minidom.parse("empty.xml")
constraints = cib_dom.getElementsByTagName("constraints")[0]
constraint_el = constraints.appendChild(
cib_dom.createElement("rsc_location")
)
constraint_el.setAttribute("id", "location-dummy")
options, rule_argv = rule.parse_argv(rule_expression)
rule.dom_rule_add(constraint_el, options, rule_argv)
ac(
constraint_el.toprettyxml(indent=" "),
rule_xml.lstrip().rstrip(" ")
)
if __name__ == "__main__":
unittest.main()
| tradej/pcs | pcs/test/test_rule.py | Python | gpl-2.0 | 67,998 |
"""
.. module:: poes
:synopsis: A module for reading, writing, and storing poes Data
.. moduleauthor:: AJ, 20130129
*********************
**Module**: gme.sat.poes
*********************
**Classes**:
* :class:`poesRec`
**Functions**:
* :func:`readPoes`
* :func:`readPoesFtp`
* :func:`mapPoesMongo`
* :func:`overlayPoesTed`
"""
from davitpy.gme.base.gmeBase import gmeData
class poesRec(gmeData):
"""a class to represent a record of poes data. Extends :class:`gmeBase.gmeData`. Insight on the class members can be obtained from `the NOAA NGDC site <ftp://satdat.ngdc.noaa.gov/sem/poes/data/readme.txt>`_. Note that Poes data is available from 1998-present day (or whatever the latest NOAA has uploaded is). **The data are the 16-second averages**
**Members**:
* **time** (`datetime <http://tinyurl.com/bl352yx>`_): an object identifying which time these data are for
* **info** (str): information about where the data come from. *Please be courteous and give credit to data providers when credit is due.*
* **dataSet** (str): the name of the data set
* **satnum** (int): the noaa satellite number
* **sslat** (float): Geographic Latitude of sub-satellite point, degrees
* **sslon** (float): Geographic Longitude of sub-satellite point, degrees
* **folat** (float): Geographic Latitude of foot-of-field-line, degrees
* **folon** (float): Geographic Longitude of foot-of-field-line, degrees
* **lval** (float): L-value
* **mlt** (float): Magnetic local time of foot-of-field-line, degrees
* **pas0** (float): MEPED-0 pitch angle at satellite, degrees
* **pas90** (float): MEPED-90 pitch angle at satellite, degrees
* **mep0e1** (float): MEPED-0 > 30 keV electrons, counts/sec
* **mep0e2** (float): MEPED-0 > 100 keV electrons, counts/sec
* **mep0e3** (float): MEPED-0 > 300 keV electrons, counts/sec
* **mep0p1** (float): MEPED-0 30 keV to 80 keV protons, counts/sec
* **mep0p2** (float): MEPED-0 80 keV to 240 keV protons, counts/sec
* **mep0p3** (float): MEPED-0 240 keV to 800 keV protons, counts/sec
* **mep0p4** (float): MEPED-0 800 keV to 2500 keV protons, counts/sec
* **mep0p5** (float): MEPED-0 2500 keV to 6900 keV protons, counts/sec
* **mep0p6** (float): MEPED-0 > 6900 keV protons, counts/sec
* **mep90e1** (float): MEPED-90 > 30 keV electrons, counts/sec
* **mep90e2** (float): MEPED-90 > 100 keV electrons, counts/sec
* **mep90e3** (float): MEPED-90 > 300 keV electrons, counts/sec
* **mep90p1** (float): MEPED-90 30 keV to 80 keV protons, counts/sec
* **mep90p2** (float): MEPED-90 80 keV to 240 keV protons, counts/sec
* **mep90p3** (float): MEPED-90 240 keV to 800 keV protons, counts/sec
* **mep90p4** (float): MEPED-90 800 keV to 2500 keV protons, counts/sec
* **mep90p5** (float): MEPED-90 2500 keV to 6900 keV protons, counts/sec
* **mep90p6** (float): MEPED-90 > 6900 keV protons, counts/sec
* **mepomp6** (float): MEPED omni-directional > 16 MeV protons, counts/sec
* **mepomp7** (float): MEPED omni-directional > 36 MeV protons, counts/sec
* **mepomp8** (float): MEPED omni-directional > 70 MeV protons, counts/sec
* **mepomp9** (float): MEPED omni-directional >= 140 MeV protons
* **ted** (float): TED, Total Energy Detector Average, ergs/cm2/sec
* **echar** (float): TED characteristic energy of electrons, eV
* **pchar** (float): TED characteristic energy of protons, eV
* **econtr** (float): TED electron contribution, Electron Energy/Total Energy
.. note::
If any of the members have a value of None, this means that they could not be read for that specific time
**Methods**:
* :func:`parseFtp`
**Example**:
::
emptyPoesObj = gme.sat.poesRec()
written by AJ, 20130131
"""
def parseFtp(self,line, header):
"""This method is used to convert a line of poes data read from the NOAA NGDC FTP site into a :class:`poesRec` object.
.. note::
In general, users will not need to worry about this.
**Belongs to**: :class:`poesRec`
**Args**:
* **line** (str): the ASCII data line from the FTP server
* **header** (str): the ASCII header line from the same file, used to map column names to attribute names
**Returns**:
* Nothing.
**Example**:
::
myPoesObj.parseFtp(ftpLine)
written by AJ, 20130131
"""
import datetime as dt
#split the line into cols
cols = line.split()
head = header.split()
self.time = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]), int(cols[3]),int(cols[4]), \
int(float(cols[5])),int(round((float(cols[5])-int(float(cols[5])))*1e6)))
for key in self.__dict__.iterkeys():
if(key == 'dataSet' or key == 'info' or key == 'satnum' or key == 'time'): continue
try: ind = head.index(key)
except Exception,e:
print e
print 'problem setting attribute',key; continue  #skip keys missing from the header instead of reusing a stale column index
#check for a good value
if(float(cols[ind]) != -999.): setattr(self,key,float(cols[ind]))
def __init__(self, ftpLine=None, dbDict=None, satnum=None, header=None):
"""the intialization fucntion for a :class:`omniRec` object.
.. note::
In general, users will not need to worry about this.
**Belongs to**: :class:`poesRec`
**Args**:
* [**ftpLine**] (str): an ASCII line from the FTP server. if this is provided, the object is initialized from it. header must be provided in conjunction with this. default=None
* [**header**] (str): the header from the ASCII FTP file. default=None
* [**dbDict**] (dict): a dictionary read from the mongodb. if this is provided, the object is initialized from it. default = None
* [**satnum**] (int): the satellite number. default=None
**Returns**:
* Nothing.
**Example**:
::
myPoesObj = poesRec(ftpLine=aftpLine)
written by AJ, 20130131
"""
#note about where data came from
self.dataSet = 'Poes'
self.info = 'These data were downloaded from NOAA NGDC. *Please be courteous and give credit to data providers when credit is due.*'
self.satnum = satnum
self.sslat = None
self.sslon = None
self.folat = None
self.folon = None
self.lval = None
self.mlt = None
self.pas0 = None
self.pas90 = None
self.mep0e1 = None
self.mep0e2 = None
self.mep0e3 = None
self.mep0p1 = None
self.mep0p2 = None
self.mep0p3 = None
self.mep0p4 = None
self.mep0p5 = None
self.mep0p6 = None
self.mep90e1 = None
self.mep90e2 = None
self.mep90e3 = None
self.mep90p1 = None
self.mep90p2 = None
self.mep90p3 = None
self.mep90p4 = None
self.mep90p5 = None
self.mep90p6 = None
self.mepomp6 = None
self.mepomp7 = None
self.mepomp8 = None
self.mepomp9 = None
self.ted = None
self.echar = None
self.pchar = None
self.econtr = None
#if we're initializing from an object, do it!
if(ftpLine != None): self.parseFtp(ftpLine,header)
if(dbDict != None): self.parseDb(dbDict)
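# Illustrative note on poesRec.parseFtp above: the timestamp is built from the first
# six whitespace-separated columns, and every other member (sslat, folat, ted, ...)
# is read from the column whose header name matches the attribute name; a value of
# -999. in the file is treated as missing and the member is left as None.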
def readPoes(sTime,eTime=None,satnum=None,folat=None,folon=None,ted=None,echar=None,pchar=None):
"""This function reads poes data. First, it will try to get it from the mongodb, and if it can't find it, it will look on the NOAA NGDC FTP server using :func:`readPoesFtp`. The data are 16-second averages
**Args**:
* **sTime** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the earliest time you want data for
* [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, end Time will be 1 day after sTime. default = None
* [**satnum**] (int): the satellite you want data for. eg 17 for noaa17. if this is None, data for all satellites will be returned. default = None
* [**folat**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with folat values in the range [a,b] will be returned. default = None
* [**folon**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with folon values in the range [a,b] will be returned. default = None
* [**ted**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with ted values in the range [a,b] will be returned. default = None
* [**echar**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with echar values in the range [a,b] will be returned. default = None
* [**pchar**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with pchar values in the range [a,b] will be returned. default = None
**Returns**:
* **poesList** (list or None): if data is found, a list of :class:`poesRec` objects matching the input parameters is returned. If no data is found, None is returned.
**Example**:
::
import datetime as dt
poesList = gme.sat.readPoes(sTime=dt.datetime(2011,1,1),eTime=dt.datetime(2011,6,1),folat=[60,80])
written by AJ, 20130131
"""
import datetime as dt
import davitpy.pydarn.sdio.dbUtils as db
#check all the inputs for validity
assert(isinstance(sTime,dt.datetime)), \
'error, sTime must be a datetime object'
assert(eTime == None or isinstance(eTime,dt.datetime)), \
'error, eTime must be either None or a datetime object'
assert(satnum == None or isinstance(satnum,int)), 'error, satnum must be an int'
var = locals()
for name in ['folat','folon','ted','echar','pchar']:
assert(var[name] == None or (isinstance(var[name],list) and \
isinstance(var[name][0],(int,float)) and isinstance(var[name][1],(int,float)))), \
'error, '+name+' must be None or a list of 2 numbers'
if(eTime == None): eTime = sTime+dt.timedelta(days=1)
qryList = []
#if arguments are provided, query for those
qryList.append({'time':{'$gte':sTime}})
if(eTime != None): qryList.append({'time':{'$lte':eTime}})
if(satnum != None): qryList.append({'satnum':satnum})
var = locals()
for name in ['folat','folon','ted','echar','pchar']:
if(var[name] != None):
qryList.append({name:{'$gte':min(var[name])}})
qryList.append({name:{'$lte':max(var[name])}})
#construct the final query definition
qryDict = {'$and': qryList}
#connect to the database
poesData = db.getDataConn(dbName='gme',collName='poes')
#do the query
if(qryList != []): qry = poesData.find(qryDict)
else: qry = poesData.find()
if(qry.count() > 0):
poesList = []
for rec in qry.sort('time'):
poesList.append(poesRec(dbDict=rec))
print '\nreturning a list with',len(poesList),'records of poes data'
return poesList
#if we didn't find anything on the mongodb
else:
print '\ncould not find requested data in the mongodb'
return None
#print 'we will look on the ftp server, but your conditions will be (mostly) ignored'
##read from ftp server
#poesList = readPoesFtp(sTime, eTime)
#if(poesList != None):
#print '\nreturning a list with',len(poesList),'recs of poes data'
#return poesList
#else:
#print '\n no data found on FTP server, returning None...'
#return None
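# Example of the query readPoes builds above (illustrative):
# readPoes(dt.datetime(2011,1,1), folat=[60, 80]) queries the 'poes' collection of the
# 'gme' database with roughly
#   {'$and': [{'time': {'$gte': sTime}}, {'time': {'$lte': sTime + 1 day}},
#             {'folat': {'$gte': 60}}, {'folat': {'$lte': 80}}]}
# and returns the matching records sorted by time.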
def readPoesFtp(sTime,eTime=None):
"""This function reads poes data from the NOAA NGDC server via anonymous FTP connection.
.. warning::
You should not use this. Use the general function :func:`readPoes` instead.
**Args**:
* **sTime** (`datetime <http://tinyurl.com/bl352yx>`_): the earliest time you want data for
* [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, eTime will be 1 day after sTime. default = None
**Returns**:
* **poesList** (list or None): if data is found, a list of :class:`poesRec` objects matching the input parameters is returned. If no data is found, None is returned.
**Example**:
::
import datetime as dt
poesList = gme.sat.readpoesFtp(dt.datetime(2011,1,1,1,50),eTime=dt.datetime(2011,1,1,10,0))
written by AJ, 20130128
"""
from ftplib import FTP
import datetime as dt
assert(isinstance(sTime,dt.datetime)),'error, sTime must be datetime'
if(eTime == None): eTime=sTime+dt.timedelta(days=1)
assert(isinstance(eTime,dt.datetime)),'error, eTime must be datetime'
assert(eTime >= sTime), 'error, end time greater than start time'
#connect to the server
try: ftp = FTP('satdat.ngdc.noaa.gov')
except Exception,e:
print e
print 'problem connecting to NOAA server'
return None
#login as anonymous
try: l=ftp.login()
except Exception,e:
print e
print 'problem logging in to NOAA server'
return None
myPoes = []
#get the poes data
myTime = dt.datetime(sTime.year,sTime.month,sTime.day)
while(myTime <= eTime):
#go to the data directory
try: ftp.cwd('/sem/poes/data/avg/txt/'+str(myTime.year))
except Exception,e:
print e
print 'error getting to data directory'
return None
#list directory contents
dirlist = ftp.nlst()
for dire in dirlist:
#check for satellite directory
if(dire.find('noaa') == -1): continue
satnum = dire.replace('noaa','')
#change to file directory
ftp.cwd('/sem/poes/data/avg/txt/'+str(myTime.year)+'/'+dire)
fname = 'poes_n'+satnum+'_'+myTime.strftime("%Y%m%d")+'.txt'
print 'poes: RETR '+fname
#list to hold the lines
lines = []
#get the data
try: ftp.retrlines('RETR '+fname,lines.append)
except Exception,e:
print e
print 'error retrieving',fname
#convert the ascii lines into a list of poesRec objects
#skip first (header) line
for line in lines[1:]:
cols = line.split()
t = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]), int(cols[3]),int(cols[4]))
if(sTime <= t <= eTime):
myPoes.append(poesRec(ftpLine=line,satnum=int(satnum),header=lines[0]))
#increment myTime
myTime += dt.timedelta(days=1)
if(len(myPoes) > 0): return myPoes
else: return None
def mapPoesMongo(sYear,eYear=None):
"""This function reads poes data from the NOAA NGDC FTP server via anonymous FTP connection and maps it to the mongodb.
.. warning::
In general, nobody except the database admins will need to use this function
**Args**:
* **sYear** (int): the year to begin mapping data
* [**eYear**] (int or None): the end year for mapping data. if this is None, eYear will be sYear
**Returns**:
* Nothing.
**Example**:
::
gme.sat.mapPoesMongo(2004)
written by AJ, 20130131
"""
import davitpy.pydarn.sdio.dbUtils as db
from davitpy import rcParams
import datetime as dt
#check inputs
assert(isinstance(sYear,int)),'error, sYear must be int'
if(eYear == None): eYear=sYear
assert(isinstance(eYear,int)),'error, eYear must be None or an int'
assert(eYear >= sYear), 'error, end year must not be before start year'
#get data connection
mongoData = db.getDataConn(username=rcParams['DBWRITEUSER'],password=rcParams['DBWRITEPASS'],\
dbAddress=rcParams['SDDB'],dbName='gme',collName='poes')
#set up all of the indices
mongoData.ensure_index('time')
mongoData.ensure_index('satnum')
mongoData.ensure_index('folat')
mongoData.ensure_index('folon')
mongoData.ensure_index('ted')
mongoData.ensure_index('echar')
mongoData.ensure_index('pchar')
#read the poes data from the FTP server
myTime = dt.datetime(sYear,1,1)
while(myTime < dt.datetime(eYear+1,1,1)):
#10 day at a time, to not fill up RAM
templist = readPoesFtp(myTime,myTime+dt.timedelta(days=10))
if(templist == None): myTime += dt.timedelta(days=10); continue  # still advance the time window when nothing is found
for rec in templist:
#check if a duplicate record exists
qry = mongoData.find({'$and':[{'time':rec.time},{'satnum':rec.satnum}]})
print rec.time, rec.satnum
tempRec = rec.toDbDict()
cnt = qry.count()
#if this is a new record, insert it
if(cnt == 0): mongoData.insert(tempRec)
#if this is an existing record, update it
elif(cnt == 1):
print 'foundone!!'
dbDict = qry.next()
temp = dbDict['_id']
dbDict = tempRec
dbDict['_id'] = temp
mongoData.save(dbDict)
else:
print 'strange, there is more than 1 record for',rec.time
del templist
myTime += dt.timedelta(days=10)
def overlayPoesTed( baseMapObj, axisHandle, startTime, endTime = None, coords = 'geo', \
hemi = 1, folat = [45., 90.], satNum = None, param='ted', scMin=-3.,scMax=0.5) :
"""This function overlays POES TED data onto a map object.
**Args**:
* **baseMapObj**: the basemap object you want data to be overlaid on.
* **axisHandle**: the axis handle used for the annotations.
* **startTime** (`datetime <http://tinyurl.com/bl352yx>`_): the start time you want data for. If endTime is not given, data from satellites within +/- 45 min of startTime is overlaid.
* [**endTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. If this is None, data from satellites within +/- 45 min of startTime is overlaid. default = None
* [**satNum**] (list or None): the satellites you want data for, e.g. [17] for noaa17. If this is None, data for all available satellites is plotted. default = None
* [**coords**] (str): coordinates of the map object on which you want data to be overlaid: 'geo', 'mag' or 'mlt'. default = 'geo'
* [**hemi**] (int): hemisphere of the map object. Value is 1 for the northern hemisphere and -1 for the southern hemisphere. default = 1
* [**folat**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with latitude values in the range [a,b] will be used. default = [45., 90.]
* [**param**] (str): the name of the poes parameter to be plotted. default='ted'
**Returns**:
POES TED data is overlayed on the map object. If no data is found, None is returned.
**Example**:
::
import datetime as dt
pcoll = gme.sat.overlayPoesTed(myMapObj, myAx, dt.datetime(2011,3,4,4))
written by Bharat Kunduri, 20130216
"""
import utils
import matplotlib as mp
import datetime
import numpy
import matplotlib.pyplot as plt
import gme.sat.poes as Poes
import math
import models
import matplotlib.cm as cm
from scipy import optimize
#check all the inputs for validity
assert(isinstance(startTime,datetime.datetime)), \
'error, sTime must be a datetime object'
assert(endTime == None or isinstance(endTime,datetime.datetime)), \
'error, eTime must be either None or a datetime object'
var = locals()
assert(var['satNum'] == None or (isinstance(var['satNum'],list) )), \
'error, satNum must be None or a list of satellite (integer) numbers'
if satNum != None :
assert( len(satNum) <= 5 ), \
'error, there are only 5 POES satellites in operation (at least when I wrote this code)'
assert(var['folat'] == None or (isinstance(var['folat'],list) and \
isinstance(var['folat'][0],(int,float)) and isinstance(var['folat'][1],(int,float)))), \
'error, folat must be None or a list of 2 numbers'
# Check the hemisphere and get the appropriate folat
folat = [ math.fabs( folat[0] ) * hemi, math.fabs( folat[1] ) * hemi ]
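# e.g. (illustrative) with hemi = -1 the default folat of [45., 90.] becomes [-45., -90.],
# so only southern-hemisphere passes are requested from readPoes below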
# Check if the endTime is given in which case the user wants a specific time interval to search for
# If not we'll give him the best available passes for the selected start time...
if ( endTime != None ) :
timeRange = numpy.array( [ startTime, endTime ] )
else :
timeRange = None
pltTimeInterval = numpy.array( datetime.timedelta( minutes = 45 ) )
# check if the timeRange is set... if not set the timeRange to +/- pltTimeInterval of the startTime
if timeRange == None:
timeRange = numpy.array( [ startTime - pltTimeInterval, startTime + pltTimeInterval ] )
# SatNums - currently operational POES satellites are 15, 16, 17, 18, 19
if satNum == None:
satNum = [None]
# If no particular satellite number is chosen by the user, loop through all the available ones
satNum = numpy.array( satNum ) # I like numpy arrays better that's why I'm converting the satNum list to a numpy array
latPoesAll = [[] for j in range(len(satNum))]
lonPoesAll = [[] for j in range(len(satNum))]
tedPoesAll = [[] for j in range(len(satNum))]
timePoesAll = [[] for j in range(len(satNum))]
lenDataAll = [[] for j in range(len(satNum))]
goodFlg=False
for sN in range(len(satNum)) :
if(satNum[sN] != None):
currPoesList = Poes.readPoes(timeRange[0], eTime = timeRange[1], satnum = int(satNum[sN]), folat = folat)
else:
currPoesList = Poes.readPoes(timeRange[0], eTime = timeRange[1], satnum = satNum[sN], folat = folat)
# Check if the data is loaded...
if currPoesList == None :
print 'No data found'
continue
#return None
else:
goodFlg=True
# Loop through the list and store the data into arrays
lenDataAll.append(len(currPoesList))
for l in currPoesList :
# Store our data in arrays
try:
tedPoesAll[sN].append(math.log10(getattr(l,param)))
if coords == 'mag' or coords == 'mlt':
lat,lon,_ = models.aacgm.aacgmConv(l.folat,l.folon, 0., l.time.year, 0)
latPoesAll[sN].append(lat)
if coords == 'mag':
lonPoesAll[sN].append(lon)
else:
lonPoesAll[sN].append(models.aacgm.mltFromEpoch(utils.timeUtils.datetimeToEpoch(l.time),lon)*360./24.)
else:
latPoesAll[sN].append(l.folat)
lonPoesAll[sN].append(l.folon)
timePoesAll[sN].append(l.time)
except Exception,e:
print e
print 'could not get parameter for time',l.time
if(not goodFlg): return None
latPoesAll = numpy.array( latPoesAll )
lonPoesAll = numpy.array( lonPoesAll )
tedPoesAll = numpy.array( tedPoesAll )
timePoesAll = numpy.array( timePoesAll )
lenDataAll = numpy.array( lenDataAll )
poesTicks = [ -3.0, -2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5 ]
# get the axis of the figure...
ax = axisHandle
for nn in range( len(satNum) ) :
x, y = baseMapObj(lonPoesAll[nn], latPoesAll[nn])
bpltpoes = baseMapObj.scatter(x,y,c=tedPoesAll[nn], vmin=scMin, vmax=scMax, alpha = 0.7, cmap=cm.jet, zorder = 7., edgecolor='none')
timeCurr = timePoesAll[nn]
for aa in range( len(latPoesAll[nn]) ) :
if aa % 10 == 0:
str_curr = str(timeCurr[aa].hour)+':'+str(timeCurr[aa].minute)
ax.annotate( str_curr, xy =( x[aa], y[aa] ), size = 5, zorder = 6. )
#cbar = plt.colorbar(bpltpoes, ticks = poesTicks, orientation='horizontal')
#cbar.ax.set_xticklabels(poesTicks)
#cbar.set_label(r"Total Log Energy Flux [ergs cm$^{-2}$ s$^{-1}$]")
return bpltpoes
def overlayPoesBnd( baseMapObj, axisHandle, startTime, coords = 'geo', hemi = 1, equBnd = True, polBnd = False ) :
"""This function reads POES TED data with in +/- 45min of the given time, fits the auroral oval boundaries and overlays them on a map object. The poleward boundary is not accurate all the times due to lesser number of satellite passes identifying it.
**Args**:
* **baseMapObj**: the basemap object you want the boundaries to be overlaid on.
* **axisHandle**: the axis handle used.
* **startTime** (`datetime <http://tinyurl.com/bl352yx>`_): the start time you want data for. Data from satellites within +/- 45 min of startTime is used for the fit.
* [**coords**] (str): coordinates of the map object on which you want data to be overlaid. default = 'geo'
* [**hemi**] (int): hemisphere of the map object. Value is 1 for the northern hemisphere and -1 for the southern hemisphere. default = 1
* [**equBnd**] (bool): if this is True, the equatorward auroral oval boundary fit from the TED data is overlaid on the map object. default = True
* [**polBnd**] (bool): if this is True, the poleward auroral oval boundary fit from the TED data is overlaid on the map object. default = False
**Returns**:
POES TED data is overlayed on the map object. If no data is found, None is returned.
**Example**:
::
import datetime as dt
gme.sat.overlayPoesBnd(myMapObj, myAx, dt.datetime(2011,3,4,4))
written by Bharat Kunduri, 20130216
"""
import utils
import matplotlib as mp
import datetime
import numpy
import matplotlib.pyplot as plt
import gme.sat.poes as Poes
import math
import matplotlib.cm as cm
from scipy import optimize
import models
#check all the inputs for validity
assert(isinstance(startTime,datetime.datetime)), \
'error, sTime must be a datetime object'
# Check the hemisphere and get the appropriate folat
folat = [ 45. * hemi, 90. * hemi ]
# Get the time range we choose +/- 45 minutes....
pltTimeInterval = numpy.array( datetime.timedelta( minutes = 45 ) )
timeRange = numpy.array( [ startTime - pltTimeInterval, startTime + pltTimeInterval ] )
satNum = [ 15, 16, 17, 18, 19 ]
# We set the TED cut-off value to -0.75,
# From observed cases this appeared to do well...
# though fails sometimes especially during geomagnetically quiet times...
# However this is version 1.0 and there always is room for improvement
equBndCutoffVal = -0.75
# Loop through all the available satellites (satNum is fixed above)
satNum = numpy.array( satNum ) # I like numpy arrays better that's why I'm converting the satNum list to a numpy array
latPoesAll = [[] for j in range(len(satNum))]
lonPoesAll = [[] for j in range(len(satNum))]
tedPoesAll = [[] for j in range(len(satNum))]
timePoesAll = [[] for j in range(len(satNum))]
lenDataAll = [[] for j in range(len(satNum))]
for sN in range( len(satNum) ) :
currPoesList = Poes.readPoes( timeRange[0], eTime = timeRange[1], satnum = int(satNum[sN]), folat = folat )
# Check if the data is loaded...
if currPoesList == None :
print 'No data found'
continue
# Loop through the list and store the data into arrays
lenDataAll.append( len( currPoesList ) )
for l in range( lenDataAll[-1] ) :
# Store our data in arrays if the TED data value is > than the cutoff value
try:
x = math.log10(currPoesList[l].ted)
except:
continue
if x > equBndCutoffVal:
if coords == 'mag' or coords == 'mlt':
lat,lon,_ = models.aacgm.aacgmConv(currPoesList[l].folat,currPoesList[l].folon, 0., currPoesList[l].time.year, 0)
latPoesAll[sN].append(lat)
if coords == 'mag':
lonPoesAll[sN].append(lon)
else:
lonPoesAll[sN].append(models.aacgm.mltFromEpoch(utils.timeUtils.datetimeToEpoch(currPoesList[l].time),lon)*360./24.)
else:
latPoesAll[sN].append(currPoesList[l].folat)
lonPoesAll[sN].append(currPoesList[l].folon)
# latPoesAll[sN].append( currPoesList[l].folat )
# lonPoesAll[sN].append( currPoesList[l].folon )
tedPoesAll[sN].append( math.log10(currPoesList[l].ted) )
timePoesAll[sN].append( currPoesList[l].time )
latPoesAll = numpy.array( latPoesAll )
lonPoesAll = numpy.array( lonPoesAll )
tedPoesAll = numpy.array( tedPoesAll )
timePoesAll = numpy.array( timePoesAll )
lenDataAll = numpy.array( lenDataAll )
# Now to identify the boundaries...
# Also need to check if the boundary is equatorward or poleward..
# When satellite is moving from high-lat to low-lat decrease in flux would mean equatorward boundary
# When satellite is moving from low-lat to high-lat increase in flux would mean equatorward boundary
# that is what we are trying to check here
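# Illustrative case (hypothetical numbers): if consecutive samples move from |lat| 55 to 60
# while the stored log10(ted) rises from -1.5 to -0.5, the satellite is heading poleward
# through increasing flux, so lat 55 is kept as an equatorward boundary candidate (Ltoh list);
# the mirrored flux changes feed the poleward (P) lists instead.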
eqBndLats = []
eqBndLons = []
poBndLats = []
poBndLons = []
for n1 in range( len(satNum) ) :
currSatLats = latPoesAll[n1]
currSatLons = lonPoesAll[n1]
currSatTeds = tedPoesAll[n1]
testLatArrLtoh = []
testLonArrLtoh = []
testLatArrHtol = []
testLonArrHtol = []
testLatArrLtohP = []
testLonArrLtohP = []
testLatArrHtolP = []
testLonArrHtolP = []
for n2 in range( len(currSatLats)-1 ) :
#Check if the satellite is moving from low-lat to high-lat or otherwise
if ( math.fabs( currSatLats[n2] ) < math.fabs( currSatLats[n2+1] ) ) :
if ( currSatTeds[n2] < currSatTeds[n2+1] ) :
testLatArrLtoh.append( currSatLats[n2] )
testLonArrLtoh.append( currSatLons[n2] )
if ( currSatTeds[n2] > currSatTeds[n2+1] ) :
testLatArrLtohP.append( currSatLats[n2] )
testLonArrLtohP.append( currSatLons[n2] )
if ( math.fabs( currSatLats[n2] ) > math.fabs( currSatLats[n2+1] ) ) :
if ( currSatTeds[n2] > currSatTeds[n2+1] ) :
testLatArrHtol.append( currSatLats[n2] )
testLonArrHtol.append( currSatLons[n2] )
if ( currSatTeds[n2] < currSatTeds[n2+1] ) :
testLatArrHtolP.append( currSatLats[n2] )
testLonArrHtolP.append( currSatLons[n2] )
# I do this to find the index of the min lat...
if ( testLatArrLtoh != [] ) :
testLatArrLtoh = numpy.array( testLatArrLtoh )
testLonArrLtoh = numpy.array( testLonArrLtoh )
VarEqLat1 = testLatArrLtoh[ numpy.where( testLatArrLtoh == min(testLatArrLtoh) ) ]
VarEqLon1 = testLonArrLtoh[ numpy.where( testLatArrLtoh == min(testLatArrLtoh) ) ]
eqBndLats.append( VarEqLat1[0] )
eqBndLons.append( VarEqLon1[0] )
if ( testLatArrHtol != [] ) :
testLatArrHtol = numpy.array( testLatArrHtol )
testLonArrHtol = numpy.array( testLonArrHtol )
VarEqLat2 = testLatArrHtol[ numpy.where( testLatArrHtol == min(testLatArrHtol) ) ]
VarEqLon2 = testLonArrHtol[ numpy.where( testLatArrHtol == min(testLatArrHtol) ) ]
eqBndLats.append( VarEqLat2[0] )
eqBndLons.append( VarEqLon2[0] )
if ( testLatArrLtohP != [] ) :
testLatArrLtohP = numpy.array( testLatArrLtohP )
testLonArrLtohP = numpy.array( testLonArrLtohP )
VarEqLatP1 = testLatArrLtohP[ numpy.where( testLatArrLtohP == min(testLatArrLtohP) ) ]
VarEqLonP1 = testLonArrLtohP[ numpy.where( testLatArrLtohP == min(testLatArrLtohP) ) ]
if VarEqLatP1[0] > 64. :
poBndLats.append( VarEqLatP1[0] )
poBndLons.append( VarEqLonP1[0] )
if ( testLatArrHtolP != [] ) :
testLatArrHtolP = numpy.array( testLatArrHtolP )
testLonArrHtolP = numpy.array( testLonArrHtolP )
VarEqLatP2 = testLatArrHtolP[ numpy.where( testLatArrHtolP == min(testLatArrHtolP) ) ]
VarEqLonP2 = testLonArrHtolP[ numpy.where( testLatArrHtolP == min(testLatArrHtolP) ) ]
if VarEqLatP2[0] > 64 :
poBndLats.append( VarEqLatP2[0] )
poBndLons.append( VarEqLonP2[0] )
eqBndLats = numpy.array( eqBndLats )
eqBndLons = numpy.array( eqBndLons )
poBndLats = numpy.array( poBndLats )
poBndLons = numpy.array( poBndLons )
#get the axis Handle used
ax = axisHandle
# Now we do the fitting part...
fitfunc = lambda p, x: p[0] + p[1]*numpy.cos(2*math.pi*(x/360.)+p[2]) # Target function
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
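# optimize.leastsq below minimises sum(errfunc(p, lons, lats)**2), i.e. it fits
# lat(lon) = p[0] + p[1]*cos(2*pi*lon/360 + p[2]) through the boundary points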
# Initial guess for the parameters
# Equatorward boundary
p0Equ = [ 1., 1., 1.]
p1Equ, successEqu = optimize.leastsq(errfunc, p0Equ[:], args=(eqBndLons, eqBndLats))
if polBnd == True :
p0Pol = [ 1., 1., 1.]
p1Pol, successPol = optimize.leastsq(errfunc, p0Pol[:], args=(poBndLons, poBndLats))
allPlotLons = numpy.linspace(0., 360., 25.)
allPlotLons[-1] = 0.
eqPlotLats = []
if polBnd == True :
poPlotLats = []
for xx in allPlotLons :
if equBnd == True :
eqPlotLats.append( p1Equ[0] + p1Equ[1]*numpy.cos(2*math.pi*(xx/360.)+p1Equ[2] ) )
if polBnd == True :
poPlotLats.append( p1Pol[0] + p1Pol[1]*numpy.cos(2*math.pi*(xx/360.)+p1Pol[2] ) )
xEqu, yEqu = baseMapObj(allPlotLons, eqPlotLats)
bpltpoes = baseMapObj.plot( xEqu,yEqu, zorder = 7., color = 'b' )
if polBnd == True :
xPol, yPol = baseMapObj(allPlotLons, poPlotLats)
bpltpoes = baseMapObj.plot( xPol,yPol, zorder = 7., color = 'r' )
| Shirling-VT/davitpy_sam | davitpy/gme/sat/poes.py | Python | gpl-3.0 | 33,273 |
#!/usr/bin/env python
# encoding: UTF-8
"""
This file is part of Commix Project (http://commixproject.com).
Copyright (c) 2014-2017 Anastasios Stasinopoulos (@ancst).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
For more see the file 'readme/COPYING' for copying permission.
"""
import os
import sys
import time
import base64
import sqlite3
import urllib2
from src.utils import menu
from src.utils import settings
from src.thirdparty.colorama import Fore, Back, Style, init
"""
Session handler via SQLite3 db.
"""
no_such_table = False
"""
Generate table name for SQLite3 db.
"""
def table_name(url):
host = url.split('//', 1)[1].split('/', 1)[0]
table_name = "session_" + host.replace(".","_").replace(":","_").replace("-","_")
return table_name
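# Illustrative result (hypothetical URL): for "http://target.example:8080/admin.php" the host
# part is "target.example:8080", so the prefix becomes "session_target_example_8080";
# dots, colons and dashes are replaced so the name is a valid SQLite identifier.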
"""
Flush session.
"""
def flush(url):
info_msg = "Flushing the stored session from the session file... "
sys.stdout.write(settings.print_info_msg(info_msg))
sys.stdout.flush()
try:
conn = sqlite3.connect(settings.SESSION_FILE)
tables = list(conn.execute("SELECT name FROM sqlite_master WHERE type is 'table'"))
conn.executescript(';'.join(["DROP TABLE IF EXISTS %s" %i for i in tables]))
conn.commit()
conn.close()
print "[ " + Fore.GREEN + "SUCCEED" + Style.RESET_ALL + " ]"
except sqlite3.OperationalError, err_msg:
print "[ " + Fore.RED + "FAILED" + Style.RESET_ALL + " ]"
err_msg = "Unable to flush the session file." + str(err_msg).title()
print settings.print_critical_msg(err_msg)
"""
Clear injection point records
except latest for every technique.
"""
def clear(url):
try:
if no_such_table:
conn = sqlite3.connect(settings.SESSION_FILE)
conn.execute("DELETE FROM " + table_name(url) + "_ip WHERE "\
"id NOT IN (SELECT MAX(id) FROM " + \
table_name(url) + "_ip GROUP BY technique);")
conn.commit()
conn.close()
except sqlite3.OperationalError, err_msg:
print settings.print_critical_msg(err_msg)
except:
settings.LOAD_SESSION = False
return False
"""
Import successful injection points to session file.
"""
def injection_point_importation(url, technique, injection_type, separator, shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_request_method, url_time_response, timesec, how_long, output_length, is_vulnerable):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_ip" + \
"(id INTEGER PRIMARY KEY, url VARCHAR, technique VARCHAR, injection_type VARCHAR, separator VARCHAR," \
"shell VARCHAR, vuln_parameter VARCHAR, prefix VARCHAR, suffix VARCHAR, "\
"TAG VARCHAR, alter_shell VARCHAR, payload VARCHAR, http_header VARCHAR, http_request_method VARCHAR, url_time_response INTEGER, "\
"timesec INTEGER, how_long INTEGER, output_length INTEGER, is_vulnerable VARCHAR);")
conn.execute("INSERT INTO " + table_name(url) + "_ip(url, technique, injection_type, separator, "\
"shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_header, http_request_method, "\
"url_time_response, timesec, how_long, output_length, is_vulnerable) "\
"VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", \
(str(url), str(technique), str(injection_type), \
str(separator), str(shell), str(vuln_parameter), str(prefix), str(suffix), \
str(TAG), str(alter_shell), str(payload), str(settings.HTTP_HEADER), str(http_request_method), \
int(url_time_response), int(timesec), int(how_long), \
int(output_length), str(is_vulnerable)))
conn.commit()
conn.close()
if settings.INJECTION_CHECKER == False:
settings.INJECTION_CHECKER = True
except sqlite3.OperationalError, err_msg:
err_msg = str(err_msg)[:1].upper() + str(err_msg)[1:] + "."
err_msg += " You are advised to rerun with switch '--flush-session'."
print settings.print_critical_msg(err_msg)
sys.exit(0)
except sqlite3.DatabaseError, err_msg:
err_msg = "An error occurred while accessing session file ('"
err_msg += settings.SESSION_FILE + "'). "
err_msg += "If the problem persists use the '--flush-session' option."
print "\n" + settings.print_critical_msg(err_msg)
sys.exit(0)
"""
Export successful applied techniques from session file.
"""
def applied_techniques(url, http_request_method):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
if settings.TESTABLE_PARAMETER:
applied_techniques = conn.execute("SELECT technique FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC ;")
else:
applied_techniques = conn.execute("SELECT technique FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"vuln_parameter = '" + settings.INJECT_TAG + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC ;")
values = []
for session in applied_techniques:
if "tempfile" in session[0][:8]:
settings.TEMPFILE_BASED_STATE = True
session = session[0][4:]
elif "dynamic" in session[0][:7]:
settings.EVAL_BASED_STATE = True
session = session[0][13:]
values += session[0][:1]
applied_techniques = ''.join(list(set(values)))
return applied_techniques
except sqlite3.OperationalError, err_msg:
#print settings.print_critical_msg(err_msg)
settings.LOAD_SESSION = False
return False
except:
settings.LOAD_SESSION = False
return False
"""
Export successful applied techniques from session file.
"""
def applied_levels(url, http_request_method):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
if settings.TESTABLE_PARAMETER:
applied_level = conn.execute("SELECT is_vulnerable FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC;")
else:
applied_level = conn.execute("SELECT is_vulnerable FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"vuln_parameter = '" + settings.INJECT_TAG + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC;")
for session in applied_level:
return session[0]
except sqlite3.OperationalError, err_msg:
#print settings.print_critical_msg(err_msg)
settings.LOAD_SESSION = False
return False
except:
settings.LOAD_SESSION = False
return False
"""
Export successful injection points from session file.
"""
def injection_point_exportation(url, http_request_method):
try:
if not menu.options.flush_session:
conn = sqlite3.connect(settings.SESSION_FILE)
result = conn.execute("SELECT * FROM sqlite_master WHERE name = '" + \
table_name(url) + "_ip' AND type = 'table';")
if result:
if menu.options.tech[:1] == "c":
select_injection_type = "R"
elif menu.options.tech[:1] == "e":
settings.EVAL_BASED_STATE = True
select_injection_type = "R"
elif menu.options.tech[:1] == "t":
select_injection_type = "B"
else:
select_injection_type = "S"
if settings.TEMPFILE_BASED_STATE and select_injection_type == "S":
check_injection_technique = "t"
elif settings.EVAL_BASED_STATE and select_injection_type == "R":
check_injection_technique = "d"
else:
check_injection_technique = menu.options.tech[:1]
if settings.TESTABLE_PARAMETER:
cursor = conn.execute("SELECT * FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"injection_type like '" + select_injection_type + "%' AND "\
"technique like '" + check_injection_technique + "%' AND "\
"vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC limit 1;")
else:
cursor = conn.execute("SELECT * FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"injection_type like '" + select_injection_type + "%' AND "\
"technique like '" + check_injection_technique + "%' AND "\
"http_header = '" + settings.HTTP_HEADER + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC limit 1;")
for session in cursor:
url = session[1]
technique = session[2]
injection_type = session[3]
separator = session[4]
shell = session[5]
vuln_parameter = session[6]
prefix = session[7]
suffix = session[8]
TAG = session[9]
alter_shell = session[10]
payload = session[11]
http_request_method = session[13]
url_time_response = session[14]
timesec = session[15]
how_long = session[16]
output_length = session[17]
is_vulnerable = session[18]
return url, technique, injection_type, separator, shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_request_method, url_time_response, timesec, how_long, output_length, is_vulnerable
else:
no_such_table = True
pass
except sqlite3.OperationalError, err_msg:
#print settings.print_critical_msg(err_msg)
settings.LOAD_SESSION = False
return False
except:
settings.LOAD_SESSION = False
return False
"""
Notification about session.
"""
def notification(url, technique, injection_type):
try:
if settings.LOAD_SESSION == True:
success_msg = "A previously stored session has been held against that host."
print settings.print_success_msg(success_msg)
while True:
if not menu.options.batch:
question_msg = "Do you want to resume to the "
question_msg += "(" + injection_type.split(" ")[0] + ") "
question_msg += technique.rsplit(' ', 2)[0]
question_msg += " injection point? [Y/n] > "
sys.stdout.write(settings.print_question_msg(question_msg))
settings.LOAD_SESSION = sys.stdin.readline().replace("\n","").lower()
else:
settings.LOAD_SESSION = ""
if len(settings.LOAD_SESSION) == 0:
settings.LOAD_SESSION = "y"
if settings.LOAD_SESSION in settings.CHOICE_YES:
return True
elif settings.LOAD_SESSION in settings.CHOICE_NO:
settings.LOAD_SESSION = False
if technique[:1] != "c":
while True:
question_msg = "Which technique do you want to re-evaluate? [(C)urrent/(a)ll/(n)one] > "
sys.stdout.write(settings.print_question_msg(question_msg))
proceed_option = sys.stdin.readline().replace("\n","").lower()
if len(proceed_option) == 0:
proceed_option = "c"
if proceed_option.lower() in settings.CHOICE_PROCEED :
if proceed_option.lower() == "a":
settings.RETEST = True
break
elif proceed_option.lower() == "c" :
settings.RETEST = False
break
elif proceed_option.lower() == "n":
raise SystemExit()
else:
pass
else:
err_msg = "'" + proceed_option + "' is not a valid answer."
print settings.print_error_msg(err_msg)
pass
if settings.SESSION_APPLIED_TECHNIQUES:
menu.options.tech = ''.join(settings.AVAILABLE_TECHNIQUES)
return False
elif settings.LOAD_SESSION in settings.CHOICE_QUIT:
raise SystemExit()
else:
err_msg = "'" + settings.LOAD_SESSION + "' is not a valid answer."
print settings.print_error_msg(err_msg)
pass
except sqlite3.OperationalError, err_msg:
print settings.print_critical_msg(err_msg)
"""
Check for specific stored parameter.
"""
def check_stored_parameter(url, http_request_method):
if injection_point_exportation(url, http_request_method):
if injection_point_exportation(url, http_request_method)[16] == str(menu.options.level):
# Check for stored alternative shell
if injection_point_exportation(url, http_request_method)[9] != "":
menu.options.alter_shell = injection_point_exportation(url, http_request_method)[9]
return True
else:
return False
else:
return False
"""
Import successful command execution outputs to session file.
"""
def store_cmd(url, cmd, shell, vuln_parameter):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_ir" + \
"(cmd VARCHAR, output VARCHAR, vuln_parameter VARCHAR);")
if settings.TESTABLE_PARAMETER:
conn.execute("INSERT INTO " + table_name(url) + "_ir(cmd, output, vuln_parameter) "\
"VALUES(?,?,?)", \
(str(base64.b64encode(cmd)), str(base64.b64encode(shell)), str(vuln_parameter)))
else:
conn.execute("INSERT INTO " + table_name(url) + "_ir(cmd, output, vuln_parameter) "\
"VALUES(?,?,?)", \
(str(base64.b64encode(cmd)), str(base64.b64encode(shell)), str(settings.HTTP_HEADER)))
conn.commit()
conn.close()
except sqlite3.OperationalError, err_msg:
print settings.print_critical_msg(err_msg)
except TypeError, err_msg:
pass
"""
Export successful command execution outputs from session file.
"""
def export_stored_cmd(url, cmd, vuln_parameter):
try:
if not menu.options.flush_session:
conn = sqlite3.connect(settings.SESSION_FILE)
output = None
conn = sqlite3.connect(settings.SESSION_FILE)
if settings.TESTABLE_PARAMETER:
cursor = conn.execute("SELECT output FROM " + table_name(url) + \
"_ir WHERE cmd='" + base64.b64encode(cmd) + "' AND "\
"vuln_parameter= '" + vuln_parameter + "';").fetchall()
else:
cursor = conn.execute("SELECT output FROM " + table_name(url) + \
"_ir WHERE cmd='" + base64.b64encode(cmd) + "' AND "\
"vuln_parameter= '" + settings.HTTP_HEADER + "';").fetchall()
conn.commit()
conn.close()
for session in cursor:
output = base64.b64decode(session[0])
return output
else:
no_such_table = True
pass
except sqlite3.OperationalError, err_msg:
pass
"""
Import valid credentials to session file.
"""
def import_valid_credentials(url, authentication_type, admin_panel, username, password):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_creds" + \
"(id INTEGER PRIMARY KEY, url VARCHAR, authentication_type VARCHAR, admin_panel VARCHAR, "\
"username VARCHAR, password VARCHAR);")
conn.execute("INSERT INTO " + table_name(url) + "_creds(url, authentication_type, "\
"admin_panel, username, password) VALUES(?,?,?,?,?)", \
(str(url), str(authentication_type), str(admin_panel), \
str(username), str(password)))
conn.commit()
conn.close()
except sqlite3.OperationalError, err_msg:
print settings.print_critical_msg(err_msg)
except sqlite3.DatabaseError, err_msg:
err_msg = "An error occurred while accessing session file ('"
err_msg += settings.SESSION_FILE + "'). "
err_msg += "If the problem persists use the '--flush-session' option."
print "\n" + settings.print_critical_msg(err_msg)
sys.exit(0)
"""
Export valid credentials from session file.
"""
def export_valid_credentials(url, authentication_type):
try:
if not menu.options.flush_session:
conn = sqlite3.connect(settings.SESSION_FILE)
output = None
conn = sqlite3.connect(settings.SESSION_FILE)
cursor = conn.execute("SELECT username, password FROM " + table_name(url) + \
"_creds WHERE url='" + url + "' AND "\
"authentication_type= '" + authentication_type + "';").fetchall()
cursor = ":".join(cursor[0])
return cursor
else:
no_such_table = True
pass
except sqlite3.OperationalError, err_msg:
pass
# eof | hackersql/sq1map | comm1x/src/utils/session_handler.py | Python | gpl-3.0 | 17,851 |
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy.props import *
from PyHSPlasma import *
from .base import PlasmaModifierProperties
from ..prop_world import game_versions
from ...exporter import ExportError
from ... import idprops
class PlasmaVersionedNodeTree(idprops.IDPropMixin, bpy.types.PropertyGroup):
name = StringProperty(name="Name")
version = EnumProperty(name="Version",
description="Plasma versions this node tree exports under",
items=game_versions,
options={"ENUM_FLAG"},
default=set(list(zip(*game_versions))[0]))
node_tree = PointerProperty(name="Node Tree",
description="Node Tree to export",
type=bpy.types.NodeTree)
node_name = StringProperty(name="Node Ref",
description="Attach a reference to this node")
@classmethod
def _idprop_mapping(cls):
return {"node_tree": "node_tree_name"}
def _idprop_sources(self):
return {"node_tree_name": bpy.data.node_groups}
class PlasmaAdvancedLogic(PlasmaModifierProperties):
pl_id = "advanced_logic"
bl_category = "Logic"
bl_label = "Advanced"
bl_description = "Plasma Logic Nodes"
bl_icon = "NODETREE"
logic_groups = CollectionProperty(type=PlasmaVersionedNodeTree)
active_group_index = IntProperty(options={"HIDDEN"})
def export(self, exporter, bo, so):
version = exporter.mgr.getVer()
for i in self.logic_groups:
our_versions = [globals()[j] for j in i.version]
if version in our_versions:
if i.node_tree is None:
raise ExportError("'{}': Advanced Logic is missing a node tree for '{}'".format(bo.name, i.version))
# If node_name is defined, then we're only adding a reference. We will make sure that
# the entire node tree is exported once before the post_export step, however.
if i.node_name:
exporter.want_node_trees[i.node_tree.name] = (bo, so)
node = i.node_tree.nodes.get(i.node_name, None)
if node is None:
raise ExportError("Node '{}' does not exist in '{}'".format(i.node_name, i.node_tree.name))
# We are going to assume get_key will do the adding correctly. Single modifiers
# should fetch the appropriate SceneObject before doing anything, so this will
# be a no-op in that case. Multi modifiers should accept any SceneObject, however
node.get_key(exporter, so)
else:
exporter.node_trees_exported.add(i.node_tree.name)
i.node_tree.export(exporter, bo, so)
def harvest_actors(self):
actors = set()
for i in self.logic_groups:
actors.update(i.node_tree.harvest_actors())
return actors
class PlasmaSpawnPoint(PlasmaModifierProperties):
pl_id = "spawnpoint"
bl_category = "Logic"
bl_label = "Spawn Point"
bl_description = "Point at which avatars link into the Age"
def export(self, exporter, bo, so):
# Not much to this modifier... It's basically a flag that tells the engine, "hey, this is a
# place the avatar can show up." Nice to have a simple one to get started with.
spawn = exporter.mgr.add_object(pl=plSpawnModifier, so=so, name=self.key_name)
@property
def requires_actor(self):
return True
class PlasmaMaintainersMarker(PlasmaModifierProperties):
pl_id = "maintainersmarker"
bl_category = "Logic"
bl_label = "Maintainer's Marker"
bl_description = "Designates an object as the D'ni coordinate origin point of the Age."
bl_icon = "OUTLINER_DATA_EMPTY"
calibration = EnumProperty(name="Calibration",
description="State of repair for the Marker",
items=[
("kBroken", "Broken",
"A marker which reports scrambled coordinates to the KI."),
("kRepaired", "Repaired",
"A marker which reports blank coordinates to the KI."),
("kCalibrated", "Calibrated",
"A marker which reports accurate coordinates to the KI.")
])
def export(self, exporter, bo, so):
maintmark = exporter.mgr.add_object(pl=plMaintainersMarkerModifier, so=so, name=self.key_name)
maintmark.calibration = getattr(plMaintainersMarkerModifier, self.calibration)
@property
def requires_actor(self):
return True
| dpogue/korman | korman/properties/modifiers/logic.py | Python | gpl-3.0 | 5,501 |
import os.path
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth import get_user_model
from avatar.settings import AVATAR_DEFAULT_URL, AVATAR_MAX_AVATARS_PER_USER
from avatar.util import get_primary_avatar
from avatar.models import Avatar
try:
from PIL import Image
dir(Image) # Placate PyFlakes
except ImportError:
import Image
def upload_helper(o, filename):
f = open(os.path.join(o.testdatapath, filename), "rb")
response = o.client.post(reverse('avatar_add'), {
'avatar': f,
}, follow=True)
f.close()
return response
class AvatarUploadTests(TestCase):
def setUp(self):
self.testdatapath = os.path.join(os.path.dirname(__file__), "testdata")
self.user = get_user_model().objects.create_user('test', '[email protected]', 'testpassword')
self.user.save()
self.client.login(username='test', password='testpassword')
Image.init()
def testNonImageUpload(self):
response = upload_helper(self, "nonimagefile")
self.failUnlessEqual(response.status_code, 200)
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
def testNormalImageUpload(self):
response = upload_helper(self, "test.png")
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 1)
self.failUnlessEqual(response.context['upload_avatar_form'].errors, {})
avatar = get_primary_avatar(self.user)
self.failIfEqual(avatar, None)
def testImageWithoutExtension(self):
# use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
response = upload_helper(self, "imagefilewithoutext")
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
def testImageWithWrongExtension(self):
# use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
response = upload_helper(self, "imagefilewithwrongext.ogg")
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
def testImageTooBig(self):
# use with AVATAR_MAX_SIZE = 1024 * 1024
response = upload_helper(self, "testbig.png")
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
def testDefaultUrl(self):
response = self.client.get(reverse('avatar_render_primary', kwargs={
'user': self.user.username,
'size': 80,
}))
loc = response['Location']
base_url = getattr(settings, 'STATIC_URL', None)
if not base_url:
base_url = settings.MEDIA_URL
self.assertTrue(base_url in loc)
self.assertTrue(loc.endswith(AVATAR_DEFAULT_URL))
def testNonExistingUser(self):
a = get_primary_avatar("nonexistinguser")
self.failUnlessEqual(a, None)
def testThereCanBeOnlyOnePrimaryAvatar(self):
for i in range(1, 10):
self.testNormalImageUpload()
count = Avatar.objects.filter(user=self.user, primary=True).count()
self.failUnlessEqual(count, 1)
def testDeleteAvatar(self):
self.testNormalImageUpload()
avatar = Avatar.objects.filter(user=self.user)
self.failUnlessEqual(len(avatar), 1)
response = self.client.post(reverse('avatar_delete'), {
'choices': [avatar[0].id],
}, follow=True)
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 1)
count = Avatar.objects.filter(user=self.user).count()
self.failUnlessEqual(count, 0)
def testDeletePrimaryAvatarAndNewPrimary(self):
self.testThereCanBeOnlyOnePrimaryAvatar()
primary = get_primary_avatar(self.user)
oid = primary.id
response = self.client.post(reverse('avatar_delete'), {
'choices': [oid],
})
primaries = Avatar.objects.filter(user=self.user, primary=True)
self.failUnlessEqual(len(primaries), 1)
self.failIfEqual(oid, primaries[0].id)
avatars = Avatar.objects.filter(user=self.user)
self.failUnlessEqual(avatars[0].id, primaries[0].id)
def testTooManyAvatars(self):
for i in range(0, AVATAR_MAX_AVATARS_PER_USER):
self.testNormalImageUpload()
count_before = Avatar.objects.filter(user=self.user).count()
response = upload_helper(self, "test.png")
count_after = Avatar.objects.filter(user=self.user).count()
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
self.failUnlessEqual(count_before, count_after)
# def testAvatarOrder
# def testReplaceAvatarWhenMaxIsOne
# def testHashFileName
# def testHashUserName
# def testChangePrimaryAvatar
# def testDeleteThumbnailAndRecreation
# def testAutomaticThumbnailCreation
| bhermansyah/DRR-datacenter | avatar/tests.py | Python | gpl-3.0 | 5,578 |
#!/usr/bin/env python3
"""Output a CSV file that can be imported to Petra"""
import os
import sys
import calendar
import csv
from csv_dict import CSVDict, CSVKeyMissing
def split_csv(table_file='Tabell.csv'):
"""Split account, cost center and project into three tables"""
account = []
cost_center = []
project = []
with open(table_file, newline='') as tablefile:
tablereader = csv.reader(tablefile, delimiter=';')
for row in tablereader:
if row[0] != '' and row[1] != '':
account.append([row[0], row[1]])
if row[3] != '' and row[4] != '':
cost_center.append([row[3], row[4]])
if row[6] != '' and row[7] != '':
project.append([row[6], row[7]])
with open('Konto.csv', 'w', newline='') as accountfile:
accountwriter = csv.writer(accountfile, delimiter=';')
for row in account:
accountwriter.writerow(row)
with open('Costcenter.csv', 'w', newline='') as ccfile:
ccwriter = csv.writer(ccfile, delimiter=';')
for row in cost_center:
ccwriter.writerow(row)
with open('Projekt.csv', 'w', newline='') as projectfile:
projectwriter = csv.writer(projectfile, delimiter=';')
for row in project:
projectwriter.writerow(row)
def _parse_trans_objects(trans):
"""
Handle an object list of a transaction.
The object list contains a cost center and project, formatted like so
['1', 'K0000', '6', 'P-00000000'].
Cost center (resultatenhet) is preceded by a '1' and project by a '6', but the order
of the two could be reversed. Cost center always begins with 'K' and
project with 'P-'. The object list could also be empty.
Returns a tuple (cost_center, project), where any of the two could be
None in case the information is missing from the object list.
"""
cost_center = project = None
trans_it = iter(trans)
for idx in trans_it:
obj = next(trans_it)
if idx == '1' and obj.startswith('K'):
cost_center = obj
elif idx == '6' and obj.startswith('P-'):
project = obj
return (cost_center, project)
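# Illustrative call (hypothetical values): _parse_trans_objects(['6', 'P-12345678', '1', 'K1234'])
# returns ('K1234', 'P-12345678') regardless of the order of the two pairs, and an empty
# object list returns (None, None).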
class PetraOutput:
"""Form an output file based on an SieData object and translation table"""
def __init__(self, sie_data, account_file, cost_center_file, project_file,
default_petra_cc='3200'):
self.sie_data = sie_data
self.default_petra_cc = default_petra_cc
# self.parse_tables(account_file, cost_center_file, project_file)
self.account = CSVDict(account_file)
self.cost_center = CSVDict(cost_center_file)
self.project = CSVDict(project_file)
self.table = []
self.ver_month = None
def populate_output_table(self):
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
# pylint: disable=invalid-name
"""Extract interesting informatin from the Sie data and form output"""
header = ['', 'CC', 'Account', 'Narrative', 'Reference', 'Date', 'Dt',
'Ct']
self.table.append(header)
program = self.sie_data.get_data('#PROGRAM')[0].data[0].split()[0]
verifications = self.sie_data.get_data('#VER')
ver_date = next(v.verdatum for v in verifications if v.verdatum.has_date)
self.ver_month = ver_date.format("%Y-%m")
description = "Imported from {} {}".format(program, self.ver_month)
checksum = format(sum(ver.sum_debit() for ver in verifications),
'.2f').rstrip('0').rstrip('.').replace('.',',')
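# e.g. (hypothetical sum) 1234.50 -> "1234,5": trailing zeros and the decimal point are
# stripped and '.' is swapped for ',', presumably because the Petra import expects a
# comma decimal separator in this semicolon-delimited CSV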
day = calendar.monthrange(ver_date.year, ver_date.month)[1]
last_date_month = "{}/{:02}/{}".format(day, ver_date.month, ver_date.year)
self.table.append(['B', description, checksum, last_date_month, '', '', '',
''])
for ver in verifications:
if not ver.in_balance():
raise Exception('Verification not in balance:', ver)
"""
# Contains 'Swetzén'
if ver.serie == 'A' and ver.vernr == '170071':
print(ver)
# Contains strange characters
if ver.serie == 'C' and ver.vernr == '170058':
print(ver)
# CC with 'XXXX'
if ver.serie == 'C' and ver.vernr == '170064':
print(ver)
# Rounding error?
if ver.serie == 'C' and ver.vernr == '170067':
print(ver)
"""
ref = "Visma Ver {}{}".format(ver.serie, ver.vernr)
text = "{} - {}".format(ref, ver.vertext)
date = ver.verdatum.format("%d/%m/%Y")
self.table.append(['J', text, 'GL', 'STD', 'SEK', '1', date, ''])
narr = ver.vertext # Default
for trans in ver.trans_list:
(visma_cc, visma_proj) = _parse_trans_objects(trans.objekt)
if not visma_proj or visma_proj == 'P-32000000': # Use visma_cc instead
if not visma_cc: # Use default
cc = self.default_petra_cc
else:
cc = self.cost_center[str(visma_cc)]['P_CC']
else:
cc = self.project[str(visma_proj)]['P_CC']
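# Illustrative precedence (hypothetical codes): a Visma project other than 'P-32000000'
# (which is treated like no project), e.g. 'P-12345678', is mapped via the project table;
# otherwise a cost center such as 'K1234' is mapped via the cost-center table; with
# neither present, default_petra_cc ('3200') is used.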
acct = self.account[str(trans.kontonr)]['P_Acct']
if trans.transtext and trans.kvantitet:
kvantitet = format(trans.kvantitet,
'.2f').rstrip('0').rstrip('.').replace('.',',')
narr = "{} {}".format(trans.transtext, kvantitet)
elif trans.transtext:
narr = trans.transtext
dt = trans.debit
ct = trans.credit
self.table.append(['T', cc, acct, narr, ref, date, dt, ct])
def print_output(self):
"""Print csv output to stdout"""
print("\n".join(','.join(str(r) for r in row) for row in self.table))
def write_output(self, filename=None, overwrite=False):
"""Write csv to file, abort if it already exists"""
writemode = 'w' if overwrite else 'x'
try:
for encoding in ['utf_8']:
if not filename:
filename = 'CSV/PYTHON/VtP_' + self.ver_month + encoding + '.csv'
try:
with open(filename, writemode, newline='', encoding=encoding) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=';')
csvwriter.writerows(self.table)
# print("Encoding with ", encoding, "successful!")
except UnicodeEncodeError as err:
print("Encoding failed: ", err)
os.remove(filename)
except FileExistsError:
sys.exit("Kan inte skriva " + filename + ", filen finns redan.")
| jswetzen/sie-parse | petra_output.py | Python | gpl-3.0 | 6,907 |
# -*- coding: utf-8 -*-
"""
José Vicente Pérez
Granada University (Spain)
March, 2017
Testing suite for profiler.py
Last modified: 19 June 2017
"""
import time
import profiler as p
import praster as pr
import numpy as np
import matplotlib.pyplot as plt
print("Tests for TProfiler methods")
def test01():
"""
Creates a TProfiler from an array with profile_data
Test for get_x, get_y
"""
inicio = time.time()
print("=" * 40)
print("Test 01 para TProfiler")
print("Testing functions get_x(), get_y()")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
# Test 01 get and print x and y arrays
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
xi1 = perfil.get_x(True)
yi1 = perfil.get_y(True)
xi2 = perfil.get_x(False)
yi2 = perfil.get_y(False)
ax1.plot(xi1, yi1)
ax2.plot(xi2, yi2)
ax1.set_title("head = True")
ax2.set_title("head = False")
fig.tight_layout()
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test02():
"""
Creates a TProfiler from an array with profile_data
Test for get_l, get_z
"""
inicio = time.time()
print("=" * 40)
print("Test 02 para TProfiler")
print("Testing functions get_l(), get_z()")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
# Test 01 get and print x and y arrays
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
li1 = perfil.get_l(True)
zi1 = perfil.get_z(True)
ax1.plot(li1, zi1)
ax1.set_title("head = True")
li2 = perfil.get_l(False)
zi2 = perfil.get_z(False)
ax2.plot(li2, zi2)
ax2.set_title("head = False")
zi3 = perfil.get_z(True, True)
ax3.plot(li1, zi3)
ax3.set_title("Relative elevations, head = True")
zi4 = perfil.get_z(False, True)
ax4.plot(li2, zi4)
ax4.set_title("Relative elevations, head = False")
fig.tight_layout()
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test03():
"""
Creates a TProfiler from an array with profile_data
Test for raw_elevations and smooth
"""
inicio = time.time()
print("=" * 40)
print("Test 03 para TProfiler")
print("Testing functions smooth() and get_raw_z()")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
# Print raw elevations vs peaks removed elevations
fig = plt.figure(figsize=(12, 6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
li = perfil.get_l(True)
zi = perfil.get_z(True)
raw_zi = perfil.get_raw_z(True)
ax1.plot(li, zi, label="Peaks removed")
ax1.plot(li, raw_zi, label="Raw elevations")
ax1.set_title("Raw elevations vs peak removed")
ax1.legend()
ax1.set_xlim((6850, 8950))
ax1.set_ylim((950, 1050))
# Test for smooth function
distance = 0
for n in range(5):
li = perfil.get_l(True)
zi = perfil.get_z(True)
perfil.smooth(distance)
ax2.plot(li, zi, label=str(distance) + " m")
distance += 50
ax2.set_title("Smooth with different distances")
ax2.legend()
ax2.set_xlim((8000, 9000))
ax2.set_ylim((950, 1000))
fig.tight_layout()
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test04():
"""
Creates a TProfiler from an array with profile_data
Test for get_area and get_slopes
"""
inicio = time.time()
print("=" * 40)
print("Test 04 para TProfiler")
print("Testing functions get_area() and get_slopes()")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
# Get slope area and plot in log scale
fig = plt.figure(figsize=(12, 6))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
for ax in (ax1, ax2, ax3, ax4):
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlim((1000000, 100000000))
ax.set_ylim((0.001, 1))
ai = perfil.get_area(True)
s1 = perfil.get_slope()
ax1.plot(ai, s1, "b+")
ax1.set_title("Raw slopes (all)")
s2 = perfil.get_slope(threshold=0.9)
ax2.plot(ai, s2, "b+")
ax2.set_title("Slopes with threshold >= 0.9")
s3, lq3 = perfil.get_slope(threshold=0.9, lq=True)
ax3.plot(ai, lq3, "r+")
ax3.plot(ai, s3, "b+")
ax3.set_title("Slopes and low quality slopes (threshold 0.9)")
s4, lq4 = perfil.get_slope(threshold=0.9, lq=True, head=True)
a2 = perfil.get_area(head=True)
ax4.plot(a2, lq4, "r+")
ax4.plot(a2, s4, "b+")
ax4.set_title("Example 3 with head=True")
fig.tight_layout(pad=1)
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test05():
"""
Creates a TProfiler from an array with profile_data
Test for calculate slopes
"""
inicio = time.time()
print("=" * 40)
print("Test 05 para TProfiler")
print("Testing functions calculate slopes")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
reg_points = 4
# Get slope area and plot in log scale
fig = plt.figure(figsize=(12, 6))
for n in range(1, 9, 2):
ax1 = fig.add_subplot(4, 2, n)
ax2 = fig.add_subplot(4, 2, n+1)
perfil.calculate_slope(reg_points)
si = perfil.get_slope()
ai = perfil.get_area()
ax1.plot(ai, si, "b+")
ax1.set_xscale("log")
ax1.set_yscale("log")
ax1.set_xlim((1000000, 100000000))
ax1.set_ylim((0.001, 1))
ax1.set_title("reg_points = " + str(reg_points) + " (normal elevations)")
perfil.calculate_slope(reg_points, True)
si = perfil.get_slope(0.9)
ax2.plot(ai, si, "b+")
ax2.set_xscale("log")
ax2.set_yscale("log")
ax2.set_xlim((1000000, 100000000))
ax2.set_ylim((0.001, 1))
ax2.set_title("reg_points = " + str(reg_points) + " (raw elevations)")
reg_points += 4
fig.tight_layout(pad=1)
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test06():
"""
Creates a TProfiler from an array with profile_data
Test for calculate_chi() and get_chi()
"""
inicio = time.time()
print("=" * 40)
print("Test 06 para TProfiler")
print("Testing functions get_chi() and calculate_chi()")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
# Get slope area and plot in log scale
fig = plt.figure()
theta = 0.35
for n in range(1, 10):
ax = fig.add_subplot(3, 3, n)
perfil.thetaref = theta
perfil.calculate_chi()
chi = perfil.get_chi(False, True)
zi = perfil.get_z(False, True)
ax.plot(chi, zi)
ax.set_title("Thetaref = {0:.2f}".format(theta))
theta += 0.05
fig.tight_layout(pad=1)
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test07():
"""
Creates a TProfiler from an array with profile_data
Test for get_ksn()
"""
inicio = time.time()
print("=" * 40)
print("Test 07 para TProfiler")
print("Testing function get_ksn()")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
# Get slope area and plot in log scale
fig = plt.figure(figsize=(12, 6))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
li = perfil.get_l(True)
ksn1 = perfil.get_ksn()
ax1.plot(li, ksn1, "b+")
ax1.set_title("Raw ksn (all)")
ksn2 = perfil.get_ksn(threshold=0.9)
ax2.plot(li, ksn2, "b+")
ax2.set_title("Ksn with threshold >= 0.9")
ksn3, lq3 = perfil.get_ksn(threshold=0.9, lq=True)
ax3.plot(li, lq3, "r+")
ax3.plot(li, ksn3, "b+")
ax3.set_title("Ksn and low quality ksn (threshold 0.9)")
ksn4, lq4 = perfil.get_ksn(threshold=0.9, lq=True, head=False)
l2 = perfil.get_l(head=False)
ax4.plot(l2, lq4, "r+")
ax4.plot(l2, ksn4, "b+")
ax4.set_title("Example 3 with head=False")
fig.tight_layout(pad=1)
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test08():
"""
Creates a TProfiler from an array with profile_data
Test for calculate_ksn
"""
inicio = time.time()
print("=" * 40)
print("Test 08 para TProfiler")
print("Testing functions calculate_ksn()")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
reg_points = 4
fig = plt.figure(figsize=(12, 6))
for n in range(1, 9, 2):
ax1 = fig.add_subplot(4, 2, n)
ax2 = fig.add_subplot(4, 2, n + 1)
perfil.calculate_ksn(reg_points)
ksn = perfil.get_ksn()
li = perfil.get_l()
ax1.plot(li, ksn)
ax1.set_title("KSN with reg_points = " + str(reg_points) + " (normal elevations)")
perfil.calculate_ksn(reg_points, raw_z=True)
ksn = perfil.get_ksn()
ax2.plot(li, ksn)
ax2.set_title("KSN with reg_points = " + str(reg_points) + " (raw elevations)")
reg_points += 4
fig.tight_layout(pad=1)
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test09():
"""
Creates a TProfiler from an array with profile_data
    Test for ksn and SL plots
"""
inicio = time.time()
print("=" * 40)
print("Test 09 para TProfiler")
print("Testing ksn and SL plots")
print("Test in progress...")
# Test parameters
pf_data = np.load("data/in/darro_pfdata.npy")
dem = "data/in/darro25.tif"
demraster = pr.open_raster(dem)
srs = demraster.proj
cellsize = demraster.cellsize
# Creates the profile
perfil = p.TProfile(pf_data, cellsize, srs=srs)
reg_points = 12
fig = plt.figure()
ax = fig.add_subplot(111)
perfil.calculate_ksn(reg_points=reg_points)
perfil.calculate_slope(reg_points=reg_points)
li = perfil.get_l()
slope = perfil.get_slope()
ksn = perfil.get_ksn()
sl = slope * li
sl, = ax.plot(li, sl)
ax.set_ylabel("SL index")
ax.set_xlabel("Distance (m)")
twax = ax.twinx()
ksn, = twax.plot(li, ksn, color="r")
twax.set_ylabel("Ksn index")
twax.legend((sl, ksn), ("SL", "ksn"))
plt.show()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
test01()
test02()
test03()
test04()
test05()
test06()
test07()
test08()
test09()
| geolovic/TProfiler | test/06_TProfiler_test.py | Python | gpl-3.0 | 12,892 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import binascii
from smtplib import SMTPException
from django.db import models
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.signals import user_logged_in
from django.db.models.signals import post_save, post_migrate
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group, User, Permission
from django.utils import translation as django_translation
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives, get_connection
from django.utils.translation import LANGUAGE_SESSION_KEY
from social.apps.django_app.default.models import UserSocialAuth
from weblate.lang.models import Language
from weblate.trans.site import get_site_url, get_site_domain
from weblate.accounts.avatar import get_user_display
from weblate.trans.util import report_error
from weblate.trans.signals import user_pre_delete
from weblate import VERSION
from weblate.logger import LOGGER
from weblate.appsettings import ANONYMOUS_USER_NAME, SITE_TITLE
def send_mails(mails):
"""Sends multiple mails in single connection."""
try:
connection = get_connection()
connection.send_messages(mails)
except SMTPException as error:
LOGGER.error('Failed to send email: %s', error)
report_error(error, sys.exc_info())
def get_author_name(user, email=True):
"""Returns formatted author name with email."""
# Get full name from database
full_name = user.first_name
# Use username if full name is empty
if full_name == '':
full_name = user.username
# Add email if we are asked for it
if not email:
return full_name
return '%s <%s>' % (full_name, user.email)
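# Illustrative behaviour (example values, not from the original source): for a
# user with first_name 'Jane Doe' and email 'jane@example.com',
# get_author_name(user) returns 'Jane Doe <jane@example.com>', while
# get_author_name(user, email=False) returns just 'Jane Doe'.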
def notify_merge_failure(subproject, error, status):
'''
Notification on merge failure.
'''
subscriptions = Profile.objects.subscribed_merge_failure(
subproject.project,
)
users = set()
mails = []
for subscription in subscriptions:
mails.append(
subscription.notify_merge_failure(subproject, error, status)
)
users.add(subscription.user_id)
for owner in subproject.project.owners.all():
mails.append(
owner.profile.notify_merge_failure(
subproject, error, status
)
)
# Notify admins
mails.append(
get_notification_email(
'en',
'ADMINS',
'merge_failure',
subproject,
{
'subproject': subproject,
'status': status,
'error': error,
}
)
)
send_mails(mails)
def notify_new_string(translation):
'''
Notification on new string to translate.
'''
mails = []
subscriptions = Profile.objects.subscribed_new_string(
translation.subproject.project, translation.language
)
for subscription in subscriptions:
mails.append(
subscription.notify_new_string(translation)
)
send_mails(mails)
def notify_new_language(subproject, language, user):
'''
Notify subscribed users about new language requests
'''
mails = []
subscriptions = Profile.objects.subscribed_new_language(
subproject.project,
user
)
users = set()
for subscription in subscriptions:
mails.append(
subscription.notify_new_language(subproject, language, user)
)
users.add(subscription.user_id)
for owner in subproject.project.owners.all():
mails.append(
owner.profile.notify_new_language(
subproject, language, user
)
)
# Notify admins
mails.append(
get_notification_email(
'en',
'ADMINS',
'new_language',
subproject,
{
'language': language,
'user': user,
},
user=user,
)
)
send_mails(mails)
def notify_new_translation(unit, oldunit, user):
'''
Notify subscribed users about new translation
'''
mails = []
subscriptions = Profile.objects.subscribed_any_translation(
unit.translation.subproject.project,
unit.translation.language,
user
)
for subscription in subscriptions:
mails.append(
subscription.notify_any_translation(unit, oldunit)
)
send_mails(mails)
def notify_new_contributor(unit, user):
'''
Notify about new contributor.
'''
mails = []
subscriptions = Profile.objects.subscribed_new_contributor(
unit.translation.subproject.project,
unit.translation.language,
user
)
for subscription in subscriptions:
mails.append(
subscription.notify_new_contributor(
unit.translation, user
)
)
send_mails(mails)
def notify_new_suggestion(unit, suggestion, user):
'''
Notify about new suggestion.
'''
mails = []
subscriptions = Profile.objects.subscribed_new_suggestion(
unit.translation.subproject.project,
unit.translation.language,
user
)
for subscription in subscriptions:
mails.append(
subscription.notify_new_suggestion(
unit.translation,
suggestion,
unit
)
)
send_mails(mails)
def notify_new_comment(unit, comment, user, report_source_bugs):
'''
Notify about new comment.
'''
mails = []
subscriptions = Profile.objects.subscribed_new_comment(
unit.translation.subproject.project,
comment.language,
user
)
for subscription in subscriptions:
mails.append(
subscription.notify_new_comment(unit, comment, user)
)
# Notify upstream
if comment.language is None and report_source_bugs != '':
send_notification_email(
'en',
report_source_bugs,
'new_comment',
unit.translation,
{
'unit': unit,
'comment': comment,
'subproject': unit.translation.subproject,
},
user=user,
)
send_mails(mails)
def get_notification_email(language, email, notification,
translation_obj=None, context=None, headers=None,
user=None, info=None):
'''
Renders notification email.
'''
cur_language = django_translation.get_language()
context = context or {}
headers = headers or {}
references = None
if 'unit' in context:
unit = context['unit']
references = '{0}/{1}/{2}/{3}'.format(
unit.translation.subproject.project.slug,
unit.translation.subproject.slug,
unit.translation.language.code,
unit.id
)
if references is not None:
references = '<{0}@{1}>'.format(references, get_site_domain())
headers['In-Reply-To'] = references
headers['References'] = references
try:
if info is None:
info = translation_obj.__unicode__()
LOGGER.info(
'sending notification %s on %s to %s',
notification,
info,
email
)
# Load user language
if language is not None:
django_translation.activate(language)
# Template name
context['subject_template'] = 'mail/{}_subject.txt'.format(
notification
)
# Adjust context
context['current_site_url'] = get_site_url()
if translation_obj is not None:
context['translation'] = translation_obj
context['translation_url'] = get_site_url(
translation_obj.get_absolute_url()
)
context['site_title'] = SITE_TITLE
# Render subject
subject = render_to_string(
context['subject_template'],
context
).strip()
# Render body
body = render_to_string(
'mail/{}.txt'.format(notification),
context
)
html_body = render_to_string(
'mail/{}.html'.format(notification),
context
)
# Define headers
headers['Auto-Submitted'] = 'auto-generated'
headers['X-AutoGenerated'] = 'yes'
headers['Precedence'] = 'bulk'
headers['X-Mailer'] = 'Weblate {}'.format(VERSION)
# Reply to header
if user is not None:
headers['Reply-To'] = user.email
# List of recipients
if email == 'ADMINS':
emails = [a[1] for a in settings.ADMINS]
else:
emails = [email]
# Create message
email = EmailMultiAlternatives(
settings.EMAIL_SUBJECT_PREFIX + subject,
body,
to=emails,
headers=headers,
)
email.attach_alternative(
html_body,
'text/html'
)
# Return the mail
return email
finally:
django_translation.activate(cur_language)
def send_notification_email(language, email, notification,
translation_obj=None, context=None, headers=None,
user=None, info=None):
'''
Renders and sends notification email.
'''
email = get_notification_email(
language, email, notification, translation_obj, context, headers,
user, info
)
send_mails([email])
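# Illustrative call (argument values assumed): send_notification_email('en',
# 'admin@example.com', 'merge_failure', subproject, {'subproject': subproject,
# 'status': status, 'error': error}) renders mail/merge_failure_subject.txt
# plus mail/merge_failure.txt and .html, and delivers the resulting message
# through a single SMTP connection via send_mails().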
class VerifiedEmail(models.Model):
'''
Storage for verified emails from auth backends.
'''
social = models.ForeignKey(UserSocialAuth)
email = models.EmailField(max_length=254)
def __unicode__(self):
return u'{0} - {1}'.format(
self.social.user.username,
self.email
)
class ProfileManager(models.Manager):
'''
Manager providing shortcuts for subscription queries.
'''
# pylint: disable=W0232
def subscribed_any_translation(self, project, language, user):
return self.filter(
subscribe_any_translation=True,
subscriptions=project,
languages=language
).exclude(
user=user
)
def subscribed_new_language(self, project, user):
return self.filter(
subscribe_new_language=True,
subscriptions=project,
).exclude(
user=user
)
def subscribed_new_string(self, project, language):
return self.filter(
subscribe_new_string=True,
subscriptions=project,
languages=language
)
def subscribed_new_suggestion(self, project, language, user):
ret = self.filter(
subscribe_new_suggestion=True,
subscriptions=project,
languages=language
)
# We don't want to filter out anonymous user
if user is not None and user.is_authenticated():
ret = ret.exclude(user=user)
return ret
def subscribed_new_contributor(self, project, language, user):
return self.filter(
subscribe_new_contributor=True,
subscriptions=project,
languages=language
).exclude(
user=user
)
def subscribed_new_comment(self, project, language, user):
ret = self.filter(
subscribe_new_comment=True,
subscriptions=project
).exclude(
user=user
)
# Source comments go to every subscriber
if language is not None:
ret = ret.filter(languages=language)
return ret
def subscribed_merge_failure(self, project):
return self.filter(subscribe_merge_failure=True, subscriptions=project)
class Profile(models.Model):
'''
User profiles storage.
'''
user = models.OneToOneField(User, unique=True, editable=False)
language = models.CharField(
verbose_name=_(u"Interface Language"),
max_length=10,
choices=settings.LANGUAGES
)
languages = models.ManyToManyField(
Language,
verbose_name=_('Translated languages'),
blank=True,
help_text=_('Choose languages to which you can translate.')
)
secondary_languages = models.ManyToManyField(
Language,
verbose_name=_('Secondary languages'),
related_name='secondary_profile_set',
blank=True,
)
suggested = models.IntegerField(default=0, db_index=True)
translated = models.IntegerField(default=0, db_index=True)
hide_completed = models.BooleanField(
verbose_name=_('Hide completed translations on dashboard'),
default=False
)
secondary_in_zen = models.BooleanField(
verbose_name=_('Show secondary translations in zen mode'),
default=True
)
hide_source_secondary = models.BooleanField(
verbose_name=_('Hide source if there is secondary language'),
default=False
)
subscriptions = models.ManyToManyField(
'trans.Project',
verbose_name=_('Subscribed projects'),
blank=True,
)
subscribe_any_translation = models.BooleanField(
verbose_name=_('Notification on any translation'),
default=False
)
subscribe_new_string = models.BooleanField(
verbose_name=_('Notification on new string to translate'),
default=False
)
subscribe_new_suggestion = models.BooleanField(
verbose_name=_('Notification on new suggestion'),
default=False
)
subscribe_new_contributor = models.BooleanField(
verbose_name=_('Notification on new contributor'),
default=False
)
subscribe_new_comment = models.BooleanField(
verbose_name=_('Notification on new comment'),
default=False
)
subscribe_merge_failure = models.BooleanField(
verbose_name=_('Notification on merge failure'),
default=False
)
subscribe_new_language = models.BooleanField(
verbose_name=_('Notification on new language request'),
default=False
)
SUBSCRIPTION_FIELDS = (
'subscribe_any_translation',
'subscribe_new_string',
'subscribe_new_suggestion',
'subscribe_new_contributor',
'subscribe_new_comment',
'subscribe_merge_failure',
'subscribe_new_language',
)
objects = ProfileManager()
def __unicode__(self):
return self.user.username
def get_user_display(self):
return get_user_display(self.user)
def get_user_display_link(self):
return get_user_display(self.user, True, True)
def get_user_name(self):
return get_user_display(self.user, False)
@models.permalink
def get_absolute_url(self):
return ('user_page', (), {
'user': self.user.username
})
@property
def last_change(self):
'''
Returns date of last change user has done in Weblate.
'''
try:
return self.user.change_set.all()[0].timestamp
except IndexError:
return None
def notify_user(self, notification, translation_obj,
context=None, headers=None, user=None):
'''
Wrapper for sending notifications to user.
'''
if context is None:
context = {}
if headers is None:
headers = {}
# Check whether user is still allowed to access this project
if not translation_obj.has_acl(self.user):
return
# Generate notification
return get_notification_email(
self.language,
self.user.email,
notification,
translation_obj,
context,
headers,
user=user
)
def notify_any_translation(self, unit, oldunit):
'''
Sends notification on translation.
'''
if oldunit.translated:
template = 'changed_translation'
else:
template = 'new_translation'
return self.notify_user(
template,
unit.translation,
{
'unit': unit,
'oldunit': oldunit,
}
)
def notify_new_language(self, subproject, language, user):
'''
Sends notification on new language request.
'''
return self.notify_user(
'new_language',
subproject,
{
'language': language,
'user': user,
},
user=user
)
def notify_new_string(self, translation):
'''
Sends notification on new strings to translate.
'''
return self.notify_user(
'new_string',
translation,
)
def notify_new_suggestion(self, translation, suggestion, unit):
'''
Sends notification on new suggestion.
'''
return self.notify_user(
'new_suggestion',
translation,
{
'suggestion': suggestion,
'unit': unit,
}
)
def notify_new_contributor(self, translation, user):
'''
Sends notification on new contributor.
'''
return self.notify_user(
'new_contributor',
translation,
{
'user': user,
}
)
def notify_new_comment(self, unit, comment, user):
'''
Sends notification about new comment.
'''
return self.notify_user(
'new_comment',
unit.translation,
{
'unit': unit,
'comment': comment,
'subproject': unit.translation.subproject,
},
user=user,
)
def notify_merge_failure(self, subproject, error, status):
'''
Sends notification on merge failure.
'''
return self.notify_user(
'merge_failure',
subproject,
{
'subproject': subproject,
'error': error,
'status': status,
}
)
@property
def full_name(self):
'''
Returns user's full name.
'''
return self.user.first_name
def set_lang(request, profile):
"""
Sets session language based on user preferences.
"""
request.session[LANGUAGE_SESSION_KEY] = profile.language
@receiver(user_logged_in)
def post_login_handler(sender, request, user, **kwargs):
'''
Signal handler for setting user language and
migrating profile if needed.
'''
# Warning about setting password
if (getattr(user, 'backend', '').endswith('.EmailAuth') and
not user.has_usable_password()):
request.session['show_set_password'] = True
# Ensure user has a profile
profile = Profile.objects.get_or_create(user=user)[0]
# Migrate django-registration based verification to python-social-auth
if (user.has_usable_password() and
not user.social_auth.filter(provider='email').exists()):
social = user.social_auth.create(
provider='email',
uid=user.email,
)
VerifiedEmail.objects.create(
social=social,
email=user.email,
)
# Set language for session based on preferences
set_lang(request, profile)
def create_groups(update):
'''
Creates standard groups and gives them permissions.
'''
guest_group, created = Group.objects.get_or_create(name='Guests')
if created or update:
guest_group.permissions.add(
Permission.objects.get(codename='can_see_git_repository'),
Permission.objects.get(codename='add_suggestion'),
)
group, created = Group.objects.get_or_create(name='Users')
if created or update:
group.permissions.add(
Permission.objects.get(codename='upload_translation'),
Permission.objects.get(codename='overwrite_translation'),
Permission.objects.get(codename='save_translation'),
Permission.objects.get(codename='save_template'),
Permission.objects.get(codename='accept_suggestion'),
Permission.objects.get(codename='delete_suggestion'),
Permission.objects.get(codename='vote_suggestion'),
Permission.objects.get(codename='ignore_check'),
Permission.objects.get(codename='upload_dictionary'),
Permission.objects.get(codename='add_dictionary'),
Permission.objects.get(codename='change_dictionary'),
Permission.objects.get(codename='delete_dictionary'),
Permission.objects.get(codename='lock_translation'),
Permission.objects.get(codename='can_see_git_repository'),
Permission.objects.get(codename='add_comment'),
Permission.objects.get(codename='add_suggestion'),
Permission.objects.get(codename='use_mt'),
)
owner_permissions = (
Permission.objects.get(codename='author_translation'),
Permission.objects.get(codename='upload_translation'),
Permission.objects.get(codename='overwrite_translation'),
Permission.objects.get(codename='commit_translation'),
Permission.objects.get(codename='update_translation'),
Permission.objects.get(codename='push_translation'),
Permission.objects.get(codename='automatic_translation'),
Permission.objects.get(codename='save_translation'),
Permission.objects.get(codename='save_template'),
Permission.objects.get(codename='accept_suggestion'),
Permission.objects.get(codename='vote_suggestion'),
Permission.objects.get(codename='override_suggestion'),
Permission.objects.get(codename='delete_comment'),
Permission.objects.get(codename='delete_suggestion'),
Permission.objects.get(codename='ignore_check'),
Permission.objects.get(codename='upload_dictionary'),
Permission.objects.get(codename='add_dictionary'),
Permission.objects.get(codename='change_dictionary'),
Permission.objects.get(codename='delete_dictionary'),
Permission.objects.get(codename='lock_subproject'),
Permission.objects.get(codename='reset_translation'),
Permission.objects.get(codename='lock_translation'),
Permission.objects.get(codename='can_see_git_repository'),
Permission.objects.get(codename='add_comment'),
Permission.objects.get(codename='delete_comment'),
Permission.objects.get(codename='add_suggestion'),
Permission.objects.get(codename='use_mt'),
Permission.objects.get(codename='edit_priority'),
Permission.objects.get(codename='edit_flags'),
Permission.objects.get(codename='manage_acl'),
Permission.objects.get(codename='download_changes'),
Permission.objects.get(codename='view_reports'),
)
group, created = Group.objects.get_or_create(name='Managers')
if created or update:
group.permissions.add(*owner_permissions)
group, created = Group.objects.get_or_create(name='Owners')
if created or update:
group.permissions.add(*owner_permissions)
created = True
try:
anon_user = User.objects.get(
username=ANONYMOUS_USER_NAME,
)
created = False
if anon_user.is_active:
raise ValueError(
'Anonymous user ({}) already exists and enabled, '
'please change ANONYMOUS_USER_NAME setting.'.format(
ANONYMOUS_USER_NAME,
)
)
except User.DoesNotExist:
anon_user = User.objects.create(
username=ANONYMOUS_USER_NAME,
is_active=False,
)
if created or update:
anon_user.set_unusable_password()
anon_user.groups.clear()
anon_user.groups.add(guest_group)
def move_users():
'''
Moves users to default group.
'''
group = Group.objects.get(name='Users')
for user in User.objects.all():
user.groups.add(group)
def remove_user(user):
'''
Removes user account.
'''
# Send signal (to commit any pending changes)
user_pre_delete.send(instance=user, sender=user.__class__)
# Change username
user.username = 'deleted-{0}'.format(user.pk)
while User.objects.filter(username=user.username).exists():
user.username = 'deleted-{0}-{1}'.format(
user.pk,
binascii.b2a_hex(os.urandom(5))
)
# Remove user information
user.first_name = 'Deleted User'
user.last_name = ''
user.email = '[email protected]'
# Disable the user
user.is_active = False
user.set_unusable_password()
user.save()
# Remove all social auth associations
user.social_auth.all().delete()
@receiver(post_migrate)
def sync_create_groups(sender, **kwargs):
'''
Create groups on syncdb.
'''
if sender.label == 'accounts':
create_groups(False)
@receiver(post_save, sender=User)
def create_profile_callback(sender, instance, created=False, **kwargs):
'''
Automatically adds user to Users group.
'''
if created:
# Add user to Users group if it exists
try:
group = Group.objects.get(name='Users')
instance.groups.add(group)
except Group.DoesNotExist:
pass
| quinox/weblate | weblate/accounts/models.py | Python | gpl-3.0 | 26,496 |
#coding: utf-8
from __future__ import unicode_literals, absolute_import
import logging
import json
from django.utils.dateparse import parse_datetime
from django.utils import timezone
from wechatpy.exceptions import WeChatClientException
from common import wechat_client
from .local_parser import LocalParser
from remind.models import Remind
from .exceptions import ParseError
logger = logging.getLogger(__name__)
def parse(text, **kwargs):
"""Returns a Remind"""
    # Try to parse by rules first and only fall back to the wechat API, since the wechat API is unstable and inaccurate.
logger.info('Trying to parse "%s" using rules.', text)
reminder = LocalParser().parse_by_rules(text)
if not reminder:
logger.info('Failed to parse time from "%s" using rules, try wechat api.', text)
reminder = parse_by_wechat_api(text, **kwargs)
if reminder.time <= timezone.now(): # GMT and UTC time can compare with each other
raise ParseError('/:no%s已经过去了,请重设一个将来的提醒。\n\n消息: %s' % (
reminder.time.strftime('%Y-%m-%d %H:%M'), text))
return reminder
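# Illustrative usage (assumption, not part of the original module): calling
# parse(u'提醒我明天上午十点开会') first tries the rule-based LocalParser and only
# falls back to the wechat semantic API; the returned Remind has a `time` in
# the future, otherwise ParseError is raised.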
def parse_by_wechat_api(text, **kwargs):
"""
{
"errcode": 0,
"query": "提醒我上午十点开会",
"semantic": {
"details": {
"answer": "",
"context_info": {},
"datetime": {
"date": "2015-12-23",
"date_lunar": "2015-11-13",
"time": "10:00:00",
"time_ori": "上午十点",
"type": "DT_ORI",
"week": "3"
},
"event": "开会",
"hit_str": "提醒 我 上午 十点 开会 ",
"remind_type": "0"
},
"intent": "SEARCH"
},
"type": "remind"
}
"""
try:
wechat_result = wechat_client.semantic.search(
query=text,
category='remind',
city='上海', # F**k, weixin always needs the city param, hard-code one.
**kwargs
)
except WeChatClientException as e:
logger.info('Failed to parse using wechat api ' + str(e))
raise
# wechat_result = json.loads(parse_by_wechat_api.__doc__)
logger.debug('Semantic result from wechat, %s',
json.dumps(wechat_result, ensure_ascii=False))
dt_str = '%s %s+08:00' % (
wechat_result['semantic']['details']['datetime']['date'],
wechat_result['semantic']['details']['datetime']['time'],
) # there could be nothing in details
dt = parse_datetime(dt_str)
return Remind(time=dt,
desc=wechat_result.get('query', ''),
event=wechat_result['semantic']['details'].get('event', ''))
def parse_by_boson(text):
pass
| polyrabbit/WeCron | WeCron/wxhook/todo_parser/__init__.py | Python | gpl-3.0 | 2,854 |
from settings import CONTENT_SERVER
"""
context processor applied to all requests
"""
def settings_cp(request):
return {'content_server': CONTENT_SERVER}
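# Hypothetical wiring example (dotted path assumed from this repository layout):
# adding 'context_processors.global_cp.settings_cp' to Django's
# TEMPLATE_CONTEXT_PROCESSORS setting would expose {{ content_server }} in all
# templates rendered with a RequestContext.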
| elifeasley/metacademy-application | app_server/context_processors/global_cp.py | Python | gpl-3.0 | 160 |
# This file is part of JST.
#
# JST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# JST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JST. If not, see <http://www.gnu.org/licenses/>.
# This file was made to prevent circular dependencies; if we can do something better, let's do it.
import mips.registers as mr
SPILL_MEM_LABEL = 'SPILL_MEMORY'
SPILL_MEM_SIZE = 64 # bytes
TEMPROARY_REGISTER_SET = mr.T_REGISTERS
NOT_TESTING_FUNCTIONS = False
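# Illustrative use of the constants above (assembly output is an assumption):
# the spill area would typically be emitted as a MIPS data declaration such as
#   SPILL_MEMORY: .space 64
# giving the temporary ($t) registers somewhere to spill when register
# allocation runs out of free registers.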
| s-gogna/JST | mips/configurations.py | Python | gpl-3.0 | 904 |
# -*- coding: utf-8 -*-
"""Caliopen mail message privacy features extraction methods."""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pgpy
from caliopen_main.pi.parameters import PIParameter
from .helpers.spam import SpamScorer
from .helpers.ingress_path import get_ingress_features
from .helpers.importance_level import compute_importance
from .types import init_features
log = logging.getLogger(__name__)
TLS_VERSION_PI = {
'tlsv1/sslv3': 2,
'tls1': 7,
'tlsv1': 7,
'tls12': 10,
}
PGP_MESSAGE_HEADER = '\n-----BEGIN PGP MESSAGE-----'
class InboundMailFeature(object):
"""Process a parsed mail message and extract available privacy features."""
def __init__(self, message, config):
"""Get a ``MailMessage`` instance and extract privacy features."""
self.message = message
self.config = config
self._features = init_features('message')
def is_blacklist_mx(self, mx):
"""MX is blacklisted."""
blacklisted = self.config.get('blacklistes.mx')
if not blacklisted:
return False
if mx in blacklisted:
return True
return False
def is_whitelist_mx(self, mx):
"""MX is whitelisted."""
whitelistes = self.config.get('whitelistes.mx')
if not whitelistes:
return False
if mx in whitelistes:
return True
return False
@property
def internal_domains(self):
"""Get internal hosts from configuration."""
domains = self.config.get('internal_domains')
return domains if domains else []
def emitter_reputation(self, mx):
"""Return features about emitter."""
if self.is_blacklist_mx(mx):
return 'blacklisted'
if self.is_whitelist_mx(mx):
return 'whitelisted'
return 'unknown'
def emitter_certificate(self):
"""Get the certificate from emitter."""
return None
@property
def mail_agent(self):
"""Get the mailer used for this message."""
# XXX normalize better and more ?
return self.message.mail.get('X-Mailer', '').lower()
@property
def transport_signature(self):
"""Get the transport signature if any."""
return self.message.mail.get('DKIM-Signature')
@property
def spam_informations(self):
"""Return a global spam_score and related features."""
spam = SpamScorer(self.message.mail)
return {'spam_score': spam.score,
'spam_method': spam.method,
'is_spam': spam.is_spam}
@property
def is_internal(self):
"""Return true if it's an internal message."""
from_ = self.message.mail.get('From')
for domain in self.internal_domains:
if domain in from_:
return True
return False
def get_signature_informations(self):
"""Get message signature features."""
signed_parts = [x for x in self.message.attachments
if 'pgp-sign' in x.content_type]
if not signed_parts:
return {}
sign = pgpy.PGPSignature()
features = {'message_signed': True,
'message_signature_type': 'PGP'}
try:
sign.parse(signed_parts[0].data)
features.update({'message_signer': sign.signer})
except Exception as exc:
log.error('Unable to parse pgp signature {}'.format(exc))
return features
def get_encryption_informations(self):
"""Get message encryption features."""
is_encrypted = False
if 'encrypted' in self.message.extra_parameters:
is_encrypted = True
# Maybe pgp/inline ?
if not is_encrypted:
try:
body = self.message.body_plain.decode('utf-8')
if body.startswith(PGP_MESSAGE_HEADER):
is_encrypted = True
except UnicodeDecodeError:
log.warn('Invalid body_plain encoding for message')
pass
return {'message_encrypted': is_encrypted,
'message_encryption_method': 'pgp' if is_encrypted else ''}
def _get_features(self):
"""Extract privacy features."""
features = self._features.copy()
received = self.message.headers.get('Received', [])
features.update(get_ingress_features(received, self.internal_domains))
mx = features.get('ingress_server')
reputation = None if not mx else self.emitter_reputation(mx)
features['mail_emitter_mx_reputation'] = reputation
features['mail_emitter_certificate'] = self.emitter_certificate()
features['mail_agent'] = self.mail_agent
features['is_internal'] = self.is_internal
features.update(self.get_signature_informations())
features.update(self.get_encryption_informations())
features.update(self.spam_informations)
if self.transport_signature:
features.update({'transport_signed': True})
return features
def _compute_pi(self, participants, features):
"""Compute Privacy Indexes for a message."""
log.info('PI features {}'.format(features))
pi_cx = {} # Contextual privacy index
pi_co = {} # Comportemental privacy index
pi_t = {} # Technical privacy index
reput = features.get('mail_emitter_mx_reputation')
if reput == 'whitelisted':
pi_cx['reputation_whitelist'] = 20
elif reput == 'unknown':
pi_cx['reputation_unknow'] = 10
known_contacts = []
known_public_key = 0
for part, contact in participants:
if contact:
known_contacts.append(contact)
if contact.public_key:
known_public_key += 1
if len(participants) == len(known_contacts):
            # - If all the contacts are already known, the contextual PI (PIᶜˣ)
            # increases by the lowest comportment PI (PIᶜᵒ) among those contacts.
contact_pi_cos = [x.pi['comportment'] for x in known_contacts
if x.pi and 'comportment' in x.pi]
if contact_pi_cos:
pi_cx['known_contacts'] = min(contact_pi_cos)
if known_public_key == len(known_contacts):
pi_co['contact_pubkey'] = 20
ext_hops = features.get('nb_external_hops', 0)
if ext_hops <= 1:
tls = features.get('ingress_socket_version')
if tls:
if tls not in TLS_VERSION_PI:
log.warn('Unknown TLS version {}'.format(tls))
else:
                    pi_t['tls_version'] = TLS_VERSION_PI[tls]
if features.get('mail_emitter_certificate'):
pi_t['emitter_certificate'] = 10
if features.get('transport_signed'):
pi_t['transport_signed'] = 10
if features.get('message_encrypted'):
pi_t['encrypted'] = 30
log.info('PI compute t:{} cx:{} co:{}'.format(pi_t, pi_cx, pi_co))
return PIParameter({'technic': sum(pi_t.values()),
'context': sum(pi_cx.values()),
'comportment': sum(pi_co.values()),
'version': 0})
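    # Worked example (figures illustrative, not from the original source): a
    # message from a whitelisted MX (context +20), with at most one external hop
    # over tls12 and a DKIM transport signature (technic +10 +10), where every
    # participant is known and has a public key (comportment +20), would yield
    # PIParameter({'technic': 20, 'context': 20, 'comportment': 20, 'version': 0}),
    # leaving aside the known-contacts bonus that depends on the contacts' own PI.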
def process(self, user, message, participants):
"""
Process the message for privacy features and PI compute.
:param user: user the message belong to
:ptype user: caliopen_main.user.core.User
:param message: a message parameter that will be updated with PI
:ptype message: NewMessage
:param participants: an array of participant with related Contact
:ptype participants: list(Participant, Contact)
"""
features = self._get_features()
message.pi = self._compute_pi(participants, features)
il = compute_importance(user, message, features, participants)
message.privacy_features = features
message.importance_level = il
| CaliOpen/CaliOpen | src/backend/components/py.pi/caliopen_pi/features/mail.py | Python | gpl-3.0 | 8,082 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-11-21 04:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posgradmin', '0039_auto_20191120_2249'),
]
operations = [
migrations.AlterModelOptions(
name='profesor',
options={'ordering': ['user__first_name', 'user__last_name'], 'verbose_name_plural': 'Profesores'},
),
]
| sostenibilidad-unam/posgrado | posgradmin/posgradmin/migrations/0040_auto_20191120_2258.py | Python | gpl-3.0 | 484 |
""" Interacts with sqlite3 db
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import sqlite3
import os
import hashlib
import random
import time
import DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.FrameworkSystem.private.monitoring.Activity import Activity
from DIRAC.Core.Utilities import Time
class MonitoringCatalog(object):
"""
This class is used to perform all kinds queries to the sqlite3 database.
"""
def __init__(self, dataPath):
"""
Initialize monitoring catalog
"""
self.dbConn = False
self.dataPath = dataPath
self.log = gLogger.getSubLogger("ActivityCatalog")
self.createSchema()
def __connect(self):
"""
Connects to database
"""
if not self.dbConn:
dbPath = "%s/monitoring.db" % self.dataPath
self.dbConn = sqlite3.connect(dbPath, timeout=20, isolation_level=None)
# These two settings dramatically increase the performance
# at the cost of a small corruption risk in case of OS crash
# It is acceptable though, given the nature of the data
# details here https://www.sqlite.org/pragma.html
c = self.dbConn.cursor()
c.execute("PRAGMA synchronous = OFF")
c.execute("PRAGMA journal_mode = TRUNCATE")
def __dbExecute(self, query, values=False):
"""
Executes a sql statement.
:type query: string
:param query: The query to be executed.
:type values: bool
:param values: To execute query with values or not.
:return: the cursor.
"""
cursor = self.dbConn.cursor() # pylint: disable=no-member
self.log.debug("Executing %s" % query)
executed = False
retry = 0
while not executed and retry < 10:
retry += 1
try:
if values:
cursor.execute(query, values)
else:
cursor.execute(query)
executed = True
except Exception as e:
self.log.exception("Exception executing statement", "query: %s, values: %s" % (query, values))
time.sleep(random.random())
if not executed:
self.log.error("Could not execute query, big mess ahead", "query: %s, values: %s" % (query, values))
return cursor
def __createTables(self):
"""
Creates tables if not already created
"""
self.log.info("Creating tables in db")
try:
filePath = "%s/monitoringSchema.sql" % os.path.dirname(__file__)
fd = open(filePath)
buff = fd.read()
fd.close()
except IOError as e:
DIRAC.abort(1, "Can't read monitoring schema", filePath)
while buff.find(";") > -1:
limit = buff.find(";") + 1
sqlQuery = buff[:limit].replace("\n", "")
buff = buff[limit:]
try:
self.__dbExecute(sqlQuery)
except Exception as e:
DIRAC.abort(1, "Can't create tables", str(e))
def createSchema(self):
"""
Creates all the sql schema if it does not exist
"""
self.__connect()
try:
sqlQuery = "SELECT name FROM sqlite_master WHERE type='table';"
c = self.__dbExecute(sqlQuery)
tablesList = c.fetchall()
if len(tablesList) < 2:
self.__createTables()
except Exception as e:
self.log.fatal("Failed to startup db engine", str(e))
return False
return True
def __delete(self, table, dataDict):
"""
Executes an sql delete.
:type table: string
:param table: name of the table.
:type dataDict: dictionary
:param dataDict: the data dictionary.
"""
query = "DELETE FROM %s" % table
valuesList = []
keysList = []
for key in dataDict:
if isinstance(dataDict[key], list):
orList = []
for keyValue in dataDict[key]:
valuesList.append(keyValue)
orList.append("%s = ?" % key)
keysList.append("( %s )" % " OR ".join(orList))
else:
valuesList.append(dataDict[key])
keysList.append("%s = ?" % key)
if keysList:
query += " WHERE %s" % (" AND ".join(keysList))
self.__dbExecute("%s;" % query, values=valuesList)
def __select(self, fields, table, dataDict, extraCond="", queryEnd=""):
"""
Executes a sql select.
:type fields: string
:param fields: The fields required in a string.
:type table: string
:param table: name of the table.
:type dataDict: dictionary
:param dataDict: the data dictionary.
:return: a list of values.
"""
valuesList = []
keysList = []
for key in dataDict:
if isinstance(dataDict[key], list):
orList = []
for keyValue in dataDict[key]:
valuesList.append(keyValue)
orList.append("%s = ?" % key)
keysList.append("( %s )" % " OR ".join(orList))
else:
valuesList.append(dataDict[key])
keysList.append("%s = ?" % key)
if isinstance(fields, six.string_types):
fields = [fields]
if len(keysList) > 0:
whereCond = "WHERE %s" % (" AND ".join(keysList))
else:
whereCond = ""
if extraCond:
if whereCond:
whereCond += " AND %s" % extraCond
else:
whereCond = "WHERE %s" % extraCond
query = "SELECT %s FROM %s %s %s;" % (",".join(fields), table, whereCond, queryEnd)
c = self.__dbExecute(query, values=valuesList)
return c.fetchall()
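    # Illustrative call (values assumed): __select("id, site", "sources",
    # {"componentType": "agent", "site": ["LCG.CERN.ch", "LCG.IN2P3.fr"]})
    # builds a statement of the form
    # "SELECT id, site FROM sources WHERE componentType = ? AND ( site = ? OR site = ? );"
    # and returns cursor.fetchall() for the bound values.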
def __insert(self, table, specialDict, dataDict):
"""
Executes an sql insert.
:type table: string
:param table: name of the table.
:type specialDict: dictionary
:param specialDict: the special dictionary.
:type dataDict: dictionary
:param dataDict: the data dictionary.
:return: the number of rows inserted.
"""
valuesList = []
        valuePointersList = []
        namesList = []
        for key in specialDict:
            namesList.append(key)
            valuePointersList.append(specialDict[key])
        for key in dataDict:
            namesList.append(key)
            valuePointersList.append("?")
            valuesList.append(dataDict[key])
        query = "INSERT INTO %s (%s) VALUES (%s);" % (table, ", ".join(namesList), ",".join(valuePointersList))
c = self.__dbExecute(query, values=valuesList)
return c.rowcount
def __update(self, newValues, table, dataDict, extraCond=""):
"""
Executes a sql update.
:type table: string
:param table: name of the table.
:type newValues: dictionary
:param newValues: a dictionary with new values.
:type dataDict: dictionary
:param dataDict: the data dictionary.
:return: the number of rows updated.
"""
valuesList = []
keysList = []
updateFields = []
for key in newValues:
updateFields.append("%s = ?" % key)
valuesList.append(newValues[key])
for key in dataDict:
if isinstance(dataDict[key], list):
orList = []
for keyValue in dataDict[key]:
valuesList.append(keyValue)
orList.append("%s = ?" % key)
keysList.append("( %s )" % " OR ".join(orList))
else:
valuesList.append(dataDict[key])
keysList.append("%s = ?" % key)
if len(keysList) > 0:
whereCond = "WHERE %s" % (" AND ".join(keysList))
else:
whereCond = ""
if extraCond:
if whereCond:
whereCond += " AND %s" % extraCond
else:
whereCond = "WHERE %s" % extraCond
query = "UPDATE %s SET %s %s;" % (table, ",".join(updateFields), whereCond)
c = self.__dbExecute(query, values=valuesList)
return c.rowcount
def registerSource(self, sourceDict):
"""
Registers an activity source.
:type sourceDict: dictionary
:param sourceDict: the source dictionary.
:return: a list of values.
"""
retList = self.__select("id", "sources", sourceDict)
if len(retList) > 0:
return retList[0][0]
else:
self.log.info("Registering source", str(sourceDict))
if self.__insert("sources", {"id": "NULL"}, sourceDict) == 0:
return -1
return self.__select("id", "sources", sourceDict)[0][0]
def registerActivity(self, sourceId, acName, acDict):
"""
Register an activity.
:type sourceId: string
:param sourceId: The source id.
:type acName: string
:param acName: name of the activity.
:type acDict: dictionary
:param acDict: The activity dictionary containing information about 'category', 'description', 'bucketLength',
'type', 'unit'.
:return: a list of values.
"""
m = hashlib.md5()
acDict["name"] = acName
acDict["sourceId"] = sourceId
m.update(str(acDict).encode())
retList = self.__select("filename", "activities", acDict)
if len(retList) > 0:
return retList[0][0]
else:
acDict["lastUpdate"] = int(Time.toEpoch() - 86000)
filePath = m.hexdigest()
filePath = "%s/%s.rrd" % (filePath[:2], filePath)
self.log.info("Registering activity", str(acDict))
# This is basically called by the ServiceInterface inside registerActivities method and then all the activity
# information is stored in the sqlite3 db using the __insert method.
if (
self.__insert(
"activities",
{
"id": "NULL",
"filename": "'%s'" % filePath,
},
acDict,
)
== 0
):
return -1
return self.__select("filename", "activities", acDict)[0][0]
def getFilename(self, sourceId, acName):
"""
Gets rrd filename for an activity.
:type sourceId: string
:param sourceId: The source id.
:type acName: string
:param acName: name of the activity.
:return: The filename in a string.
"""
queryDict = {"sourceId": sourceId, "name": acName}
retList = self.__select("filename", "activities", queryDict)
if len(retList) == 0:
return ""
else:
return retList[0][0]
def findActivity(self, sourceId, acName):
"""
Finds activity.
:type sourceId: string
:param sourceId: The source id.
:type acName: string
:param acName: name of the activity.
:return: A list containing all the activity information.
"""
queryDict = {"sourceId": sourceId, "name": acName}
retList = self.__select(
"id, name, category, unit, type, description, filename, bucketLength, lastUpdate", "activities", queryDict
)
if len(retList) == 0:
return False
else:
return retList[0]
def activitiesQuery(self, selDict, sortList, start, limit):
"""
Gets all the sources and activities details in a joined format.
:type selDict: dictionary
:param selDict: The fields inside the select query.
:type sortList: list
:param sortList: A list in sorted order of the data.
:type start: int
:param start: The point or tuple from where to start.
:type limit: int
:param limit: The number of tuples to select from the starting point.
:return: S_OK with a tuple of the result list and fields list.
"""
fields = [
"sources.id",
"sources.site",
"sources.componentType",
"sources.componentLocation",
"sources.componentName",
"activities.id",
"activities.name",
"activities.category",
"activities.unit",
"activities.type",
"activities.description",
"activities.bucketLength",
"activities.filename",
"activities.lastUpdate",
]
extraSQL = ""
if sortList:
for sorting in sortList:
if sorting[0] not in fields:
return S_ERROR("Sorting field %s is invalid" % sorting[0])
extraSQL = "ORDER BY %s" % ",".join(["%s %s" % sorting for sorting in sortList])
if limit:
if start:
extraSQL += " LIMIT %s OFFSET %s" % (limit, start)
else:
extraSQL += " LIMIT %s" % limit
# This method basically takes in some condition and then based on those performs SQL Join on the
# sources and activities table of the sqlite3 db and returns the corresponding result.
retList = self.__select(
", ".join(fields), "sources, activities", selDict, "sources.id = activities.sourceId", extraSQL
)
return S_OK((retList, fields))
def setLastUpdate(self, sourceId, acName, lastUpdateTime):
"""
Updates the lastUpdate timestamp for a particular activity using the source id.
:type sourceId: string
:param sourceId: The source id.
:type acName: string
:param acName: name of the activity.
:type lastUpdateTime: string
:param lastUpdateTime: The last update time in the proper format.
:return: the number of rows updated.
"""
queryDict = {"sourceId": sourceId, "name": acName}
return self.__update({"lastUpdate": lastUpdateTime}, "activities", queryDict)
def getLastUpdate(self, sourceId, acName):
"""
Gets the lastUpdate timestamp for a particular activity using the source id.
:type sourceId: string
:param sourceId: The source id.
:type acName: string
:param acName: name of the activity.
:return: The last update time in string.
"""
queryDict = {"sourceId": sourceId, "name": acName}
retList = self.__update("lastUpdate", "activities", queryDict)
if len(retList) == 0:
return False
else:
return retList[0]
def queryField(self, field, definedFields):
"""
Query the values of a field given a set of defined ones.
:type field: string
:param field: The field required in a string.
:type field: list
:param definedFields: A set of defined fields.
:return: A list of values.
"""
retList = self.__select(field, "sources, activities", definedFields, "sources.id = activities.sourceId")
return retList
def getMatchingActivities(self, condDict):
"""
Gets all activities matching the defined conditions.
:type condDict: dictionary.
:param condDict: A dictionary containing the conditions.
:return: a list of matching activities.
"""
retList = self.queryField(Activity.dbFields, condDict)
acList = []
for acData in retList:
acList.append(Activity(acData))
return acList
def registerView(self, viewName, viewData, varFields):
"""
Registers a new view.
:type viewName: string
:param viewName: Name of the view.
        :type viewData: string
        :param viewData: The view definition.
:type varFields: list
:param varFields: A list of variable fields.
:return: S_OK / S_ERROR with the corresponding error message.
"""
retList = self.__select("id", "views", {"name": viewName})
if len(retList) > 0:
return S_ERROR("Name for view name already exists")
retList = self.__select("name", "views", {"definition": viewData})
if len(retList) > 0:
return S_ERROR("View specification already defined with name '%s'" % retList[0][0])
self.__insert(
"views", {"id": "NULL"}, {"name": viewName, "definition": viewData, "variableFields": ", ".join(varFields)}
)
return S_OK()
def getViews(self, onlyStatic):
"""
Gets views.
:type onlyStatic: bool
:param onlyStatic: Whether the views required are static or not.
:return: A list of values.
"""
queryCond = {}
if onlyStatic:
queryCond["variableFields"] = ""
return self.__select("id, name, variableFields", "views", queryCond)
def getViewById(self, viewId):
"""
Gets a view for a given id.
:type viewId: string
:param viewId: The view id.
:return: A list of values.
"""
if isinstance(viewId, six.string_types):
return self.__select("definition, variableFields", "views", {"name": viewId})
else:
return self.__select("definition, variableFields", "views", {"id": viewId})
def deleteView(self, viewId):
"""
Deletes a view for a given id.
:type viewId: string
:param viewId: The view id.
"""
self.__delete("views", {"id": viewId})
def getSources(self, dbCond, fields=[]):
"""
        Gets sources for a given db condition.
:type dbCond: dictionary
:param dbCond: The required database conditions.
:type fields: list
:param fields: A list of required fields.
:return: The list of results after the query is performed.
"""
if not fields:
fields = "id, site, componentType, componentLocation, componentName"
else:
fields = ", ".join(fields)
return self.__select(fields, "sources", dbCond)
def getActivities(self, dbCond):
"""
Gets activities given a db condition.
:type dbCond: dictionary
:param dbCond: The required database conditions.
:return: a list of activities.
"""
return self.__select("id, name, category, unit, type, description, bucketLength", "activities", dbCond)
def deleteActivity(self, sourceId, activityId):
"""
Deletes an activity.
:type sourceId: string
:param sourceId: The source id.
:type activityId: string
:param activityId: The activity id.
:return: S_OK with rrd filename / S_ERROR with a message.
"""
acCond = {"sourceId": sourceId, "id": activityId}
acList = self.__select("filename", "activities", acCond)
if len(acList) == 0:
return S_ERROR("Activity does not exist")
rrdFile = acList[0][0]
self.__delete("activities", acCond)
acList = self.__select("id", "activities", {"sourceId": sourceId})
if len(acList) == 0:
self.__delete("sources", {"id": sourceId})
return S_OK(rrdFile)
| ic-hep/DIRAC | src/DIRAC/FrameworkSystem/private/monitoring/MonitoringCatalog.py | Python | gpl-3.0 | 19,766 |
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from os.path import dirname, abspath, join, curdir
from nose.tools import assert_equals, with_setup
from tests.asserts import prepare_stdout
def test_imports_terrain_under_path_that_is_run():
old_path = abspath(curdir)
os.chdir(join(abspath(dirname(__file__)), 'simple_features', '1st_feature_dir'))
    status, output = subprocess.getstatusoutput('python -c "from lettuce import world;assert hasattr(world, \'works_fine\'); print(\'it passed!\')"')
assert_equals(status, 0)
assert_equals(output, "it passed!")
os.chdir(old_path)
@with_setup(prepare_stdout)
def test_after_each_all_is_executed_before_each_all():
"terrain.before.each_all and terrain.after.each_all decorators"
from lettuce import step
from lettuce import Runner
from lettuce.terrain import before, after, world
world.all_steps = []
@before.all
def set_state_to_before():
world.all_steps.append('before')
@step('append 1 in world all steps')
def append_1_in_world_all_steps(step):
world.all_steps.append("1")
@step('append 2 more')
def append_2_more(step):
world.all_steps.append("2")
@step('append 3 in world all steps')
def append_during_to_all_steps(step):
world.all_steps.append("3")
@after.all
def set_state_to_after(total):
world.all_steps.append('after')
runner = Runner(join(abspath(dirname(__file__)), 'simple_features', '2nd_feature_dir'))
runner.run()
assert_equals(
world.all_steps,
['before', '1', '2', '3', 'after']
)
| adw0rd/lettuce-py3 | tests/functional/test_terrain.py | Python | gpl-3.0 | 2,378 |
"""
Test notifiers
"""
import unittest
from sickchill.oldbeard import db
from sickchill.oldbeard.notifiers.emailnotify import Notifier as EmailNotifier
from sickchill.oldbeard.notifiers.prowl import Notifier as ProwlNotifier
from sickchill.tv import TVEpisode, TVShow
from sickchill.views.home import Home
from tests import test_lib as test
# noinspection PyProtectedMember
class NotifierTests(test.SickChillTestDBCase):
"""
Test notifiers
"""
@classmethod
def setUpClass(cls):
num_legacy_shows = 3
num_shows = 3
num_episodes_per_show = 5
cls.mydb = db.DBConnection()
cls.legacy_shows = []
cls.shows = []
# Per-show-notifications were originally added for email notifications only. To add
# this feature to other notifiers, it was necessary to alter the way text is stored in
# one of the DB columns. Therefore, to test properly, we must create some shows that
# store emails in the old method (legacy method) and then other shows that will use
# the new method.
for show_counter in range(100, 100 + num_legacy_shows):
show = TVShow(1, show_counter)
show.name = "Show " + str(show_counter)
show.episodes = []
for episode_counter in range(0, num_episodes_per_show):
episode = TVEpisode(show, test.SEASON, episode_counter)
episode.name = "Episode " + str(episode_counter + 1)
episode.quality = "SDTV"
show.episodes.append(episode)
show.saveToDB()
cls.legacy_shows.append(show)
for show_counter in range(200, 200 + num_shows):
show = TVShow(1, show_counter)
show.name = "Show " + str(show_counter)
show.episodes = []
for episode_counter in range(0, num_episodes_per_show):
episode = TVEpisode(show, test.SEASON, episode_counter)
episode.name = "Episode " + str(episode_counter + 1)
episode.quality = "SDTV"
show.episodes.append(episode)
show.saveToDB()
cls.shows.append(show)
def setUp(self):
"""
Set up tests
"""
self._debug_spew("\n\r")
@unittest.skip('Not yet implemented')
def test_boxcar(self):
"""
Test boxcar notifications
"""
pass
@unittest.skip('Cannot call directly without a request')
def test_email(self):
"""
Test email notifications
"""
email_notifier = EmailNotifier()
# Per-show-email notifications were added early on and utilized a different format than the other notifiers.
# Therefore, to test properly (and ensure backwards compatibility), this routine will test shows that use
# both the old and the new storage methodology
legacy_test_emails = "[email protected],[email protected],[email protected]"
test_emails = "[email protected],[email protected],[email protected]"
for show in self.legacy_shows:
showid = self._get_showid_by_showname(show.show_name)
self.mydb.action("UPDATE tv_shows SET notify_list = ? WHERE show_id = ?", [legacy_test_emails, showid])
for show in self.shows:
showid = self._get_showid_by_showname(show.show_name)
Home.saveShowNotifyList(show=showid, emails=test_emails)
# Now, iterate through all shows using the email list generation routines that are used in the notifier proper
shows = self.legacy_shows + self.shows
for show in shows:
for episode in show.episodes:
ep_name = episode._format_pattern('%SN - %Sx%0E - %EN - ') + episode.quality
show_name = email_notifier._parseEp(ep_name)
recipients = email_notifier._generate_recipients(show_name)
self._debug_spew("- Email Notifications for " + show.name + " (episode: " + episode.name + ") will be sent to:")
for email in recipients:
self._debug_spew("-- " + email.strip())
self._debug_spew("\n\r")
return True
@unittest.skip('Not yet implemented')
def test_emby(self):
"""
Test emby notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_freemobile(self):
"""
Test freemobile notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_growl(self):
"""
Test growl notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_kodi(self):
"""
Test kodi notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_libnotify(self):
"""
Test libnotify notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nma(self):
"""
Test nma notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nmj(self):
"""
Test nmj notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nmjv2(self):
"""
Test nmjv2 notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_plex(self):
"""
Test plex notifications
"""
pass
@unittest.skip('Cannot call directly without a request')
def test_prowl(self):
"""
Test prowl notifications
"""
prowl_notifier = ProwlNotifier()
# Prowl per-show-notifications only utilize the new methodology for storage; therefore, the list of legacy_shows
# will not be altered (to preserve backwards compatibility testing)
test_prowl_apis = "11111111111111111111,22222222222222222222"
for show in self.shows:
showid = self._get_showid_by_showname(show.show_name)
Home.saveShowNotifyList(show=showid, prowlAPIs=test_prowl_apis)
# Now, iterate through all shows using the Prowl API generation routines that are used in the notifier proper
for show in self.shows:
for episode in show.episodes:
ep_name = episode._format_pattern('%SN - %Sx%0E - %EN - ') + episode.quality
show_name = prowl_notifier._parse_episode(ep_name)
recipients = prowl_notifier._generate_recipients(show_name)
self._debug_spew("- Prowl Notifications for " + show.name + " (episode: " + episode.name + ") will be sent to:")
for api in recipients:
self._debug_spew("-- " + api.strip())
self._debug_spew("\n\r")
return True
@unittest.skip('Not yet implemented')
def test_pushalot(self):
"""
Test pushalot notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_pushbullet(self):
"""
Test pushbullet notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_pushover(self):
"""
Test pushover notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_pytivo(self):
"""
Test pytivo notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_synoindex(self):
"""
Test synoindex notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_synologynotifier(self):
"""
Test synologynotifier notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_trakt(self):
"""
Test trakt notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_tweet(self):
"""
Test tweet notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_twilio(self):
"""
Test twilio notifications
"""
pass
@staticmethod
def _debug_spew(text):
"""
Spew text notifications
:param text: to spew
:return:
"""
if __name__ == '__main__' and text is not None:
print(text)
def _get_showid_by_showname(self, showname):
"""
Get show ID by show name
:param showname:
:return:
"""
if showname is not None:
rows = self.mydb.select("SELECT show_id FROM tv_shows WHERE show_name = ?", [showname])
if len(rows) == 1:
return rows[0]['show_id']
return -1
if __name__ == '__main__':
print("==================")
print("STARTING - NOTIFIER TESTS")
print("==================")
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(NotifierTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| Vagab0nd/SiCKRAGE | tests/notifier_tests.py | Python | gpl-3.0 | 9,061 |
import os
import random
picturespath = '/storage/pictures/'
# Pick a random picture, skipping files already named "random*" so we never
# select the wallpaper we are about to rename away.
candidates = [f for f in os.listdir(picturespath) if not f.startswith("random")]
if not candidates:
    raise SystemExit("No pictures found in " + picturespath)
rfilename = random.choice(candidates)
rextension = os.path.splitext(rfilename)[1]
#TODO Probably dont need a forloop can possibly do random*
for filename in os.listdir(picturespath):
    if filename.startswith("random"):
        extension = os.path.splitext(filename)[1]
        # rename the existing random wallpaper to a random numeric name
        newname = picturespath + str(random.random()).rsplit('.', 1)[1] + extension
        os.rename(picturespath + filename, newname)
# now rename the newly chosen file to be the "random" wallpaper
os.rename(picturespath + rfilename, picturespath + 'random' + rextension)
| shoaibali/kodi.background.rotator | randombackground.py | Python | gpl-3.0 | 713 |
# -*- encoding: utf-8 -*-
import os
from abjad.tools import documentationtools
from abjad.tools import systemtools
from abjad.tools.developerscripttools.DeveloperScript import DeveloperScript
from abjad.tools.developerscripttools.ReplaceInFilesScript \
import ReplaceInFilesScript
class RenameModulesScript(DeveloperScript):
r'''Renames classes and functions.
Handle renaming the module and package, as well as any tests,
documentation or mentions of the class throughout the Abjad codebase:
.. shell::
ajv rename --help
'''
### PUBLIC PROPERTIES ###
@property
def alias(self):
r'''Alias of script.
Returns ``'rename'``.
'''
return 'rename'
@property
def long_description(self):
r'''Long description of script.
Returns string or none.
'''
return None
@property
def scripting_group(self):
r'''Scripting group of script.
Returns none.
'''
return None
@property
def short_description(self):
r'''Short description of script.
Returns string.
'''
return 'Rename public modules.'
@property
def version(self):
r'''Version of script.
Returns float.
'''
return 1.0
### PRIVATE METHODS ###
def _codebase_name_to_codebase_docs_path(self, codebase):
from abjad import abjad_configuration
if codebase == 'mainline':
return os.path.join(
abjad_configuration.abjad_directory,
'docs',
'source',
'api',
'tools',
)
elif codebase == 'experimental':
return os.path.join(
abjad_configuration.abjad_experimental_directory,
'docs',
'source',
'tools',
)
message = 'bad codebase name: {!r}.'
message = message.format(codebase)
raise Exception(message)
def _codebase_name_to_codebase_tools_path(self, codebase):
from abjad import abjad_configuration
if codebase == 'mainline':
return os.path.join(
abjad_configuration.abjad_directory, 'tools')
elif codebase == 'experimental':
return os.path.join(
abjad_configuration.abjad_experimental_directory, 'tools')
message = 'bad codebase name: {!r}.'
message = message.format(codebase)
raise Exception(message)
def _confirm_name_changes(self,
old_codebase,
old_tools_package_name,
old_module_name,
new_codebase,
new_tools_package_name,
new_module_name,
):
max_codebase = max(len(old_codebase), len(new_codebase))
old_codebase = old_codebase.ljust(max_codebase)
new_codebase = new_codebase.ljust(max_codebase)
print('')
print('Is ...')
print('')
print(' [{}] {}.{}()'.format(
old_codebase, old_tools_package_name, old_module_name))
print(' ===>')
print(' [{}] {}.{}()'.format(
new_codebase, new_tools_package_name, new_module_name))
print('')
string = raw_input('... correct [yes, no, abort]? ').lower()
print('')
if string in ('y', 'yes'):
return True
elif string in ('a', 'abort', 'q', 'quit'):
raise SystemExit
elif string in ('n', 'no'):
return False
def _get_object_names(self, kind, codebase, tools_package_name):
assert kind in ('class', 'function')
tools_path = self._codebase_name_to_codebase_tools_path(codebase)
path = os.path.join(tools_path, tools_package_name)
if kind == 'class':
generator = documentationtools.yield_all_classes(
code_root=path,
include_private_objects=True,
)
elif kind == 'function':
generator = documentationtools.yield_all_functions(
code_root=path,
include_private_objects=True,
)
return tuple(sorted(generator, key=lambda x: x.__name__))
def _get_tools_package_names(self, codebase):
tools_path = self._codebase_name_to_codebase_tools_path(codebase)
names = []
for x in os.listdir(tools_path):
if os.path.isdir(os.path.join(tools_path, x)):
if not x.startswith(('_', '.')):
names.append(x)
return tuple(sorted(names))
def _parse_tools_package_path(self, path):
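        # Expects a dotted tools-package path such as 'scoretools.Note' (an
        # illustrative example) and returns a (codebase, tools_package_name,
        # module_name) triple.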
from abjad import abjad_configuration
if '.' not in path:
raise SystemExit
tools_package_name, module_name = path.split('.')
mainline_tools_directory = os.path.join(
abjad_configuration.abjad_directory,
'tools',
)
for directory_name in os.listdir(mainline_tools_directory):
directory = os.path.join(
mainline_tools_directory, directory_name)
if not os.path.isdir(directory):
continue
elif directory_name != tools_package_name:
continue
return 'mainline', tools_package_name, module_name
experimental_tools_directory = os.path.join(
abjad_configuration.abjad_experimental_directory,
'tools',
)
        for directory_name in os.listdir(experimental_tools_directory):
directory = os.path.join(
experimental_tools_directory, directory_name)
if not os.path.isdir(directory):
continue
elif directory_name != tools_package_name:
continue
return 'experimental', tools_package_name, module_name
raise SystemExit
def _rename_old_api_page(self,
old_codebase,
old_tools_package_name,
old_module_name,
new_codebase,
new_tools_package_name,
new_module_name,
):
print('Renaming old API page ...')
old_docs_path = self._codebase_name_to_codebase_docs_path(old_codebase)
new_docs_path = self._codebase_name_to_codebase_docs_path(new_codebase)
old_rst_file_name = old_module_name + '.rst'
new_rst_file_name = new_module_name + '.rst'
old_api_path = os.path.join(
old_docs_path, old_tools_package_name, old_rst_file_name)
new_api_path = os.path.join(
new_docs_path, new_tools_package_name, new_rst_file_name)
command = 'mv {} {}'.format(
old_api_path, new_api_path)
systemtools.IOManager.spawn_subprocess(command)
print('')
def _rename_old_module(self,
old_codebase,
old_tools_package_name,
old_module_name,
new_codebase,
new_tools_package_name,
new_module_name,
):
print('Renaming old module ...')
old_tools_path = self._codebase_name_to_codebase_tools_path(
old_codebase)
new_tools_path = self._codebase_name_to_codebase_tools_path(
new_codebase)
old_module = old_module_name + '.py'
old_path = os.path.join(
old_tools_path, old_tools_package_name, old_module)
new_module = new_module_name + '.py'
new_path = os.path.join(
new_tools_path, new_tools_package_name, new_module)
command = 'git mv -f {} {}'.format(
old_path, new_path)
systemtools.IOManager.spawn_subprocess(command)
print('')
def _rename_old_test_files(self,
old_codebase,
old_tools_package_name,
old_module_name,
new_codebase,
new_tools_package_name,
new_module_name,
):
print('Renaming old test file(s) ...')
old_tools_path = self._codebase_name_to_codebase_tools_path(
old_codebase)
old_test_path = os.path.join(
old_tools_path, old_tools_package_name, 'test')
if not os.path.exists(old_test_path):
return
new_tools_path = self._codebase_name_to_codebase_tools_path(
new_codebase)
new_test_path = os.path.join(
new_tools_path, new_tools_package_name, 'test')
old_test_file_prefix = 'test_{}_{}'.format(
old_tools_package_name, old_module_name)
old_test_file_names = [x for x in os.listdir(old_test_path)
if x.startswith(old_test_file_prefix) and x.endswith('.py')]
for old_test_file_name in old_test_file_names:
old_test_file_path = os.path.join(
old_test_path, old_test_file_name)
old_test_file_suffix = old_test_file_name[
len(old_test_file_prefix):]
new_test_file_name = 'test_{}_{}{}'.format(
new_tools_package_name, new_module_name, old_test_file_suffix)
new_test_file_path = os.path.join(
new_test_path, new_test_file_name)
command = 'git mv -f {} {}'.format(
old_test_file_path, new_test_file_path)
systemtools.IOManager.spawn_subprocess(command)
print('')
def _update_codebase(self,
old_codebase,
old_tools_package_name,
old_module_name,
new_codebase,
new_tools_package_name,
new_module_name,
):
from abjad import abjad_configuration
without_dirs = ['--without-dirs', 'build', '--without-dirs', '_build']
directory = abjad_configuration.abjad_root_directory
print('Updating codebase ...')
print('')
old_text = '{}.{}'.format(old_tools_package_name, old_module_name)
new_text = '{}.{}'.format(new_tools_package_name, new_module_name)
command = [
directory,
old_text,
new_text,
'--force',
'--whole-words-only',
#'--verbose',
]
command.extend(without_dirs)
ReplaceInFilesScript()(command)
print('')
old_text = 'test_{}_{}_'.format(
old_tools_package_name, old_module_name)
new_text = 'test_{}_{}_'.format(
new_tools_package_name, new_module_name)
command = [directory, old_text, new_text, '--force', '--verbose']
command.extend(without_dirs)
ReplaceInFilesScript()(command)
print('')
old_text = old_module_name
new_text = new_module_name
command = [
directory,
old_text,
new_text,
'--force',
'--whole-words-only',
#'--verbose',
]
command.extend(without_dirs)
ReplaceInFilesScript()(command)
print('')
### PUBLIC METHODS ###
def process_args(self, args):
r'''Processes `args`.
Returns none.
'''
systemtools.IOManager.clear_terminal()
# Handle source path:
old_codebase, old_tools_package_name, old_module_name = \
self._parse_tools_package_path(args.source)
old_codebase_tools_path = self._codebase_name_to_codebase_tools_path(
old_codebase)
old_module_path = os.path.join(
old_codebase_tools_path,
old_tools_package_name,
old_module_name + '.py',
)
if not os.path.exists(old_module_path):
message = 'source does not exist: {}'
message = message.format(old_module_path)
raise SystemExit(message)
# Handle destination path:
new_codebase, new_tools_package_name, new_module_name = \
self._parse_tools_package_path(args.destination)
new_codebase_tools_path = self._codebase_name_to_codebase_tools_path(
new_codebase)
new_module_path = os.path.join(
new_codebase_tools_path,
new_tools_package_name,
new_module_name + '.py',
)
if os.path.exists(new_module_path):
message = 'destination already exists: {}'
            message = message.format(new_module_path)
raise SystemExit(message)
# Process changes:
new_args = (
old_codebase, old_tools_package_name, old_module_name,
new_codebase, new_tools_package_name, new_module_name,
)
if not self._confirm_name_changes(*new_args):
raise SystemExit
self._rename_old_test_files(*new_args)
self._rename_old_api_page(*new_args)
self._rename_old_module(*new_args)
self._update_codebase(*new_args)
raise SystemExit
def setup_argument_parser(self, parser):
r'''Sets up argument `parser`.
Returns none.
'''
parser.add_argument(
'source',
help='toolspackage path of source module',
)
parser.add_argument(
'destination',
help='toolspackage path of destination module',
) | mscuthbert/abjad | abjad/tools/developerscripttools/RenameModulesScript.py | Python | gpl-3.0 | 13,078 |
"""
Module defining the event classes used to manage collisions and check their validity
"""
from itertools import combinations
from copy import copy
from particle import Particle
class EventParticle(object):
def __init__(self, particle1, particle2):
self.particle1 = particle1
self.particle2 = particle2
self.id = (self.particle1.getCollisionCountAsCopy(), self.particle2.getCollisionCountAsCopy())
self.timeUntilCollision = self.particle1.collideParticle(self.particle2)
def isValid(self):
return self.id == (self.particle1.getCollisionCountAsCopy(), self.particle2.getCollisionCountAsCopy())
def reevaluateCollisionTime(self):
self.id = (self.particle1.getCollisionCountAsCopy(), self.particle2.getCollisionCountAsCopy())
self.timeUntilCollision = self.particle1.collideParticle(self.particle2)
def doCollision(self):
self.particle1.bounceParticle(self.particle2)
class EventWallX(object):
def __init__(self, particle):
self.particle = particle
self.id = self.particle.getCollisionCountAsCopy()
self.timeUntilCollision = self.particle.collidesWallX()
def isValid(self):
return self.id == self.particle.getCollisionCountAsCopy()
def reevaluateCollisionTime(self):
self.id = self.particle.getCollisionCountAsCopy()
self.timeUntilCollision = self.particle.collidesWallX()
def doCollision(self):
self.particle.bounceX()
class EventWallY(object):
def __init__(self, particle):
self.particle = particle
self.id = self.particle.getCollisionCountAsCopy()
self.timeUntilCollision = self.particle.collidesWallY()
def isValid(self):
return self.id == self.particle.getCollisionCountAsCopy()
def reevaluateCollisionTime(self):
self.id = self.particle.getCollisionCountAsCopy()
self.timeUntilCollision = self.particle.collidesWallY()
def doCollision(self):
self.particle.bounceY()
class EventManager(object):
def __init__(self, ListOfParticles):
self.ListOfParticles = ListOfParticles
self.ListOfEvents = []
for (particle1, particle2) in combinations(self.ListOfParticles, 2):
self.ListOfEvents.append(EventParticle(particle1, particle2))
for particle in self.ListOfParticles:
self.ListOfEvents.append(EventWallX(particle))
self.ListOfEvents.append(EventWallY(particle))
self.sortEventList()
def sortEventList(self):
def sorting_closure(event):
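            # Events that will never happen (None) or already lie in the past
            # sort last via a large sentinel time.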
if event.timeUntilCollision is None or event.timeUntilCollision < 0.0:
return 1.0e7
else:
return event.timeUntilCollision
self.ListOfEvents = sorted(self.ListOfEvents, key=sorting_closure)
def step(self):
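        # One event-driven step: re-evaluate stale events, advance every
        # particle to the earliest collision, resolve it, then shift the
        # remaining event times by the elapsed interval.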
for event in self.ListOfEvents:
if not event.isValid():
event.reevaluateCollisionTime()
self.sortEventList()
collTime = copy(self.ListOfEvents[0].timeUntilCollision)
for particle in self.ListOfParticles:
particle.advance(collTime)
self.ListOfEvents[0].doCollision()
for event in self.ListOfEvents:
if event.timeUntilCollision is not None:
event.timeUntilCollision -= collTime
if __name__ == '__main__':
import numpy as np
import pylab as plt
a = Particle(np.array([0.1, 0.5]), np.array([0.01, 0.1]), 0.05, 2.0)
b = Particle(np.array([0.4, 0.5]), np.array([-0.1, 0.01]), 0.05, 2.0)
manager = EventManager([a,b])
for i in range(20):
plt.title(a.t)
plt.scatter([a._x[0], b._x[0]], [a._x[1], b._x[1]])
        print(a._x)
        print(b._x)
plt.xlim([0,1])
plt.ylim([0,1])
plt.show()
manager.step()
| hniemeyer/HardSphereSim | EventManager.py | Python | gpl-3.0 | 4,177 |
# -*- coding: utf-8 -*-
""" Projy template for PythonPackage. """
# system
from datetime import date
from os import mkdir, rmdir
from shutil import move
from subprocess import call
# parent class
from projy.templates.ProjyTemplate import ProjyTemplate
# collectors
from projy.collectors.AuthorCollector import AuthorCollector
from projy.collectors.AuthorMailCollector import AuthorMailCollector
class DjangoProjectTemplate(ProjyTemplate):
    """ Projy template class for a Django project. """
def __init__(self):
ProjyTemplate.__init__(self)
def directories(self):
""" Return the names of directories to be created. """
directories_description = [
self.project_name,
self.project_name + '/conf',
self.project_name + '/static',
]
return directories_description
def files(self):
""" Return the names of files to be created. """
files_description = [
# configuration
[ self.project_name,
'Makefile',
'DjangoMakefileTemplate' ],
[ self.project_name + '/conf',
'requirements_base.txt',
'DjangoRequirementsBaseTemplate' ],
[ self.project_name + '/conf',
'requirements_dev.txt',
'DjangoRequirementsDevTemplate' ],
[ self.project_name + '/conf',
'requirements_production.txt',
'DjangoRequirementsProdTemplate' ],
[ self.project_name + '/conf',
'nginx.conf',
'DjangoNginxConfTemplate' ],
[ self.project_name + '/conf',
'supervisord.conf',
'DjangoSupervisorConfTemplate' ],
[ self.project_name,
'fabfile.py',
'DjangoFabfileTemplate' ],
[ self.project_name,
'CHANGES.txt',
'PythonPackageCHANGESFileTemplate' ],
[ self.project_name,
'LICENSE.txt',
'GPL3FileTemplate' ],
[ self.project_name,
'README.txt',
'READMEReSTFileTemplate' ],
[ self.project_name,
'.gitignore',
'DjangoGitignoreTemplate' ],
# django files
[ self.project_name,
'dev.py',
'DjangoSettingsDevTemplate' ],
[ self.project_name,
'prod.py',
'DjangoSettingsProdTemplate' ],
]
return files_description
def substitutes(self):
""" Return the substitutions for the templating replacements. """
author_collector = AuthorCollector()
mail_collector = AuthorMailCollector()
substitute_dict = {
'project': self.project_name,
'project_lower': self.project_name.lower(),
'date': date.today().isoformat(),
'author': author_collector.collect(),
'author_email': mail_collector.collect(),
}
return substitute_dict
def posthook(self):
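        # Runs after the template files are written: build the virtualenv,
        # generate the Django project, then reshape it into the settings/
        # layout used by the dev/prod settings files created above.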
# build the virtualenv
call(['make'])
# create the Django project
call(['./venv/bin/django-admin.py', 'startproject', self.project_name])
# transform original settings files into 3 files for different env
mkdir('{p}/settings'.format(p=self.project_name))
self.touch('{p}/settings/__init__.py'.format(p=self.project_name))
move('dev.py', '{p}/settings'.format(p=self.project_name))
move('prod.py', '{p}/settings'.format(p=self.project_name))
move('{p}/{p}/settings.py'.format(p=self.project_name), '{p}/settings/base.py'.format(p=self.project_name))
# organize files nicely
mkdir('{p}/templates'.format(p=self.project_name))
move('{p}/manage.py'.format(p=self.project_name), 'manage.py')
move('{p}/{p}/__init__.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
move('{p}/{p}/urls.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
move('{p}/{p}/wsgi.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
rmdir('{p}/{p}'.format(p=self.project_name))
# create empty git repo
call(['git', 'init'])
# replace some lines
        self.replace_in_file('{p}/wsgi.py'.format(p=self.project_name),
                             '"{p}.settings"'.format(p=self.project_name),
                             '"{p}.settings.prod"'.format(p=self.project_name))
self.replace_in_file('{p}/settings/base.py'.format(p=self.project_name),
u" # ('Your Name', '[email protected]'),",
u" ('{}', '{}'),".format(self.substitutes()['author'],
self.substitutes()['author_email']))
| stephanepechard/projy | projy/templates/DjangoProjectTemplate.py | Python | gpl-3.0 | 4,869 |
#!/usr/bin/python
# The file is part of the WRL Project.
#
# The WRL Project is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The WRL Project is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2017, Andrew McConachie, <[email protected]>
import os
import sys
import random
import dns.resolver
numTestDomains = 100
numTopTLDs = 100
ignoreDomains = ['com', 'net', 'jobs', 'cat', 'mil', 'edu', 'gov', 'int', 'arpa']
serverZone = '.ws.sp.am' # DNS Zone containing CNAME records pointing to whois FQDNs
def dbg(s):
# print s
pass
random.seed()
zFiles = os.listdir('zonefiles/')
#dbgFiles = 10 # How many files to read while developing this, remove when finished coding
tlds = []
for zf in zFiles:
# if len(tlds) >= dbgFiles: # For developing, remove when finished coding
# break
dbg(zf)
tld = {}
if zf.find(".txt") == -1:
dbg("This should not happen")
continue
zfh = open('zonefiles/' + zf, 'r')
lines = zfh.read().splitlines()
zfh.close()
dbg("after file read")
tld['name'] = lines[0].split(".")[0].strip()
if tld['name'] in ignoreDomains:
dbg("Ignoring:" + tld['name'])
continue
dbg("after name split")
rrs = []
for line in lines:
rr = line.split("\t")
rrs.append(rr)
dbg("after rr split")
ns = []
for rr in rrs:
if rr[3].lower() == 'ns':
ns.append(rr[0].split(".")[0])
dbg("after counting NS records")
if len(ns) < numTestDomains:
continue
else:
tld['size'] = len(ns)
tld['domains'] = random.sample(ns, numTestDomains)
for d in tld['domains']:
dbg(d + "." + tld['name'])
dbg(tld['name'] + ": " + str(tld['size']))
tlds.append(tld)
tlds.sort(key=lambda tld: tld['size'], reverse=True)
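# TLDs with the most NS records come first; emit one CSV line per top TLD:
# the whois server followed by its sampled test domains.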
for ii in xrange(min(numTopTLDs, len(tlds))):  # guard against fewer qualifying TLDs than requested
# Find FQDN of whois server
d = dns.resolver.Resolver()
try:
resp = d.query(tlds[ii]['name'] + serverZone, 'CNAME')
if len(resp.rrset) < 1:
whois = 'UNKNOWN'
else:
whois = str(resp.rrset[0]).strip('.')
except:
whois = 'UNKNOWN'
s = whois + ','
for dom in tlds[ii]['domains']:
s += dom + '.' + tlds[ii]['name'] + ','
print s.strip(',')
| smutt/WRL | topThick.py | Python | gpl-3.0 | 2,656 |
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import exec_statement
# This is needed because importing wx.lib.activex makes comtypes generate modules at runtime.
exec_statement("import wx.lib.activex")
| ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/PyInstaller/hooks/hook-wx.lib.activex.py | Python | gpl-3.0 | 571 |
# rasPyCNCController
# Copyright 2016 Francesco Santini <[email protected]>
#
# This file is part of rasPyCNCController.
#
# rasPyCNCController is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rasPyCNCController is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rasPyCNCController. If not, see <http://www.gnu.org/licenses/>.
from PySide import QtCore
from GCodeAnalyzer import GCodeAnalyzer
import sys
import pycnc_config
class GCodeLoader(QtCore.QThread):
load_finished = QtCore.Signal()
load_error = QtCore.Signal(object)
def __init__(self):
QtCore.QThread.__init__(self)
self.file = None
self.gcode = None
self.times = None
self.bBox = None
self.loaded = False
self.totalTime = 0
self.busy = False
self.g0_feed = pycnc_config.G0_FEED
def run(self):
self.loaded = False
self.gcode = []
self.times = []
self.bBox = None
self.totalTime = 0
self.busy = True
analyzer = GCodeAnalyzer()
analyzer.fastf = self.g0_feed
try:
with open(self.file) as f:
for line in f:
analyzer.Analyze(line)
self.gcode.append(line)
self.times.append(analyzer.getTravelTime()*60) # time returned is in minutes: convert to seconds
        except Exception as e:
            self.busy = False
            # emit the error message rather than just the exception class
            self.load_error.emit("%s" % e)
return
self.busy = False
self.loaded = True
self.totalTime = self.times[-1]
self.bBox = analyzer.getBoundingBox()
self.load_finished.emit()
def load(self, file):
self.file = file
self.start()
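        # Note: load() returns immediately; parsing happens in the worker
        # thread, so connect load_finished/load_error before calling it.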
| fsantini/rasPyCNCController | gcode/GCodeLoader.py | Python | gpl-3.0 | 2,216 |
# Authors: Karl MacMillan <[email protected]>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
# pylint: disable=deprecated-module
from optparse import (
Option, Values, OptionParser, IndentedHelpFormatter, OptionValueError)
# pylint: enable=deprecated-module
from copy import copy
from configparser import SafeConfigParser
from urllib.parse import urlsplit
import socket
import functools
from dns.exception import DNSException
import dns.name
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipapython.dnsutil import query_srv
from ipapython.ipautil import CheckedIPAddress, CheckedIPAddressLoopback
class IPAConfigError(Exception):
def __init__(self, msg=''):
self.msg = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.msg
__str__ = __repr__
class IPAFormatter(IndentedHelpFormatter):
"""Our own optparse formatter that indents multiple lined usage string."""
def format_usage(self, usage):
usage_string = "Usage:"
spacing = " " * len(usage_string)
lines = usage.split("\n")
ret = "%s %s\n" % (usage_string, lines[0])
for line in lines[1:]:
ret += "%s %s\n" % (spacing, line)
return ret
def check_ip_option(option, opt, value, allow_loopback=False):
try:
if allow_loopback:
return CheckedIPAddressLoopback(value)
else:
return CheckedIPAddress(value)
except Exception as e:
raise OptionValueError("option {}: invalid IP address {}: {}"
.format(opt, value, e))
def check_dn_option(option, opt, value):
try:
return DN(value)
except Exception as e:
raise OptionValueError("option %s: invalid DN: %s" % (opt, e))
def check_constructor(option, opt, value):
con = option.constructor
assert con is not None, "Oops! Developer forgot to set 'constructor' kwarg"
try:
return con(value)
except Exception as e:
raise OptionValueError("option {} invalid: {}".format(opt, e))
class IPAOption(Option):
"""
optparse.Option subclass with support of options labeled as
security-sensitive such as passwords.
"""
ATTRS = Option.ATTRS + ["sensitive", "constructor"]
TYPES = Option.TYPES + ("ip", "dn", "constructor", "ip_with_loopback")
TYPE_CHECKER = copy(Option.TYPE_CHECKER)
TYPE_CHECKER["ip"] = check_ip_option
TYPE_CHECKER["ip_with_loopback"] = functools.partial(check_ip_option,
allow_loopback=True)
TYPE_CHECKER["dn"] = check_dn_option
TYPE_CHECKER["constructor"] = check_constructor
class IPAOptionParser(OptionParser):
"""
optparse.OptionParser subclass that uses IPAOption by default
for storing options.
"""
def __init__(self,
usage=None,
option_list=None,
option_class=IPAOption,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None):
OptionParser.__init__(self, usage, option_list, option_class,
version, conflict_handler, description,
formatter, add_help_option, prog)
def get_safe_opts(self, opts):
"""
Returns all options except those with sensitive=True in the same
fashion as parse_args would
"""
all_opts_dict = {
o.dest: o for o in self._get_all_options()
if hasattr(o, 'sensitive')
}
safe_opts_dict = {}
for option, value in opts.__dict__.items():
if not all_opts_dict[option].sensitive:
safe_opts_dict[option] = value
return Values(safe_opts_dict)
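    # Illustrative usage (assumed names): an option added with sensitive=True,
    # e.g. parser.add_option("--password", dest="password", sensitive=True),
    # is omitted from the Values returned by get_safe_opts().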
def verify_args(parser, args, needed_args = None):
"""Verify that we have all positional arguments we need, if not, exit."""
if needed_args:
needed_list = needed_args.split(" ")
else:
needed_list = []
len_need = len(needed_list)
len_have = len(args)
if len_have > len_need:
parser.error("too many arguments")
elif len_have < len_need:
parser.error("no %s specified" % needed_list[len_have])
class IPAConfig:
def __init__(self):
self.default_realm = None
self.default_server = []
self.default_domain = None
def get_realm(self):
if self.default_realm:
return self.default_realm
else:
raise IPAConfigError("no default realm")
def get_server(self):
if len(self.default_server):
return self.default_server
else:
raise IPAConfigError("no default server")
def get_domain(self):
if self.default_domain:
return self.default_domain
else:
raise IPAConfigError("no default domain")
# Global library config
config = IPAConfig()
def __parse_config(discover_server = True):
p = SafeConfigParser()
p.read(paths.IPA_DEFAULT_CONF)
try:
if not config.default_realm:
config.default_realm = p.get("global", "realm")
except Exception:
pass
if discover_server:
try:
s = p.get("global", "xmlrpc_uri")
server = urlsplit(s)
config.default_server.append(server.netloc)
except Exception:
pass
try:
if not config.default_domain:
config.default_domain = p.get("global", "domain")
except Exception:
pass
def __discover_config(discover_server = True):
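    # Discover the IPA domain and servers from DNS SRV records, walking up
    # the host's FQDN one label at a time until _ldap._tcp.<domain> resolves.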
servers = []
try:
if not config.default_domain:
# try once with REALM -> domain
domain = str(config.default_realm).lower()
name = "_ldap._tcp." + domain
try:
servers = query_srv(name)
except DNSException:
# try cycling on domain components of FQDN
try:
domain = dns.name.from_text(socket.getfqdn())
except DNSException:
return False
while True:
domain = domain.parent()
if str(domain) == '.':
return False
name = "_ldap._tcp.%s" % domain
try:
servers = query_srv(name)
break
except DNSException:
pass
config.default_domain = str(domain).rstrip(".")
if discover_server:
if not servers:
name = "_ldap._tcp.%s." % config.default_domain
try:
servers = query_srv(name)
except DNSException:
pass
for server in servers:
hostname = str(server.target).rstrip(".")
config.default_server.append(hostname)
except Exception:
pass
return None
def add_standard_options(parser):
parser.add_option("--realm", dest="realm", help="Override default IPA realm")
parser.add_option("--server", dest="server",
help="Override default FQDN of IPA server")
parser.add_option("--domain", dest="domain", help="Override default IPA DNS domain")
def init_config(options=None):
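    # Populate the global config from, in order of precedence, command-line
    # options, /etc/ipa/default.conf, and finally DNS SRV discovery.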
if options:
config.default_realm = options.realm
config.default_domain = options.domain
if options.server:
config.default_server.extend(options.server.split(","))
if len(config.default_server):
discover_server = False
else:
discover_server = True
__parse_config(discover_server)
__discover_config(discover_server)
# make sure the server list only contains unique items
new_server = []
for server in config.default_server:
if server not in new_server:
new_server.append(server)
config.default_server = new_server
if not config.default_realm:
raise IPAConfigError("IPA realm not found in DNS, in the config file (/etc/ipa/default.conf) or on the command line.")
if not config.default_server:
raise IPAConfigError("IPA server not found in DNS, in the config file (/etc/ipa/default.conf) or on the command line.")
if not config.default_domain:
raise IPAConfigError("IPA domain not found in the config file (/etc/ipa/default.conf) or on the command line.")
| encukou/freeipa | ipapython/config.py | Python | gpl-3.0 | 9,257 |
"""Define and instantiate the configuration class for Robottelo."""
import logging
import os
import sys
from logging import config
from nailgun import entities, entity_mixins
from nailgun.config import ServerConfig
from robottelo.config import casts
from six.moves.urllib.parse import urlunsplit, urljoin
from six.moves.configparser import (
NoOptionError,
NoSectionError,
ConfigParser
)
LOGGER = logging.getLogger(__name__)
SETTINGS_FILE_NAME = 'robottelo.properties'
class ImproperlyConfigured(Exception):
"""Indicates that Robottelo somehow is improperly configured.
For example, if settings file can not be found or some required
configuration is not defined.
"""
def get_project_root():
"""Return the path to the Robottelo project root directory.
:return: A directory path.
:rtype: str
"""
return os.path.realpath(os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
))
class INIReader(object):
"""ConfigParser wrapper able to cast value when reading INI options."""
# Helper casters
cast_boolean = casts.Boolean()
cast_dict = casts.Dict()
cast_list = casts.List()
cast_logging_level = casts.LoggingLevel()
cast_tuple = casts.Tuple()
cast_webdriver_desired_capabilities = casts.WebdriverDesiredCapabilities()
def __init__(self, path):
self.config_parser = ConfigParser()
        with open(path) as handler:
            if sys.version_info[0] < 3:
                # ConfigParser.readfp is deprecated on Python 3; read_file
                # replaces it
                self.config_parser.readfp(handler)
            else:
                self.config_parser.read_file(handler)
def get(self, section, option, default=None, cast=None):
"""Read an option from a section of a INI file.
        The default value will be returned if the looked-up option is not
        available. The value will be cast using the given callable if
        specified; otherwise a string will be returned.
:param section: Section to look for.
:param option: Option to look for.
:param default: The value that should be used if the option is not
defined.
:param cast: If provided the value will be cast using the cast
provided.
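        Example (illustrative option names): ``get('server', 'port', 443,
        cast=int)`` returns the port cast to int, or 443 when the option
        is unset.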
"""
try:
value = self.config_parser.get(section, option)
if cast is not None:
if cast is bool:
value = self.cast_boolean(value)
elif cast is dict:
value = self.cast_dict(value)
elif cast is list:
value = self.cast_list(value)
elif cast is tuple:
value = self.cast_tuple(value)
else:
value = cast(value)
except (NoSectionError, NoOptionError):
value = default
return value
def has_section(self, section):
"""Check if section is available."""
return self.config_parser.has_section(section)
class FeatureSettings(object):
"""Settings related to a feature.
    Create an instance of this class and assign attributes to map to the feature
options.
"""
def read(self, reader):
"""Subclasses must implement this method in order to populate itself
with expected settings values.
:param reader: An INIReader instance to read the settings.
"""
raise NotImplementedError('Subclasses must implement read method.')
    def validate(self):
        """Subclasses must implement this method in order to validate the
settings and raise ``ImproperlyConfigured`` if any issue is found.
"""
raise NotImplementedError('Subclasses must implement validate method.')
class ServerSettings(FeatureSettings):
"""Satellite server settings definitions."""
def __init__(self, *args, **kwargs):
super(ServerSettings, self).__init__(*args, **kwargs)
self.admin_password = None
self.admin_username = None
self.hostname = None
self.port = None
self.scheme = None
self.ssh_key = None
self.ssh_password = None
self.ssh_username = None
def read(self, reader):
"""Read and validate Satellite server settings."""
self.admin_password = reader.get(
'server', 'admin_password', 'changeme')
self.admin_username = reader.get(
'server', 'admin_username', 'admin')
self.hostname = reader.get('server', 'hostname')
self.port = reader.get('server', 'port', cast=int)
self.scheme = reader.get('server', 'scheme', 'https')
self.ssh_key = reader.get('server', 'ssh_key')
self.ssh_password = reader.get('server', 'ssh_password')
self.ssh_username = reader.get('server', 'ssh_username', 'root')
def validate(self):
validation_errors = []
if self.hostname is None:
validation_errors.append('[server] hostname must be provided.')
if (self.ssh_key is None and self.ssh_password is None):
validation_errors.append(
'[server] ssh_key or ssh_password must be provided.')
return validation_errors
def get_credentials(self):
"""Return credentials for interacting with a Foreman deployment API.
:return: A username-password pair.
:rtype: tuple
"""
return (self.admin_username, self.admin_password)
def get_url(self):
"""Return the base URL of the Foreman deployment being tested.
The following values from the config file are used to build the URL:
* ``[server] scheme`` (default: https)
* ``[server] hostname`` (required)
* ``[server] port`` (default: none)
Setting ``port`` to 80 does *not* imply that ``scheme`` is 'https'. If
``port`` is 80 and ``scheme`` is unset, ``scheme`` will still default
to 'https'.
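        For example (illustrative values), scheme 'https', hostname
        'sat.example.com' and port 8443 yield 'https://sat.example.com:8443'.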
:return: A URL.
:rtype: str
"""
if not self.scheme:
scheme = 'https'
else:
scheme = self.scheme
# All anticipated error cases have been handled at this point.
if not self.port:
return urlunsplit((scheme, self.hostname, '', '', ''))
else:
return urlunsplit((
scheme, '{0}:{1}'.format(self.hostname, self.port), '', '', ''
))
def get_pub_url(self):
"""Return the pub URL of the server being tested.
The following values from the config file are used to build the URL:
* ``main.server.hostname`` (required)
:return: The pub directory URL.
:rtype: str
"""
return urlunsplit(('http', self.hostname, 'pub/', '', ''))
def get_cert_rpm_url(self):
"""Return the Katello cert RPM URL of the server being tested.
The following values from the config file are used to build the URL:
* ``main.server.hostname`` (required)
:return: The Katello cert RPM URL.
:rtype: str
"""
return urljoin(
self.get_pub_url(), 'katello-ca-consumer-latest.noarch.rpm')
class ClientsSettings(FeatureSettings):
"""Clients settings definitions."""
def __init__(self, *args, **kwargs):
super(ClientsSettings, self).__init__(*args, **kwargs)
self.image_dir = None
self.provisioning_server = None
def read(self, reader):
"""Read clients settings."""
self.image_dir = reader.get(
'clients', 'image_dir', '/opt/robottelo/images')
self.provisioning_server = reader.get(
'clients', 'provisioning_server')
def validate(self):
"""Validate clients settings."""
validation_errors = []
if self.provisioning_server is None:
validation_errors.append(
'[clients] provisioning_server option must be provided.')
return validation_errors
class DockerSettings(FeatureSettings):
"""Docker settings definitions."""
def __init__(self, *args, **kwargs):
super(DockerSettings, self).__init__(*args, **kwargs)
self.unix_socket = None
self.external_url = None
self.external_registry_1 = None
self.external_registry_2 = None
def read(self, reader):
"""Read docker settings."""
self.unix_socket = reader.get(
'docker', 'unix_socket', False, bool)
self.external_url = reader.get('docker', 'external_url')
self.external_registry_1 = reader.get('docker', 'external_registry_1')
self.external_registry_2 = reader.get('docker', 'external_registry_2')
def validate(self):
"""Validate docker settings."""
validation_errors = []
if not any((self.unix_socket, self.external_url)):
validation_errors.append(
'Either [docker] unix_socket or external_url options must '
'be provided or enabled.')
if not all((self.external_registry_1, self.external_registry_2)):
validation_errors.append(
'Both [docker] external_registry_1 and external_registry_2 '
'options must be provided.')
return validation_errors
def get_unix_socket_url(self):
"""Use the unix socket connection to the local docker daemon. Make sure
        that your Satellite server's docker is configured to allow the foreman
        user to access it. This can be done by::
$ groupadd docker
$ usermod -aG docker foreman
# Add -G docker to the options for the docker daemon
$ systemctl restart docker
$ katello-service restart
"""
return (
'unix:///var/run/docker.sock'
if self.unix_socket else None
)
class FakeManifestSettings(FeatureSettings):
    """Fake manifest settings definitions."""
def __init__(self, *args, **kwargs):
super(FakeManifestSettings, self).__init__(*args, **kwargs)
self.cert_url = None
self.key_url = None
self.url = None
def read(self, reader):
"""Read fake manifest settings."""
self.cert_url = reader.get(
'fake_manifest', 'cert_url')
self.key_url = reader.get(
'fake_manifest', 'key_url')
self.url = reader.get(
'fake_manifest', 'url')
def validate(self):
"""Validate fake manifest settings."""
validation_errors = []
if not all(vars(self).values()):
validation_errors.append(
'All [fake_manifest] cert_url, key_url, url options must '
'be provided.'
)
return validation_errors
class LDAPSettings(FeatureSettings):
"""LDAP settings definitions."""
def __init__(self, *args, **kwargs):
super(LDAPSettings, self).__init__(*args, **kwargs)
self.basedn = None
self.grpbasedn = None
self.hostname = None
self.password = None
self.username = None
def read(self, reader):
"""Read LDAP settings."""
self.basedn = reader.get('ldap', 'basedn')
self.grpbasedn = reader.get('ldap', 'grpbasedn')
self.hostname = reader.get('ldap', 'hostname')
self.password = reader.get('ldap', 'password')
self.username = reader.get('ldap', 'username')
def validate(self):
"""Validate LDAP settings."""
validation_errors = []
if not all(vars(self).values()):
validation_errors.append(
'All [ldap] basedn, grpbasedn, hostname, password, '
'username options must be provided.'
)
return validation_errors
class LibvirtHostSettings(FeatureSettings):
"""Libvirt host settings definitions."""
def __init__(self, *args, **kwargs):
super(LibvirtHostSettings, self).__init__(*args, **kwargs)
self.libvirt_image_dir = None
self.libvirt_hostname = None
def read(self, reader):
"""Read libvirt host settings."""
self.libvirt_image_dir = reader.get(
'compute_resources', 'libvirt_image_dir', '/var/lib/libvirt/images'
)
self.libvirt_hostname = reader.get(
'compute_resources', 'libvirt_hostname')
def validate(self):
"""Validate libvirt host settings."""
validation_errors = []
if self.libvirt_hostname is None:
validation_errors.append(
'[compute_resources] libvirt_hostname option must be provided.'
)
return validation_errors
class FakeCapsuleSettings(FeatureSettings):
"""Fake Capsule settings definitions."""
def __init__(self, *args, **kwargs):
super(FakeCapsuleSettings, self).__init__(*args, **kwargs)
self.port_range = None
def read(self, reader):
"""Read fake capsule settings"""
self.port_range = reader.get(
'fake_capsules', 'port_range', cast=tuple
)
def validate(self):
"""Validate fake capsule settings."""
validation_errors = []
if self.port_range is None:
validation_errors.append(
'[fake_capsules] port_range option must be provided.'
)
return validation_errors
class RHEVSettings(FeatureSettings):
"""RHEV settings definitions."""
def __init__(self, *args, **kwargs):
super(RHEVSettings, self).__init__(*args, **kwargs)
# Compute Resource Information
self.hostname = None
self.username = None
self.password = None
self.datacenter = None
self.vm_name = None
# Image Information
self.image_os = None
self.image_arch = None
self.image_username = None
self.image_password = None
self.image_name = None
def read(self, reader):
"""Read rhev settings."""
# Compute Resource Information
self.hostname = reader.get('rhev', 'hostname')
self.username = reader.get('rhev', 'username')
self.password = reader.get('rhev', 'password')
self.datacenter = reader.get('rhev', 'datacenter')
self.vm_name = reader.get('rhev', 'vm_name')
# Image Information
self.image_os = reader.get('rhev', 'image_os')
self.image_arch = reader.get('rhev', 'image_arch')
self.image_username = reader.get('rhev', 'image_username')
self.image_password = reader.get('rhev', 'image_password')
self.image_name = reader.get('rhev', 'image_name')
def validate(self):
"""Validate rhev settings."""
validation_errors = []
if not all(vars(self).values()):
validation_errors.append(
                'All [rhev] hostname, username, password, datacenter, '
                'vm_name, image_os, image_arch, image_username, '
                'image_password, image_name options must be provided.'
)
return validation_errors
class VmWareSettings(FeatureSettings):
"""VmWare settings definitions."""
def __init__(self, *args, **kwargs):
super(VmWareSettings, self).__init__(*args, **kwargs)
# Compute Resource Information
self.vcenter = None
self.username = None
self.password = None
self.datacenter = None
self.vm_name = None
# Image Information
self.image_os = None
self.image_arch = None
self.image_username = None
self.image_password = None
self.image_name = None
def read(self, reader):
"""Read vmware settings."""
# Compute Resource Information
self.vcenter = reader.get('vmware', 'hostname')
self.username = reader.get('vmware', 'username')
self.password = reader.get('vmware', 'password')
self.datacenter = reader.get('vmware', 'datacenter')
self.vm_name = reader.get('vmware', 'vm_name')
# Image Information
self.image_os = reader.get('vmware', 'image_os')
self.image_arch = reader.get('vmware', 'image_arch')
self.image_username = reader.get('vmware', 'image_username')
self.image_password = reader.get('vmware', 'image_password')
self.image_name = reader.get('vmware', 'image_name')
def validate(self):
"""Validate vmware settings."""
validation_errors = []
if not all(vars(self).values()):
validation_errors.append(
                'All [vmware] hostname, username, password, datacenter, '
                'vm_name, image_os, image_arch, image_username, '
                'image_password, image_name options must be provided.'
)
return validation_errors
class DiscoveryISOSettings(FeatureSettings):
"""Discovery ISO name settings definition."""
def __init__(self, *args, **kwargs):
super(DiscoveryISOSettings, self).__init__(*args, **kwargs)
self.discovery_iso = None
def read(self, reader):
"""Read discovery iso setting."""
self.discovery_iso = reader.get('discovery', 'discovery_iso')
def validate(self):
"""Validate discovery iso name setting."""
validation_errors = []
if self.discovery_iso is None:
validation_errors.append(
'[discovery] discovery iso name must be provided.'
)
return validation_errors
class OscapSettings(FeatureSettings):
"""Oscap settings definitions."""
def __init__(self, *args, **kwargs):
super(OscapSettings, self).__init__(*args, **kwargs)
self.content_path = None
def read(self, reader):
"""Read Oscap settings."""
self.content_path = reader.get('oscap', 'content_path')
def validate(self):
"""Validate Oscap settings."""
validation_errors = []
if self.content_path is None:
validation_errors.append(
'[oscap] content_path option must be provided.'
)
return validation_errors
class PerformanceSettings(FeatureSettings):
"""Performance settings definitions."""
def __init__(self, *args, **kwargs):
super(PerformanceSettings, self).__init__(*args, **kwargs)
self.time_hammer = None
self.cdn_address = None
self.virtual_machines = None
self.fresh_install_savepoint = None
self.enabled_repos_savepoint = None
self.csv_buckets_count = None
self.sync_count = None
self.sync_type = None
self.repos = None
def read(self, reader):
"""Read performance settings."""
self.time_hammer = reader.get(
'performance', 'time_hammer', False, bool)
self.cdn_address = reader.get(
'performance', 'cdn_address')
self.virtual_machines = reader.get(
'performance', 'virtual_machines', cast=list)
self.fresh_install_savepoint = reader.get(
'performance', 'fresh_install_savepoint')
self.enabled_repos_savepoint = reader.get(
'performance', 'enabled_repos_savepoint')
self.csv_buckets_count = reader.get(
'performance', 'csv_buckets_count', 10, int)
self.sync_count = reader.get(
'performance', 'sync_count', 3, int)
self.sync_type = reader.get(
'performance', 'sync_type', 'sync')
self.repos = reader.get(
'performance', 'repos', cast=list)
def validate(self):
"""Validate performance settings."""
validation_errors = []
if self.cdn_address is None:
validation_errors.append(
'[performance] cdn_address must be provided.')
if self.virtual_machines is None:
validation_errors.append(
'[performance] virtual_machines must be provided.')
if self.fresh_install_savepoint is None:
validation_errors.append(
'[performance] fresh_install_savepoint must be provided.')
if self.enabled_repos_savepoint is None:
validation_errors.append(
'[performance] enabled_repos_savepoint must be provided.')
return validation_errors
class RHAISettings(FeatureSettings):
"""RHAI settings definitions."""
def __init__(self, *args, **kwargs):
super(RHAISettings, self).__init__(*args, **kwargs)
self.insights_client_el6repo = None
self.insights_client_el7repo = None
def read(self, reader):
"""Read RHAI settings."""
self.insights_client_el6repo = reader.get(
'rhai', 'insights_client_el6repo')
self.insights_client_el7repo = reader.get(
'rhai', 'insights_client_el7repo')
def validate(self):
"""Validate RHAI settings."""
return []
class TransitionSettings(FeatureSettings):
"""Transition settings definitions."""
def __init__(self, *args, **kwargs):
super(TransitionSettings, self).__init__(*args, **kwargs)
self.exported_data = None
def read(self, reader):
"""Read transition settings."""
self.exported_data = reader.get('transition', 'exported_data')
def validate(self):
"""Validate transition settings."""
validation_errors = []
if self.exported_data is None:
validation_errors.append(
'[transition] exported_data must be provided.')
return validation_errors
class VlanNetworkSettings(FeatureSettings):
"""Vlan Network settings definitions."""
def __init__(self, *args, **kwargs):
super(VlanNetworkSettings, self).__init__(*args, **kwargs)
self.subnet = None
self.netmask = None
self.gateway = None
self.bridge = None
def read(self, reader):
"""Read Vlan Network settings."""
self.subnet = reader.get('vlan_networking', 'subnet')
self.netmask = reader.get('vlan_networking', 'netmask')
self.gateway = reader.get('vlan_networking', 'gateway')
self.bridge = reader.get('vlan_networking', 'bridge')
def validate(self):
"""Validate Vlan Network settings."""
validation_errors = []
if not all(vars(self).values()):
validation_errors.append(
'All [vlan_networking] subnet, netmask, gateway, bridge '
'options must be provided.')
return validation_errors
class UpgradeSettings(FeatureSettings):
"""Satellite upgrade settings definitions."""
def __init__(self, *args, **kwargs):
super(UpgradeSettings, self).__init__(*args, **kwargs)
self.upgrade_data = None
def read(self, reader):
"""Read and validate Satellite server settings."""
self.upgrade_data = reader.get('upgrade', 'upgrade_data')
def validate(self):
validation_errors = []
if self.upgrade_data is None:
validation_errors.append('[upgrade] data must be provided.')
return validation_errors
class Settings(object):
"""Robottelo's settings representation."""
def __init__(self):
self._all_features = None
self._configured = False
self._validation_errors = []
self.browser = None
self.locale = None
self.project = None
self.reader = None
self.rhel6_repo = None
self.rhel7_repo = None
self.screenshots_path = None
self.saucelabs_key = None
self.saucelabs_user = None
self.server = ServerSettings()
self.run_one_datapoint = None
self.upstream = None
self.verbosity = None
self.webdriver = None
self.webdriver_binary = None
self.webdriver_desired_capabilities = None
# Features
self.clients = ClientsSettings()
self.compute_resources = LibvirtHostSettings()
self.discovery = DiscoveryISOSettings()
self.docker = DockerSettings()
self.fake_capsules = FakeCapsuleSettings()
self.fake_manifest = FakeManifestSettings()
self.ldap = LDAPSettings()
self.oscap = OscapSettings()
self.performance = PerformanceSettings()
self.rhai = RHAISettings()
self.rhev = RHEVSettings()
self.transition = TransitionSettings()
self.vlan_networking = VlanNetworkSettings()
self.upgrade = UpgradeSettings()
self.vmware = VmWareSettings()
def configure(self):
"""Read the settings file and parse the configuration.
:raises: ImproperlyConfigured if any issue is found during the parsing
or validation of the configuration.
"""
if self.configured:
            # TODO: what to do here, raise an exception, just skip or ...?
return
# Expect the settings file to be on the robottelo project root.
settings_path = os.path.join(get_project_root(), SETTINGS_FILE_NAME)
if not os.path.isfile(settings_path):
raise ImproperlyConfigured(
'Not able to find settings file at {}'.format(settings_path))
self.reader = INIReader(settings_path)
self._read_robottelo_settings()
self._validation_errors.extend(
self._validate_robottelo_settings())
self.server.read(self.reader)
self._validation_errors.extend(self.server.validate())
if self.reader.has_section('clients'):
self.clients.read(self.reader)
self._validation_errors.extend(self.clients.validate())
if self.reader.has_section('compute_resources'):
self.compute_resources.read(self.reader)
self._validation_errors.extend(self.compute_resources.validate())
if self.reader.has_section('discovery'):
self.discovery.read(self.reader)
self._validation_errors.extend(self.discovery.validate())
if self.reader.has_section('docker'):
self.docker.read(self.reader)
self._validation_errors.extend(self.docker.validate())
if self.reader.has_section('fake_capsules'):
self.fake_capsules.read(self.reader)
self._validation_errors.extend(self.fake_capsules.validate())
if self.reader.has_section('fake_manifest'):
self.fake_manifest.read(self.reader)
self._validation_errors.extend(self.fake_manifest.validate())
if self.reader.has_section('ldap'):
self.ldap.read(self.reader)
self._validation_errors.extend(self.ldap.validate())
if self.reader.has_section('oscap'):
self.oscap.read(self.reader)
self._validation_errors.extend(self.oscap.validate())
if self.reader.has_section('performance'):
self.performance.read(self.reader)
self._validation_errors.extend(self.performance.validate())
if self.reader.has_section('rhai'):
self.rhai.read(self.reader)
self._validation_errors.extend(self.rhai.validate())
if self.reader.has_section('rhev'):
self.rhev.read(self.reader)
self._validation_errors.extend(self.rhev.validate())
if self.reader.has_section('transition'):
self.transition.read(self.reader)
self._validation_errors.extend(self.transition.validate())
if self.reader.has_section('vlan_networking'):
self.vlan_networking.read(self.reader)
self._validation_errors.extend(self.vlan_networking.validate())
if self.reader.has_section('upgrade'):
self.upgrade.read(self.reader)
self._validation_errors.extend(self.upgrade.validate())
if self.reader.has_section('vmware'):
self.vmware.read(self.reader)
self._validation_errors.extend(self.vmware.validate())
if self._validation_errors:
raise ImproperlyConfigured(
'Failed to validate the configuration, check the message(s):\n'
'{}'.format('\n'.join(self._validation_errors))
)
self._configure_logging()
self._configure_third_party_logging()
self._configure_entities()
self._configured = True
def _read_robottelo_settings(self):
"""Read Robottelo's general settings."""
self.log_driver_commands = self.reader.get(
'robottelo',
'log_driver_commands',
['newSession',
'windowMaximize',
'get',
'findElement',
'sendKeysToElement',
'clickElement',
'mouseMoveTo'],
list
)
self.browser = self.reader.get(
'robottelo', 'browser', 'selenium')
self.locale = self.reader.get('robottelo', 'locale', 'en_US.UTF-8')
self.project = self.reader.get('robottelo', 'project', 'sat')
self.rhel6_repo = self.reader.get('robottelo', 'rhel6_repo', None)
self.rhel7_repo = self.reader.get('robottelo', 'rhel7_repo', None)
self.screenshots_path = self.reader.get(
'robottelo', 'screenshots_path', '/tmp/robottelo/screenshots')
self.run_one_datapoint = self.reader.get(
'robottelo', 'run_one_datapoint', False, bool)
self.cleanup = self.reader.get('robottelo', 'cleanup', False, bool)
self.upstream = self.reader.get('robottelo', 'upstream', True, bool)
self.verbosity = self.reader.get(
'robottelo',
'verbosity',
INIReader.cast_logging_level('debug'),
INIReader.cast_logging_level
)
self.webdriver = self.reader.get(
'robottelo', 'webdriver', 'firefox')
self.saucelabs_user = self.reader.get(
'robottelo', 'saucelabs_user', None)
self.saucelabs_key = self.reader.get(
'robottelo', 'saucelabs_key', None)
self.webdriver_binary = self.reader.get(
'robottelo', 'webdriver_binary', None)
self.webdriver_desired_capabilities = self.reader.get(
'robottelo',
'webdriver_desired_capabilities',
None,
cast=INIReader.cast_webdriver_desired_capabilities
)
self.window_manager_command = self.reader.get(
'robottelo', 'window_manager_command', None)
def _validate_robottelo_settings(self):
"""Validate Robottelo's general settings."""
validation_errors = []
browsers = ('selenium', 'docker', 'saucelabs')
webdrivers = ('chrome', 'firefox', 'ie', 'phantomjs', 'remote')
if self.browser not in browsers:
validation_errors.append(
'[robottelo] browser should be one of {0}.'
.format(', '.join(browsers))
)
if self.webdriver not in webdrivers:
validation_errors.append(
'[robottelo] webdriver should be one of {0}.'
.format(', '.join(webdrivers))
)
if self.browser == 'saucelabs':
if self.saucelabs_user is None:
validation_errors.append(
'[robottelo] saucelabs_user must be provided when '
'browser is saucelabs.'
)
if self.saucelabs_key is None:
validation_errors.append(
'[robottelo] saucelabs_key must be provided when '
'browser is saucelabs.'
)
return validation_errors
@property
def configured(self):
"""Returns True if the settings have already been configured."""
return self._configured
@property
def all_features(self):
"""List all expected feature settings sections."""
if self._all_features is None:
self._all_features = [
name for name, value in vars(self).items()
if isinstance(value, FeatureSettings)
]
return self._all_features
def _configure_entities(self):
"""Configure NailGun's entity classes.
Do the following:
* Set ``entity_mixins.CREATE_MISSING`` to ``True``. This causes method
``EntityCreateMixin.create_raw`` to generate values for empty and
required fields.
* Set ``nailgun.entity_mixins.DEFAULT_SERVER_CONFIG`` to whatever is
returned by :meth:`robottelo.helpers.get_nailgun_config`. See
``robottelo.entity_mixins.Entity`` for more information on the effects
of this.
* Set a default value for ``nailgun.entities.GPGKey.content``.
* Set the default value for
``nailgun.entities.DockerComputeResource.url``
        if either ``docker.unix_socket`` or ``docker.external_url`` is set in
the configuration file.
"""
entity_mixins.CREATE_MISSING = True
entity_mixins.DEFAULT_SERVER_CONFIG = ServerConfig(
self.server.get_url(),
self.server.get_credentials(),
verify=False,
)
gpgkey_init = entities.GPGKey.__init__
def patched_gpgkey_init(self, server_config=None, **kwargs):
"""Set a default value on the ``content`` field."""
gpgkey_init(self, server_config, **kwargs)
self._fields['content'].default = os.path.join(
get_project_root(),
'tests', 'foreman', 'data', 'valid_gpg_key.txt'
)
entities.GPGKey.__init__ = patched_gpgkey_init
# NailGun provides a default value for ComputeResource.url. We override
# that value if `docker.internal_url` or `docker.external_url` is set.
docker_url = None
# Try getting internal url
docker_url = self.docker.get_unix_socket_url()
# Try getting external url
if docker_url is None:
docker_url = self.docker.external_url
if docker_url is not None:
dockercr_init = entities.DockerComputeResource.__init__
def patched_dockercr_init(self, server_config=None, **kwargs):
"""Set a default value on the ``docker_url`` field."""
dockercr_init(self, server_config, **kwargs)
self._fields['url'].default = docker_url
entities.DockerComputeResource.__init__ = patched_dockercr_init
def _configure_logging(self):
"""Configure logging for the entire framework.
If a config named ``logging.conf`` exists in Robottelo's root
directory, the logger is configured using the options in that file.
Otherwise, a custom logging output format is set, and default values
are used for all other logging options.
"""
# All output should be made by the logging module, including warnings
logging.captureWarnings(True)
# Set the logging level based on the Robottelo's verbosity
for name in ('nailgun', 'robottelo'):
logging.getLogger(name).setLevel(self.verbosity)
# Allow overriding logging config based on the presence of logging.conf
# file on Robottelo's project root
logging_conf_path = os.path.join(get_project_root(), 'logging.conf')
if os.path.isfile(logging_conf_path):
config.fileConfig(logging_conf_path)
else:
logging.basicConfig(
format='%(levelname)s %(module)s:%(lineno)d: %(message)s'
)
def _configure_third_party_logging(self):
"""Increase the level of third party packages logging."""
loggers = (
'bugzilla',
'easyprocess',
'paramiko',
'requests.packages.urllib3.connectionpool',
'selenium.webdriver.remote.remote_connection',
)
for logger in loggers:
logging.getLogger(logger).setLevel(logging.WARNING)
| Ichimonji10/robottelo | robottelo/config/base.py | Python | gpl-3.0 | 35,889 |
# This file is part of PlexPy.
#
# PlexPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PlexPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.
from plexpy import logger, notifiers, plextv, pmsconnect, common, log_reader, datafactory, graphs, users
from plexpy.helpers import checked, radio
from mako.lookup import TemplateLookup
from mako import exceptions
import plexpy
import threading
import cherrypy
import hashlib
import random
import json
import os
try:
# pylint:disable=E0611
# ignore this error because we are catching the ImportError
from collections import OrderedDict
# pylint:enable=E0611
except ImportError:
# Python 2.6.x fallback, from libs
from ordereddict import OrderedDict
def serve_template(templatename, **kwargs):
interface_dir = os.path.join(str(plexpy.PROG_DIR), 'data/interfaces/')
template_dir = os.path.join(str(interface_dir), plexpy.CONFIG.INTERFACE)
_hplookup = TemplateLookup(directories=[template_dir])
try:
template = _hplookup.get_template(templatename)
return template.render(**kwargs)
except:
return exceptions.html_error_template().render()
class WebInterface(object):
def __init__(self):
self.interface_dir = os.path.join(str(plexpy.PROG_DIR), 'data/')
@cherrypy.expose
def index(self):
if plexpy.CONFIG.FIRST_RUN_COMPLETE:
raise cherrypy.HTTPRedirect("home")
else:
raise cherrypy.HTTPRedirect("welcome")
@cherrypy.expose
def home(self):
config = {
"home_stats_length": plexpy.CONFIG.HOME_STATS_LENGTH,
"home_stats_type": plexpy.CONFIG.HOME_STATS_TYPE,
"home_stats_count": plexpy.CONFIG.HOME_STATS_COUNT,
"pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER,
}
return serve_template(templatename="index.html", title="Home", config=config)
@cherrypy.expose
def welcome(self, **kwargs):
config = {
"launch_browser": checked(plexpy.CONFIG.LAUNCH_BROWSER),
"refresh_users_on_startup": checked(plexpy.CONFIG.REFRESH_USERS_ON_STARTUP),
"pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER,
"pms_ip": plexpy.CONFIG.PMS_IP,
"pms_is_remote": checked(plexpy.CONFIG.PMS_IS_REMOTE),
"pms_port": plexpy.CONFIG.PMS_PORT,
"pms_token": plexpy.CONFIG.PMS_TOKEN,
"pms_ssl": checked(plexpy.CONFIG.PMS_SSL),
"pms_uuid": plexpy.CONFIG.PMS_UUID,
"tv_notify_enable": checked(plexpy.CONFIG.TV_NOTIFY_ENABLE),
"movie_notify_enable": checked(plexpy.CONFIG.MOVIE_NOTIFY_ENABLE),
"music_notify_enable": checked(plexpy.CONFIG.MUSIC_NOTIFY_ENABLE),
"tv_notify_on_start": checked(plexpy.CONFIG.TV_NOTIFY_ON_START),
"movie_notify_on_start": checked(plexpy.CONFIG.MOVIE_NOTIFY_ON_START),
"music_notify_on_start": checked(plexpy.CONFIG.MUSIC_NOTIFY_ON_START),
"video_logging_enable": checked(plexpy.CONFIG.VIDEO_LOGGING_ENABLE),
"music_logging_enable": checked(plexpy.CONFIG.MUSIC_LOGGING_ENABLE),
"logging_ignore_interval": plexpy.CONFIG.LOGGING_IGNORE_INTERVAL,
"check_github": checked(plexpy.CONFIG.CHECK_GITHUB)
}
# The setup wizard just refreshes the page on submit so we must redirect to home if config set.
# Also redirecting to home if a PMS token already exists - will remove this in future.
if plexpy.CONFIG.FIRST_RUN_COMPLETE or plexpy.CONFIG.PMS_TOKEN:
raise cherrypy.HTTPRedirect("home")
else:
return serve_template(templatename="welcome.html", title="Welcome", config=config)
@cherrypy.expose
def get_date_formats(self):
if plexpy.CONFIG.DATE_FORMAT:
date_format = plexpy.CONFIG.DATE_FORMAT
else:
date_format = 'YYYY-MM-DD'
if plexpy.CONFIG.TIME_FORMAT:
time_format = plexpy.CONFIG.TIME_FORMAT
else:
time_format = 'HH:mm'
formats = {'date_format': date_format,
'time_format': time_format}
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(formats)
@cherrypy.expose
def home_stats(self, time_range='30', stat_type='0', stat_count='5', **kwargs):
data_factory = datafactory.DataFactory()
stats_data = data_factory.get_home_stats(time_range=time_range, stat_type=stat_type, stat_count=stat_count)
return serve_template(templatename="home_stats.html", title="Stats", data=stats_data)
@cherrypy.expose
def library_stats(self, **kwargs):
pms_connect = pmsconnect.PmsConnect()
stats_data = pms_connect.get_library_stats()
return serve_template(templatename="library_stats.html", title="Library Stats", data=stats_data)
@cherrypy.expose
def history(self):
return serve_template(templatename="history.html", title="History")
@cherrypy.expose
def users(self):
return serve_template(templatename="users.html", title="Users")
@cherrypy.expose
def graphs(self):
return serve_template(templatename="graphs.html", title="Graphs")
@cherrypy.expose
def sync(self):
return serve_template(templatename="sync.html", title="Synced Items")
@cherrypy.expose
def user(self, user=None, user_id=None):
user_data = users.Users()
if user_id:
try:
user_details = user_data.get_user_details(user_id=user_id)
except:
logger.warn("Unable to retrieve friendly name for user_id %s " % user_id)
elif user:
try:
user_details = user_data.get_user_details(user=user)
except:
logger.warn("Unable to retrieve friendly name for user %s " % user)
else:
logger.debug(u"User page requested but no parameters received.")
raise cherrypy.HTTPRedirect("home")
return serve_template(templatename="user.html", title="User", data=user_details)
@cherrypy.expose
def edit_user_dialog(self, user=None, user_id=None, **kwargs):
user_data = users.Users()
if user_id:
result = user_data.get_user_friendly_name(user_id=user_id)
status_message = ''
elif user:
result = user_data.get_user_friendly_name(user=user)
status_message = ''
else:
result = None
            status_message = 'An error occurred.'
return serve_template(templatename="edit_user.html", title="Edit User", data=result, status_message=status_message)
@cherrypy.expose
def edit_user(self, user=None, user_id=None, friendly_name=None, **kwargs):
if 'do_notify' in kwargs:
do_notify = kwargs.get('do_notify')
else:
do_notify = 0
if 'keep_history' in kwargs:
keep_history = kwargs.get('keep_history')
else:
keep_history = 0
if 'thumb' in kwargs:
custom_avatar = kwargs['thumb']
else:
custom_avatar = ''
user_data = users.Users()
if user_id:
try:
user_data.set_user_friendly_name(user_id=user_id,
friendly_name=friendly_name,
do_notify=do_notify,
keep_history=keep_history)
user_data.set_user_profile_url(user_id=user_id,
profile_url=custom_avatar)
status_message = "Successfully updated user."
return status_message
except:
status_message = "Failed to update user."
return status_message
if user:
try:
user_data.set_user_friendly_name(user=user,
friendly_name=friendly_name,
do_notify=do_notify,
keep_history=keep_history)
user_data.set_user_profile_url(user=user,
profile_url=custom_avatar)
status_message = "Successfully updated user."
return status_message
except:
status_message = "Failed to update user."
return status_message
@cherrypy.expose
def get_stream_data(self, row_id=None, user=None, **kwargs):
data_factory = datafactory.DataFactory()
stream_data = data_factory.get_stream_details(row_id)
return serve_template(templatename="stream_data.html", title="Stream Data", data=stream_data, user=user)
@cherrypy.expose
def get_ip_address_details(self, ip_address=None, **kwargs):
import socket
try:
socket.inet_aton(ip_address)
except socket.error:
ip_address = None
return serve_template(templatename="ip_address_modal.html", title="IP Address Details", data=ip_address)
@cherrypy.expose
def get_user_list(self, **kwargs):
user_data = users.Users()
user_list = user_data.get_user_list(kwargs=kwargs)
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(user_list)
@cherrypy.expose
def checkGithub(self):
from plexpy import versioncheck
versioncheck.checkGithub()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
def logs(self):
return serve_template(templatename="logs.html", title="Log", lineList=plexpy.LOG_LIST)
@cherrypy.expose
def clearLogs(self):
plexpy.LOG_LIST = []
logger.info("Web logs cleared")
raise cherrypy.HTTPRedirect("logs")
@cherrypy.expose
def toggleVerbose(self):
plexpy.VERBOSE = not plexpy.VERBOSE
logger.initLogger(console=not plexpy.QUIET,
log_dir=plexpy.CONFIG.LOG_DIR, verbose=plexpy.VERBOSE)
logger.info("Verbose toggled, set to %s", plexpy.VERBOSE)
logger.debug("If you read this message, debug logging is available")
raise cherrypy.HTTPRedirect("logs")
@cherrypy.expose
def getLog(self, start=0, length=100, **kwargs):
start = int(start)
length = int(length)
search_value = ""
search_regex = ""
order_column = 0
order_dir = "desc"
if 'order[0][dir]' in kwargs:
order_dir = kwargs.get('order[0][dir]', "desc")
if 'order[0][column]' in kwargs:
order_column = kwargs.get('order[0][column]', "0")
if 'search[value]' in kwargs:
search_value = kwargs.get('search[value]', "")
if 'search[regex]' in kwargs:
search_regex = kwargs.get('search[regex]', "")
filtered = []
if search_value == "":
filtered = plexpy.LOG_LIST[::]
else:
filtered = [row for row in plexpy.LOG_LIST for column in row if search_value.lower() in column.lower()]
sortcolumn = 0
if order_column == '1':
sortcolumn = 2
elif order_column == '2':
sortcolumn = 1
filtered.sort(key=lambda x: x[sortcolumn], reverse=order_dir == "desc")
rows = filtered[start:(start + length)]
rows = [[row[0], row[2], row[1]] for row in rows]
return json.dumps({
'recordsFiltered': len(filtered),
'recordsTotal': len(plexpy.LOG_LIST),
'data': rows,
})
@cherrypy.expose
def get_plex_log(self, window=1000, **kwargs):
log_lines = []
try:
log_lines = {'data': log_reader.get_log_tail(window=window)}
except:
logger.warn("Unable to retrieve Plex Logs.")
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(log_lines)
@cherrypy.expose
def generateAPI(self):
apikey = hashlib.sha224(str(random.getrandbits(256))).hexdigest()[0:32]
logger.info("New API generated")
return apikey
@cherrypy.expose
def settings(self):
interface_dir = os.path.join(plexpy.PROG_DIR, 'data/interfaces/')
interface_list = [name for name in os.listdir(interface_dir) if
os.path.isdir(os.path.join(interface_dir, name))]
# Initialise blank passwords so we do not expose them in the html forms
# but users are still able to clear them
if plexpy.CONFIG.HTTP_PASSWORD != '':
http_password = ' '
else:
http_password = ''
config = {
"http_host": plexpy.CONFIG.HTTP_HOST,
"http_username": plexpy.CONFIG.HTTP_USERNAME,
"http_port": plexpy.CONFIG.HTTP_PORT,
"http_password": http_password,
"launch_browser": checked(plexpy.CONFIG.LAUNCH_BROWSER),
"enable_https": checked(plexpy.CONFIG.ENABLE_HTTPS),
"https_cert": plexpy.CONFIG.HTTPS_CERT,
"https_key": plexpy.CONFIG.HTTPS_KEY,
"api_enabled": checked(plexpy.CONFIG.API_ENABLED),
"api_key": plexpy.CONFIG.API_KEY,
"update_db_interval": plexpy.CONFIG.UPDATE_DB_INTERVAL,
"freeze_db": checked(plexpy.CONFIG.FREEZE_DB),
"log_dir": plexpy.CONFIG.LOG_DIR,
"cache_dir": plexpy.CONFIG.CACHE_DIR,
"check_github": checked(plexpy.CONFIG.CHECK_GITHUB),
"interface_list": interface_list,
"growl_enabled": checked(plexpy.CONFIG.GROWL_ENABLED),
"growl_host": plexpy.CONFIG.GROWL_HOST,
"growl_password": plexpy.CONFIG.GROWL_PASSWORD,
"prowl_enabled": checked(plexpy.CONFIG.PROWL_ENABLED),
"prowl_keys": plexpy.CONFIG.PROWL_KEYS,
"prowl_priority": plexpy.CONFIG.PROWL_PRIORITY,
"xbmc_enabled": checked(plexpy.CONFIG.XBMC_ENABLED),
"xbmc_host": plexpy.CONFIG.XBMC_HOST,
"xbmc_username": plexpy.CONFIG.XBMC_USERNAME,
"xbmc_password": plexpy.CONFIG.XBMC_PASSWORD,
"plex_enabled": checked(plexpy.CONFIG.PLEX_ENABLED),
"plex_client_host": plexpy.CONFIG.PLEX_CLIENT_HOST,
"plex_username": plexpy.CONFIG.PLEX_USERNAME,
"plex_password": plexpy.CONFIG.PLEX_PASSWORD,
"nma_enabled": checked(plexpy.CONFIG.NMA_ENABLED),
"nma_apikey": plexpy.CONFIG.NMA_APIKEY,
"nma_priority": int(plexpy.CONFIG.NMA_PRIORITY),
"pushalot_enabled": checked(plexpy.CONFIG.PUSHALOT_ENABLED),
"pushalot_apikey": plexpy.CONFIG.PUSHALOT_APIKEY,
"pushover_enabled": checked(plexpy.CONFIG.PUSHOVER_ENABLED),
"pushover_keys": plexpy.CONFIG.PUSHOVER_KEYS,
"pushover_apitoken": plexpy.CONFIG.PUSHOVER_APITOKEN,
"pushover_priority": plexpy.CONFIG.PUSHOVER_PRIORITY,
"pushbullet_enabled": checked(plexpy.CONFIG.PUSHBULLET_ENABLED),
"pushbullet_apikey": plexpy.CONFIG.PUSHBULLET_APIKEY,
"pushbullet_deviceid": plexpy.CONFIG.PUSHBULLET_DEVICEID,
"twitter_enabled": checked(plexpy.CONFIG.TWITTER_ENABLED),
"osx_notify_enabled": checked(plexpy.CONFIG.OSX_NOTIFY_ENABLED),
"osx_notify_app": plexpy.CONFIG.OSX_NOTIFY_APP,
"boxcar_enabled": checked(plexpy.CONFIG.BOXCAR_ENABLED),
"boxcar_token": plexpy.CONFIG.BOXCAR_TOKEN,
"cache_sizemb": plexpy.CONFIG.CACHE_SIZEMB,
"email_enabled": checked(plexpy.CONFIG.EMAIL_ENABLED),
"email_from": plexpy.CONFIG.EMAIL_FROM,
"email_to": plexpy.CONFIG.EMAIL_TO,
"email_smtp_server": plexpy.CONFIG.EMAIL_SMTP_SERVER,
"email_smtp_user": plexpy.CONFIG.EMAIL_SMTP_USER,
"email_smtp_password": plexpy.CONFIG.EMAIL_SMTP_PASSWORD,
"email_smtp_port": int(plexpy.CONFIG.EMAIL_SMTP_PORT),
"email_tls": checked(plexpy.CONFIG.EMAIL_TLS),
"pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER,
"pms_ip": plexpy.CONFIG.PMS_IP,
"pms_logs_folder": plexpy.CONFIG.PMS_LOGS_FOLDER,
"pms_port": plexpy.CONFIG.PMS_PORT,
"pms_token": plexpy.CONFIG.PMS_TOKEN,
"pms_ssl": checked(plexpy.CONFIG.PMS_SSL),
"pms_use_bif": checked(plexpy.CONFIG.PMS_USE_BIF),
"pms_uuid": plexpy.CONFIG.PMS_UUID,
"plexwatch_database": plexpy.CONFIG.PLEXWATCH_DATABASE,
"date_format": plexpy.CONFIG.DATE_FORMAT,
"time_format": plexpy.CONFIG.TIME_FORMAT,
"grouping_global_history": checked(plexpy.CONFIG.GROUPING_GLOBAL_HISTORY),
"grouping_user_history": checked(plexpy.CONFIG.GROUPING_USER_HISTORY),
"grouping_charts": checked(plexpy.CONFIG.GROUPING_CHARTS),
"tv_notify_enable": checked(plexpy.CONFIG.TV_NOTIFY_ENABLE),
"movie_notify_enable": checked(plexpy.CONFIG.MOVIE_NOTIFY_ENABLE),
"music_notify_enable": checked(plexpy.CONFIG.MUSIC_NOTIFY_ENABLE),
"tv_notify_on_start": checked(plexpy.CONFIG.TV_NOTIFY_ON_START),
"movie_notify_on_start": checked(plexpy.CONFIG.MOVIE_NOTIFY_ON_START),
"music_notify_on_start": checked(plexpy.CONFIG.MUSIC_NOTIFY_ON_START),
"tv_notify_on_stop": checked(plexpy.CONFIG.TV_NOTIFY_ON_STOP),
"movie_notify_on_stop": checked(plexpy.CONFIG.MOVIE_NOTIFY_ON_STOP),
"music_notify_on_stop": checked(plexpy.CONFIG.MUSIC_NOTIFY_ON_STOP),
"tv_notify_on_pause": checked(plexpy.CONFIG.TV_NOTIFY_ON_PAUSE),
"movie_notify_on_pause": checked(plexpy.CONFIG.MOVIE_NOTIFY_ON_PAUSE),
"music_notify_on_pause": checked(plexpy.CONFIG.MUSIC_NOTIFY_ON_PAUSE),
"monitoring_interval": plexpy.CONFIG.MONITORING_INTERVAL,
"refresh_users_interval": plexpy.CONFIG.REFRESH_USERS_INTERVAL,
"refresh_users_on_startup": checked(plexpy.CONFIG.REFRESH_USERS_ON_STARTUP),
"ip_logging_enable": checked(plexpy.CONFIG.IP_LOGGING_ENABLE),
"video_logging_enable": checked(plexpy.CONFIG.VIDEO_LOGGING_ENABLE),
"music_logging_enable": checked(plexpy.CONFIG.MUSIC_LOGGING_ENABLE),
"logging_ignore_interval": plexpy.CONFIG.LOGGING_IGNORE_INTERVAL,
"pms_is_remote": checked(plexpy.CONFIG.PMS_IS_REMOTE),
"notify_watched_percent": plexpy.CONFIG.NOTIFY_WATCHED_PERCENT,
"notify_on_start_subject_text": plexpy.CONFIG.NOTIFY_ON_START_SUBJECT_TEXT,
"notify_on_start_body_text": plexpy.CONFIG.NOTIFY_ON_START_BODY_TEXT,
"notify_on_stop_subject_text": plexpy.CONFIG.NOTIFY_ON_STOP_SUBJECT_TEXT,
"notify_on_stop_body_text": plexpy.CONFIG.NOTIFY_ON_STOP_BODY_TEXT,
"notify_on_pause_subject_text": plexpy.CONFIG.NOTIFY_ON_PAUSE_SUBJECT_TEXT,
"notify_on_pause_body_text": plexpy.CONFIG.NOTIFY_ON_PAUSE_BODY_TEXT,
"notify_on_resume_subject_text": plexpy.CONFIG.NOTIFY_ON_RESUME_SUBJECT_TEXT,
"notify_on_resume_body_text": plexpy.CONFIG.NOTIFY_ON_RESUME_BODY_TEXT,
"notify_on_buffer_subject_text": plexpy.CONFIG.NOTIFY_ON_BUFFER_SUBJECT_TEXT,
"notify_on_buffer_body_text": plexpy.CONFIG.NOTIFY_ON_BUFFER_BODY_TEXT,
"notify_on_watched_subject_text": plexpy.CONFIG.NOTIFY_ON_WATCHED_SUBJECT_TEXT,
"notify_on_watched_body_text": plexpy.CONFIG.NOTIFY_ON_WATCHED_BODY_TEXT,
"home_stats_length": plexpy.CONFIG.HOME_STATS_LENGTH,
"home_stats_type": checked(plexpy.CONFIG.HOME_STATS_TYPE),
"home_stats_count": plexpy.CONFIG.HOME_STATS_COUNT,
"buffer_threshold": plexpy.CONFIG.BUFFER_THRESHOLD,
"buffer_wait": plexpy.CONFIG.BUFFER_WAIT
}
return serve_template(templatename="settings.html", title="Settings", config=config)
@cherrypy.expose
def configUpdate(self, **kwargs):
# Handle the variable config options. Note - keys with False values aren't getting passed
checked_configs = [
"launch_browser", "enable_https", "api_enabled", "freeze_db", "growl_enabled",
"prowl_enabled", "xbmc_enabled", "check_github",
"plex_enabled", "nma_enabled", "pushalot_enabled",
"pushover_enabled", "pushbullet_enabled",
"twitter_enabled", "osx_notify_enabled",
"boxcar_enabled", "email_enabled", "email_tls",
"grouping_global_history", "grouping_user_history", "grouping_charts", "pms_use_bif", "pms_ssl",
"tv_notify_enable", "movie_notify_enable", "music_notify_enable",
"tv_notify_on_start", "movie_notify_on_start", "music_notify_on_start",
"tv_notify_on_stop", "movie_notify_on_stop", "music_notify_on_stop",
"tv_notify_on_pause", "movie_notify_on_pause", "music_notify_on_pause", "refresh_users_on_startup",
"ip_logging_enable", "video_logging_enable", "music_logging_enable", "pms_is_remote", "home_stats_type"
]
for checked_config in checked_configs:
if checked_config not in kwargs:
# checked items should be zero or one. if they were not sent then the item was not checked
kwargs[checked_config] = 0
# If http password exists in config, do not overwrite when blank value received
if 'http_password' in kwargs:
if kwargs['http_password'] == ' ' and plexpy.CONFIG.HTTP_PASSWORD != '':
kwargs['http_password'] = plexpy.CONFIG.HTTP_PASSWORD
for plain_config, use_config in [(x[4:], x) for x in kwargs if x.startswith('use_')]:
# the use prefix is fairly nice in the html, but does not match the actual config
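            # e.g. an HTML form field named "use_email_tls" is stored under the
            # config key "email_tls" (illustrative example of this renaming)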
kwargs[plain_config] = kwargs[use_config]
del kwargs[use_config]
# Check if we should refresh our data
refresh_users = False
reschedule = False
if 'monitoring_interval' in kwargs and 'refresh_users_interval' in kwargs:
if (kwargs['monitoring_interval'] != str(plexpy.CONFIG.MONITORING_INTERVAL)) or \
(kwargs['refresh_users_interval'] != str(plexpy.CONFIG.REFRESH_USERS_INTERVAL)):
reschedule = True
if 'pms_ip' in kwargs:
if kwargs['pms_ip'] != plexpy.CONFIG.PMS_IP:
refresh_users = True
plexpy.CONFIG.process_kwargs(kwargs)
# Write the config
plexpy.CONFIG.write()
# Get new server URLs for SSL communications.
plextv.get_real_pms_url()
# Reconfigure scheduler if intervals changed
if reschedule:
plexpy.initialize_scheduler()
# Refresh users table if our server IP changes.
if refresh_users:
threading.Thread(target=plextv.refresh_users).start()
raise cherrypy.HTTPRedirect("settings")
@cherrypy.expose
def set_notification_config(self, **kwargs):
# Handle the variable config options. Note - keys with False values aren't getting passed
checked_configs = [
"email_tls"
]
for checked_config in checked_configs:
if checked_config not in kwargs:
# checked items should be zero or one. if they were not sent then the item was not checked
kwargs[checked_config] = 0
for plain_config, use_config in [(x[4:], x) for x in kwargs if x.startswith('use_')]:
# the use prefix is fairly nice in the html, but does not match the actual config
kwargs[plain_config] = kwargs[use_config]
del kwargs[use_config]
plexpy.CONFIG.process_kwargs(kwargs)
# Write the config
plexpy.CONFIG.write()
cherrypy.response.status = 200
@cherrypy.expose
def do_state_change(self, signal, title, timer):
message = title
quote = self.random_arnold_quotes()
plexpy.SIGNAL = signal
return serve_template(templatename="shutdown.html", title=title,
message=message, timer=timer, quote=quote)
@cherrypy.expose
def get_history(self, user=None, user_id=None, **kwargs):
custom_where=[]
if user_id:
custom_where = [['user_id', user_id]]
elif user:
custom_where = [['user', user]]
if 'rating_key' in kwargs:
rating_key = kwargs.get('rating_key', "")
custom_where = [['rating_key', rating_key]]
if 'parent_rating_key' in kwargs:
rating_key = kwargs.get('parent_rating_key', "")
custom_where = [['parent_rating_key', rating_key]]
if 'grandparent_rating_key' in kwargs:
rating_key = kwargs.get('grandparent_rating_key', "")
custom_where = [['grandparent_rating_key', rating_key]]
if 'start_date' in kwargs:
start_date = kwargs.get('start_date', "")
custom_where = [['strftime("%Y-%m-%d", datetime(date, "unixepoch", "localtime"))', start_date]]
data_factory = datafactory.DataFactory()
history = data_factory.get_history(kwargs=kwargs, custom_where=custom_where)
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(history)
@cherrypy.expose
def history_table_modal(self, start_date=None, **kwargs):
return serve_template(templatename="history_table_modal.html", title="History Data", data=start_date)
@cherrypy.expose
def shutdown(self):
return self.do_state_change('shutdown', 'Shutting Down', 15)
@cherrypy.expose
def restart(self):
return self.do_state_change('restart', 'Restarting', 30)
@cherrypy.expose
def update(self):
return self.do_state_change('update', 'Updating', 120)
@cherrypy.expose
def api(self, *args, **kwargs):
from plexpy.api import Api
a = Api()
a.checkParams(*args, **kwargs)
return a.fetchData()
@cherrypy.expose
def twitterStep1(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
tweet = notifiers.TwitterNotifier()
return tweet._get_authorization()
@cherrypy.expose
def twitterStep2(self, key):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
tweet = notifiers.TwitterNotifier()
result = tweet._get_credentials(key)
logger.info(u"result: " + str(result))
if result:
return "Key verification successful"
else:
return "Unable to verify key"
@cherrypy.expose
def testTwitter(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
tweet = notifiers.TwitterNotifier()
result = tweet.test_notify()
if result:
return "Tweet successful, check your twitter to make sure it worked"
else:
return "Error sending tweet"
@cherrypy.expose
def osxnotifyregister(self, app):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
from osxnotify import registerapp as osxnotify
result, msg = osxnotify.registerapp(app)
if result:
osx_notify = notifiers.OSX_NOTIFY()
osx_notify.notify('Registered', result, 'Success :-)')
logger.info('Registered %s, to re-register a different app, delete this app first' % result)
else:
logger.warn(msg)
return msg
@cherrypy.expose
def get_pms_token(self):
token = plextv.PlexTV()
result = token.get_token()
if result:
return result
else:
logger.warn('Unable to retrieve Plex.tv token.')
return False
@cherrypy.expose
def get_pms_sessions_json(self, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_sessions('json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
return False
@cherrypy.expose
def get_current_activity(self, **kwargs):
try:
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_current_activity()
except:
return serve_template(templatename="current_activity.html", data=None)
if result:
return serve_template(templatename="current_activity.html", data=result)
else:
logger.warn('Unable to retrieve data.')
return serve_template(templatename="current_activity.html", data=None)
@cherrypy.expose
def get_current_activity_header(self, **kwargs):
try:
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_current_activity()
except IOError, e:
return serve_template(templatename="current_activity_header.html", data=None)
if result:
return serve_template(templatename="current_activity_header.html", data=result['stream_count'])
else:
logger.warn('Unable to retrieve data.')
return serve_template(templatename="current_activity_header.html", data=None)
@cherrypy.expose
def get_recently_added(self, count='0', **kwargs):
try:
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_recently_added_details(count)
except IOError, e:
return serve_template(templatename="recently_added.html", data=None)
if result:
return serve_template(templatename="recently_added.html", data=result['recently_added'])
else:
logger.warn('Unable to retrieve data.')
return serve_template(templatename="recently_added.html", data=None)
@cherrypy.expose
def pms_image_proxy(self, img='', width='0', height='0', fallback=None, **kwargs):
try:
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_image(img, width, height)
cherrypy.response.headers['Content-type'] = result[1]
return result[0]
except:
            logger.warn('Image proxy queried but errors occurred.')
if fallback == 'poster':
logger.info('Trying fallback image...')
try:
fallback_image = open(self.interface_dir + common.DEFAULT_POSTER_THUMB, 'rb')
cherrypy.response.headers['Content-type'] = 'image/png'
return fallback_image
except IOError, e:
logger.error('Unable to read fallback image. %s' % e)
elif fallback == 'cover':
logger.info('Trying fallback image...')
try:
fallback_image = open(self.interface_dir + common.DEFAULT_COVER_THUMB, 'rb')
cherrypy.response.headers['Content-type'] = 'image/png'
return fallback_image
except IOError, e:
logger.error('Unable to read fallback image. %s' % e)
return None
@cherrypy.expose
def info(self, item_id=None, source=None, **kwargs):
metadata = None
config = {
"pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER
}
if source == 'history':
data_factory = datafactory.DataFactory()
metadata = data_factory.get_metadata_details(row_id=item_id)
else:
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_metadata_details(rating_key=item_id)
if result:
metadata = result['metadata']
if metadata:
return serve_template(templatename="info.html", data=metadata, title="Info", config=config)
else:
logger.warn('Unable to retrieve data.')
return serve_template(templatename="info.html", data=None, title="Info")
@cherrypy.expose
def get_user_recently_watched(self, user=None, user_id=None, limit='10', **kwargs):
data_factory = datafactory.DataFactory()
result = data_factory.get_recently_watched(user_id=user_id, user=user, limit=limit)
if result:
return serve_template(templatename="user_recently_watched.html", data=result,
title="Recently Watched")
else:
logger.warn('Unable to retrieve data.')
return serve_template(templatename="user_recently_watched.html", data=None,
title="Recently Watched")
@cherrypy.expose
def get_user_watch_time_stats(self, user=None, user_id=None, **kwargs):
user_data = users.Users()
result = user_data.get_user_watch_time_stats(user_id=user_id, user=user)
if result:
return serve_template(templatename="user_watch_time_stats.html", data=result, title="Watch Stats")
else:
logger.warn('Unable to retrieve data.')
return serve_template(templatename="user_watch_time_stats.html", data=None, title="Watch Stats")
@cherrypy.expose
def get_user_platform_stats(self, user=None, user_id=None, **kwargs):
user_data = users.Users()
result = user_data.get_user_platform_stats(user_id=user_id, user=user)
if result:
return serve_template(templatename="user_platform_stats.html", data=result,
title="Platform Stats")
else:
logger.warn('Unable to retrieve data.')
return serve_template(templatename="user_platform_stats.html", data=None, title="Platform Stats")
@cherrypy.expose
def get_item_children(self, rating_key='', **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_item_children(rating_key)
if result:
return serve_template(templatename="info_children_list.html", data=result, title="Children List")
else:
logger.warn('Unable to retrieve data.')
return serve_template(templatename="info_children_list.html", data=None, title="Children List")
@cherrypy.expose
def get_metadata_json(self, rating_key='', **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_metadata(rating_key, 'json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_metadata_xml(self, rating_key='', **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_metadata(rating_key)
if result:
cherrypy.response.headers['Content-type'] = 'application/xml'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_recently_added_json(self, count='0', **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_recently_added(count, 'json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_episode_list_json(self, rating_key='', **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_episode_list(rating_key, 'json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_user_ips(self, user_id=None, user=None, **kwargs):
custom_where=[]
if user_id:
custom_where = [['user_id', user_id]]
elif user:
custom_where = [['user', user]]
user_data = users.Users()
history = user_data.get_user_unique_ips(kwargs=kwargs,
custom_where=custom_where)
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(history)
@cherrypy.expose
def get_plays_by_date(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_per_day(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plays_by_dayofweek(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_per_dayofweek(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plays_by_hourofday(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_per_hourofday(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plays_per_month(self, y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_per_month(y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plays_by_top_10_platforms(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_by_top_10_platforms(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plays_by_top_10_users(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_by_top_10_users(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plays_by_stream_type(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_per_stream_type(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plays_by_source_resolution(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_by_source_resolution(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plays_by_stream_resolution(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_total_plays_by_stream_resolution(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_stream_type_by_top_10_users(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_stream_type_by_top_10_users(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_stream_type_by_top_10_platforms(self, time_range='30', y_axis='plays', **kwargs):
graph = graphs.Graphs()
result = graph.get_stream_type_by_top_10_platforms(time_range=time_range, y_axis=y_axis)
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_friends_list(self, **kwargs):
plex_tv = plextv.PlexTV()
result = plex_tv.get_plextv_friends('json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_user_details(self, **kwargs):
plex_tv = plextv.PlexTV()
result = plex_tv.get_plextv_user_details('json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_server_list(self, **kwargs):
plex_tv = plextv.PlexTV()
result = plex_tv.get_plextv_server_list('json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_sync_lists(self, machine_id='', **kwargs):
plex_tv = plextv.PlexTV()
result = plex_tv.get_plextv_sync_lists(machine_id=machine_id, output_format='json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_servers(self, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_server_list(output_format='json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_servers_info(self, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_servers_info()
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_server_prefs(self, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_server_prefs(output_format='json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_activity(self, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_current_activity()
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_full_users_list(self, **kwargs):
plex_tv = plextv.PlexTV()
result = plex_tv.get_full_users_list()
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(result)
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def refresh_users_list(self, **kwargs):
threading.Thread(target=plextv.refresh_users).start()
logger.info('Manual user list refresh requested.')
@cherrypy.expose
def get_sync(self, machine_id=None, user_id=None, **kwargs):
pms_connect = pmsconnect.PmsConnect()
server_id = pms_connect.get_server_identity()
plex_tv = plextv.PlexTV()
if not machine_id:
result = plex_tv.get_synced_items(machine_id=server_id['machine_identifier'], user_id=user_id)
else:
result = plex_tv.get_synced_items(machine_id=machine_id, user_id=user_id)
if result:
output = {"data": result}
else:
logger.warn('Unable to retrieve sync data for user.')
output = {"data": []}
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(output)
@cherrypy.expose
def get_sync_item(self, sync_id, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_sync_item(sync_id, output_format='json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_sync_transcode_queue(self, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_sync_transcode_queue(output_format='json')
if result:
cherrypy.response.headers['Content-type'] = 'application/json'
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_server_pref(self, pref=None, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_server_pref(pref=pref)
if result:
return result
else:
logger.warn('Unable to retrieve data.')
@cherrypy.expose
def get_plexwatch_export_data(self, database_path=None, table_name=None, import_ignore_interval=0, **kwargs):
from plexpy import plexwatch_import
db_check_msg = plexwatch_import.validate_database(database=database_path,
table_name=table_name)
if db_check_msg == 'success':
threading.Thread(target=plexwatch_import.import_from_plexwatch,
kwargs={'database': database_path,
'table_name': table_name,
'import_ignore_interval': import_ignore_interval}).start()
return 'Import has started. Check the PlexPy logs to monitor any problems.'
else:
return db_check_msg
@cherrypy.expose
def plexwatch_import(self, **kwargs):
return serve_template(templatename="plexwatch_import.html", title="Import PlexWatch Database")
@cherrypy.expose
def get_server_id(self, hostname=None, port=None, **kwargs):
from plexpy import http_handler
if hostname and port:
request_handler = http_handler.HTTPHandler(host=hostname,
port=port,
token=None)
uri = '/identity'
request = request_handler.make_request(uri=uri,
proto='http',
request_type='GET',
output_format='',
no_token=True)
if request:
cherrypy.response.headers['Content-type'] = 'application/xml'
return request
else:
logger.warn('Unable to retrieve data.')
return None
else:
return None
@cherrypy.expose
def random_arnold_quotes(self, **kwargs):
from random import randint
quote_list = ['To crush your enemies, see them driven before you, and to hear the lamentation of their women!',
'Your clothes, give them to me, now!',
'Do it!',
'If it bleeds, we can kill it',
'See you at the party Richter!',
'Let off some steam, Bennett',
'I\'ll be back',
'Get to the chopper!',
'Hasta La Vista, Baby!',
'It\'s not a tumor!',
'Dillon, you son of a bitch!',
'Benny!! Screw you!!',
'Stop whining! You kids are soft. You lack discipline.',
'Nice night for a walk.',
'Stick around!',
'I need your clothes, your boots and your motorcycle.',
'No, it\'s not a tumor. It\'s not a tumor!',
'I LIED!',
'See you at the party, Richter!',
'Are you Sarah Conner?',
'I\'m a cop you idiot!',
'Come with me if you want to live.',
'Who is your daddy and what does he do?'
]
random_number = randint(0, len(quote_list) - 1)
return quote_list[int(random_number)]
@cherrypy.expose
def get_notification_agent_config(self, config_id, **kwargs):
config = notifiers.get_notification_agent_config(config_id=config_id)
checkboxes = {'email_tls': checked(plexpy.CONFIG.EMAIL_TLS)}
return serve_template(templatename="notification_config.html", title="Notification Configuration",
data=config, checkboxes=checkboxes)
@cherrypy.expose
def get_notification_agent_triggers(self, config_id, **kwargs):
if config_id.isdigit():
agents = notifiers.available_notification_agents()
for agent in agents:
if int(config_id) == agent['id']:
this_agent = agent
break
else:
this_agent = None
else:
return None
return serve_template(templatename="notification_triggers_modal.html", title="Notification Triggers",
data=this_agent)
@cherrypy.expose
def delete_history_rows(self, row_id, **kwargs):
data_factory = datafactory.DataFactory()
if row_id:
delete_row = data_factory.delete_session_history_rows(row_id=row_id)
if delete_row:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps({'message': delete_row})
else:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps({'message': 'no data received'})
@cherrypy.expose
def delete_all_user_history(self, user_id, **kwargs):
data_factory = datafactory.DataFactory()
if user_id:
delete_row = data_factory.delete_all_user_history(user_id=user_id)
if delete_row:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps({'message': delete_row})
else:
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps({'message': 'no data received'})
| gnowxilef/plexpy | plexpy/webserve.py | Python | gpl-3.0 | 52,488 |
"""treetools: Tools for transforming treebank trees.
transformations: constants and utilities
Author: Wolfgang Maier <[email protected]>
"""
from . import trees
# Head rules for PTB (WSJ) from Collins (1999, p. 240)
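# Each entry maps a lowercased parent label to a list of head rules. A rule is
# a (direction, priority_labels) pair where priority_labels is a single
# space-separated string: the labels are tried in order, scanning the children
# in the given direction. An empty string selects the last child for
# left-to-right rules and the first child for right-to-left rules (see
# get_headpos_by_rule below).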
HEAD_RULES_PTB = {
'adjp' : [('left-to-right', 'nns qp nn $ advp jj vbn vbg adjp jjr np jjs dt fw rbr rbs sbar rb')],
'advp' : [('right-to-left', 'rb rbr rbs fw advp to cd jjr jj in np jjs nn')],
'conjp' : [('right-to-left', 'cc rb in')],
'frag' : [('right-to-left', '')],
'intj' : [('left-to-right', '')],
'lst' : [('right-to-left', 'ls :')],
'nac' : [('left-to-right', 'nn nns nnp nnps np nac ex $ cd qp prp vbg jj jjs jjr adjp fw')],
'pp' : [('right-to-left', 'in to vbg vbn rp fw')],
'prn' : [('left-to-right', '')],
'prt' : [('right-to-left', 'rp')],
'qp' : [('left-to-right', ' $ in nns nn jj rb dt cd ncd qp jjr jjs')],
'rrc' : [('right-to-left', 'vp np advp adjp pp')],
's' : [('left-to-right', ' to in vp s sbar adjp ucp np')],
'sbar' : [('left-to-right', 'whnp whpp whadvp whadjp in dt s sq sinv sbar frag')],
'sbarq' : [('left-to-right', 'sq s sinv sbarq frag')],
'sinv' : [('left-to-right', 'vbz vbd vbp vb md vp s sinv adjp np')],
'sq' : [('left-to-right', 'vbz vbd vbp vb md vp sq')],
'ucp' : [('right-to-left', '')],
'vp' : [('left-to-right', 'to vbd vbn md vbz vb vbg vbp vp adjp nn nns np')],
'whadjp' : [('left-to-right', 'cc wrb jj adjp')],
'whadvp' : [('right-to-left', 'cc wrb')],
'whnp' : [('left-to-right', 'wdt wp wp$ whadjp whpp whnp')],
'whpp' : [('right-to-left', 'in to fw')]
}
# Head rules for NeGra/TIGER from rparse
# almost identical to corresponding rules from Stanford parser
HEAD_RULES_NEGRA = {
's' : [('right-to-left', 'vvfin vvimp'),
('right-to-left', 'vp cvp'),
('right-to-left', 'vmfin vafin vaimp'),
('right-to-left', 's cs')],
'vp' : [('right-to-left', 'vvinf vvizu vvpp'),
('right-to-left', 'vz vainf vminf vmpp vapp pp')],
'vz' : [('right-to-left', 'vvinf vainf vminf vvfin vvizu'),
('left-to-right', 'prtzu appr ptkzu')],
'np' : [('right-to-left', 'nn ne mpn np cnp pn car')],
'ap' : [('right-to-left', 'adjd adja cap aa adv')],
'pp' : [('left-to-right', 'kokom appr proav')],
'co' : [('left-to-right', '')],
'avp' : [('right-to-left', 'adv avp adjd proav pp')],
'aa' : [('right-to-left', 'adjd adja')],
'cnp' : [('right-to-left', 'nn ne mpn np cnp pn car')],
'cap' : [('right-to-left', 'adjd adja cap aa adv')],
'cpp' : [('right-to-left', 'appr proav pp cpp')],
'cs' : [('right-to-left', 's cs')],
'cvp' : [('right-to-left', 'vz')],
'cvz' : [('right-to-left', 'vz')],
'cavp' : [('right-to-left', 'adv avp adjd pwav appr ptkvz')],
'mpn' : [('right-to-left', 'ne fm card')],
'nm' : [('right-to-left', 'card nn')],
'cac' : [('right-to-left', 'appr avp')],
'ch' : [('right-to-left', '')],
'mta' : [('right-to-left', 'adja adjd nn')],
'ccp' : [('right-to-left', 'avp')],
'dl' : [('left-to-right', '')],
'isu' : [('right-to-left', '')],
'ql' : [('right-to-left', '')],
'-' : [('right-to-left', 'pp')],
'cd' : [('right-to-left', 'cd')],
'nn' : [('right-to-left', 'nn')],
'nr' : [('right-to-left', 'nr')],
'vroot' : [('left-to-right', '$. $')]
}
def get_headpos_by_rule(parent_label, children_label, rules,
default=0):
"""Given parent and children labels and head rules,
return position of lexical head.
"""
if not parent_label.lower() in rules:
return default
    for hrule in rules[parent_label.lower()]:
        # the priority labels are stored as one space-separated string
        priority_labels = hrule[1].split()
        if len(priority_labels) == 0:
            if hrule[0] == 'left-to-right':
                return len(children_label) - 1
            elif hrule[0] == 'right-to-left':
                return 0
            else:
                raise ValueError("unknown head rule direction")
        for label in priority_labels:
            if hrule[0] == 'left-to-right':
                for i, child_label in enumerate(children_label):
                    parsed_label = trees.parse_label(child_label.lower())
                    if parsed_label.label.lower() == label:
                        return i
            elif hrule[0] == 'right-to-left':
                for i, child_label in \
                        zip(reversed(range(len(children_label))),
                            reversed(children_label)):
                    parsed_label = trees.parse_label(child_label.lower())
                    if parsed_label.label.lower() == label:
                        return i
            else:
                raise ValueError("unknown head rule direction")
    return 0
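if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module: find the head
    # child of a NeGra 'S' node with children PPER, VVFIN and NP. The first 'S'
    # rule searches right-to-left for a finite verb, so the VVFIN child at
    # position 1 is returned. Assumes trees.parse_label() accepts bare category
    # labels, as in the calls above.
    print(get_headpos_by_rule('s', ['PPER', 'VVFIN', 'NP'], HEAD_RULES_NEGRA))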
| wmaier/treetools | trees/transformconst.py | Python | gpl-3.0 | 4,747 |
#!/usr/bin/env python
#
# Author: Pablo Iranzo Gomez ([email protected])
#
# Description: Script for setting the keyring password for RHEV scripts
#
# Requires: python keyring
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import optparse
import getpass  # used by the -W option below to prompt for the password
import keyring
from rhev_functions import *
description = """
RHEV-keyring is a script for maintaining the keyring used by the RHEV scripts for storing passwords
"""
# Option parsing
p = optparse.OptionParser("rhev-keyring.py [arguments]", description=description)
p.add_option("-u", "--user", dest="username", help="Username to connect to RHEVM API", metavar="admin@internal",
default=False)
p.add_option("-w", "--password", dest="password", help="Password to use with username", metavar="admin",
default=False)
p.add_option("-W", action="store_true", dest="askpassword", help="Ask for password", metavar="admin", default=False)
p.add_option('-q', "--query", action="store_true", dest="query", help="Query the values stored", default=False)
(options, args) = p.parse_args()
if options.askpassword:
options.password = getpass.getpass("Enter password: ")
# keyring.set_password('redhat', 'kerberos', '<password>')
# remotepasseval = keyring.get_password('redhat', 'kerberos')
if options.query:
print "Username: %s" % keyring.get_password('rhevm-utils', 'username')
print "Password: %s" % keyring.get_password('rhevm-utils', 'password')
if options.username:
keyring.set_password('rhevm-utils', 'username', options.username)
if options.password:
keyring.set_password('rhevm-utils', 'password', options.password)
| DragonRoman/rhevm-utils | rhev-keyring.py | Python | gpl-3.0 | 2,000 |
#!/usr/bin/python
import os,sys,re
#Check the OS Version
RELEASE_FILE = "/etc/redhat-release"
RWM_FILE = "/etc/httpd/conf.modules.d/00-base.conf"
if os.path.isfile(RELEASE_FILE):
f=open(RELEASE_FILE,"r")
rel_list = f.read().split()
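    # e.g. "CentOS Linux release 7.9.2009 (Core)".split() yields
    # ['CentOS', 'Linux', 'release', '7.9.2009', '(Core)'], so rel_list[2] is
    # the word "release" and rel_list[3] the version string (illustrative
    # example; the comparison below is a lexicographic string comparison)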
if rel_list[2] == "release" and tuple(rel_list[3].split(".")) < ('8','5'):
print("so far good")
else:
        raise Exception("Unable to find the OS version")
#Check Apache installed
#TODO
#
#Test if the rewrite module file present
if os.path.isfile(RWM_FILE):
print("re write")
##print sys.version_info
##if sys.version_info < (2,7):
## print "This programm works only with the Python 2.7"###
| sujith7c/py-system-tools | en_mod_rw.py | Python | gpl-3.0 | 636 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-15 14:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0014_auto_20170414_0845'),
]
operations = [
migrations.AlterField(
model_name='jeux',
name='image',
field=models.ImageField(null=True, upload_to='photos_jeux/', verbose_name='Image'),
),
]
| Gatomlo/shareandplay | catalogue/migrations/0015_auto_20170415_1628.py | Python | gpl-3.0 | 495 |
#!/usr/bin/env python
import os
import random
__author__ = 'duceppemo'
class SnpTableMaker(object):
"""
Everything is ran inside the class because data structures have to be
shared across parent and child process during multi threading
"""
def __init__(self, args):
import os
import sys
import glob
import multiprocessing
# Define variables based on supplied arguments
self.args = args
self.ref = args.ref
if not os.path.isfile(self.ref):
sys.exit('Supplied reference genome file does not exists.')
self.vcf = args.vcf
if not os.path.isdir(self.vcf):
sys.exit('Supplied VCF folder does not exists.')
self.minQUAL = args.minQUAL
if not isinstance(self.minQUAL, (int, long)):
sys.exit('minQual value must be an integer')
self.ac1_report = args.ac1
self.section4 = args.section4
self.output = args.output
if not os.path.isdir(self.output):
os.makedirs(self.output)
self.table = args.table
# number of threads to use = number of cpu
self.cpus = int(multiprocessing.cpu_count())
# create dictionaries to hold data
self.refgenome = dict()
self.vcfs = dict()
self.ac1s = dict()
self.ac2s = dict()
self.allac2 = dict()
self.finalac1 = dict()
self.fastas = dict()
self.counts = dict()
self.informative_pos = dict()
# create a list of vcf files in vcfFolder
self.vcfList = list()
for filename in glob.glob(os.path.join(self.vcf, '*.vcf')):
self.vcfList.append(filename)
# run the script
self.snp_table_maker()
def snp_table_maker(self):
self.parse_ref()
self.parse_vcf()
self.find_ac1_in_ac2()
self.write_ac1_report()
self.get_allele_values()
self.get_informative_snps()
self.count_snps()
self.write_fasta()
self.write_root()
self.write_snp_table()
def parse_ref(self):
from Bio import SeqIO
print ' Parsing reference genome'
fh = open(self.ref, "rU")
self.refgenome = SeqIO.to_dict(SeqIO.parse(fh, "fasta"))
fh.close()
def parse_vcf(self):
import sys
print ' Parsing VCF files'
for samplefile in self.vcfList:
sample = os.path.basename(samplefile).split('.')[0] # get what's before the first dot
self.vcfs[sample] = dict()
with open(samplefile, 'r') as f: # open file
for line in f: # read file line by line
line = line.rstrip() # chomp -> remove trailing whitespace characters
if line: # skip blank lines or lines with only whitespaces
if line.startswith('##'): # skip comment lines
continue
elif line.startswith('#CHROM'):
sample_name = line.split("\t")[9]
if sample_name != sample:
sys.exit('File name and sample name inside VCF file are different: %s'
% samplefile)
else:
# chrom, pos, alt, qual = [line.split()[i] for i in (0, 1, 4, 5)]
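                            # A typical VCF data line is tab-separated, e.g.:
                            #   NC_002945.3  4023  .  G  A  1452.77  .  AC=2;AF=1.00;DP=37  GT:DP  1/1:37
                            # i.e. CHROM POS ID REF ALT QUAL FILTER INFO FORMAT SAMPLE
                            # (illustrative line, not taken from a real input file)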
chrom = line.split()[0]
pos = int(line.split()[1])
alt = line.split()[4]
                            qual = line.split()[5]  # string -> needs to be converted to a number
if qual != '.':
try:
qual = float(qual)
except ValueError:
qual = int(qual)
else:
continue # skip line
ac = line.split()[7].split(';')[0]
# http://www.saltycrane.com/blog/2010/02/python-setdefault-example/
self.vcfs.setdefault(sample, {}).setdefault(chrom, {}).setdefault(pos, [])\
.append(alt)
if ac == 'AC=1' and qual > self.args.minQUAL:
self.ac1s.setdefault(sample, {}).setdefault(chrom, []).append(pos)
elif ac == 'AC=2' and qual > self.args.minQUAL:
self.ac2s.setdefault(sample, {}).setdefault(chrom, []).append(pos)
# This is equivalent, but faster?
try:
if pos not in self.allac2[chrom]: # only add is not already present
self.allac2.setdefault(chrom, []).append(pos)
except KeyError: # chromosome does not exist in dictionary
self.allac2.setdefault(chrom, []).append(pos)
# This works
# if chrom in self.allac2:
# if pos in self.allac2[chrom]:
# pass
# else:
# self.allac2.setdefault(chrom, []).append(pos)
# else:
# self.allac2.setdefault(chrom, [])
def find_ac1_in_ac2(self):
print ' Finding AC=1/AC=2 positions'
if isinstance(self.ac1s, dict): # check if it's a dict before using .iteritems()
for sample, chromosomes in self.ac1s.iteritems():
if isinstance(chromosomes, dict): # check for dict
for chrom, positions in chromosomes.iteritems():
if isinstance(positions, list): # check for list
for pos in positions:
if pos in self.allac2[chrom]: # check ac1 in ac2
self.finalac1.setdefault(sample, {}).setdefault(chrom, []).append(pos)
def write_ac1_report(self):
print " Writing AC=1/AC=2 report to file"
# free up resources not needed anymore
self.ac1s.clear()
fh = open(self.ac1_report, 'w')
if isinstance(self.finalac1, dict):
for sample, chromosomes in sorted(self.finalac1.iteritems()):
if isinstance(chromosomes, dict):
for chrom, positions in sorted(chromosomes.iteritems()):
if isinstance(positions, list):
fh.write("{}\nAC=1 is also found in AC=2 in chromosome {}".format(sample, chrom) +
" at position(s): " + ', '.join(map(str, positions)) + "\n\n")
fh.close()
def get_allele_values(self):
print ' Getting allele values'
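        # For every position that is AC=2 in at least one sample, decide this
        # sample's allele: its own ALT call if the position is AC=2 here (or an
        # AC=1 call that also occurs as AC=2 in another sample), otherwise the
        # reference base at that position.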
for sample in self.ac2s:
for chrom in self.ac2s[sample]:
for pos in self.allac2[chrom]:
# if in AC=2 for that sample
if pos in self.ac2s[sample][chrom]:
allele = ''.join(self.vcfs[sample][chrom][pos]) # convert list to string
else:
try: # use a try here because some samples are not in finalac1
# if in AC=1 for that sample, but also in AC=2 in other sample
if pos in self.finalac1[sample][chrom]:
allele = ''.join(self.vcfs[sample][chrom][pos]) # convert list to string
else:
allele = self.refgenome[chrom].seq[pos - 1]
except KeyError:
allele = self.refgenome[chrom].seq[pos - 1]
self.fastas.setdefault(sample, {}).setdefault(chrom, {}).setdefault(pos, []).append(allele)
# Track all alleles for each position
try:
if allele not in self.counts[chrom][pos]:
self.counts.setdefault(chrom, {}).setdefault(pos, []).append(allele)
except KeyError:
self.counts.setdefault(chrom, {}).setdefault(pos, []).append(allele)
def get_informative_snps(self):
"""SNPs position that have at least one different ALT allele within all the samples"""
print ' Getting informative SNPs'
# free up resources not needed anymore
self.ac2s.clear()
self.allac2.clear()
self.finalac1.clear()
# need to get the positions in the same order for all the sample (sort chrom and pos)
for sample in self.fastas:
for chrom in sorted(self.fastas[sample]):
for pos in sorted(self.fastas[sample][chrom]):
                    if len(self.counts[chrom][pos]) > 1: # if more than one ALT allele, keep it
allele = ''.join(self.fastas[sample][chrom][pos]) # convert list to string
# check if allele is empty
if allele:
self.informative_pos.setdefault(sample, {}).setdefault(chrom, {})\
.setdefault(pos, []).append(''.join(allele))
else:
print "No allele infor for {}, {}:{}".format(sample, chrom, pos)
def count_snps(self):
print ' Counting SNPs'
# free up resources not needed anymore
self.counts.clear()
# All samples should have the same number of informative SNPs
# so any can be used to get the stats
randomsample = random.choice(self.informative_pos.keys())
filteredcount = 0
informativecount = 0
# Account for multiple chromosome
for chrom in self.fastas[randomsample]:
filteredcount += len(self.fastas[randomsample][chrom]) # number of positions
informativecount += len(self.informative_pos[randomsample][chrom])
# print to screen
print "\nTotal filtered SNPs: {}".format(filteredcount)
print "Total informative SNPs: {}\n".format(informativecount)
# write to file
fh = open(self.section4, "a") # append mode
fh.write("Total filtered SNPs: {}\n".format(filteredcount))
fh.write("Total informative SNPs: {}\n\n".format(informativecount))
fh.close()
def write_fasta(self):
print ' Writing sample fasta files'
# free up resources not needed anymore
self.fastas.clear()
# Create output folder for fasta files
if not os.path.exists(self.output):
os.makedirs(self.output)
if isinstance(self.informative_pos, dict):
for sample, chromosomes in sorted(self.informative_pos.iteritems()):
samplepath = os.path.join(self.output, sample + '.fas')
fh = open(samplepath, 'w')
fh.write(">{}\n".format(sample))
if isinstance(chromosomes, dict):
for chrom, positions in sorted(chromosomes.iteritems()):
if isinstance(positions, dict):
for pos, allele in sorted(positions.iteritems()):
if isinstance(allele, list):
fh.write(''.join(allele)) # convert list to text
fh.write("\n")
def write_root(self):
print ' Writing root fasta file'
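        # The "root" sequence is the reference base at every informative position,
        # written in the same chromosome/position order as the per-sample fasta
        # files so that the alignment columns line up.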
rootpath = os.path.join(self.output, 'root.fas')
randomsample = random.choice(self.informative_pos.keys())
rootseq = list()
fh = open(rootpath, 'w')
if isinstance(self.informative_pos, dict):
            for chrom in sorted(self.informative_pos[randomsample]): # keep chromosome order consistent with write_fasta
for pos in sorted(self.informative_pos[randomsample][chrom]):
rootseq.append(self.refgenome[chrom].seq[pos - 1])
fh.write(">root\n" + "{}\n".format(''.join(rootseq)))
def write_snp_table(self):
print ' Writing SNP table'
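        # Table layout: a "reference_pos" row of chrom-position labels, a
        # "reference_call" row of reference bases, then one tab-separated row per
        # sample holding that sample's allele at each informative position.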
fh = open(self.table, 'w')
randomsample = random.choice(self.informative_pos.keys())
ref_pos = list()
ref_call = list()
# reference
if isinstance(self.informative_pos, dict):
            for chrom in sorted(self.informative_pos[randomsample]): # keep chromosome order consistent with the sample rows below
for pos in sorted(self.informative_pos[randomsample][chrom]):
ref_pos.append(''.join(chrom) + '-' + str(pos))
ref_call.append(self.refgenome[chrom].seq[pos - 1])
fh.write("reference_pos\t{}\n".format("\t".join(ref_pos)))
fh.write("reference_call\t{}\n".format("\t".join(ref_call)))
# sample
if isinstance(self.informative_pos, dict):
for sample, chromosomes in self.informative_pos.iteritems():
fh.write("{}".format(sample))
if isinstance(chromosomes, dict):
for chrom, positions in sorted(chromosomes.iteritems()):
if isinstance(positions, dict):
for pos, allele in sorted(positions.iteritems()):
if isinstance(allele, list):
allele = ''.join(allele) # convert list to text
fh.write("\t{}".format(allele))
fh.write("\n")
fh.close()
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description='Generate SNP table and aligned fasta files from VCF files')
parser.add_argument('-r', '--ref', metavar='ref.fasta',
required=True,
help='reference genome used in the VCF files')
parser.add_argument('-v', '--vcf', metavar='vcfFolder',
required=True,
help='location of the VCF files')
parser.add_argument('-q', '--minQUAL', metavar='minQUAL', type=int,
required=True,
help='minimum QUAL value in VCF file')
parser.add_argument('-ac1', '--ac1', metavar='AC1Report.txt',
required=True,
help='output file where positions having both AC=1 and AC=2 are reported')
parser.add_argument('-s4', '--section4', metavar='section4.txt',
required=True,
help='output file where total filtered SNP positions and total informative SNPs are reported')
parser.add_argument('-o', '--output', metavar='fastaOutFolder',
required=True,
help='folder where the output fasta files will be output')
parser.add_argument('-t', '--table', metavar='fastaTable.tsv',
required=True,
help='the SNP table')
# Get the arguments into an object
arguments = parser.parse_args()
SnpTableMaker(arguments)
| OLF-Bioinformatics/snp_analysis | binaries/snpTableMaker.py | Python | gpl-3.0 | 15,587 |
# -*- coding: utf-8 -*-
# File: enemy.py
# Author: Casey Jones
#
# Created on July 20, 2009, 4:48 PM
#
# This file is part of Alpha Beta Gamma (abg).
#
# ABG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ABG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ABG. If not, see <http://www.gnu.org/licenses/>.
#class to handle all enemies on screen
import sys, pygame, frametime, properties, random
from enemy import Enemy
class Enemies:
enemies = []
blackSurface = pygame.Surface([Enemy.enemy.get_width(), Enemy.enemy.get_height()])
blackSurface.fill([0,0,0])
screen = None
def set_screen(self, screen):
self.screen = screen
def create(self):
#range that the current player ship can shoot
where_spawn = random.randint(1, properties.width - Enemy.enemy.get_width())
lenemy = Enemy(where_spawn)
self.enemies.append(lenemy)
def move(self, bullet):
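        # Spawns a new enemy when the frame timer allows it, moves every enemy and
        # returns the list of rects touched this frame (old and new positions) so
        # the caller can restrict screen updates to those areas.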
to_update = []
if frametime.can_create_enemy():
self.create()
to_delete = []
to_update += [x.enemyrect for x in self.enemies]
if len(self.enemies) > 0:
for i in range(len(self.enemies)):
self.enemies[i].update(bullet)
self.screen.blit(self.blackSurface, self.enemies[i].enemyrect)
self.screen.blit(Enemy.enemy, self.enemies[i].enemyrect)
#If enemy goes off the bottom of the screen
if self.enemies[i].enemyrect.top > 800:
to_delete.append(i)
for x in to_delete:
self.remove(x)
to_update += [x.enemyrect for x in self.enemies]
return to_update
def getEnemies(self):
return self.enemies
def remove(self, index):
try:
to_update = self.enemies[index].enemyrect
self.screen.blit(self.blackSurface, self.enemies[index].enemyrect)
del self.enemies[index]
return to_update
except IndexError:
print("IndexError for enemy {0} of {1}".format(index, len(self.enemies)))
def game_over(self):
for i in range(len(self.enemies)):
self.screen.blit(self.blackSurface, self.enemies[i].enemyrect)
del self.enemies[:]
| jcnix/abg | enemies.py | Python | gpl-3.0 | 2,843 |
# -*-coding: utf-8-*-
import logging
from pyramid.view import view_config, view_defaults
from pyramid.httpexceptions import HTTPFound
from . import BaseView
from ..models import DBSession
from ..models.account_item import AccountItem
from ..lib.bl.subscriptions import subscribe_resource
from ..lib.utils.common_utils import translate as _
from ..forms.accounts_items import (
AccountItemForm,
AccountItemSearchForm
)
from ..lib.events.resources import (
ResourceCreated,
ResourceChanged,
ResourceDeleted,
)
log = logging.getLogger(__name__)
@view_defaults(
context='..resources.accounts_items.AccountsItemsResource',
)
class AccountsItemsView(BaseView):
@view_config(
request_method='GET',
renderer='travelcrm:templates/accounts_items/index.mako',
permission='view'
)
def index(self):
return {
'title': self._get_title(),
}
@view_config(
name='list',
xhr='True',
request_method='POST',
renderer='json',
permission='view'
)
def list(self):
form = AccountItemSearchForm(self.request, self.context)
form.validate()
qb = form.submit()
return qb.get_serialized()
@view_config(
name='view',
request_method='GET',
renderer='travelcrm:templates/accounts_items/form.mako',
permission='view'
)
def view(self):
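        # When called with ?rid=<resource id> the request is redirected to the
        # canonical ?id=<account item id> URL; otherwise the edit form is rendered
        # in read-only mode.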
if self.request.params.get('rid'):
resource_id = self.request.params.get('rid')
account_item = AccountItem.by_resource_id(resource_id)
return HTTPFound(
location=self.request.resource_url(
self.context, 'view', query={'id': account_item.id}
)
)
result = self.edit()
result.update({
'title': self._get_title(_(u'View')),
'readonly': True,
})
return result
@view_config(
name='add',
request_method='GET',
renderer='travelcrm:templates/accounts_items/form.mako',
permission='add'
)
def add(self):
return {
'title': self._get_title(_(u'Add')),
}
@view_config(
name='add',
request_method='POST',
renderer='json',
permission='add'
)
def _add(self):
form = AccountItemForm(self.request)
if form.validate():
account_item = form.submit()
DBSession.add(account_item)
DBSession.flush()
event = ResourceCreated(self.request, account_item)
event.registry()
return {
'success_message': _(u'Saved'),
'response': account_item.id
}
else:
return {
'error_message': _(u'Please, check errors'),
'errors': form.errors
}
@view_config(
name='edit',
request_method='GET',
renderer='travelcrm:templates/accounts_items/form.mako',
permission='edit'
)
def edit(self):
account_item = AccountItem.get(self.request.params.get('id'))
return {
'item': account_item,
'title': self._get_title(_(u'Edit')),
}
@view_config(
name='edit',
request_method='POST',
renderer='json',
permission='edit'
)
def _edit(self):
account_item = AccountItem.get(self.request.params.get('id'))
form = AccountItemForm(self.request)
if form.validate():
form.submit(account_item)
event = ResourceChanged(self.request, account_item)
event.registry()
return {
'success_message': _(u'Saved'),
'response': account_item.id
}
else:
return {
'error_message': _(u'Please, check errors'),
'errors': form.errors
}
@view_config(
name='copy',
request_method='GET',
renderer='travelcrm:templates/accounts_items/form.mako',
permission='add'
)
def copy(self):
account_item = AccountItem.get_copy(self.request.params.get('id'))
return {
'action': self.request.path_url,
'item': account_item,
'title': self._get_title(_(u'Copy')),
}
@view_config(
name='copy',
request_method='POST',
renderer='json',
permission='add'
)
def _copy(self):
return self._add()
@view_config(
name='delete',
request_method='GET',
renderer='travelcrm:templates/accounts_items/delete.mako',
permission='delete'
)
def delete(self):
return {
'title': self._get_title(_(u'Delete')),
'rid': self.request.params.get('rid')
}
@view_config(
name='delete',
request_method='POST',
renderer='json',
permission='delete'
)
def _delete(self):
errors = False
ids = self.request.params.getall('id')
if ids:
try:
items = DBSession.query(AccountItem).filter(
AccountItem.id.in_(ids)
)
for item in items:
DBSession.delete(item)
event = ResourceDeleted(self.request, item)
event.registry()
DBSession.flush()
except:
                errors = True
DBSession.rollback()
if errors:
return {
'error_message': _(
                    u'Some objects could not be deleted'
),
}
return {'success_message': _(u'Deleted')}
@view_config(
name='subscribe',
request_method='GET',
renderer='travelcrm:templates/accounts_items/subscribe.mako',
permission='view'
)
def subscribe(self):
return {
'id': self.request.params.get('id'),
'title': self._get_title(_(u'Subscribe')),
}
@view_config(
name='subscribe',
request_method='POST',
renderer='json',
permission='view'
)
def _subscribe(self):
ids = self.request.params.getall('id')
for id in ids:
account_item = AccountItem.get(id)
subscribe_resource(self.request, account_item.resource)
return {
'success_message': _(u'Subscribed'),
}
| mazvv/travelcrm | travelcrm/views/accounts_items.py | Python | gpl-3.0 | 6,534 |
# -*- coding: utf-8 -*-
import classes.level_controller as lc
import classes.game_driver as gd
import classes.extras as ex
import classes.board
import random
import pygame
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self,mainloop,5,10)
gd.BoardGame.__init__(self,mainloop,speaker,config,screen_w,screen_h,13,11)
def create_game_objects(self, level = 1):
self.board.decolorable = False
self.board.draw_grid = False
color = (234,218,225)
self.color = color
self.grey = (200,200,200)
self.font_hl = (100,0,250)
self.task_str_color = ex.hsv_to_rgb(200,200,230)
self.activated_col = self.font_hl
white = (255,255,255)
self.bg_col = white
        self.top_line = 3  # self.board.scale//2
if self.mainloop.scheme is not None:
if self.mainloop.scheme.dark:
self.bg_col = (0,0,0)
self.level.games_per_lvl = 5
if self.level.lvl == 1:
rngs = [20,50,10,19]
self.level.games_per_lvl = 3
elif self.level.lvl == 2:
rngs = [50,100,20,49]
self.level.games_per_lvl = 3
elif self.level.lvl == 3:
rngs = [100,250,50,99]
self.level.games_per_lvl = 3
elif self.level.lvl == 4:
rngs = [250,500,100,249]
elif self.level.lvl == 5:
rngs = [500,1000,100,499]
elif self.level.lvl == 6:
rngs = [700,1500,250,699]
elif self.level.lvl == 7:
rngs = [1500,2500,500,1499]
elif self.level.lvl == 8:
rngs = [2500,5000,1500,2499]
elif self.level.lvl == 9:
rngs = [5000,10000,2500,4999]
elif self.level.lvl == 10:
rngs = [10000,84999,5000,9999]
data = [39,18]
self.points = self.level.lvl
#stretch width to fit the screen size
x_count = self.get_x_count(data[1],even=None)
if x_count > 39:
data[0] = x_count
self.data = data
self.vis_buttons = [1,1,1,1,1,1,1,0,0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0],data[1])
scale = self.layout.scale
self.board.level_start(data[0],data[1],scale)
self.n1 = random.randrange(rngs[0],rngs[1])
self.n2 = random.randrange(rngs[2],rngs[3])
self.sumn1n2 = self.n1-self.n2
self.n1s = str(self.n1)
self.n2s = str(self.n2)
self.sumn1n2s = str(self.sumn1n2)
self.n1sl = len(self.n1s)
self.n2sl = len(self.n2s)
        self.sumn1n2sl = len(self.sumn1n2s)
self.cursor_pos = 0
self.correct = False
self.carry1l = []
self.carry10l = []
self.resultl = []
self.nums1l = []
self.nums2l = []
self.ship_id = 0
self.digits = ["0","1","2","3","4","5","6","7","8","9"]
if self.lang.lang == 'el':
qm = ";"
else:
qm = "?"
question = self.n1s + " - " + self.n2s + " = " + qm
self.board.add_unit(1,0,data[0]-3-(max(self.n1sl,self.n2sl))*3 ,3,classes.board.Label,question,self.bg_col,"",21)
self.board.units[-1].align = 1
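        # Column-subtraction layout: two rows of small input boxes sit above the top
        # number, one pair per digit column except the last. Row 0 holds the borrow
        # mark ("-1", drawn over the next, more significant column) and row 1 the
        # matching "+10" over the current column.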
#borrow 1
for i in range(self.n1sl - 1):
self.board.add_unit(data[0]-6-i*3,0,1,1,classes.board.Label,"-",self.bg_col,"",0)
self.board.add_unit(data[0]-5-i*3,0,1,1,classes.board.Letter,"",self.bg_col,"",1)
self.carry1l.append(self.board.ships[-1])
self.carry1l[-1].set_outline(self.grey, 2)
self.carry1l[-1].pos_id = i
self.board.units[-1].align = 2
#add 10
for i in range(self.n1sl - 1):
self.board.add_unit(data[0]-3-i*3,1,1,1,classes.board.Label,"+",self.bg_col,"",0)
self.board.add_unit(data[0]-2-i*3,1,1,1,classes.board.Letter,"",self.bg_col,"",1)
self.carry10l.append(self.board.ships[-1])
self.carry10l[-1].set_outline(self.grey, 2)
self.carry10l[-1].pos_id = i
self.board.units[-1].align = 2
self.board.add_unit(data[0]-2-self.n1sl*3,0,2,1,classes.board.Label,"-1",self.bg_col,"",0)
self.board.add_unit(data[0]-2-self.n1sl*3,1,2,1,classes.board.Label,"+10",self.bg_col,"",0)
#first number
for i in range(self.n1sl):
self.board.add_unit(data[0]-3-i*3,2,3,3,classes.board.Label,self.n1s[-(i+1)],self.bg_col,"",21)
self.nums1l.append(self.board.units[-1])
self.nums1l[-1].font_color = self.grey
self.nums1l[-1].pos_id = i
#second number
i = 0
for i in range(self.n2sl):
self.board.add_unit(data[0]-3-i*3,5,3,3,classes.board.Label,self.n2s[-(i+1)],self.bg_col,"",21)
self.nums2l.append(self.board.units[-1])
self.nums2l[-1].pos_id = i
i += 1
self.board.add_unit(data[0]-3-i*3,5,3,3,classes.board.Label,"-",self.bg_col,"",21)
self.plus_label = self.board.units[-1]
#line
#line = "―" * (self.sumn1n2sl*2)
self.board.add_unit(data[0]-self.sumn1n2sl*3,8,self.sumn1n2sl*3,1,classes.board.Label,"",self.bg_col,"",21)
self.draw_hori_line(self.board.units[-1])
#self.board.units[-1].text_wrap = False
#result
for i in range(self.sumn1n2sl):
self.board.add_unit(data[0]-3-i*3,9,3,3,classes.board.Letter,"",self.bg_col,"",21)
self.resultl.append(self.board.ships[-1])
self.resultl[-1].set_outline(self.grey, 2)
self.resultl[-1].pos_id = i
self.resultl[0].set_outline(self.activated_col, 3)
self.home_square = self.resultl[0]
self.board.active_ship = self.home_square.unit_id
self.activable_count = len(self.board.ships)
for each in self.board.ships:
each.immobilize()
self.deactivate_colors()
self.reactivate_colors()
def draw_hori_line(self,unit):
w = unit.grid_w*self.board.scale
h = unit.grid_h*self.board.scale
center = [w//2,h//2]
canv = pygame.Surface([w, h-1])
canv.fill(self.bg_col)
pygame.draw.line(canv,self.grey,(0,self.top_line),(w,self.top_line),3)
unit.painting = canv.copy()
unit.update_me = True
def handle(self,event):
gd.BoardGame.handle(self, event) #send event handling up
if self.show_msg == False:
if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
self.home_sqare_switch(self.board.active_ship+1)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
self.home_sqare_switch(self.board.active_ship-1)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
if self.home_square in self.resultl:
self.home_sqare_switch(self.board.active_ship-self.n1sl+1)
elif self.home_square in self.carry10l:
self.home_sqare_switch(self.board.active_ship-self.n1sl+1)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
self.home_sqare_switch(self.board.active_ship+self.n1sl-1)
elif event.type == pygame.KEYDOWN and event.key != pygame.K_RETURN and not self.correct:
lhv = len(self.home_square.value)
self.changed_since_check = True
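                # Editing keys: BACKSPACE deletes; typed digits are filtered by cell
                # type. Result cells keep a single digit (typing a second digit
                # replaces the first), borrow cells accept only "1" and mirror "10"
                # into the paired +10 cell, and +10 cells accept only "10" and
                # mirror "1" back into the borrow cell.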
if event.key == pygame.K_BACKSPACE:
if lhv > 0:
self.home_square.value = self.home_square.value[0:lhv-1]
else:
char = event.unicode
if (len(char)>0 and lhv < 3 and char in self.digits):
if self.home_square in self.resultl:
if lhv == 1:
s = self.home_square.value + char
if s[0] == "0":
self.home_square.value = char
else:
n = int(s)
if n < 20:
self.home_square.value = str(n % 10)
else:
self.home_square.value = char
else:
self.home_square.value = char
elif self.home_square in self.carry1l:
if char == "1":
self.home_square.value = "1"
self.carry10l[self.home_square.pos_id].value = "10"
else:
self.home_square.value = ""
self.carry10l[self.home_square.pos_id].value = ""
self.carry10l[self.home_square.pos_id].update_me = True
elif self.home_square in self.carry10l:
if lhv == 0:
if char == "1":
self.home_square.value = "10"
elif lhv == 1:
if char == "0":
self.home_square.value = "10"
else:
self.home_square.value = ""
else:
if char == "1":
self.home_square.value = "10"
else:
self.home_square.value = ""
if self.home_square.value == "10":
self.carry1l[self.home_square.pos_id].value = "1"
else:
self.carry1l[self.home_square.pos_id].value = ""
self.carry1l[self.home_square.pos_id].update_me = True
self.home_square.update_me = True
self.mainloop.redraw_needed[0] = True
elif event.type == pygame.MOUSEBUTTONUP:
self.home_sqare_switch(self.board.active_ship)
def home_sqare_switch(self, activate):
if activate < 0 or activate > self.activable_count:
activate = self.activable_count - self.sumn1n2sl
if activate >= 0 and activate < self.activable_count:
self.board.active_ship = activate
self.home_square.update_me = True
if self.board.active_ship >= 0:
self.home_square.set_outline(self.grey, 2)
self.deactivate_colors()
self.home_square = self.board.ships[self.board.active_ship]
self.home_square.set_outline(self.activated_col, 3)
self.reactivate_colors()
self.home_square.font_color = self.font_hl
self.home_square.update_me = True
self.mainloop.redraw_needed[0] = True
def deactivate_colors(self):
for each in self.board.ships:
each.font_color = self.grey
each.update_me = True
for each in self.board.units:
each.font_color = self.grey
each.update_me = True
def reactivate_colors(self):
self.plus_label.font_color = self.font_hl
self.board.units[0].font_color = self.task_str_color
if self.home_square in self.carry1l:
self.carry10l[self.home_square.pos_id].font_color = self.font_hl
elif self.home_square in self.carry10l:
self.carry1l[self.home_square.pos_id].font_color = self.font_hl
elif self.home_square in self.resultl:
if self.home_square.pos_id > 0:
self.carry1l[self.home_square.pos_id-1].font_color = self.font_hl
if self.home_square.pos_id >= 0 and self.home_square.pos_id < self.n1sl-1:
self.carry10l[self.home_square.pos_id].font_color = self.font_hl
if (self.n1sl > self.home_square.pos_id):
self.nums1l[self.home_square.pos_id].font_color = self.font_hl
if (self.n2sl > self.home_square.pos_id):
self.nums2l[self.home_square.pos_id].font_color = self.font_hl
self.resultl[self.home_square.pos_id].font_color = self.font_hl
def update(self,game):
game.fill(self.color)
gd.BoardGame.update(self, game) #rest of painting done by parent
def check_result(self):
s = ""
for each in reversed(self.resultl):
s += each.value
if s == self.sumn1n2s:
self.update_score(self.points)
self.level.next_board()
else:
if self.points > 0:
self.points -= 1
self.level.try_again()
| OriHoch/pysiogame | game_boards/game070.py | Python | gpl-3.0 | 12,968 |
#!/usr/bin/env python3
# - coding: utf-8 -
# Copyright (C) 2010 Matías Ribecky <matias at mribecky.com.ar>
# Copyright (C) 2010-2012 Toms Bauģis <[email protected]>
# Copyright (C) 2012 Ted Smith <tedks at cs.umd.edu>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
'''A script to control the applet from the command line.'''
import sys, os
import argparse
import re
import gi
gi.require_version('Gdk', '3.0') # noqa: E402
gi.require_version('Gtk', '3.0') # noqa: E402
from gi.repository import GLib as glib
from gi.repository import Gdk as gdk
from gi.repository import Gtk as gtk
from gi.repository import Gio as gio
import hamster
from hamster import client, reports
from hamster import logger as hamster_logger
from hamster.about import About
from hamster.edit_activity import CustomFactController
from hamster.overview import Overview
from hamster.preferences import PreferencesEditor
from hamster.lib import default_logger, stuff
from hamster.lib import datetime as dt
from hamster.lib.fact import Fact
logger = default_logger(__file__)
def word_wrap(line, max_len):
"""primitive word wrapper"""
lines = []
cur_line, cur_len = "", 0
for word in line.split():
if len("%s %s" % (cur_line, word)) < max_len:
cur_line = ("%s %s" % (cur_line, word)).strip()
else:
if cur_line:
lines.append(cur_line)
cur_line = word
if cur_line:
lines.append(cur_line)
return lines
def fact_dict(fact_data, with_date):
fact = {}
if with_date:
fmt = '%Y-%m-%d %H:%M'
else:
fmt = '%H:%M'
fact['start'] = fact_data.start_time.strftime(fmt)
if fact_data.end_time:
fact['end'] = fact_data.end_time.strftime(fmt)
else:
end_date = dt.datetime.now()
fact['end'] = ''
fact['duration'] = fact_data.delta.format()
fact['activity'] = fact_data.activity
fact['category'] = fact_data.category
if fact_data.tags:
fact['tags'] = ' '.join('#%s' % tag for tag in fact_data.tags)
else:
fact['tags'] = ''
fact['description'] = fact_data.description
return fact
class Hamster(gtk.Application):
"""Hamster gui.
Actions should eventually be accessible via Gio.DBusActionGroup
with the 'org.gnome.Hamster.GUI' id.
but that is still experimental, the actions API is subject to change.
Discussion with "external" developers welcome !
The separate dbus org.gnome.Hamster.WindowServer
is still the stable recommended way to show windows for now.
"""
def __init__(self):
# inactivity_timeout: How long (ms) the service should stay alive
# after all windows have been closed.
gtk.Application.__init__(self,
application_id="org.gnome.Hamster.GUI",
#inactivity_timeout=10000,
register_session=True)
self.about_controller = None # 'about' window controller
self.fact_controller = None # fact window controller
self.overview_controller = None # overview window controller
self.preferences_controller = None # settings window controller
self.connect("startup", self.on_startup)
self.connect("activate", self.on_activate)
# we need them before the startup phase
# so register/activate_action work before the app is ran.
# cf. https://gitlab.gnome.org/GNOME/glib/blob/master/gio/tests/gapplication-example-actions.c
self.add_actions()
def add_actions(self):
# most actions have no parameters
# for type "i", use Variant.new_int32() and .get_int32() to pack/unpack
for name in ("about", "add", "clone", "edit", "overview", "preferences"):
data_type = glib.VariantType("i") if name in ("edit", "clone") else None
action = gio.SimpleAction.new(name, data_type)
action.connect("activate", self.on_activate_window)
self.add_action(action)
action = gio.SimpleAction.new("quit", None)
action.connect("activate", self.on_activate_quit)
self.add_action(action)
def on_activate(self, data=None):
logger.debug("activate")
if not self.get_windows():
self.activate_action("overview")
def on_activate_window(self, action=None, data=None):
self._open_window(action.get_name(), data)
    def on_activate_quit(self, action=None, data=None):
        self.quit()
def on_startup(self, data=None):
logger.debug("startup")
# Must be the same as application_id. Won't be required with gtk4.
glib.set_prgname(self.get_application_id())
# localized name, but let's keep it simple.
glib.set_application_name("Hamster")
def _open_window(self, name, data=None):
logger.debug("opening '{}'".format(name))
if name == "about":
if not self.about_controller:
# silence warning "GtkDialog mapped without a transient parent"
# https://stackoverflow.com/a/38408127/3565696
_dummy = gtk.Window()
self.about_controller = About(parent=_dummy)
logger.debug("new About")
controller = self.about_controller
elif name in ("add", "clone", "edit"):
if self.fact_controller:
# Something is already going on, with other arguments, present it.
# Or should we just discard the forgotten one ?
logger.warning("Fact controller already active. Please close first.")
else:
fact_id = data.get_int32() if data else None
self.fact_controller = CustomFactController(name, fact_id=fact_id)
logger.debug("new CustomFactController")
controller = self.fact_controller
elif name == "overview":
if not self.overview_controller:
self.overview_controller = Overview()
logger.debug("new Overview")
controller = self.overview_controller
elif name == "preferences":
if not self.preferences_controller:
self.preferences_controller = PreferencesEditor()
logger.debug("new PreferencesEditor")
controller = self.preferences_controller
window = controller.window
if window not in self.get_windows():
self.add_window(window)
logger.debug("window added")
# Essential for positioning on wayland.
# This should also select the correct window type if unset yet.
# https://specifications.freedesktop.org/wm-spec/wm-spec-1.3.html
if name != "overview" and self.overview_controller:
window.set_transient_for(self.overview_controller.window)
# so the dialog appears on top of the transient-for:
window.set_type_hint(gdk.WindowTypeHint.DIALOG)
else:
# toplevel
window.set_transient_for(None)
controller.present()
logger.debug("window presented")
def present_fact_controller(self, action, fact_id=0):
"""Present the fact controller window to add, clone or edit a fact.
Args:
action (str): "add", "clone" or "edit"
"""
assert action in ("add", "clone", "edit")
if action in ("clone", "edit"):
action_data = glib.Variant.new_int32(int(fact_id))
else:
action_data = None
# always open dialogs through actions,
# both for consistency, and to reduce the paths to test.
app.activate_action(action, action_data)
class HamsterCli(object):
"""Command line interface."""
def __init__(self):
self.storage = client.Storage()
def assist(self, *args):
assist_command = args[0] if args else ""
if assist_command == "start":
hamster_client._activities(sys.argv[-1])
elif assist_command == "export":
formats = "html tsv xml ical".split()
chosen = sys.argv[-1]
formats = [f for f in formats if not chosen or f.startswith(chosen)]
print("\n".join(formats))
def toggle(self):
self.storage.toggle()
def start(self, *args):
'''Start a new activity.'''
if not args:
print("Error: please specify activity")
return 0
fact = Fact.parse(" ".join(args), range_pos="tail")
if fact.start_time is None:
fact.start_time = dt.datetime.now()
self.storage.check_fact(fact, default_day=dt.hday.today())
id_ = self.storage.add_fact(fact)
return id_
def stop(self, *args):
'''Stop tracking the current activity.'''
self.storage.stop_tracking()
def export(self, *args):
args = args or []
export_format, start_time, end_time = "html", None, None
if args:
export_format = args[0]
(start_time, end_time), __ = dt.Range.parse(" ".join(args[1:]))
start_time = start_time or dt.datetime.combine(dt.date.today(), dt.time())
end_time = end_time or start_time.replace(hour=23, minute=59, second=59)
facts = self.storage.get_facts(start_time, end_time)
writer = reports.simple(facts, start_time.date(), end_time.date(), export_format)
def _activities(self, search=""):
'''Print the names of all the activities.'''
if "@" in search:
activity, category = search.split("@")
for cat in self.storage.get_categories():
if not category or cat['name'].lower().startswith(category.lower()):
print("{}@{}".format(activity, cat['name']))
else:
for activity in self.storage.get_activities(search):
print(activity['name'])
if activity['category']:
print("{}@{}".format(activity['name'], activity['category']))
def activities(self, *args):
'''Print the names of all the activities.'''
search = args[0] if args else ""
for activity in self.storage.get_activities(search):
print("{}@{}".format(activity['name'], activity['category']))
def categories(self, *args):
'''Print the names of all the categories.'''
for category in self.storage.get_categories():
print(category['name'])
def list(self, *times):
"""list facts within a date range"""
(start_time, end_time), __ = dt.Range.parse(" ".join(times or []))
start_time = start_time or dt.datetime.combine(dt.date.today(), dt.time())
end_time = end_time or start_time.replace(hour=23, minute=59, second=59)
self._list(start_time, end_time)
def current(self, *args):
"""prints current activity. kinda minimal right now"""
facts = self.storage.get_todays_facts()
if facts and not facts[-1].end_time:
print("{} {}".format(str(facts[-1]).strip(),
facts[-1].delta.format(fmt="HH:MM")))
else:
print((_("No activity")))
def search(self, *args):
"""search for activities by name and optionally within a date range"""
args = args or []
search = ""
if args:
search = args[0]
(start_time, end_time), __ = dt.Range.parse(" ".join(args[1:]))
start_time = start_time or dt.datetime.combine(dt.date.today(), dt.time())
end_time = end_time or start_time.replace(hour=23, minute=59, second=59)
self._list(start_time, end_time, search)
def _list(self, start_time, end_time, search=""):
"""Print a listing of activities"""
facts = self.storage.get_facts(start_time, end_time, search)
headers = {'activity': _("Activity"),
'category': _("Category"),
'tags': _("Tags"),
'description': _("Description"),
'start': _("Start"),
'end': _("End"),
'duration': _("Duration")}
# print date if it is not the same day
print_with_date = start_time.date() != end_time.date()
cols = 'start', 'end', 'duration', 'activity', 'category'
widths = dict([(col, len(headers[col])) for col in cols])
for fact in facts:
fact = fact_dict(fact, print_with_date)
for col in cols:
widths[col] = max(widths[col], len(fact[col]))
cols = ["{{{col}: <{len}}}".format(col=col, len=widths[col]) for col in cols]
fact_line = " | ".join(cols)
row_width = sum(val + 3 for val in list(widths.values()))
print()
print(fact_line.format(**headers))
print("-" * min(row_width, 80))
by_cat = {}
for fact in facts:
cat = fact.category or _("Unsorted")
by_cat.setdefault(cat, dt.timedelta(0))
by_cat[cat] += fact.delta
pretty_fact = fact_dict(fact, print_with_date)
print(fact_line.format(**pretty_fact))
if pretty_fact['description']:
for line in word_wrap(pretty_fact['description'], 76):
print(" {}".format(line))
if pretty_fact['tags']:
for line in word_wrap(pretty_fact['tags'], 76):
print(" {}".format(line))
print("-" * min(row_width, 80))
cats = []
total_duration = dt.timedelta()
for cat, duration in sorted(by_cat.items(), key=lambda x: x[1], reverse=True):
cats.append("{}: {}".format(cat, duration.format()))
total_duration += duration
for line in word_wrap(", ".join(cats), 80):
print(line)
print("Total: ", total_duration.format())
print()
def version(self):
print(hamster.__version__)
if __name__ == '__main__':
from hamster.lib import i18n
i18n.setup_i18n()
usage = _(
"""
Actions:
* add [activity [start-time [end-time]]]: Add an activity
* stop: Stop tracking current activity.
* list [start-date [end-date]]: List activities
* search [terms] [start-date [end-date]]: List activities matching a search
term
* export [html|tsv|ical|xml] [start-date [end-date]]: Export activities with
the specified format
* current: Print current activity
* activities: List all the activities names, one per line.
* categories: List all the categories names, one per line.
* overview / preferences / add / about: launch specific window
* version: Show the Hamster version
Time formats:
* 'YYYY-MM-DD hh:mm': If start-date is missing, it will default to today.
If end-date is missing, it will default to start-date.
* '-minutes': Relative time in minutes from the current date and time.
Note:
* For list/search/export a "hamster day" starts at the time set in the
      preferences (default 05:00) and ends one minute before that time on the next day.
Activities are reported for each "hamster day" in the interval.
Example usage:
hamster start bananas -20
start activity 'bananas' with start time 20 minutes ago
hamster search pancakes 2012-08-01 2012-08-30
        look for an activity matching terms 'pancakes' between the 1st and 30th of
        August 2012. Will check against activity, category, description and tags
""")
hamster_client = HamsterCli()
app = Hamster()
logger.debug("app instanciated")
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL) # gtk3 screws up ctrl+c
parser = argparse.ArgumentParser(
description="Time tracking utility",
epilog=usage,
formatter_class=argparse.RawDescriptionHelpFormatter)
# cf. https://stackoverflow.com/a/28611921/3565696
parser.add_argument("--log", dest="log_level",
choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
default='WARNING',
help="Set the logging level (default: %(default)s)")
parser.add_argument("action", nargs="?", default="overview")
parser.add_argument('action_args', nargs=argparse.REMAINDER, default=[])
args, unknown_args = parser.parse_known_args()
# logger for current script
logger.setLevel(args.log_level)
# hamster_logger for the rest
hamster_logger.setLevel(args.log_level)
if not hamster.installed:
logger.info("Running in devel mode")
if args.action in ("start", "track"):
action = "add" # alias
elif args.action == "prefs":
# for backward compatibility
action = "preferences"
else:
action = args.action
if action in ("about", "add", "edit", "overview", "preferences"):
if action == "add" and args.action_args:
assert not unknown_args, "unknown options: {}".format(unknown_args)
# directly add fact from arguments
id_ = hamster_client.start(*args.action_args)
assert id_ > 0, "failed to add fact"
sys.exit(0)
else:
app.register()
if action == "edit":
assert len(args.action_args) == 1, (
"edit requires exactly one argument, got {}"
.format(args.action_args))
id_ = int(args.action_args[0])
assert id_ > 0, "received non-positive id : {}".format(id_)
action_data = glib.Variant.new_int32(id_)
else:
action_data = None
app.activate_action(action, action_data)
run_args = [sys.argv[0]] + unknown_args
logger.debug("run {}".format(run_args))
status = app.run(run_args)
logger.debug("app exited")
sys.exit(status)
elif hasattr(hamster_client, action):
getattr(hamster_client, action)(*args.action_args)
else:
sys.exit(usage % {'prog': sys.argv[0]})
| projecthamster/hamster | src/hamster-cli.py | Python | gpl-3.0 | 18,772 |
# encoding=utf-8
from gi.repository import Gtk
from .i18n import _
class AboutDialog(Gtk.AboutDialog):
def __init__(self, parent):
super(AboutDialog, self).__init__(title=_('About'), parent=parent)
self.set_modal(True)
self.set_program_name('Ydict')
self.set_authors(['Wiky L<[email protected]>'])
self.set_artists(['Wiky L<[email protected]>'])
self.set_comments('')
self.set_copyright('Copyright (c) Wiky L 2015')
self.set_license_type(Gtk.License.GPL_3_0)
self.set_logo_icon_name('ydict')
self.set_version('1.0')
self.set_website('https://github.com/wiiiky/ydict')
self.set_website_label('GitHub')
self.set_wrap_license(True)
| wiiiky/ydict | pydict/about.py | Python | gpl-3.0 | 743 |
from ._costs import *
| csxeba/ReSkiv | brainforge/costs/__init__.py | Python | gpl-3.0 | 22 |
# -*- coding: utf8 -*-
import argparse
import logging
import pytest
import SMSShell
import SMSShell.commands
def test_abstract_init():
"""Test abstract init methods
"""
abs = SMSShell.commands.AbstractCommand(logging.getLogger(),
object(),
object(),
object())
assert abs.name == 'abstractcommand'
def test_abstract_not_implemented():
abs = SMSShell.commands.AbstractCommand(logging.getLogger(),
object(),
object(),
object())
with pytest.raises(SMSShell.commands.CommandBadImplemented):
abs.description([])
with pytest.raises(SMSShell.commands.CommandBadImplemented):
abs.usage([])
with pytest.raises(SMSShell.commands.CommandBadImplemented):
abs.main([])
def test_abstract_bad_input_state_type():
class Bad(SMSShell.commands.AbstractCommand):
def inputStates(self):
return dict()
com = Bad(logging.getLogger(),
object(),
object(),
object())
with pytest.raises(SMSShell.commands.CommandBadImplemented):
com._inputStates()
def test_abstract_bad_input_state_value():
class Bad(SMSShell.commands.AbstractCommand):
def inputStates(self):
return ['d']
com = Bad(logging.getLogger(),
object(),
object(),
object())
with pytest.raises(SMSShell.commands.CommandBadImplemented):
com._inputStates()
def test_abstract_bad_arg_parser_type():
class Bad(SMSShell.commands.AbstractCommand):
def argsParser(self):
return 'a'
com = Bad(logging.getLogger(),
object(),
object(),
object())
with pytest.raises(SMSShell.commands.CommandBadImplemented):
com._argsParser()
def test_abstract_bad_arg_parser_init():
class Bad(SMSShell.commands.AbstractCommand):
def argsParser(self):
raise ValueError('no')
com = Bad(logging.getLogger(),
object(),
object(),
object())
with pytest.raises(SMSShell.commands.CommandBadImplemented):
com._argsParser()
| Turgon37/SMSShell | tests/test_smsshell_commands.py | Python | gpl-3.0 | 2,388 |
# pylint: disable = too-many-lines, invalid-name, line-too-long, too-many-instance-attributes,
# pylint: disable = too-many-branches,too-many-locals, too-many-nested-blocks
from __future__ import (absolute_import, division, print_function)
try:
from mantidplot import *
except ImportError:
    canMantidPlot = False
import csv
import os
import re
from operator import itemgetter
import itertools
from PyQt4 import QtCore, QtGui
from mantid.simpleapi import *
from isis_reflectometry.quick import *
from isis_reflectometry.convert_to_wavelength import ConvertToWavelength
from isis_reflectometry import load_live_runs
from isis_reflectometry.combineMulti import *
import mantidqtpython
from mantid.api import Workspace, WorkspaceGroup, CatalogManager, AlgorithmManager
from mantid import UsageService
from ui.reflectometer.ui_refl_window import Ui_windowRefl
from ui.reflectometer.refl_save import Ui_SaveWindow
from ui.reflectometer.refl_choose_col import ReflChoose
from ui.reflectometer.refl_options import ReflOptions
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
canMantidPlot = True
class ReflGui(QtGui.QMainWindow, Ui_windowRefl):
current_instrument = None
current_table = None
current_polarisation_method = None
labelStatus = None
accMethod = None
def __init__(self):
"""
Initialise the interface
"""
super(QtGui.QMainWindow, self).__init__()
self.setupUi(self)
self.loading = False
self.clip = QtGui.QApplication.clipboard()
self.shown_cols = {}
self.mod_flag = False
self.run_cols = [0, 5, 10]
self.angle_cols = [1, 6, 11]
self.scale_col = 16
self.stitch_col = 17
self.plot_col = 18
self.__graphs = dict()
self._last_trans = ""
self.icat_file_map = None
self.__instrumentRuns = None
self.__icat_download = False
self.__group_tof_workspaces = True
# Q Settings
self.__generic_settings = "Mantid/ISISReflGui"
self.__live_data_settings = "Mantid/ISISReflGui/LiveData"
self.__search_settings = "Mantid/ISISReflGui/Search"
self.__column_settings = "Mantid/ISISReflGui/Columns"
self.__icat_download_key = "icat_download"
self.__ads_use_key = "AlgUse"
self.__alg_migration_key = "AlgUseReset"
self.__live_data_frequency_key = "frequency"
self.__live_data_method_key = "method"
self.__group_tof_workspaces_key = "group_tof_workspaces"
self.__stitch_right_key = "stitch_right"
# Setup instrument with defaults assigned.
self.instrument_list = ['INTER', 'SURF', 'CRISP', 'POLREF', 'OFFSPEC']
self.polarisation_instruments = ['CRISP', 'POLREF']
self.polarisation_options = {'None': PolarisationCorrection.NONE,
'1-PNR': PolarisationCorrection.PNR,
'2-PA': PolarisationCorrection.PA}
# Set the live data settings, use default if none have been set before
settings = QtCore.QSettings()
settings.beginGroup(self.__live_data_settings)
self.live_method = settings.value(self.__live_data_method_key, "", type=str)
self.live_freq = settings.value(self.__live_data_frequency_key, 0, type=float)
if not self.live_freq:
logger.information(
"No settings were found for Update frequency of loading live data, Loading default of 60 seconds")
self.live_freq = float(60)
settings.setValue(self.__live_data_frequency_key, self.live_freq)
if not self.live_method:
logger.information(
"No settings were found for Accumulation Method of loading live data, Loading default of \"Add\"")
self.live_method = "Add"
settings.setValue(self.__live_data_method_key, self.live_method)
settings.endGroup()
settings.beginGroup(self.__generic_settings)
self.__alg_migrate = settings.value(self.__alg_migration_key, True, type=bool)
if self.__alg_migrate:
self.__alg_use = True # We will use the algorithms by default rather than the quick scripts
self.__alg_migrate = False # Never do this again. We only want to reset once.
else:
self.__alg_use = settings.value(self.__ads_use_key, True, type=bool)
self.__icat_download = settings.value(self.__icat_download_key, False, type=bool)
self.__group_tof_workspaces = settings.value(self.__group_tof_workspaces_key, True, type=bool)
self.__scale_right = settings.value(self.__stitch_right_key, True, type=bool)
settings.setValue(self.__ads_use_key, self.__alg_use)
settings.setValue(self.__icat_download_key, self.__icat_download)
settings.setValue(self.__group_tof_workspaces_key, self.__group_tof_workspaces)
settings.setValue(self.__alg_migration_key, self.__alg_migrate)
settings.setValue(self.__stitch_right_key, self.__scale_right)
settings.endGroup()
del settings
# register startup
UsageService.registerFeatureUsage("Interface", "ISIS Reflectomety", False)
def __del__(self):
"""
Save the contents of the table if the modified flag was still set
"""
if self.mod_flag:
            self._save(True)
def _save_check(self):
"""
Show a custom message box asking if the user wants to save, or discard their changes or cancel back to the interface
"""
msgBox = QtGui.QMessageBox()
msgBox.setText("The table has been modified. Do you want to save your changes?")
accept_btn = QtGui.QPushButton('Save')
cancel_btn = QtGui.QPushButton('Cancel')
discard_btn = QtGui.QPushButton('Discard')
msgBox.addButton(accept_btn, QtGui.QMessageBox.AcceptRole)
msgBox.addButton(cancel_btn, QtGui.QMessageBox.RejectRole)
msgBox.addButton(discard_btn, QtGui.QMessageBox.NoRole)
msgBox.setIcon(QtGui.QMessageBox.Question)
msgBox.setDefaultButton(accept_btn)
msgBox.setEscapeButton(cancel_btn)
msgBox.exec_()
btn = msgBox.clickedButton()
saved = None
if btn.text() == accept_btn.text():
ret = QtGui.QMessageBox.AcceptRole
saved = self._save()
elif btn.text() == cancel_btn.text():
ret = QtGui.QMessageBox.RejectRole
else:
ret = QtGui.QMessageBox.NoRole
return ret, saved
def closeEvent(self, event):
"""
Close the window. but check if the user wants to save
"""
self.buttonProcess.setFocus()
if self.mod_flag:
event.ignore()
ret, saved = self._save_check()
if ret == QtGui.QMessageBox.AcceptRole:
if saved:
self.mod_flag = False
event.accept()
elif ret == QtGui.QMessageBox.RejectRole:
event.ignore()
elif ret == QtGui.QMessageBox.NoRole:
self.mod_flag = False
event.accept()
def _instrument_selected(self, instrument):
"""
Change the default instrument to the selected one
"""
config['default.instrument'] = self.instrument_list[instrument]
logger.notice("Instrument is now: " + str(config['default.instrument']))
self.textRB.clear()
self._populate_runs_list()
self.current_instrument = self.instrument_list[instrument]
self.comboPolarCorrect.setEnabled(
self.current_instrument in self.polarisation_instruments) # Enable as appropriate
self.comboPolarCorrect.setCurrentIndex(self.comboPolarCorrect.findText('None')) # Reset to None
def _table_modified(self, row, column):
"""
sets the modified flag when the table is altered
"""
# Sometimes users enter leading or trailing whitespace into a cell.
# Let's remove it for them automatically.
item = self.tableMain.item(row, column)
item.setData(0, str.strip(str(item.data(0))))
if not self.loading:
self.mod_flag = True
plotbutton = self.tableMain.cellWidget(row, self.plot_col).children()[1]
self.__reset_plot_button(plotbutton)
def _plot_row(self):
"""
handler for the plot buttons
"""
plotbutton = self.sender()
self._plot(plotbutton)
def _show_slit_calculator(self):
calc = mantidqtpython.MantidQt.MantidWidgets.SlitCalculator(self)
calc.setCurrentInstrumentName(self.current_instrument)
calc.processInstrumentHasBeenChanged()
calc.exec_()
def _polar_corr_selected(self):
"""
Event handler for polarisation correction selection.
"""
if self.current_instrument in self.polarisation_instruments:
chosen_method = self.comboPolarCorrect.currentText()
self.current_polarisation_method = self.polarisation_options[chosen_method]
else:
logger.notice("Polarisation correction is not supported on " + str(self.current_instrument))
def setup_layout(self):
"""
Do further setup layout that couldn't be done in the designer
"""
self.comboInstrument.addItems(self.instrument_list)
current_instrument = config['default.instrument'].upper()
if current_instrument in self.instrument_list:
self.comboInstrument.setCurrentIndex(self.instrument_list.index(current_instrument))
else:
self.comboInstrument.setCurrentIndex(0)
config['default.instrument'] = 'INTER'
self.current_instrument = config['default.instrument'].upper()
# Setup polarisation options with default assigned
self.comboPolarCorrect.clear()
self.comboPolarCorrect.addItems(list(self.polarisation_options.keys()))
self.comboPolarCorrect.setCurrentIndex(self.comboPolarCorrect.findText('None'))
self.current_polarisation_method = self.polarisation_options['None']
self.comboPolarCorrect.setEnabled(self.current_instrument in self.polarisation_instruments)
self.splitterList.setSizes([200, 800])
self.labelStatus = QtGui.QLabel("Ready")
self.statusMain.addWidget(self.labelStatus)
self._initialise_table()
self._populate_runs_list()
self._connect_slots()
return True
def _reset_table(self):
"""
        Reset the plot buttons and stitch checkboxes back to their default state
"""
# switches from current to true, to false to make sure stateChanged fires
self.checkTickAll.setCheckState(2)
self.checkTickAll.setCheckState(0)
for row in range(self.tableMain.rowCount()):
plotbutton = self.tableMain.cellWidget(row, self.plot_col).children()[1]
self.__reset_plot_button(plotbutton)
def __reset_plot_button(self, plotbutton):
"""
        Reset the provided plot button to its default state: disabled and with no cache
"""
plotbutton.setDisabled(True)
plotbutton.setProperty('runno', None)
plotbutton.setProperty('overlapLow', None)
plotbutton.setProperty('overlapHigh', None)
plotbutton.setProperty('wksp', None)
def _initialise_table(self):
"""
Initialise the table. Clearing all data and adding the checkboxes and plot buttons
"""
# first check if the table has been changed before clearing it
if self.mod_flag:
ret, _saved = self._save_check()
if ret == QtGui.QMessageBox.RejectRole:
return
self.current_table = None
settings = QtCore.QSettings()
settings.beginGroup(self.__column_settings)
for column in range(self.tableMain.columnCount()):
for row in range(self.tableMain.rowCount()):
if column in self.run_cols:
item = QtGui.QTableWidgetItem()
item.setText('')
item.setToolTip('Runs can be colon delimited to coadd them')
self.tableMain.setItem(row, column, item)
elif column in self.angle_cols:
item = QtGui.QTableWidgetItem()
item.setText('')
item.setToolTip('Angles are in degrees')
self.tableMain.setItem(row, column, item)
elif column == self.stitch_col:
check = QtGui.QCheckBox()
check.setCheckState(False)
check.setToolTip('If checked, the runs in this row will be stitched together')
item = QtGui.QWidget()
layout = QtGui.QHBoxLayout(item)
layout.addWidget(check)
layout.setAlignment(QtCore.Qt.AlignCenter)
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
item.setLayout(layout)
item.setContentsMargins(0, 0, 0, 0)
self.tableMain.setCellWidget(row, self.stitch_col, item)
elif column == self.plot_col:
button = QtGui.QPushButton('Plot')
button.setProperty("row", row)
self.__reset_plot_button(button)
button.setToolTip('Plot the workspaces produced by processing this row.')
button.clicked.connect(self._plot_row)
item = QtGui.QWidget()
layout = QtGui.QHBoxLayout(item)
layout.addWidget(button)
layout.setAlignment(QtCore.Qt.AlignCenter)
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
item.setLayout(layout)
item.setContentsMargins(0, 0, 0, 0)
self.tableMain.setCellWidget(row, self.plot_col, item)
else:
item = QtGui.QTableWidgetItem()
item.setText('')
self.tableMain.setItem(row, column, item)
vis_state = settings.value(str(column), True, type=bool)
self.shown_cols[column] = vis_state
if vis_state:
self.tableMain.showColumn(column)
else:
self.tableMain.hideColumn(column)
settings.endGroup()
del settings
self.tableMain.resizeColumnsToContents()
self.mod_flag = False
def _connect_slots(self):
"""
Connect the signals to the corresponding methods
"""
self.checkTickAll.stateChanged.connect(self._set_all_stitch)
self.comboInstrument.activated[int].connect(self._instrument_selected)
self.comboPolarCorrect.activated.connect(self._polar_corr_selected)
self.textRB.returnPressed.connect(self._populate_runs_list)
self.buttonAuto.clicked.connect(self._autofill)
self.buttonSearch.clicked.connect(self._populate_runs_list)
self.buttonClear.clicked.connect(self._initialise_table)
self.buttonProcess.clicked.connect(self._process)
self.buttonTransfer.clicked.connect(self._transfer)
self.buttonColumns.clicked.connect(self._choose_columns)
self.actionOpen_Table.triggered.connect(self._load_table)
self.actionReload_from_Disk.triggered.connect(self._reload_table)
self.actionSave.triggered.connect(self._save)
self.actionSave_As.triggered.connect(self._save_as)
self.actionSave_Workspaces.triggered.connect(self._save_workspaces)
self.actionClose_Refl_Gui.triggered.connect(self.close)
self.actionMantid_Help.triggered.connect(self._show_help)
self.actionAutofill.triggered.connect(self._autofill)
self.actionSearch_RB.triggered.connect(self._populate_runs_list)
self.actionClear_Table.triggered.connect(self._initialise_table)
self.actionProcess.triggered.connect(self._process)
self.actionTransfer.triggered.connect(self._transfer)
self.tableMain.cellChanged.connect(self._table_modified)
self.actionClear.triggered.connect(self._clear_cells)
self.actionPaste.triggered.connect(self._paste_cells)
self.actionCut.triggered.connect(self._cut_cells)
self.actionCopy.triggered.connect(self._copy_cells)
self.actionChoose_Columns.triggered.connect(self._choose_columns)
self.actionRefl_Gui_Options.triggered.connect(self._options_dialog)
self.actionSlit_Calculator.triggered.connect(self._show_slit_calculator)
def __valid_rb(self):
# Ensure that you cannot put zero in for an rb search
rbSearchValidator = QtGui.QIntValidator(self)
current_text = self.textRB.text()
rbSearchValidator.setBottom(1)
state = rbSearchValidator.validate(current_text, 0)[0]
if state == QtGui.QValidator.Acceptable:
return True
else:
self.textRB.clear()
if current_text:
logger.warning("RB search restricted to numbers > 0")
return False
def _populate_runs_list(self):
"""
Populate the list at the right with names of runs and workspaces from the archives
"""
# Clear existing
self.listMain.clear()
if self.__valid_rb():
# Use ICAT for a journal search based on the RB number
active_session_id = None
if CatalogManager.numberActiveSessions() == 0:
# Execute the CatalogLoginDialog
login_alg = CatalogLoginDialog()
session_object = login_alg.getProperty("KeepAlive").value
active_session_id = session_object.getPropertyValue("Session")
# Fetch out an existing session id
active_session_id = CatalogManager.getActiveSessions()[-1].getSessionId()
# This might be another catalog session, but at present there is no way to tell.
search_alg = AlgorithmManager.create('CatalogGetDataFiles')
search_alg.initialize()
search_alg.setChild(True) # Keeps the results table out of the ADS
search_alg.setProperty('InvestigationId', str(self.textRB.text()))
search_alg.setProperty('Session', active_session_id)
search_alg.setPropertyValue('OutputWorkspace', '_dummy')
search_alg.execute()
search_results = search_alg.getProperty('OutputWorkspace').value
self.icat_file_map = {}
self.statusMain.clearMessage()
for row in search_results:
file_name = row['Name']
file_id = row['Id']
description = row['Description']
run_number = re.search(r'[1-9]\d+', file_name).group()
if bool(re.search('(raw)$', file_name, re.IGNORECASE)): # Filter to only display and map raw files.
title = (run_number + ': ' + description).strip()
self.icat_file_map[title] = (file_id, run_number, file_name)
self.listMain.addItem(title)
self.listMain.sortItems()
del search_results
def _autofill(self):
"""
copy the contents of the selected cells to the row below as long as the row below contains a run number in the first cell
"""
# make sure all selected cells are in the same row
sum = 0
howMany = len(self.tableMain.selectedItems())
for cell in self.tableMain.selectedItems():
sum = sum + self.tableMain.row(cell)
if howMany:
selectedrow = self.tableMain.row(self.tableMain.selectedItems()[0])
if sum / howMany == selectedrow:
startrow = selectedrow + 1
filled = 0
for cell in self.tableMain.selectedItems():
row = startrow
txt = cell.text()
while self.tableMain.item(row, 0).text() != '':
item = QtGui.QTableWidgetItem()
item.setText(txt)
self.tableMain.setItem(row, self.tableMain.column(cell), item)
row = row + 1
filled = filled + 1
if not filled:
QtGui.QMessageBox.critical(self.tableMain,
'Cannot perform Autofill',
"No target cells to autofill. Rows to be filled should contain a run number in their "
"first cell, and start from directly below the selected line.")
else:
QtGui.QMessageBox.critical(self.tableMain, 'Cannot perform Autofill',
"Selected cells must all be in the same row.")
else:
QtGui.QMessageBox.critical(self.tableMain, 'Cannot perform Autofill', "There are no source cells selected.")
def _clear_cells(self):
"""
Clear the selected area of data
"""
cells = self.tableMain.selectedItems()
for cell in cells:
column = cell.column()
if column < self.stitch_col:
cell.setText('')
def _cut_cells(self):
"""
copy the selected cells then clear the area
"""
self._copy_cells()
self._clear_cells()
def _copy_cells(self):
"""
        Copy the selected range of cells to the clipboard
"""
cells = self.tableMain.selectedItems()
        if not cells:
            logger.warning('Nothing to copy')
            return
# first discover the size of the selection and initialise a list
mincol = cells[0].column()
if mincol > self.scale_col:
logger.error("Cannot copy, all cells out of range")
return
maxrow = -1
maxcol = -1
minrow = cells[0].row()
for cell in reversed(range(len(cells))):
col = cells[cell].column()
if col < self.stitch_col:
maxcol = col
maxrow = cells[cell].row()
break
colsize = maxcol - mincol + 1
rowsize = maxrow - minrow + 1
selection = [['' for x in range(colsize)] for y in range(rowsize)]
# now fill that list
for cell in cells:
row = cell.row()
col = cell.column()
if col < self.stitch_col:
selection[row - minrow][col - mincol] = str(cell.text())
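        # Serialise the selection as tab-separated columns and newline-separated rows,
        # the plain-text layout spreadsheet clipboards expect.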
tocopy = ''
for y in range(rowsize):
for x in range(colsize):
if x > 0:
tocopy += '\t'
tocopy += selection[y][x]
if y < (rowsize - 1):
tocopy += '\n'
self.clip.setText(str(tocopy))
def _paste_cells(self):
"""
Paste the contents of the clipboard to the table at the selected position
"""
pastedtext = self.clip.text()
if not pastedtext:
logger.warning("Nothing to Paste")
return
selected = self.tableMain.selectedItems()
if not selected:
logger.warning("Cannot paste, no editable cells selected")
return
pasted = pastedtext.splitlines()
pastedcells = []
for row in pasted:
pastedcells.append(row.split('\t'))
pastedcols = len(pastedcells[0])
pastedrows = len(pastedcells)
if len(selected) > 1:
# discover the size of the selection
mincol = selected[0].column()
if mincol > self.scale_col:
logger.error("Cannot copy, all cells out of range")
return
minrow = selected[0].row()
# now fill that list
for cell in selected:
row = cell.row()
col = cell.column()
if col < self.stitch_col and (col - mincol) < pastedcols and (row - minrow) < pastedrows and len(
pastedcells[row - minrow]):
cell.setText(pastedcells[row - minrow][col - mincol])
elif selected:
# when only a single cell is selected, paste all the copied item up until the table limits
cell = selected[0]
currow = cell.row()
homecol = cell.column()
tablerows = self.tableMain.rowCount()
for row in pastedcells:
if len(row):
curcol = homecol
if currow < tablerows:
for col in row:
if curcol < self.stitch_col:
curcell = self.tableMain.item(currow, curcol)
curcell.setText(col)
curcol += 1
else:
# the row has hit the end of the editable cells
break
currow += 1
else:
# it's dropped off the bottom of the table
break
else:
logger.warning("Cannot paste, no editable cells selected")
def _transfer(self):
"""
Transfer run numbers to the table
"""
tup = ()
for idx in self.listMain.selectedItems():
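            # Journal entries are expected to be titled like "<run>: <description> th=<angle>"
            # (optionally with "dq/q=<value>"); fall back to a plain "<run>: <title>" split.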
split_title = re.split(":th=|th=|:|dq/q=", idx.text())
if len(split_title) < 3:
split_title = re.split(":", idx.text())
if len(split_title) < 2:
logger.warning('cannot transfer ' + idx.text() + ' title is not in the right form ')
continue
else:
theta = 0
split_title.append(theta) # Append a dummy theta value.
if len(split_title) < 4:
dqq = 0
split_title.append(dqq) # Append a dummy dq/q value.
tup = tup + (split_title,) # Tuple of lists containing (run number, title, theta, dq/q)
tupsort = sorted(tup, key=itemgetter(1, 2)) # now sorted by title then theta
row = 0
for _key, group in itertools.groupby(tupsort, lambda x: x[1]): # now group by title
col = 0
dqq = 0 # only one value of dqq per row
run_angle_pairs_of_title = list() # for storing run_angle pairs all with the same title
for object in group: # loop over all with equal title
run_no = object[0]
dqq = object[-1]
angle = object[-2]
run_angle_pairs_of_title.append((run_no, angle))
for angle_key, group in itertools.groupby(run_angle_pairs_of_title, lambda x: x[1]):
runnumbers = "+".join(["%s" % pair[0] for pair in group])
# set the runnumber
item = QtGui.QTableWidgetItem()
item.setText(str(runnumbers))
self.tableMain.setItem(row, col, item)
# Set the angle
item = QtGui.QTableWidgetItem()
item.setText(str(angle_key))
self.tableMain.setItem(row, col + 1, item)
# Set the transmission
item = QtGui.QTableWidgetItem()
item.setText(self.textRuns.text())
self.tableMain.setItem(row, col + 2, item)
col = col + 5
if col >= 11:
col = 0
# set dq/q
item = QtGui.QTableWidgetItem()
item.setText(str(dqq))
self.tableMain.setItem(row, 15, item)
row = row + 1
if self.__icat_download:
# If ICAT is being used for download, then files must be downloaded at the same time as they are transferred
contents = str(idx.text()).strip()
file_id, _runnumber, file_name = self.icat_file_map[contents]
active_session_id = CatalogManager.getActiveSessions()[-1].getSessionId()
# This might be another catalog session, but at present there is no way to tell.
save_location = config['defaultsave.directory']
CatalogDownloadDataFiles(file_id, FileNames=file_name, DownloadPath=save_location,
Session=active_session_id)
current_search_dirs = config.getDataSearchDirs()
if save_location not in current_search_dirs:
config.appendDataSearchDir(save_location)
def _set_all_stitch(self, state):
"""
Set the checkboxes in the Stitch? column to the same
"""
for row in range(self.tableMain.rowCount()):
self.tableMain.cellWidget(row, self.stitch_col).children()[1].setCheckState(state)
def __checked_row_stiched(self, row):
return self.tableMain.cellWidget(row, self.stitch_col).children()[1].checkState() > 0
def _process(self):
"""
        Process has been pressed; check what has been selected, then pass the selection (or the whole table) to quick
"""
# --------- If "Process" button pressed, convert raw files to IvsLam and IvsQ and combine if checkbox ticked -------------
_overallQMin = float("inf")
_overallQMax = float("-inf")
try:
willProcess = True
rows = self.tableMain.selectionModel().selectedRows()
rowIndexes = []
for idx in rows:
rowIndexes.append(idx.row())
if not len(rowIndexes):
reply = QtGui.QMessageBox.question(self.tableMain, 'Process all rows?',
"This will process all rows in the table. Continue?",
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
logger.notice("Cancelled!")
willProcess = False
else:
rowIndexes = range(self.tableMain.rowCount())
if willProcess:
for row in rowIndexes: # range(self.tableMain.rowCount()):
runno = []
wksp = []
overlapLow = []
overlapHigh = []
if self.tableMain.item(row, 0).text() != '':
self.statusMain.showMessage("Processing row: " + str(row + 1))
logger.debug("Processing row: " + str(row + 1))
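                        # Each table row holds up to three runs; run i occupies a block of five
                        # columns (run number, theta, transmission run, Qmin, Qmax) and the
                        # shared dq/q resolution sits in column 15.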
for i in range(3):
run_entry = str(self.tableMain.item(row, i * 5).text())
if run_entry != '':
runno.append(run_entry)
ovLow = str(self.tableMain.item(row, (i * 5) + 3).text())
if ovLow != '':
overlapLow.append(float(ovLow))
ovHigh = str(self.tableMain.item(row, (i * 5) + 4).text())
if ovHigh != '':
overlapHigh.append(float(ovHigh))
# Determine resolution
if self.tableMain.item(row, 15).text() == '':
loadedRun = None
if load_live_runs.is_live_run(runno[0]):
loadedRun = load_live_runs.get_live_data(config['default.instrument'],
frequency=self.live_freq,
accumulation=self.live_method)
else:
Load(Filename=runno[0], OutputWorkspace="_run")
loadedRun = mtd["_run"]
theta_in_str = str(self.tableMain.item(row, 1).text())
try:
theta_in = None
if len(theta_in_str) > 0:
theta_in = float(theta_in_str)
# Make sure we only ever run calculate resolution on a non-group workspace.
# If we're given a group workspace, we can just run it on the first member of the group instead
thetaRun = loadedRun
if isinstance(thetaRun, WorkspaceGroup):
thetaRun = thetaRun[0]
if not theta_in:
theta_in = getLogValue(thetaRun, "Theta")
dqq = NRCalculateSlitResolution(Workspace=thetaRun, TwoTheta=2*theta_in)
# Put the calculated resolution into the table
resItem = QtGui.QTableWidgetItem()
resItem.setText(str(dqq))
self.tableMain.setItem(row, 15, resItem)
# Update the value for theta_in in the table
ttItem = QtGui.QTableWidgetItem()
ttItem.setText(str(theta_in))
self.tableMain.setItem(row, 1, ttItem)
logger.notice("Calculated resolution: " + str(dqq))
except:
self.statusMain.clearMessage()
logger.error(
"Failed to calculate dq/q because we could not find theta in the workspace's sample log. "
"Try entering theta or dq/q manually.")
return
else:
dqq = float(self.tableMain.item(row, 15).text())
# Check secondary and tertiary theta_in columns, if they're
# blank and their corresponding run columns are set, fill them.
for run_col in [5, 10]:
tht_col = run_col + 1
run_val = str(self.tableMain.item(row, run_col).text())
tht_val = str(self.tableMain.item(row, tht_col).text())
if run_val and not tht_val:
Load(Filename=run_val, OutputWorkspace="_run")
loadedRun = mtd["_run"]
tht_val = getLogValue(loadedRun, "Theta")
if tht_val:
self.tableMain.item(row, tht_col).setText(str(tht_val))
# Populate runlist
first_wq = None
for i in range(0, len(runno)):
theta, qmin, qmax, _wlam, wqBinnedAndScaled, _wqUnBinnedAndUnScaled = \
self._do_run(runno[i], row, i)
if not first_wq:
first_wq = wqBinnedAndScaled # Cache the first Q workspace
theta = round(theta, 3)
qmin = round(qmin, 3)
qmax = round(qmax, 3)
wksp.append(wqBinnedAndScaled.name())
if self.tableMain.item(row, i * 5 + 1).text() == '':
item = QtGui.QTableWidgetItem()
item.setText(str(theta))
self.tableMain.setItem(row, i * 5 + 1, item)
if self.tableMain.item(row, i * 5 + 3).text() == '':
item = QtGui.QTableWidgetItem()
item.setText(str(qmin))
self.tableMain.setItem(row, i * 5 + 3, item)
overlapLow.append(qmin)
if self.tableMain.item(row, i * 5 + 4).text() == '':
item = QtGui.QTableWidgetItem()
item.setText(str(qmax))
self.tableMain.setItem(row, i * 5 + 4, item)
overlapHigh.append(qmax)
if wksp[i].find(',') > 0 or wksp[i].find(':') > 0:
wksp[i] = first_wq.name()
if self.__checked_row_stiched(row):
if len(runno) == 1:
logger.notice("Nothing to combine for processing row : " + str(row))
else:
w1 = getWorkspace(wksp[0])
w2 = getWorkspace(wksp[-1])
if len(runno) == 2:
outputwksp = runno[0] + '_' + runno[1][3:]
else:
outputwksp = runno[0] + '_' + runno[-1][3:]
# get Qmax
if self.tableMain.item(row, i * 5 + 4).text() == '':
overlapHigh = 0.3 * max(w1.readX(0))
Qmin = min(w1.readX(0))
Qmax = max(w2.readX(0))
if len(self.tableMain.item(row, i * 5 + 3).text()) > 0:
Qmin = float(self.tableMain.item(row, i * 5 + 3).text())
if len(self.tableMain.item(row, i * 5 + 4).text()) > 0:
Qmax = float(self.tableMain.item(row, i * 5 + 4).text())
if Qmax > _overallQMax:
_overallQMax = Qmax
if Qmin < _overallQMin:
_overallQMin = Qmin
combineDataMulti(wksp, outputwksp, overlapLow, overlapHigh,
_overallQMin, _overallQMax, -dqq, 1, keep=True,
scale_right=self.__scale_right)
# Enable the plot button
plotbutton = self.tableMain.cellWidget(row, self.plot_col).children()[1]
plotbutton.setProperty('runno', runno)
plotbutton.setProperty('overlapLow', overlapLow)
plotbutton.setProperty('overlapHigh', overlapHigh)
plotbutton.setProperty('wksp', wksp)
plotbutton.setEnabled(True)
self.statusMain.clearMessage()
self.accMethod = None
self.statusMain.clearMessage()
except:
self.statusMain.clearMessage()
raise
def _plot(self, plotbutton):
"""
Plot the row belonging to the selected button
"""
if not isinstance(plotbutton, QtGui.QPushButton):
logger.error("Problem accessing cached data: Wrong data type passed, expected QtGui.QPushbutton")
return
import unicodedata
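        # The Process step cached the run numbers, workspace names and overlap limits as
        # Qt properties on the plot button; retrieve them here before plotting.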
# make sure the required data can be retrieved properly
try:
runno_u = plotbutton.property('runno')
runno = []
for uni in runno_u:
runno.append(unicodedata.normalize('NFKD', uni).encode('ascii', 'ignore'))
wksp_u = plotbutton.property('wksp')
wksp = []
for uni in wksp_u:
wksp.append(unicodedata.normalize('NFKD', uni).encode('ascii', 'ignore'))
overlapLow = plotbutton.property('overlapLow')
overlapHigh = plotbutton.property('overlapHigh')
row = plotbutton.property('row')
wkspBinned = []
w1 = getWorkspace(wksp[0])
w2 = getWorkspace(wksp[len(wksp) - 1])
dqq = float(self.tableMain.item(row, 15).text())
except:
logger.error("Unable to plot row, required data couldn't be retrieved")
self.__reset_plot_button(plotbutton)
return
for i in range(len(runno)):
if len(overlapLow):
Qmin = overlapLow[0]
else:
Qmin = min(w1.readX(0))
if len(overlapHigh):
Qmax = overlapHigh[len(overlapHigh) - 1]
else:
Qmax = max(w2.readX(0))
ws_name_binned = wksp[i]
wkspBinned.append(ws_name_binned)
wsb = getWorkspace(ws_name_binned)
_Imin = min(wsb.readY(0))
_Imax = max(wsb.readY(0))
if canMantidPlot:
# Get the existing graph if it exists
base_graph = self.__graphs.get(wksp[0], None)
# Clear the window if we're the first of a new set of curves
clearWindow = (i == 0)
# Plot the new curve
base_graph = plotSpectrum(ws_name_binned, 0, True, window=base_graph, clearWindow=clearWindow)
# Save the graph so we can re-use it
self.__graphs[wksp[i]] = base_graph
titl = groupGet(ws_name_binned, 'samp', 'run_title')
if isinstance(titl, str):
base_graph.activeLayer().setTitle(titl)
base_graph.activeLayer().setAxisScale(Layer.Left, _Imin * 0.1, _Imax * 10, Layer.Log10)
base_graph.activeLayer().setAxisScale(Layer.Bottom, Qmin * 0.9, Qmax * 1.1, Layer.Log10)
base_graph.activeLayer().setAutoScale()
# Create and plot stitched outputs
if self.__checked_row_stiched(row):
if len(runno) == 2:
outputwksp = runno[0] + '_' + runno[1][3:]
else:
outputwksp = runno[0] + '_' + runno[2][3:]
if not getWorkspace(outputwksp, report_error=False):
# Stitching has not been done as part of processing, so we need to do it here.
combineDataMulti(wkspBinned, outputwksp, overlapLow, overlapHigh, Qmin, Qmax, -dqq, 1,
keep=True, scale_right=self.__scale_right)
Qmin = min(getWorkspace(outputwksp).readX(0))
Qmax = max(getWorkspace(outputwksp).readX(0))
if canMantidPlot:
stitched_graph = self.__graphs.get(outputwksp, None)
stitched_graph = plotSpectrum(outputwksp, 0, True, window=stitched_graph, clearWindow=True)
titl = groupGet(outputwksp, 'samp', 'run_title')
stitched_graph.activeLayer().setTitle(titl)
stitched_graph.activeLayer().setAxisScale(Layer.Left, 1e-8, 100.0, Layer.Log10)
stitched_graph.activeLayer().setAxisScale(Layer.Bottom, Qmin * 0.9, Qmax * 1.1, Layer.Log10)
self.__graphs[outputwksp] = stitched_graph
def __name_trans(self, transrun):
"""
From a comma or colon separated string of run numbers
construct an output workspace name for the transmission workspace that fits the form
TRANS_{trans_1}_{trans_2}
"""
if bool(re.search("^(TRANS)", transrun)):
# The user has deliberately tried to supply the transmission run directly
return transrun
else:
split_trans = re.split(',|:', transrun)
if len(split_trans) == 0:
return None
name = 'TRANS'
for t in split_trans:
name += '_' + str(t)
return name
def _do_run(self, runno, row, which):
"""
Run quick on the given run and row
"""
transrun = str(self.tableMain.item(row, (which * 5) + 2).text())
# Formulate a WS Name for the processed transmission run.
transrun_named = self.__name_trans(transrun)
# Look for existing transmission workspaces that match the name
transmission_ws = None
if mtd.doesExist(transrun_named):
if isinstance(mtd[transrun_named], WorkspaceGroup):
unit = mtd[transrun_named][0].getAxis(0).getUnit().unitID()
else:
unit = mtd[transrun_named].getAxis(0).getUnit().unitID()
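        # Only reuse an existing transmission workspace if it has already been converted
        # to wavelength; anything still in TOF is reprocessed further down.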
if unit == "Wavelength":
logger.notice('Reusing transmission workspace ' + transrun_named)
transmission_ws = mtd[transrun_named]
angle_str = str(self.tableMain.item(row, which * 5 + 1).text())
if len(angle_str) > 0:
angle = float(angle_str)
else:
angle = None
loadedRun = runno
if load_live_runs.is_live_run(runno):
load_live_runs.get_live_data(config['default.instrument'], frequency=self.live_freq,
accumulation=self.live_method)
wlam, wq, th, wqBinned = None, None, None, None
# Only make a transmission workspace if we need one.
if transrun and not transmission_ws:
converter = ConvertToWavelength(transrun)
size = converter.get_ws_list_size()
out_ws_name = transrun_named
if size == 1:
trans1 = converter.get_workspace_from_list(0)
transmission_ws = CreateTransmissionWorkspaceAuto(FirstTransmissionRun=trans1,
OutputWorkspace=out_ws_name,
Params=0.02, StartOverlap=10.0, EndOverlap=12.0,
Version=1)
elif size == 2:
trans1 = converter.get_workspace_from_list(0)
trans2 = converter.get_workspace_from_list(1)
transmission_ws = CreateTransmissionWorkspaceAuto(FirstTransmissionRun=trans1,
OutputWorkspace=out_ws_name,
SecondTransmissionRun=trans2, Params=0.02,
StartOverlap=10.0, EndOverlap=12.0, Version=1)
else:
raise RuntimeError("Up to 2 transmission runs can be specified. No more than that.")
# Load the runs required ConvertToWavelength will deal with the transmission runs, while .to_workspace will deal with the run itself
ws = ConvertToWavelength.to_workspace(loadedRun, ws_prefix="")
if self.__alg_use:
if self.tableMain.item(row, self.scale_col).text():
factor = float(self.tableMain.item(row, self.scale_col).text())
else:
factor = 1.0
if self.tableMain.item(row, 15).text():
Qstep = float(self.tableMain.item(row, 15).text())
else:
Qstep = None
if len(self.tableMain.item(row, which * 5 + 3).text()) > 0:
Qmin = float(self.tableMain.item(row, which * 5 + 3).text())
else:
Qmin = None
if len(self.tableMain.item(row, which * 5 + 4).text()) > 0:
Qmax = float(self.tableMain.item(row, which * 5 + 4).text())
else:
Qmax = None
# If we're dealing with a workspace group, we'll manually map execution over each group member
# We do this so we can get ThetaOut correctly (see ticket #10597 for why we can't at the moment)
if isinstance(ws, WorkspaceGroup):
wqGroupBinned = []
wqGroup = []
wlamGroup = []
thetaGroup = []
group_trans_ws = transmission_ws
for i in range(0, ws.size()):
# If the transmission workspace is a group, we'll use it pair-wise with the tof workspace group
if isinstance(transmission_ws, WorkspaceGroup):
group_trans_ws = transmission_ws[i]
alg = AlgorithmManager.create("ReflectometryReductionOneAuto")
alg.initialize()
alg.setProperty("InputWorkspace", ws[i])
if group_trans_ws:
alg.setProperty("FirstTransmissionRun", group_trans_ws)
if angle is not None:
alg.setProperty("ThetaIn", angle)
alg.setProperty("OutputWorkspaceBinned", runno + '_IvsQ_binned_' + str(i + 1))
alg.setProperty("OutputWorkspace", runno + '_IvsQ_' + str(i + 1))
alg.setProperty("OutputWorkspaceWavelength", runno + '_IvsLam_' + str(i + 1))
alg.setProperty("ScaleFactor", factor)
if Qstep is not None:
alg.setProperty("MomentumTransferStep", Qstep)
if Qmin is not None:
alg.setProperty("MomentumTransferMin", Qmin)
if Qmax is not None:
alg.setProperty("MomentumTransferMax", Qmax)
alg.execute()
wqBinned = mtd[runno + '_IvsQ_binned_' + str(i + 1)]
wq = mtd[runno + '_IvsQ_' + str(i + 1)]
wlam = mtd[runno + '_IvsLam_' + str(i + 1)]
th = alg.getProperty("ThetaIn").value
wqGroupBinned.append(wqBinned)
wqGroup.append(wq)
wlamGroup.append(wlam)
thetaGroup.append(th)
wqBinned = GroupWorkspaces(InputWorkspaces=wqGroupBinned, OutputWorkspace=runno + '_IvsQ_binned')
wq = GroupWorkspaces(InputWorkspaces=wqGroup, OutputWorkspace=runno + '_IvsQ')
wlam = GroupWorkspaces(InputWorkspaces=wlamGroup, OutputWorkspace=runno + '_IvsLam')
th = thetaGroup[0]
else:
alg = AlgorithmManager.create("ReflectometryReductionOneAuto")
alg.initialize()
alg.setProperty("InputWorkspace", ws)
if transmission_ws:
alg.setProperty("FirstTransmissionRun", transmission_ws)
if angle is not None:
alg.setProperty("ThetaIn", angle)
alg.setProperty("OutputWorkspaceBinned", runno + '_IvsQ_binned')
alg.setProperty("OutputWorkspace", runno + '_IvsQ')
alg.setProperty("OutputWorkspaceWavelength", runno + '_IvsLam')
alg.setProperty("ScaleFactor", factor)
if Qstep is not None:
alg.setProperty("MomentumTransferStep", Qstep)
if Qmin is not None:
alg.setProperty("MomentumTransferMin", Qmin)
if Qmax is not None:
alg.setProperty("MomentumTransferMax", Qmax)
alg.execute()
wqBinned = mtd[runno + '_IvsQ_binned']
wq = mtd[runno + '_IvsQ']
wlam = mtd[runno + '_IvsLam']
th = alg.getProperty("ThetaIn").value
cleanup()
else:
wlam, wq, th = quick(loadedRun, trans=transmission_ws, theta=angle, tof_prefix="")
if self.__group_tof_workspaces and not isinstance(ws, WorkspaceGroup):
if "TOF" in mtd:
tof_group = mtd["TOF"]
if not tof_group.contains(loadedRun):
tof_group.add(loadedRun)
else:
tof_group = GroupWorkspaces(InputWorkspaces=loadedRun, OutputWorkspace="TOF")
if ':' in runno:
runno = runno.split(':')[0]
if ',' in runno:
runno = runno.split(',')[0]
if isinstance(wq, WorkspaceGroup):
inst = wq[0].getInstrument()
else:
inst = wq.getInstrument()
lmin = inst.getNumberParameter('LambdaMin')[0]
lmax = inst.getNumberParameter('LambdaMax')[0]
qmin = 4 * math.pi / lmax * math.sin(th * math.pi / 180)
qmax = 4 * math.pi / lmin * math.sin(th * math.pi / 180)
return th, qmin, qmax, wlam, wqBinned, wq
def _save_table_contents(self, filename):
"""
Save the contents of the table
"""
try:
writer = csv.writer(open(filename, "wb"))
for row in range(self.tableMain.rowCount()):
rowtext = []
for column in range(self.tableMain.columnCount() - 2):
rowtext.append(self.tableMain.item(row, column).text())
if len(rowtext) > 0:
writer.writerow(rowtext)
self.current_table = filename
logger.notice("Saved file to " + filename)
self.mod_flag = False
except:
return False
self.mod_flag = False
return True
def _save(self, failsave=False):
"""
        Save the table, showing no dialog unless necessary. This also provides the emergency-save functionality used when the GUI encounters an error.
"""
filename = ''
if failsave:
# this is an emergency autosave as the program is failing
            logger.error(
                "The ISIS Reflectometry GUI has encountered an error; it will now attempt to save a copy of your work.")
msgBox = QtGui.QMessageBox()
            msgBox.setText(
                "The ISIS Reflectometry GUI has encountered an error; it will now attempt to save a copy of your work.\n"
                "Please check the log for details.")
msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.setEscapeButton(QtGui.QMessageBox.Ok)
msgBox.exec_()
import datetime
failtime = datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S')
if self.current_table:
filename = self.current_table.rsplit('.', 1)[0] + "_recovered_" + failtime + ".tbl"
else:
mantidDefault = config['defaultsave.directory']
if os.path.exists(mantidDefault):
filename = os.path.join(mantidDefault, "mantid_reflectometry_recovered_" + failtime + ".tbl")
else:
import tempfile
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, "mantid_reflectometry_recovered_" + failtime + ".tbl")
else:
# this is a save-on-quit or file->save
if self.current_table:
filename = self.current_table
else:
saveDialog = QtGui.QFileDialog(self.widgetMainRow.parent(), "Save Table")
saveDialog.setFileMode(QtGui.QFileDialog.AnyFile)
saveDialog.setNameFilter("Table Files (*.tbl);;All files (*)")
saveDialog.setDefaultSuffix("tbl")
saveDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
if saveDialog.exec_():
filename = saveDialog.selectedFiles()[0]
else:
return False
return self._save_table_contents(filename)
def _save_as(self):
"""
show the save as dialog and save to a .tbl file with that name
"""
saveDialog = QtGui.QFileDialog(self.widgetMainRow.parent(), "Save Table")
saveDialog.setFileMode(QtGui.QFileDialog.AnyFile)
saveDialog.setNameFilter("Table Files (*.tbl);;All files (*)")
saveDialog.setDefaultSuffix("tbl")
saveDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
if saveDialog.exec_():
filename = saveDialog.selectedFiles()[0]
self._save_table_contents(filename)
def _load_table(self):
"""
Load a .tbl file from disk
"""
self.loading = True
loadDialog = QtGui.QFileDialog(self.widgetMainRow.parent(), "Open Table")
loadDialog.setFileMode(QtGui.QFileDialog.ExistingFile)
loadDialog.setNameFilter("Table Files (*.tbl);;All files (*)")
if loadDialog.exec_():
try:
# before loading make sure you give them a chance to save
if self.mod_flag:
ret, _saved = self._save_check()
if ret == QtGui.QMessageBox.RejectRole:
# if they hit cancel abort the load
self.loading = False
return
self._reset_table()
filename = loadDialog.selectedFiles()[0]
self.current_table = filename
reader = csv.reader(open(filename, "rb"))
row = 0
for line in reader:
if row < 100:
for column in range(self.tableMain.columnCount() - 2):
item = QtGui.QTableWidgetItem()
item.setText(line[column])
self.tableMain.setItem(row, column, item)
row = row + 1
except:
logger.error('Could not load file: ' + str(filename) + '. File not found or unable to read from file.')
self.loading = False
self.mod_flag = False
def _reload_table(self):
"""
Reload the last loaded file from disk, replacing anything in the table already
"""
self.loading = True
filename = self.current_table
if filename:
if self.mod_flag:
msgBox = QtGui.QMessageBox()
msgBox.setText(
"The table has been modified. Are you sure you want to reload the table and lose your changes?")
msgBox.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
msgBox.setIcon(QtGui.QMessageBox.Question)
msgBox.setDefaultButton(QtGui.QMessageBox.Yes)
msgBox.setEscapeButton(QtGui.QMessageBox.No)
ret = msgBox.exec_()
if ret == QtGui.QMessageBox.No:
# if they hit No abort the reload
self.loading = False
return
try:
self._reset_table()
reader = csv.reader(open(filename, "rb"))
row = 0
for line in reader:
if row < 100:
for column in range(self.tableMain.columnCount() - 2):
item = QtGui.QTableWidgetItem()
item.setText(line[column])
self.tableMain.setItem(row, column, item)
row = row + 1
self.mod_flag = False
except:
logger.error('Could not load file: ' + str(filename) + '. File not found or unable to read from file.')
else:
logger.notice('No file in table to reload.')
self.loading = False
def _save_workspaces(self):
"""
        Shows the export dialog for saving workspaces to non-Mantid formats
"""
try:
Dialog = QtGui.QDialog()
u = Ui_SaveWindow()
u.setupUi(Dialog)
Dialog.exec_()
except Exception as ex:
logger.notice("Could not open save workspace dialog")
logger.notice(str(ex))
def _options_dialog(self):
"""
Shows the dialog for setting options regarding live data
"""
try:
dialog_controller = ReflOptions(def_method=self.live_method, def_freq=self.live_freq,
def_alg_use=self.__alg_use,
def_icat_download=self.__icat_download,
def_group_tof_workspaces=self.__group_tof_workspaces,
def_stitch_right=self.__scale_right)
if dialog_controller.exec_():
# Fetch the settings back off the controller
self.live_freq = dialog_controller.frequency()
self.live_method = dialog_controller.method()
self.__alg_use = dialog_controller.useAlg()
self.__icat_download = dialog_controller.icatDownload()
self.__group_tof_workspaces = dialog_controller.groupTOFWorkspaces()
self.__scale_right = dialog_controller.stitchRight()
# Persist the settings
settings = QtCore.QSettings()
settings.beginGroup(self.__live_data_settings)
settings.setValue(self.__live_data_frequency_key, self.live_freq)
settings.setValue(self.__live_data_method_key, self.live_method)
settings.endGroup()
settings.beginGroup(self.__generic_settings)
settings.setValue(self.__ads_use_key, self.__alg_use)
settings.setValue(self.__icat_download_key, self.__icat_download)
settings.setValue(self.__group_tof_workspaces_key, self.__group_tof_workspaces)
settings.setValue(self.__stitch_right_key, self.__scale_right)
settings.endGroup()
del settings
except Exception as ex:
logger.notice("Problem opening options dialog or problem retrieving values from dialog")
logger.notice(str(ex))
def _choose_columns(self):
"""
shows the choose columns dialog for hiding and revealing of columns
"""
try:
dialog = ReflChoose(self.shown_cols, self.tableMain)
if dialog.exec_():
settings = QtCore.QSettings()
settings.beginGroup(self.__column_settings)
for key, value in dialog.visiblestates.iteritems():
self.shown_cols[key] = value
settings.setValue(str(key), value)
if value:
self.tableMain.showColumn(key)
else:
self.tableMain.hideColumn(key)
settings.endGroup()
del settings
except Exception as ex:
logger.notice("Could not open choose columns dialog")
logger.notice(str(ex))
def _show_help(self):
"""
Launches the wiki page for this interface
"""
import webbrowser
webbrowser.open('http://www.mantidproject.org/ISIS_Reflectometry_GUI')
def getLogValue(wksp, field=''):
"""
returns the last value from a sample log
"""
ws = getWorkspace(wksp)
log = ws.getRun().getLogData(field).value
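    # Time-series logs hold an array of values; return only the most recent entry.
    # Scalar (int/str) logs are returned as-is.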
if isinstance(log, int) or isinstance(log, str):
return log
else:
return log[-1]
def getWorkspace(wksp, report_error=True):
"""
Gets the first workspace associated with the given string. Does not load.
"""
if isinstance(wksp, Workspace):
return wksp
elif isinstance(wksp, str):
exists = mtd.doesExist(wksp)
if not exists:
if report_error:
logger.error("Unable to get workspace: " + str(wksp))
return exists # Doesn't exist
else:
return exists # Doesn't exist
elif isinstance(mtd[wksp], WorkspaceGroup):
wout = mtd[wksp][0]
else:
wout = mtd[wksp]
return wout
| dymkowsk/mantid | scripts/Interface/ui/reflectometer/refl_gui.py | Python | gpl-3.0 | 64,194 |
from discord.ext import commands
import discord.utils
def is_owner_check(ctx):
author = str(ctx.message.author)
owner = ctx.bot.config['master']
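    # str(author) gives the "Name#discriminator" form, which is compared against the
    # 'master' entry of the bot's config to identify the owner.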
return author == owner
def is_owner():
return commands.check(is_owner_check)
def check_permissions(ctx, perms):
#if is_owner_check(ctx):
# return True
if not perms:
return False
ch = ctx.message.channel
author = ctx.message.author
resolved = ch.permissions_for(author)
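    # Every requested permission flag must exactly match the member's resolved
    # permissions for this channel.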
return all(getattr(resolved, name, None) == value for name, value in perms.items())
def role_or_permissions(ctx, check, **perms):
if check_permissions(ctx, perms):
return True
ch = ctx.message.channel
author = ctx.message.author
if ch.is_private:
return False # can't have roles in PMs
role = discord.utils.find(check, author.roles)
return role is not None
def serverowner_or_permissions(**perms):
def predicate(ctx):
owner = ctx.message.server.owner
if ctx.message.author.id == owner.id:
return True
return check_permissions(ctx,perms)
return commands.check(predicate)
def serverowner():
return serverowner_or_permissions()
def check_wantchannel(ctx):
if ctx.message.server is None:
return False
channel = ctx.message.channel
server = ctx.message.server
try:
want_channels = ctx.bot.server_dict[server]['want_channel_list']
except KeyError:
return False
if channel in want_channels:
return True
def check_citychannel(ctx):
if ctx.message.server is None:
return False
channel = ctx.message.channel.name
server = ctx.message.server
try:
city_channels = ctx.bot.server_dict[server]['city_channels'].keys()
except KeyError:
return False
if channel in city_channels:
return True
def check_raidchannel(ctx):
if ctx.message.server is None:
return False
channel = ctx.message.channel
server = ctx.message.server
try:
raid_channels = ctx.bot.server_dict[server]['raidchannel_dict'].keys()
except KeyError:
return False
if channel in raid_channels:
return True
def check_eggchannel(ctx):
if ctx.message.server is None:
return False
channel = ctx.message.channel
server = ctx.message.server
try:
type = ctx.bot.server_dict[server]['raidchannel_dict'][channel]['type']
except KeyError:
return False
if type == 'egg':
return True
def check_raidactive(ctx):
if ctx.message.server is None:
return False
channel = ctx.message.channel
server = ctx.message.server
try:
return ctx.bot.server_dict[server]['raidchannel_dict'][channel]['active']
except KeyError:
return False
def check_raidset(ctx):
if ctx.message.server is None:
return False
server = ctx.message.server
try:
return ctx.bot.server_dict[server]['raidset']
except KeyError:
return False
def check_wildset(ctx):
if ctx.message.server is None:
return False
server = ctx.message.server
try:
return ctx.bot.server_dict[server]['wildset']
except KeyError:
return False
def check_wantset(ctx):
if ctx.message.server is None:
return False
server = ctx.message.server
try:
return ctx.bot.server_dict[server]['wantset']
except KeyError:
return False
def check_teamset(ctx):
if ctx.message.server is None:
return False
server = ctx.message.server
try:
return ctx.bot.server_dict[server]['team']
except KeyError:
return False
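# The factories below wrap the corresponding check_* helpers as command decorators
# via discord.ext.commands.check().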
def teamset():
def predicate(ctx):
return check_teamset(ctx)
return commands.check(predicate)
def wantset():
def predicate(ctx):
return check_wantset(ctx)
return commands.check(predicate)
def wildset():
def predicate(ctx):
return check_wildset(ctx)
return commands.check(predicate)
def raidset():
def predicate(ctx):
return check_raidset(ctx)
return commands.check(predicate)
def citychannel():
def predicate(ctx):
return check_citychannel(ctx)
return commands.check(predicate)
def wantchannel():
def predicate(ctx):
if check_wantset(ctx):
return check_wantchannel(ctx)
return commands.check(predicate)
def raidchannel():
def predicate(ctx):
return check_raidchannel(ctx)
return commands.check(predicate)
def notraidchannel():
def predicate(ctx):
return not check_raidchannel(ctx)
return commands.check(predicate)
def activeraidchannel():
def predicate(ctx):
if check_raidchannel(ctx):
return check_raidactive(ctx)
return commands.check(predicate)
def cityraidchannel():
def predicate(ctx):
if check_raidchannel(ctx) == True:
return True
elif check_citychannel(ctx) == True:
return True
return commands.check(predicate)
def cityeggchannel():
def predicate(ctx):
if check_raidchannel(ctx) == True:
if check_eggchannel(ctx) == True:
return True
elif check_citychannel(ctx) == True:
return True
return commands.check(predicate)
| Jonqora/whiskers | checks.py | Python | gpl-3.0 | 5,290 |
# -*- coding: utf-8 -*-
"""
nwdiag.sphinx_ext
~~~~~~~~~~~~~~~~~~~~
Allow nwdiag-formatted diagrams to be included in Sphinx-generated
documents inline.
:copyright: Copyright 2010 by Takeshi Komiya.
:license: BSDL.
"""
from __future__ import absolute_import
import os
import re
import traceback
from collections import namedtuple
from docutils import nodes
from sphinx import addnodes
from sphinx.util.osutil import ensuredir
import nwdiag.utils.rst.nodes
import nwdiag.utils.rst.directives
from blockdiag.utils.bootstrap import detectfont
from blockdiag.utils.compat import u, string_types
from blockdiag.utils.fontmap import FontMap
# fontconfig; it will be initialized on `builder-inited` event.
fontmap = None
class nwdiag_node(nwdiag.utils.rst.nodes.nwdiag):
def to_drawer(self, image_format, builder, **kwargs):
if 'filename' in kwargs:
filename = kwargs.pop('filename')
else:
filename = self.get_abspath(image_format, builder)
antialias = builder.config.nwdiag_antialias
image = super(nwdiag_node, self).to_drawer(image_format, filename, fontmap,
antialias=antialias, **kwargs)
for node in image.diagram.traverse_nodes():
node.href = resolve_reference(builder, node.href)
return image
def get_relpath(self, image_format, builder):
options = dict(antialias=builder.config.nwdiag_antialias,
fontpath=builder.config.nwdiag_fontpath,
fontmap=builder.config.nwdiag_fontmap,
format=image_format)
outputdir = getattr(builder, 'imgpath', builder.outdir)
return os.path.join(outputdir, self.get_path(**options))
def get_abspath(self, image_format, builder):
options = dict(antialias=builder.config.nwdiag_antialias,
fontpath=builder.config.nwdiag_fontpath,
fontmap=builder.config.nwdiag_fontmap,
format=image_format)
if hasattr(builder, 'imgpath'):
outputdir = os.path.join(builder.outdir, '_images')
else:
outputdir = builder.outdir
path = os.path.join(outputdir, self.get_path(**options))
ensuredir(os.path.dirname(path))
return path
class Nwdiag(nwdiag.utils.rst.directives.NwdiagDirective):
node_class = nwdiag_node
def node2image(self, node, diagram):
return node
def resolve_reference(builder, href):
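    # Hrefs written as ":ref:`label`" are resolved through Sphinx's std domain into a
    # document-relative URI (or a "#id" anchor); any other href is returned unchanged.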
if href is None:
return None
pattern = re.compile(u("^:ref:`(.+?)`"), re.UNICODE)
matched = pattern.search(href)
if matched is None:
return href
else:
refid = matched.group(1)
domain = builder.env.domains['std']
node = addnodes.pending_xref(refexplicit=False)
xref = domain.resolve_xref(builder.env, builder.current_docname, builder,
'ref', refid, node, node)
if xref:
if 'refid' in xref:
return "#" + xref['refid']
else:
return xref['refuri']
else:
builder.warn('undefined label: %s' % refid)
return None
def html_render_svg(self, node):
image = node.to_drawer('SVG', self.builder, filename=None, nodoctype=True)
image.draw()
if 'align' in node['options']:
align = node['options']['align']
self.body.append('<div align="%s" class="align-%s">' % (align, align))
self.context.append('</div>\n')
else:
self.body.append('<div>')
self.context.append('</div>\n')
# reftarget
for node_id in node['ids']:
self.body.append('<span id="%s"></span>' % node_id)
# resize image
size = image.pagesize().resize(**node['options'])
self.body.append(image.save(size))
self.context.append('')
def html_render_clickablemap(self, image, width_ratio, height_ratio):
href_nodes = [node for node in image.nodes if node.href]
if not href_nodes:
return
self.body.append('<map name="map_%d">' % id(image))
for node in href_nodes:
x1, y1, x2, y2 = image.metrics.cell(node)
x1 *= width_ratio
x2 *= width_ratio
y1 *= height_ratio
y2 *= height_ratio
areatag = '<area shape="rect" coords="%s,%s,%s,%s" href="%s">' % (x1, y1, x2, y2, node.href)
self.body.append(areatag)
self.body.append('</map>')
def html_render_png(self, node):
image = node.to_drawer('PNG', self.builder)
if not os.path.isfile(image.filename):
image.draw()
image.save()
# align
if 'align' in node['options']:
align = node['options']['align']
self.body.append('<div align="%s" class="align-%s">' % (align, align))
self.context.append('</div>\n')
else:
self.body.append('<div>')
self.context.append('</div>')
# link to original image
relpath = node.get_relpath('PNG', self.builder)
if 'width' in node['options'] or 'height' in node['options'] or 'scale' in node['options']:
self.body.append('<a class="reference internal image-reference" href="%s">' % relpath)
self.context.append('</a>')
else:
self.context.append('')
# <img> tag
original_size = image.pagesize()
resized = original_size.resize(**node['options'])
img_attr = dict(src=relpath,
width=resized.width,
height=resized.height)
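    # If any diagram node carries a hyperlink, attach an HTML image map so the regions
    # stay clickable in the rasterised PNG output.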
if any(node.href for node in image.nodes):
img_attr['usemap'] = "#map_%d" % id(image)
width_ratio = float(resized.width) / original_size.width
height_ratio = float(resized.height) / original_size.height
html_render_clickablemap(self, image, width_ratio, height_ratio)
if 'alt' in node['options']:
img_attr['alt'] = node['options']['alt']
self.body.append(self.starttag(node, 'img', '', empty=True, **img_attr))
def html_visit_nwdiag(self, node):
try:
image_format = get_image_format_for(self.builder)
if image_format.upper() == 'SVG':
html_render_svg(self, node)
else:
html_render_png(self, node)
except UnicodeEncodeError:
if self.builder.config.nwdiag_debug:
traceback.print_exc()
msg = ("nwdiag error: UnicodeEncodeError caught "
"(check your font settings)")
self.builder.warn(msg)
raise nodes.SkipNode
except Exception as exc:
if self.builder.config.nwdiag_debug:
traceback.print_exc()
self.builder.warn('dot code %r: %s' % (node['code'], str(exc)))
raise nodes.SkipNode
def html_depart_nwdiag(self, node):
self.body.append(self.context.pop())
self.body.append(self.context.pop())
def get_image_format_for(builder):
if builder.format == 'html':
image_format = builder.config.nwdiag_html_image_format.upper()
elif builder.format == 'latex':
if builder.config.nwdiag_tex_image_format:
image_format = builder.config.nwdiag_tex_image_format.upper()
else:
image_format = builder.config.nwdiag_latex_image_format.upper()
else:
image_format = 'PNG'
if image_format.upper() not in ('PNG', 'PDF', 'SVG'):
raise ValueError('unknown format: %s' % image_format)
if image_format.upper() == 'PDF':
try:
import reportlab # NOQA: importing test
except ImportError:
raise ImportError('Could not output PDF format. Install reportlab.')
return image_format
def on_builder_inited(self):
# show deprecated message
if self.builder.config.nwdiag_tex_image_format:
self.builder.warn('nwdiag_tex_image_format is deprecated. Use nwdiag_latex_image_format.')
# initialize fontmap
global fontmap
try:
fontmappath = self.builder.config.nwdiag_fontmap
fontmap = FontMap(fontmappath)
except:
fontmap = FontMap(None)
try:
fontpath = self.builder.config.nwdiag_fontpath
if isinstance(fontpath, string_types):
fontpath = [fontpath]
if fontpath:
config = namedtuple('Config', 'font')(fontpath)
fontpath = detectfont(config)
fontmap.set_default_font(fontpath)
except:
pass
def on_doctree_resolved(self, doctree, docname):
if self.builder.format == 'html':
return
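    # For non-HTML builders, render each nwdiag node to an image file now and replace
    # the node with a standard docutils image node.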
try:
image_format = get_image_format_for(self.builder)
except Exception as exc:
if self.builder.config.nwdiag_debug:
traceback.print_exc()
self.builder.warn('nwdiag error: %s' % exc)
for node in doctree.traverse(nwdiag_node):
node.parent.remove(node)
return
for node in doctree.traverse(nwdiag_node):
try:
relfn = node.get_relpath(image_format, self.builder)
image = node.to_drawer(image_format, self.builder)
if not os.path.isfile(image.filename):
image.draw()
image.save()
image = nodes.image(uri=image.filename, candidates={'*': relfn}, **node['options'])
node.parent.replace(node, image)
except Exception as exc:
if self.builder.config.nwdiag_debug:
traceback.print_exc()
self.builder.warn('dot code %r: %s' % (node['code'], str(exc)))
node.parent.remove(node)
def setup(app):
app.add_node(nwdiag_node,
html=(html_visit_nwdiag, html_depart_nwdiag))
app.add_directive('nwdiag', Nwdiag)
app.add_config_value('nwdiag_fontpath', None, 'html')
app.add_config_value('nwdiag_fontmap', None, 'html')
app.add_config_value('nwdiag_antialias', False, 'html')
app.add_config_value('nwdiag_debug', False, 'html')
app.add_config_value('nwdiag_html_image_format', 'PNG', 'html')
app.add_config_value('nwdiag_tex_image_format', None, 'html') # backward compatibility for 0.6.1
app.add_config_value('nwdiag_latex_image_format', 'PNG', 'html')
app.connect("builder-inited", on_builder_inited)
app.connect("doctree-resolved", on_doctree_resolved)
| bboalimoe/ndn-cache-policy | docs/sphinx-contrib/nwdiag/sphinxcontrib/nwdiag.py | Python | gpl-3.0 | 10,218 |
#!/usr/bin/python
import sys
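# Naive doubly-recursive Fibonacci (fib(1) == 0, fib(2) == 1); exponential time,
# so it is only practical for small terms.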
def fib(n):
if(n<=2):
return (n-1)
else:
return fib(n-1)+fib(n-2)
if ( len(sys.argv) == 2 ):
print fib(int(sys.argv[1]))
else:
print "Usage : "+sys.argv[0]+" <term required>"
| simula67/Coding | python/AAD PoC/naive_fibinocci.py | Python | gpl-3.0 | 241 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Tiger Soldier
#
# This file is part of OSD Lyrics.
#
# OSD Lyrics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OSD Lyrics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OSD Lyrics. If not, see <http://www.gnu.org/licenses/>.
#/
import consts
import urlparse
import urllib
import os.path
from errors import PatternException
def expand_file(pattern, metadata):
"""
    Expands the pattern to a file name according to the information of a music track.
    The following placeholders are supported in the pattern:
- %t: Title of the track. 'title' in metadata
- %p: Performer (artist) of the music. 'artist' in metadata
- %a: Album of the music. 'album' in metadata
- %n: Track number of the music. 'tracknumber' in metadata
- %f: Filename without extension of the music. 'location' in metadata.
- %%: The `%' punctuation
Arguments:
- `pattern`: The pattern to expand.
- `metadata`: A dict representing metadata. Useful keys are listed above.
    If the pattern cannot be expanded, raise a PatternException. Otherwise
    return the expanded pattern.
>>> metadata = {'artist': 'Foo',
... 'title': 'Bar',
... 'tracknumber': '1',
... 'album': 'Album',
... 'location': 'file:///%E6%AD%8C%E6%9B%B2/%E7%9A%84/%E5%9C%B0%E5%9D%80.mp3'}
>>> expand_file('%p - %t', metadata)
'Foo - Bar'
>>> expand_file('foobar', metadata)
'foobar'
>>> print expand_file('name is %f :)', metadata)
name is 地址 :)
>>> expand_file('%something else', metadata)
'%something else'
>>> expand_file('%%a - %%t', metadata)
'%a - %t'
>>> expand_file('%%%', metadata)
'%%'
>>> expand_file('%n - %a:%p,%t', metadata)
'1 - Album:Foo,Bar'
>>> expand_file('%t', {})
Traceback (most recent call last):
...
PatternException: 'title not in metadata'
"""
keys = {'t': 'title',
'p': 'artist',
'a': 'album',
'n': 'tracknum',
}
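    # Walk the pattern, copying literal text verbatim and substituting recognised
    # %-tags with values taken from the metadata.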
start = 0
parts = []
while start < len(pattern):
end = pattern.find('%', start)
if end > -1:
parts.append(pattern[start:end])
has_tag = False
if end + 1 < len(pattern):
tag = pattern[end + 1]
if tag == '%':
has_tag = True
parts.append('%')
elif tag == 'f':
location = metadata.location
if not location:
raise PatternException('Location not found in metadata')
uri = urlparse.urlparse(location)
if uri.scheme != '' and not uri.scheme in ['file']:
raise PatternException('Unsupported file scheme %s' % uri.scheme)
if uri.scheme == '':
path = uri.path
else:
path = urllib.url2pathname(uri.path)
basename = os.path.basename(path)
root, ext = os.path.splitext(basename)
has_tag = True
parts.append(root)
elif tag in keys:
value = getattr(metadata, keys[tag])
if not value:
raise PatternException('%s not in metadata' % keys[tag])
has_tag = True
parts.append(value)
if has_tag:
start = end + 2
else:
start = end + 1
parts.append('%')
else:
parts.append(pattern[start:])
break
return ''.join(parts)
def expand_path(pattern, metadata):
"""
    Expands the pattern to a directory path according to the information of a music track.
The pattern can be one of the three forms:
- begin with `/': the path is an absolute path and will not be expanded
- begin with `~/': the path is an relative path and the `~' wiil be expanded to
the absolute path of the user's home directory
- `%': the path will be expanded to the directory of the music file according to
its URI. ``location`` attribute is used in metadata
Arguments:
- `pattern`: The pattern to expand.
- `metadata`: A dict representing metadata. Useful keys are listed above.
    If the pattern cannot be expanded, raise a PatternException. Otherwise
    return the expanded pattern.
>>> expand_path('%', {'location': 'file:///tmp/a.lrc'})
'/tmp'
>>> expand_path('%foo', {'location': 'file:///tmp/a.lrc'})
'%foo'
>>> expand_path('/bar', {})
'/bar'
>>> expand_path('%', {'Title': 'hello'})
Traceback (most recent call last):
...
PatternException: 'Location not found in metadata'
"""
if pattern == '%':
location = metadata.location
if not location:
raise PatternException('Location not found in metadata')
uri = urlparse.urlparse(location)
if not uri.scheme in ['file']:
raise PatternException('Unsupported file scheme %s' % uri.scheme)
path = urllib.url2pathname(uri.path)
return os.path.dirname(path)
return os.path.expanduser(pattern)
if __name__ == '__main__':
import doctest
doctest.testmod()
| ihacklog/osdlyrics | python/pattern.py | Python | gpl-3.0 | 5,769 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# encryption-generator.py
#
# Copyright 2016 Netuser <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# encryption-generator.py Version 2.0
# site http://zorgonteam.wordpress.com
import os
import sys
import time
import base64
import urllib
import hashlib
import subprocess
from datetime import date
from datetime import datetime
from Crypto.Cipher import DES
from Crypto import Random
date=date.today()
now=datetime.now()
if os.name in ['nt','win32']:
os.system('cls')
else:
os.system('clear')
print "[*] Author Netuser [*]"
print "[*] encryption generator [*]"
print "[*] date :",date," [*]"
print
print "[*] Encrypt With Strong Crypto is Coming soon"
back = 'back'
#while back == 'back':
while True:
try:
menu=raw_input('\n[*] encrypt or decrypt $ ')
menu_item="update"
if menu_item == menu:
print "[*] Updating Databases Information .... "
url=urllib.urlretrieve("https://raw.githubusercontent.com/P1d0f/encryptGen/master/encryption-generator.py","encryption-generator.py")
print "[*] Update Succesfully"
sys.exit()
menu_item="help"
if menu == menu_item:
print """
you just type encrypt or decrypt
example :
encrypt = encrypt or decrypt $ encrypt (enter)
decrypt = encrypt or decrypt $ decrypt (enter)
"""
menu_item="encrypt"
if menu == menu_item:
print
print "----> md5"
print "----> sha1"
print "----> sha224"
print "----> sha256"
print "----> sha384"
print "----> sha512"
print "----> base16"
print "----> base32"
print "----> base64"
print "----> cryptoDES"
print
raw=raw_input('[*] type and choice one $ ')
menu_item="exit"
if raw == menu_item:
print "[*] thanks for shopping"
sys.exit()
menu_item="cryptoDES"
if menu_item == raw:
telo=raw_input('[*] your text $ ')
iv=Random.get_random_bytes(8)
des1=DES.new('01234567', DES.MODE_CFB, iv)
des2=DES.new('01234567', DES.MODE_CFB, iv)
text=telo
cipher_text=des2.encrypt(text)
nama_file=open('text.encrypt','w')
nama_file.writelines(cipher_text)
nama_file.close()
time.sleep(2)
for i in(5,4,3,2,1):
print "[*] encrypted at",now
print "\n[*] saved into text.encrypt"
menu_item="base16"
if menu_item == raw:
telo=raw_input('[*] text $ ')
base16=base64.b16encode('%s' % (telo))
for i in(5,4,3,2,1):
print "[*] encoded at",now
print "\n[*] result :",base16
menu_item="sha224"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha224=hashlib.sha224('%s' % (telo)).hexdigest()
for i in(5,4,3,2,1):
print "[*] encrypted at",now
print "\n[*] result :",sha224
menu_item="sha384"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha384=hashlib.sha384('%s' % (telo)).hexdigest()
for i in(5,4,3,2,1):
print "[*] encrypted at",now
print "\n[*] result :",sha384
menu_item="sha512"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha512=hashlib.sha512('%s' % (telo)).hexdigest()
for i in(5,4,3,2,1):
print "[*] encrypted at",now
print "\n[*] result :",sha512
menu_item="base64"
if menu_item == raw:
telo=raw_input('[*] text $ ')
base64=base64.b64encode('%s' % (telo))
for i in(5,4,3,2,1):
print "[*] encoded at",now
print "\n[*] result :",base64
menu_item="md5"
if menu_item == raw:
telo=raw_input('[*] text $ ')
md5=hashlib.md5('%s' % (telo)).hexdigest()
for i in(1,2,3,4,5):
print "[*] encrypted at",now
print "\n[*] result :",md5
menu_item="sha256"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha256=hashlib.sha256('%s' % (telo)).hexdigest()
print
for i in(1,2,3,4,5):
print "[*] encrypted at",now
print "\n[*] result :",sha256
menu_item="sha1"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha1=hashlib.sha1('%s' % (telo)).hexdigest()
print
for i in(1,2,3,4,5):
print "[*] encrypted at",now
print "\n[*] result :",sha1
menu_item="base32"
if menu_item == raw:
ff=raw_input('[*] text or file $ ')
menu_fuck="text"
if menu_fuck == ff:
telo=raw_input('text $ ')
base32=base64.b32encode('%s' % (telo))
print
for i in(1,2,3,4,5):
print "[*] encoded at",now
print "\n[*] result :",base32
menu_ss="file"
if menu_ss == ff:
try:
print "[*] WARNING : if you encrypt this file your file original will be remove !"
fileno=raw_input('\n[*] file to encrypt $ ')
baca=open('%s' % (fileno), 'r')
ss=baca.read()
decrypt=base64.b32encode(ss)
simpan=open('text.enc','w')
simpan.writelines(decrypt)
simpan.close()
time.sleep(2)
for i in(5,4,3,2,1):
print "[*] encoded at",now
print "\n[*] saved to text.enc"
os.remove(fileno)
except IOError:
print "\n[*] no file found",fileno
sys.exit()
menu_telo="decrypt"
if menu_telo == menu:
print
print "----> base16"
print "----> base32"
print "----> base64"
print "----> cryptoDES"
print
oke=raw_input('[*] type and choice one $ ')
menu_telo="cryptoDES"
if menu_telo == oke:
try:
telo=raw_input('[*] file.encrypt : ')
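                    # NOTE: a fresh random IV is generated here, so this cannot actually reverse
                    # the DES encryption performed above; the IV used for encryption would have to
                    # be stored with the ciphertext (this is the known bug noted at the end of the file).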
iv=Random.get_random_bytes(8)
des1=DES.new('01234567', DES.MODE_CFB, iv)
des2=DES.new('01234567', DES.MODE_CFB, iv)
nama_file=open('%s' % (telo),'r')
ss=nama_file.read()
decs=des2.decrypt(ss)
save1=open('text.decrypt','w')
save1.writelines(decs)
save1.close()
time.sleep(2)
for i in(5,4,3,2,1):
print "[*] decrypted at",now
print "\n[*] saved file text.decrypt"
except IOError:
print "\n[*] Not found file encrypt",telo
menu_telo="base16"
if oke == menu_telo:
raw1=raw_input('[*] text base16 $ ')
dec16=base64.b16decode('%s' % (raw1))
for i in(5,4,3,2,1):
print "[*] decoded at",now
print "\n[*] result :",dec16
menu_telo="base32"
if oke == menu_telo:
ss=raw_input('[*] text or file $ ')
menu_gg="text"
if menu_gg == ss:
raw2=raw_input('[*] text base32 $ ')
print
dec32=base64.b32decode('%s' % (raw2))
for i in(5,4,3,2,1):
print "[*] decoded at",now
print "\n[*] result :",dec32
menu_hh="file"
if menu_hh == ss:
try:
fileno=raw_input('[*] file text.enc $ ')
print
fuck=open('%s' % (fileno), 'r')
anjir=fuck.read()
dec43=base64.b32decode(anjir)
telo=open('text.dec','w')
telo.writelines(dec43)
telo.close()
time.sleep(2)
for i in(5,4,3,2,1):
print "[*] decoded at",now
print "\n[*] save file text.dec"
os.remove(fileno)
except:
print "[*] Not found file enc "
menu_telo="base64" #this is Bug Sorry
if oke == menu_telo:#
raw3=raw_input('[*] text base64 $ ')#
dec64=base64.b64decode('%s' % (raw3))#
for i in (5,4,3,2,1):#
print "[*] decoded at",now#
print "\n[*] result :",dec64#
menu_telo="exit"
if menu_telo == oke:
print "[*] thanks for shopping"
sys.exit()
menu_item="exit"
if menu == menu_item:
print "[*] thanks for shopping"
sys.exit()
except KeyboardInterrupt:
print "\n[*] ctrl+c active "
sys.exit()
##### Finished #################################### Finished ##################
###############################################################################
#the Bug is cannot decrypt crypto encryption but i will try to repair and make#
#progam is the best ever #you can wait this progam to be version 2.0 #
| P1d0f/encryptGen | encryption-generator.py | Python | gpl-3.0 | 8,291 |
#!/bin/env python2.7
# -*- coding: utf-8 -*-
# This file is part of AT-Platform.
#
# EPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EPlatform. If not, see <http://www.gnu.org/licenses/>.
import wxversion
wxversion.select( '2.8' )
import glob, os, time
import wx, alsaaudio
import wx.lib.buttons as bt
from pymouse import PyMouse
from string import maketrans
from pygame import mixer
import subprocess as sp
import shlex
import numpy as np
from random import shuffle
#=============================================================================
class speller( wx.Frame ):
def __init__(self, parent):
self.parent = parent
self.initializeParameters( )
self.initializeBitmaps( )
self.createGui( )
#-------------------------------------------------------------------------
def initializeParameters(self):
self.pathToEPlatform = './'
with open( self.pathToEPlatform + 'spellerParameters', 'r' ) as parametersFile:
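            # The speller parameters file is parsed line by line in the form "name = value";
            # lines that cannot be interpreted are reported to the console.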
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'polishLettersColour':
self.polishLettersColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'voice':
pass
elif line[ :line.find('=')-1 ] == 'vowelColour':
self.vowelColour= line[ line.rfind('=')+2:-1 ]
elif not line.isspace( ):
                print '\nIncorrectly specified parameter. Error in line:\n%s' % line
self.vowelColour = 'red'
self.polishLettersColour = 'blue'
with open( self.pathToEPlatform + 'parametersCW', 'r' ) as parametersFile:
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'textSize':
pass
elif line[ :line.find('=')-1 ] == 'checkTime':
pass
elif line[ :line.find('=')-1 ] == 'maxPoints':
pass
elif line[ :line.find('=')-1 ] == 'colorGrat':
pass
elif line[ :line.find('=')-1 ] == 'colorNiest':
pass
elif line[ :line.find('=')-1 ] == 'ileLuk':
pass
#self.ileLuk= int(line[ line.rfind('=')+2:-1 ])
elif not line.isspace( ):
                    print 'Incorrectly specified parameters'
                    print 'Error in line', line
#self.ileLuk=2
with open( self.pathToEPlatform + 'parameters', 'r' ) as parametersFile:
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'timeGap':
self.timeGap = int( line[ line.rfind('=')+2:-1 ] )
elif line[ :line.find('=')-1 ] == 'backgroundColour':
self.backgroundColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'textColour':
self.textColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'scanningColour':
self.scanningColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'selectionColour':
self.selectionColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'musicVolume':
pass
elif line[ :line.find('=')-1 ] == 'filmVolume':
pass
elif not line.isspace( ):
                print '\nIncorrectly specified parameter. Error in line:\n%s' % line
self.timeGap = 1500
self.backgroundColour = 'white'
self.textColour = 'black'
self.scanningColour = '#E7FAFD'
self.selectionColour = '#9EE4EF'
self.labels = [ 'a e b c d f g h i o j k l m n p u y r s t w z SPECIAL_CHARACTERS DELETE TRASH CHECK ORISPEAK SPEAK EXIT'.split( ), '1 2 3 4 5 6 7 8 9 0 + - * / = % $ & . , ; : " ? ! @ # ( ) [ ] { } < > ~ DELETE TRASH CHECK ORISPEAK SPEAK EXIT'.split( ) ]
self.colouredLabels = [ 'a','e','i','o','u','y']
self.winWidth, self.winHeight = wx.DisplaySize( )
self.voice=False
self.slowo=self.parent.word
self.ileLiter =len(self.slowo)
#if self.ileLuk >=len(self.slowo):
#self.ileLuk=len(self.slowo)-1
self.numberOfRows = [4, 5 ]
self.numberOfColumns = [ 8, 9 ]
#self.flag = 'row'
#self.rowIteration = 0
#self.columnIteration = 0
#self.countRows = 0
#self.countColumns = 0
self.kolejnyKrok=0
#self.maxNumberOfColumns = 2
self.numberOfPresses = 1
self.subSizerNumber = 0
self.mouseCursor = PyMouse( )
mixer.init( )
self.typewriterKeySound = mixer.Sound( self.pathToEPlatform+'sounds/typewriter_key.wav' )
self.typewriterForwardSound = mixer.Sound( self.pathToEPlatform+'sounds/typewriter_forward.wav' )
self.typewriterSpaceSound = mixer.Sound( self.pathToEPlatform+'sounds/typewriter_space.wav' )
self.phones = glob.glob( self.pathToEPlatform+'sounds/phone/*' )
self.phoneLabels = [ item[ item.rfind( '/' )+1 : item.rfind( '_' ) ] for item in self.phones ]
self.sounds = [ mixer.Sound( self.sound ) for self.sound in self.phones ]
self.parent.SetBackgroundColour( 'dark grey' )
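    # Note (illustrative, not part of the original file): the three parameter files
    # read above are expected to hold one "name = value" entry per line, e.g.
    #   timeGap = 1500
    #   backgroundColour = white
    # The slices line[:line.find('=')-1] and line[line.rfind('=')+2:-1] assume exactly
    # one space on each side of '=' and a trailing newline on every line.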
#-------------------------------------------------------------------------
def initializeBitmaps(self):
self.path=self.pathToEPlatform+'multimedia/'
labelFiles = [ file for file in [ self.path+'icons/speller/special_characters.png', self.path+'icons/speller/DELETE.png', self.path+'icons/speller/TRASH.png', self.path+'icons/speller/CHECK.png',self.path+'icons/speller/ORISPEAK.png', self.path+'icons/speller/SPEAK.png', self.path+'icons/speller/exit.png', ] ]
self.labelBitmaps = { }
labelBitmapIndex = [ self.labels[ 0 ].index( self.labels[ 0 ][ -7 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -6 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -5 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -4 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -3 ] ),self.labels[ 0 ].index( self.labels[ 0 ][ -2 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -1 ] ) ]
for labelFilesIndex, labelIndex in enumerate( labelBitmapIndex ):
self.labelBitmaps[ self.labels[ 0 ][ labelIndex ] ] = wx.BitmapFromImage( wx.ImageFromStream( open( labelFiles[ labelFilesIndex ], 'rb' )) )
self.labelBitmaps2 = { }
labelBitmapIndex2 = [ self.labels[ 1 ].index( self.labels[ 1 ][ -6 ] ), self.labels[ 1 ].index( self.labels[ 1 ][ -5 ] ), self.labels[ 1 ].index( self.labels[ 1 ][ -4 ] ), self.labels[ 1 ].index( self.labels[ 1 ][ -3 ] ),self.labels[ 1 ].index( self.labels[ 1 ][ -2 ] ), self.labels[ 1 ].index( self.labels[ 1 ][ -1 ] ) ]
for labelFilesIndex2, labelIndex2 in enumerate( labelBitmapIndex2 ):
self.labelBitmaps2[ self.labels[ 1 ][ labelIndex2 ] ] = wx.BitmapFromImage( wx.ImageFromStream( open( labelFiles[ 1: ][ labelFilesIndex2 ], 'rb' )) )
#-------------------------------------------------------------------------
def createGui(self):
self.textField = wx.TextCtrl( self.parent, style = wx.TE_LEFT|wx.TE_RICH2, size = ( self.winWidth, 0.2 * self.winHeight ) )
self.textField.SetFont( wx.Font( 60, wx.SWISS, wx.NORMAL, wx.NORMAL ) )
self.parent.mainSizer.Add( self.textField, flag = wx.EXPAND | wx.TOP | wx.BOTTOM, border = 3 )
self.subSizers = [ ]
subSizer = wx.GridBagSizer( 3, 3 )
self.pomieszane=[]
for i in self.slowo:
self.pomieszane.append(self.labels[0].index(i))
shuffle(self.pomieszane)
#print self.pomieszane
for litera in self.pomieszane:
if self.pomieszane.count(litera) > 1:
self.pomieszane.remove(litera)
zakres=(self.numberOfRows[0]-1)* self.numberOfColumns[0] -1
print zakres
dodaj=np.random.randint(0,zakres,1)[0]
while dodaj in self.pomieszane:
dodaj=np.random.randint(0,zakres,1)[0]
self.pomieszane.append(dodaj)
slowoList=list(self.slowo)
shuffle(slowoList)
zmieszane_slowo= ''.join(slowoList)
#print zmieszane_slowo
for i in self.pomieszane:
self.labels[0][i]=zmieszane_slowo[-1]
zmieszane_slowo=zmieszane_slowo[:-1]
self.pomieszane.sort()
ile=0
for index_1, item in enumerate( self.labels[ 0 ][ :-7 ] ):
ile+=1
b = bt.GenButton( self.parent, -1, item , name = item+str(ile), size = ( 0.985*self.winWidth / self.numberOfColumns[ 0 ], 0.79 * self.winHeight / self.numberOfRows[ 0 ] ) )
b.SetFont( wx.Font( 100, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False ) )
b.SetBezelWidth( 3 )
if index_1 not in self.pomieszane:
b.SetBackgroundColour( 'grey' )
else:
b.SetBackgroundColour( self.backgroundColour )
if item in self.colouredLabels and self.vowelColour != 'False':
if index_1 not in self.pomieszane:
b.SetForegroundColour( 'grey' )
else:
b.SetForegroundColour( self.vowelColour )
else:
if index_1 not in self.pomieszane:
b.SetForegroundColour( 'grey' )
else:
b.SetForegroundColour( self.textColour )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer.Add( b, ( index_1 / self.numberOfColumns[ 0 ], index_1 % self.numberOfColumns[ 0 ] ), wx.DefaultSpan, wx.EXPAND )
for index_2, item in enumerate( self.labels[ 0 ][ -7 : ] ):
if item == 'SPECIAL_CHARACTERS':
b = bt.GenButton( self.parent, -1, item, name = item, size = ( 0.985*self.winWidth / self.numberOfColumns[ 0 ], 0.79 * self.winHeight / self.numberOfRows[ 0 ] ) )
b.SetFont( wx.Font( 100, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False ) )
b.SetForegroundColour( 'grey' )
b.SetBackgroundColour( 'grey' )
else:
b = bt.GenBitmapButton( self.parent, -1, bitmap = self.labelBitmaps[ item ] )
b.SetBackgroundColour( self.backgroundColour )
b.SetBezelWidth( 3 )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
if index_2==3:
subSizer.Add( b, ( ( index_1 + index_2 +1) / self.numberOfColumns[ 0 ], ( index_1 + index_2+1 ) % self.numberOfColumns[ 0 ] ), (1,3), wx.EXPAND )
elif index_2>3:
subSizer.Add( b, ( ( index_1 + index_2 +3) / self.numberOfColumns[ 0 ], ( index_1 + index_2 +3) % self.numberOfColumns[ 0 ] ), wx.DefaultSpan, wx.EXPAND )
else:
subSizer.Add( b, ( ( index_1 + index_2+1 ) / self.numberOfColumns[ 0 ], ( index_1 + index_2 +1) % self.numberOfColumns[ 0 ] ), wx.DefaultSpan, wx.EXPAND )
self.subSizers.append( subSizer )
self.parent.mainSizer.Add( self.subSizers[ 0 ], proportion = 1, flag = wx.EXPAND )
self.parent.SetSizer( self.parent.mainSizer )
subSizer2 = wx.GridBagSizer( 3, 3 )
for index_1, item in enumerate( self.labels[ 1 ][ :-6 ] ):
b = bt.GenButton( self.parent, -1, item, name = item, size = ( 0.985*self.winWidth / self.numberOfColumns[ 1 ], 0.75 * self.winHeight / self.numberOfRows[ 1 ] ) )
b.SetFont( wx.Font( 100, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False ) )
b.SetBezelWidth( 3 )
b.SetBackgroundColour( self.backgroundColour )
b.SetForegroundColour( self.textColour )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer2.Add( b, ( index_1 / self.numberOfColumns[ 1 ], index_1 % self.numberOfColumns[ 1 ] ), wx.DefaultSpan, wx.EXPAND )
for index_2, item in enumerate( self.labels[ 1 ][ -6 : ] ):
b = bt.GenBitmapButton( self.parent, -1, bitmap = self.labelBitmaps2[ item ] )
b.SetBackgroundColour( self.backgroundColour )
b.SetBezelWidth( 3 )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
if index_2==2:
subSizer2.Add( b, ( ( index_1 + index_2 +1) / self.numberOfColumns[ 1 ], ( index_1 + index_2 +1) % self.numberOfColumns[ 1 ] ), (1,4), wx.EXPAND )
elif index_2>2:
subSizer2.Add( b, ( ( index_1 + index_2 +4) / self.numberOfColumns[ 1], ( index_1 + index_2+4 ) % self.numberOfColumns[ 1 ] ), wx.DefaultSpan, wx.EXPAND )
else:
subSizer2.Add( b, ( ( index_1 + index_2+1 ) / self.numberOfColumns[ 1 ], ( index_1 + index_2 +1) % self.numberOfColumns[ 1 ] ), wx.DefaultSpan, wx.EXPAND )
self.subSizers.append( subSizer2 )
self.parent.mainSizer.Add( self.subSizers[ 1 ], proportion = 1, flag = wx.EXPAND )
self.parent.mainSizer.Show( item = self.subSizers[ 1 ], show = False, recursive = True )
self.parent.SetSizer( self.parent.mainSizer )
ikony=range(self.numberOfColumns[0]*self.numberOfRows[0]-8,self.numberOfColumns[0]*self.numberOfRows[0]-2)
self.ktore=self.pomieszane
for i in ikony:
self.ktore.append(i)
self.parent.Layout()
self.usuniete=[]
def onExit(self):
self.parent.PicNr-=1
self.parent.stoper2.Stop( )
self.parent.back()
def czytajLitere(self,litera):
time.sleep(1)
soundIndex = self.phoneLabels.index( [ item for item in self.phoneLabels if litera.swapcase() in item ][ 0 ] )
sound = self.sounds[ soundIndex ]
sound.play( )
self.parent.SetFocus()
#----------------------------------------------------------------------------
def onPress(self, event):
self.numberOfPresses += 1
if self.numberOfPresses == 1:
label = self.labels[ 0 ][self.ktore[self.kolejnyKrok-1]]
item = self.subSizers[ 0 ].GetChildren()
b = item[self.ktore[self.kolejnyKrok-1]]
b=b.GetWindow( )
if label != 'SPEAK':
b.SetBackgroundColour( self.selectionColour )
else:
pass
b.SetFocus( )
b.Update( )
if label in self.slowo:
self.typewriterKeySound.play()
self.textField.WriteText(label)
item = self.subSizers[ 0 ].GetChildren()
b = item[self.ktore[self.kolejnyKrok-1]]
b=b.GetWindow( )
b.SetBackgroundColour( 'grey' )
b.SetForegroundColour('grey')
b.SetFocus( )
b.Update( )
self.usuniete.append(self.ktore[self.kolejnyKrok-1])
self.ktore.remove( self.ktore[self.kolejnyKrok-1] )
self.kolejnyKrok=0
elif label == 'DELETE':
text=self.textField.GetValue()
if text:
self.typewriterForwardSound.play( )
item = self.subSizers[ 0 ].GetChildren()
b = item[self.usuniete[-1]]
b=b.GetWindow( )
b.SetBackgroundColour( self.backgroundColour)
if self.labels[0][self.usuniete[-1]] in self.colouredLabels:
b.SetForegroundColour( self.vowelColour )
else:
b.SetForegroundColour( self.textColour )
b.SetFocus( )
b.Update( )
self.ktore.append(self.usuniete[-1])
self.ktore.sort()
self.usuniete.remove( self.usuniete[-1] )
self.textField.Remove(self.textField.GetInsertionPoint()-1, self.textField.GetInsertionPoint())
self.kolejnyKrok=0
else:
pass
elif label == 'SPEAK':
if not self.voice:
self.voice=True
b.SetBackgroundColour('indian red')
b.SetFocus( )
b.Update()
else:
b.SetBackgroundColour(self.backgroundColour)
b.SetFocus( )
b.Update()
self.voice=False
elif label == 'ORISPEAK':
self.parent.stoper2.Stop()
if str(self.parent.word)+'.ogg' not in os.listdir(self.pathToEPlatform+'multimedia/spelling/'):
command='sox -m '+self.pathToEPlatform+'sounds/phone/'+list(self.parent.word)[0].swapcase()+'.wav'
ile=0
for l in list(self.parent.word)[1:]:
ile+=2
command+=' "|sox '+self.pathToEPlatform+'sounds/phone/'+l.swapcase()+'.wav'+' -p pad '+str(ile)+'"'
command+=' '+self.pathToEPlatform+'multimedia/spelling/'+self.parent.word+'.ogg'
wykonaj=sp.Popen(shlex.split(command))
time.sleep(1.5)
do_literowania=mixer.Sound(self.pathToEPlatform+'multimedia/spelling/'+self.parent.word+'.ogg')
do_literowania.play()
self.parent.stoper4.Start((do_literowania.get_length()+0.5 )* 1000)
elif label == 'TRASH':
text=self.textField.GetValue()
if text:
self.typewriterForwardSound.play()
self.textField.Remove(0,self.textField.GetInsertionPoint())
for litera in self.usuniete:
item = self.subSizers[ 0 ].GetChildren()
b = item[litera]
b=b.GetWindow( )
b.SetBackgroundColour( self.backgroundColour)
if self.labels[0][litera] in self.colouredLabels:
b.SetForegroundColour( self.vowelColour )
else:
b.SetForegroundColour( self.textColour )
#print self.usuniete,self.ktore
b.SetFocus( )
b.Update( )
while self.usuniete:
self.ktore.append(self.usuniete[-1])
self.ktore.sort()
self.usuniete.remove(self.usuniete[-1] )
self.kolejnyKrok=0
else:
pass
elif label == 'EXIT':
self.onExit( )
elif label =='CHECK':
self.parent.stoper2.Stop()
self.parent.ownWord=self.textField.GetValue()
self.parent.check()
else:
pass
else:
event.Skip( )
#-------------------------------------------------------------------------
def timerUpdate(self, event):
self.mouseCursor.move( self.winWidth - 12, self.winHeight - 20 )
self.numberOfPresses = 0
for i in self.ktore:
if self.voice and i == self.numberOfRows[0]*self.numberOfColumns[0]-4:
items = self.subSizers[ 0 ].GetChildren()
b = items[i]
b=b.GetWindow( )
b.SetBackgroundColour( 'indian red')
b.SetFocus( )
b.Update( )
else:
items = self.subSizers[ 0 ].GetChildren()
b = items[i]
b=b.GetWindow( )
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus( )
b.Update( )
if self.voice and self.ktore[self.kolejnyKrok] == self.numberOfRows[0]*self.numberOfColumns[0]-4:
item = self.subSizers[ 0 ].GetChildren()
b = item[self.ktore[self.kolejnyKrok]]
b=b.GetWindow( )
b.SetBackgroundColour( 'orange red')
b.SetFocus( )
b.Update( )
else:
item = self.subSizers[ 0 ].GetChildren()
b = item[self.ktore[self.kolejnyKrok]]
b=b.GetWindow( )
b.SetBackgroundColour( self.scanningColour)
b.SetFocus( )
b.Update( )
if self.voice and self.labels[0][self.ktore[self.kolejnyKrok]] in self.slowo:
self.parent.stoper2.Stop()
label = self.labels[ 0 ][self.ktore[self.kolejnyKrok]]
self.czytajLitere(label)
self.parent.stoper2.Start(self.timeGap)
if self.kolejnyKrok == len(self.ktore)-1:
self.kolejnyKrok=0
else:
self.kolejnyKrok+=1
| bjura/EPlatform | spellerPuzzle.py | Python | gpl-3.0 | 24,523 |
import json
import argparse
import numpy
import sys
import copy
from astropy.coordinates import SkyCoord
from astropy import units
import operator
class Program(object):
def __init__(self, runid="16BP06", pi_login="gladman"):
self.config = {"runid": runid,
"pi_login": pi_login,
"program_configuration": {"mjdates": [],
"observing_blocks": [],
"observing_groups": []
}}
def add_target(self, target):
self.config["program_configuration"]["mjdates"].append(target)
def add_observing_block(self, observing_block):
self.config["program_configuration"]["observing_blocks"].append(observing_block)
def add_observing_group(self, observing_group):
self.config["program_configuration"]["observing_groups"].append(observing_group)
class Target(object):
def __init__(self, filename=None):
self.config = json.load(open(filename))
@property
def token(self):
return self.config["identifier"]["client_token"]
@property
def mag(self):
return self.config["moving_target"]["ephemeris_points"][0]["mag"]
@property
def coordinate(self):
return SkyCoord(self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["ra"],
self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["dec"],
unit='degree')
class ObservingBlock(object):
def __init__(self, client_token, target_token):
self.config = {"identifier": {"client_token": client_token},
"target_identifier": {"client_token": target_token},
"constraint_identifiers": [{"server_token": "C1"}],
"instrument_config_identifiers": [{"server_token": "I1"}]}
@property
def token(self):
return self.config["identifier"]["client_token"]
class ObservingGroup(object):
def __init__(self, client_token):
self.config = {"identifier": {"client_token": client_token},
"observing_block_identifiers": []}
def add_ob(self, client_token):
self.config["observing_block_identifiers"].append({"client_token": client_token})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ogname')
parser.add_argument('mjdates', nargs='+')
args = parser.parse_args()
# Break the mjdates into OBs based on their max mag of source in pointing.
cuts = numpy.array([23.0, 23.5, 24.0, 24.5, 25.0, 25.5, 26.0, 30.0])
IC_exptimes = [50, 100, 200, 300, 400, 500, 600, 700]
program = Program()
ob_tokens = []
mags = {}
ob_coordinate = {}
for filename in args.mjdates:
target = Target(filename)
program.add_target(target.config)
ob_token = "OB-{}-{}".format(target.token, target.mag)
ob = ObservingBlock(ob_token, target.token)
idx = (target.mag > cuts).sum() + 4
ob.config["instrument_config_identifiers"] = [{"server_token": "I{}".format(idx)}]
program.add_observing_block(ob.config)
ob_tokens.append(ob_token)
mags[ob_token] = target.mag
ob_coordinate[ob_token] = target.coordinate
sf = lambda x, y: cmp(x.ra, y.ra)
order_tokens = sorted(ob_coordinate, cmp=sf, key=ob_coordinate.get)
total_itime = 0
ogs = {}
scheduled = {}
og_idx = 0
while len(scheduled) < len(ob_tokens):
og_idx += 1
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, 0)
sys.stdout.write("{}: ".format(og_token))
og = ObservingGroup(og_token)
og_coord = None
og_itime = 0
for ob_token in order_tokens:
if ob_token not in scheduled:
if og_coord is None:
og_coord = ob_coordinate[ob_token]
if ob_coordinate[ob_token].separation(og_coord) > 30 * units.degree:
continue
og.add_ob(ob_token)
scheduled[ob_token] = True
sys.stdout.write("{} ".format(ob_token))
sys.stdout.flush()
idx = (mags[ob_token] > cuts).sum()
print ob_token, mags[ob_token], idx + 4
og_itime += IC_exptimes[idx] + 40
if og_itime > 3000.0:
break
break
total_itime += og_itime
sys.stdout.write(" {}s \n".format(og_itime))
program.add_observing_group(og.config)
nrepeats = 0
for repeat in range(nrepeats):
total_itime += og_itime
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, repeat + 1)
og = copy.deepcopy(og)
og.config["identifier"]["client_token"] = og_token
program.add_observing_group(og.config)
print "Total I-Time: {} hrs".format(total_itime/3600.)
json.dump(program.config, open('program.json', 'w'), indent=4, sort_keys=True)
| CFIS-Octarine/octarine | planning/ph2.py | Python | gpl-3.0 | 5,099 |
#-*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
app_name = "perso"
urlpatterns = [
url(r'^$', views.main, name='main'),
url(r'^(?P<pageId>[0-9]+)/?$', views.main, name='main'),
url(r'^about/?$', views.about, name='about'),
url(r'^contact/?$', views.contact, name='contact'),
url(r'^(?P<cat_slug>[-a-zA-Z0-9_]+)/?$', views.main, name='main'),
url(r'^(?P<cat_slug>[-a-zA-Z0-9_]+)/(?P<pageId>[0-9]+)/?$', views.main, name='main'),
url(r'^publication/(?P<slug>[-a-zA-Z0-9_]+)/?$', views.publication, name='publication'),
url(r'^tag/(?P<slug>[-a-zA-Z0-9_]+)/?$', views.tag, name='tag'),
]
| LeMinaw/minaw.net | perso/urls.py | Python | gpl-3.0 | 901 |
#!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
"""
Get the list of all the user files.
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
days = 0
months = 0
years = 0
wildcard = None
baseDir = ''
emptyDirsFlag = False
Script.registerSwitch( "D:", "Days=", "Match files older than number of days [%s]" % days )
Script.registerSwitch( "M:", "Months=", "Match files older than number of months [%s]" % months )
Script.registerSwitch( "Y:", "Years=", "Match files older than number of years [%s]" % years )
Script.registerSwitch( "w:", "Wildcard=", "Wildcard for matching filenames [All]" )
Script.registerSwitch( "b:", "BaseDir=", "Base directory to begin search (default /[vo]/user/[initial]/[username])" )
Script.registerSwitch( "e", "EmptyDirs", "Create a list of empty directories" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ...' % Script.scriptName, ] ) )
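# Example invocation (illustrative): list user files older than 6 months that match
# "*.dst" and also record any empty directories that are found:
#   dirac-dms-user-lfns -M 6 -w "*.dst" -e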
Script.parseCommandLine( ignoreErrors = False )
for switch in Script.getUnprocessedSwitches():
if switch[0] == "D" or switch[0].lower() == "days":
days = int( switch[1] )
if switch[0] == "M" or switch[0].lower() == "months":
months = int( switch[1] )
if switch[0] == "Y" or switch[0].lower() == "years":
years = int( switch[1] )
if switch[0].lower() == "w" or switch[0].lower() == "wildcard":
wildcard = switch[1]
if switch[0].lower() == "b" or switch[0].lower() == "basedir":
baseDir = switch[1]
if switch[0].lower() == "e" or switch[0].lower() == "emptydirs":
emptyDirsFlag = True
import DIRAC
from DIRAC import gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.List import sortList
from datetime import datetime, timedelta
import sys, os, time, fnmatch
fc = FileCatalog()
def isOlderThan( cTimeStruct, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
if cTimeStruct < maxCTime:
return True
return False
withMetadata = False
if days or months or years:
withMetadata = True
totalDays = 0
if years:
totalDays += 365 * years
if months:
totalDays += 30 * months
if days:
totalDays += days
res = getProxyInfo( False, False )
if not res['OK']:
gLogger.error( "Failed to get client proxy information.", res['Message'] )
DIRAC.exit( 2 )
proxyInfo = res['Value']
username = proxyInfo['username']
vo = ''
if 'group' in proxyInfo:
vo = getVOForGroup( proxyInfo['group'] )
if not baseDir:
if not vo:
gLogger.error( 'Could not determine VO' )
Script.showHelp()
baseDir = '/%s/user/%s/%s' % ( vo, username[0], username )
baseDir = baseDir.rstrip( '/' )
gLogger.info( 'Will search for files in %s' % baseDir )
activeDirs = [baseDir]
allFiles = []
emptyDirs = []
while len( activeDirs ) > 0:
currentDir = activeDirs.pop()
res = fc.listDirectory( currentDir, withMetadata, timeout = 360 )
if not res['OK']:
gLogger.error( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
elif currentDir in res['Value']['Failed']:
gLogger.error( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Value']['Failed'][currentDir] ) )
else:
dirContents = res['Value']['Successful'][currentDir]
subdirs = dirContents['SubDirs']
files = dirContents['Files']
if not subdirs and not files:
emptyDirs.append( currentDir )
gLogger.notice( '%s: empty directory' % currentDir )
else:
for subdir in sorted( subdirs, reverse = True ):
if ( not withMetadata ) or isOlderThan( subdirs[subdir]['CreationDate'], totalDays ):
activeDirs.append( subdir )
for filename in sorted( files ):
fileOK = False
if ( not withMetadata ) or isOlderThan( files[filename]['MetaData']['CreationDate'], totalDays ):
if wildcard is None or fnmatch.fnmatch( filename, wildcard ):
fileOK = True
if not fileOK:
files.pop( filename )
allFiles += sorted( files )
gLogger.notice( "%s: %d files%s, %d sub-directories" % ( currentDir, len( files ), ' matching' if withMetadata or wildcard else '', len( subdirs ) ) )
outputFileName = '%s.lfns' % baseDir.replace( '/%s' % vo, '%s' % vo ).replace( '/', '-' )
outputFile = open( outputFileName, 'w' )
for lfn in sortList( allFiles ):
outputFile.write( lfn + '\n' )
outputFile.close()
gLogger.notice( '%d matched files have been put in %s' % ( len( allFiles ), outputFileName ) )
if emptyDirsFlag:
outputFileName = '%s.emptydirs' % baseDir.replace( '/%s' % vo, '%s' % vo ).replace( '/', '-' )
outputFile = open( outputFileName, 'w' )
for dir in sortList( emptyDirs ):
outputFile.write( dir + '\n' )
outputFile.close()
gLogger.notice( '%d empty directories have been put in %s' % ( len( emptyDirs ), outputFileName ) )
DIRAC.exit( 0 )
| vmendez/DIRAC | DataManagementSystem/scripts/dirac-dms-user-lfns.py | Python | gpl-3.0 | 5,259 |
#!/usr/bin/env python
"""
@file costFunctionChecker.py
@author Michael Behrisch
@author Daniel Krajzewicz
@author Jakob Erdmann
@date 2009-08-31
@version $Id: costFunctionChecker.py 13811 2013-05-01 20:31:43Z behrisch $
Run duarouter repeatedly and simulate weight changes via a cost function.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, sys, subprocess, types
from datetime import datetime
from optparse import OptionParser
from xml.sax import make_parser, handler
def call(command, log):
    if not isinstance(command, types.StringTypes):
command = [str(c) for c in command]
print >> log, "-" * 79
print >> log, command
log.flush()
retCode = subprocess.call(command, stdout=log, stderr=log)
if retCode != 0:
print >> sys.stderr, "Execution of %s failed. Look into %s for details." % (command, log.name)
sys.exit(retCode)
def writeRouteConf(step, options, file, output):
fd = open("iteration_" + str(step) + ".duarcfg", "w")
print >> fd, """<configuration>
<input>
<net-file value="%s"/>""" % options.net
if step==0:
if options.flows:
print >> fd, ' <flow-definition value="%s"/>' % file
else:
print >> fd, ' <trip-defs value="%s"/>' % file
else:
print >> fd, ' <alternatives value="%s"/>' % file
print >> fd, ' <weights value="dump_%s_%s.xml"/>' % (step-1, options.aggregation)
print >> fd, """ </input>
<output>
<output-file value="%s"/>
<exit-times value="True"/>
</output>""" % output
print >> fd, """ <processing>
<continue-on-unbuild value="%s"/>
<expand-weights value="True"/>
<gBeta value="%s"/>
<gA value="%s"/>
</processing>""" % (options.continueOnUnbuild, options.gBeta, options.gA)
print >> fd, ' <random_number><abs-rand value="%s"/></random_number>' % options.absrand
print >> fd, ' <time><begin value="%s"/>' % options.begin,
if options.end:
print >> fd, '<end value="%s"/>' % options.end,
print >> fd, """</time>
<report>
<verbose value="%s"/>
<suppress-warnings value="%s"/>
</report>
</configuration>""" % (options.verbose, options.noWarnings)
fd.close()
class RouteReader(handler.ContentHandler):
def __init__(self):
self._edgeWeights = {}
self._maxDepart = 0
def startElement(self, name, attrs):
if name == 'route':
for edge in attrs['edges'].split():
if not edge in self._edgeWeights:
self._edgeWeights[edge] = 0
self._edgeWeights[edge] += 1
elif name == 'vehicle':
if float(attrs['depart']) > self._maxDepart:
self._maxDepart = float(attrs['depart'])
def getWeight(self, edge):
return self._edgeWeights.get(edge, 0)
def getMaxDepart(self):
return self._maxDepart
class NetReader(handler.ContentHandler):
def __init__(self):
self._edges = []
def startElement(self, name, attrs):
if name == 'edge':
if not attrs.has_key('function') or attrs['function'] == 'normal':
self._edges.append(attrs['id'])
def getEdges(self):
return self._edges
def identity(edge, weight):
return weight
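# Illustrative alternative cost function (not part of the original script): any
# callable with the same (edge, weight) signature can be selected at run time via
# --cost-function <module>.<function>. The cap of 300 used here is an arbitrary
# example value.
def cappedUsage(edge, weight, cap=300.0):
    # return the route-usage count saturated at 'cap' so that a single heavily
    # used edge cannot dominate the generated travel times
    return min(float(weight), cap)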
def generateWeights(step, options, edges, weights, costFunction):
fd = open("dump_%s_%s.xml" % (step, options.aggregation), "w")
print >> fd, '<?xml version="1.0"?>\n<netstats>'
for time in range(0, int(reader.getMaxDepart()+1), options.aggregation):
print >> fd, ' <interval begin="%s" end="%s" id="dump_%s">' % (time, time + options.aggregation, options.aggregation)
for edge in edges:
cost = costFunction(edge, weights.getWeight(edge))
if cost != None:
print >> fd, ' <edge id="%s" traveltime="%s"/>' % (edge, cost)
print >> fd, ' </interval>'
print >> fd, '</netstats>'
fd.close()
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
optParser.add_option("-C", "--continue-on-unbuild", action="store_true", dest="continueOnUnbuild",
default=False, help="continues on unbuild routes")
optParser.add_option("-w", "--disable-warnings", action="store_true", dest="noWarnings",
default=False, help="disables warnings")
optParser.add_option("-n", "--net-file", dest="net",
help="SUMO network (mandatory)", metavar="FILE")
optParser.add_option("-t", "--trips", dest="trips",
help="trips in step 0 (this or flows is mandatory)", metavar="FILE")
optParser.add_option("-F", "--flows",
help="flows in step 0 (this or trips is mandatory)", metavar="FILE")
optParser.add_option("-+", "--additional", dest="additional",
default="", help="Additional files")
optParser.add_option("-b", "--begin", dest="begin",
type="int", default=0, help="Set simulation/routing begin [default: %default]")
optParser.add_option("-e", "--end", dest="end",
type="int", help="Set simulation/routing end [default: %default]")
optParser.add_option("-R", "--route-steps", dest="routeSteps",
type="int", default=200, help="Set simulation route steps [default: %default]")
optParser.add_option("-a", "--aggregation", dest="aggregation",
type="int", default=900, help="Set main weights aggregation period [default: %default]")
optParser.add_option("-A", "--gA", dest="gA",
type="float", default=.5, help="Sets Gawron's Alpha [default: %default]")
optParser.add_option("-B", "--gBeta", dest="gBeta",
type="float", default=.9, help="Sets Gawron's Beta [default: %default]")
optParser.add_option("-f", "--first-step", dest="firstStep",
type="int", default=0, help="First DUA step [default: %default]")
optParser.add_option("-l", "--last-step", dest="lastStep",
type="int", default=50, help="Last DUA step [default: %default]")
optParser.add_option("-p", "--path", dest="path",
default=os.environ.get("SUMO_BINDIR", ""), help="Path to binaries [default: %default]")
optParser.add_option("-y", "--absrand", dest="absrand", action="store_true",
default=False, help="use current time to generate random number")
optParser.add_option("-c", "--cost-function", dest="costfunc",
default="identity", help="(python) function to use as cost function")
(options, args) = optParser.parse_args()
if not options.net or not (options.trips or options.flows):
optParser.error("At least --net-file and --trips or --flows have to be given!")
duaBinary = os.environ.get("DUAROUTER_BINARY", os.path.join(options.path, "duarouter"))
log = open("dua-log.txt", "w+")
parser = make_parser()
reader = NetReader()
parser.setContentHandler(reader)
parser.parse(options.net)
edges = reader.getEdges()
if "." in options.costfunc:
idx = options.costfunc.rfind(".")
module = options.costfunc[:idx]
func = options.costfunc[idx+1:]
exec("from %s import %s as costFunction" % (module, func))
else:
exec("costFunction = %s" % options.costfunc)
if options.flows:
tripFiles = options.flows.split(",")
else:
tripFiles = options.trips.split(",")
starttime = datetime.now()
for step in range(options.firstStep, options.lastStep):
btimeA = datetime.now()
print "> Executing step " + str(step)
# router
files = []
for tripFile in tripFiles:
file = tripFile
tripFile = os.path.basename(tripFile)
if step>0:
file = tripFile[:tripFile.find(".")] + "_%s.rou.alt.xml" % (step-1)
output = tripFile[:tripFile.find(".")] + "_%s.rou.xml" % step
print ">> Running router with " + file
btime = datetime.now()
print ">>> Begin time: %s" % btime
writeRouteConf(step, options, file, output)
retCode = call([duaBinary, "-c", "iteration_%s.duarcfg" % step], log)
etime = datetime.now()
print ">>> End time: %s" % etime
print ">>> Duration: %s" % (etime-btime)
print "<<"
files.append(output)
# generating weights file
print ">> Generating weights"
reader = RouteReader()
parser.setContentHandler(reader)
for f in files:
parser.parse(f)
generateWeights(step, options, edges, reader, costFunction)
print "<<"
print "< Step %s ended (duration: %s)" % (step, datetime.now() - btimeA)
print "------------------\n"
sys.stdout.flush()
print "dua-iterate ended (duration: %s)" % (datetime.now() - starttime)
log.close()
| rudhir-upretee/Sumo17_With_Netsim | tools/assign/costFunctionChecker.py | Python | gpl-3.0 | 8,986 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = ""
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "datapoint/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
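# Illustrative renderings of the scheme documented above (values are examples only):
#   exactly on a tag                -> "1.2.1"
#   3 commits past the tag          -> "1.2.1+3.g1076c97"
#   3 commits past the tag, dirty   -> "1.2.1+3.g1076c97.dirty"
#   no tag at all                   -> "0+untagged.14.g1076c97"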
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| jacobtomlinson/datapoint-python | datapoint/_version.py | Python | gpl-3.0 | 18,444 |
# -*- coding: utf-8 -*-
"""
Common structures and functions used by other scripts.
"""
from xml.etree import cElementTree as ET
str_to_entailment = {'none': 0,
'entailment': 1,
'paraphrase': 2}
entailment_to_str = {v: k for k, v in str_to_entailment.items()}
class Pair(object):
'''
Class representing a pair of texts from SICK or RTE.
It is meant to be used as an abstract representation for both.
'''
def __init__(self, t, h, id_, entailment, similarity):
'''
:param t: string with the text
:param h: string with the hypothesis
:param id_: int indicating id in the original file
:param entailment: int indicating entailment class
:param similarity: float
'''
self.t = t
self.h = h
self.id = id_
self.entailment = entailment
self.similarity = similarity
def read_xml(filename, need_labels):
'''
Read an RTE XML file and return a list of Pair objects.
:param filename: name of the file to read
:param need_labels: boolean indicating if labels should be present
'''
pairs = []
tree = ET.parse(filename)
root = tree.getroot()
for xml_pair in root.iter('pair'):
t = xml_pair.find('t').text
h = xml_pair.find('h').text
attribs = dict(xml_pair.items())
id_ = int(attribs['id'])
if 'entailment' in attribs:
ent_string = attribs['entailment'].lower()
try:
ent_value = str_to_entailment[ent_string]
            except KeyError:
msg = 'Unexpected value for attribute "entailment" at pair {}: {}'
raise ValueError(msg.format(id_, ent_string))
else:
ent_value = None
if 'similarity' in attribs:
similarity = float(attribs['similarity'])
else:
similarity = None
if need_labels and similarity is None and ent_value is None:
msg = 'Missing both entailment and similarity values for pair {}'.format(id_)
raise ValueError(msg)
pair = Pair(t, h, id_, ent_value, similarity)
pairs.append(pair)
return pairs
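# Minimal usage sketch (illustrative, not part of the original module). It assumes an
# RTE/SICK-style XML file whose <pair> elements carry "id" and optionally
# "entailment"/"similarity" attributes plus <t> and <h> child elements, e.g.
#   <pair id="1" entailment="entailment" similarity="4.2"><t>...</t><h>...</h></pair>
if __name__ == '__main__':
    import sys
    for p in read_xml(sys.argv[1], need_labels=False):
        label = entailment_to_str.get(p.entailment, 'unlabelled')
        print('pair %d: %s (similarity=%s)' % (p.id, label, p.similarity))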
| nathanshartmann/portuguese_word_embeddings | sentence_similarity/utils/commons.py | Python | gpl-3.0 | 2,224 |
from edges import EdgeExtractor
from extractor import Extractor
from parambfs import ParamExtractor | daajoe/trellis | trellis/extractor/__init__.py | Python | gpl-3.0 | 99 |
"""
.. module: FSRStools.rraman
:platform: Windows
.. moduleauthor:: Daniel Dietze <[email protected]>
Resonance Raman excitation profile calculation based on the time-domain picture of resonance Raman. See Myers and Mathies in *Biological Applications of Raman Spectroscopy*, Vol. 2, pp. 1-58 (John Wiley and Sons, New York, 1987) for details (referred to as Myers in the following). The code is mainly based on Myers' Fortran 77 code (see Appendix of PhD Thesis of K. M. Spillane, 2011, UC Berkeley for source code).
**Changelog:**
*10-7-2015:*
- Added / modified functions for calculating fluorescence spectra.
- Added a convenience function to calculate Raman spectra from a set of excitation profiles.
- Added some more damping functions and phenomenological support for Stokes shift in simple homogeneous damping function.
*10-21-2015:*
- Some bug fixes concerning the prefactors and the normalization of the fluorescence spectra.
- Fixed a bug regarding the Raman overlaps.
**Example Code**
Here is a short example calculating Myers' *Gedankenmolecule* from Myers and Mathies::
import numpy as np
import FSRStools.rraman as rr
# parameters:
# -----------
# displacements
D = np.array([1.27, 0.3, 0.7, 0.53])
# ground state frequencies
RMg = np.array([1550.0, 1300.0, 1150.0, 1000.0])
# excited state frequencies
RMe = np.array([1550.0, 1300.0, 1150.0, 1000.0])
# electronic zero-zero energy
E0 = 20700.0
# homogeneous linewidth and shape parameter
Gamma = 200.0
halpha = 0
# inhomogeneous linewidth and shape parameter
sig = 400.0
ialpha = 1
# electronic transition dipole length
M = 0.8
# index of refraction of surrounding medium
IOR = 1.0
# time axis parameters for integrations
tmax = 5000
dt = 0.2
# just calculate fundamentals
nquanta = np.identity(len(RMg))
sshift = np.dot(nquanta, RMg)
# calculation part
# ----------------
# create axes
t, wn = rr.getAxes(tmax, dt)
# zero-zero energy and damping
# add here all time domain stuff
TDpart = rr.getHomogeneousDamping(t, Gamma, halpha)
# time dependent overlap integrals
OVLPS = rr.getOverlaps(t, D, RMg, RMe, nquanta)
# calculate cross-sections
sigmaA, sigmaR, kF = rr.getCrossSections(t, wn, E0, OVLPS, sshift, M, IOR, TDpart, sig, ialpha)
..
This file is part of the FSRStools python module.
The FSRStools python module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The FSRStools python module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the FSRStools python module. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014, 2015 Daniel Dietze <[email protected]>.
"""
import numpy as np
# some constants
hbar = 5308.880986 #: Planck's constant over 2 pi, hbar, in `cm-1 fs`
c0 = 2.99792458e-5 #: speed of light in `cm / fs`
kB = 0.695 #: Boltzman's constant in `cm-1 / K`
# -------------------------------------------------------------------------------------------------------------------
# some useful functions
def radperfs2wn(w):
"""Angular frequency (rad / fs) to wavenumber (cm-1).
"""
return hbar * w
def wn2radperfs(e):
"""Wavenumber (cm-1) to angular frequency (rad / fs).
"""
return e / hbar
def wn2lambda(w):
"""Convert wavenumber (cm-1) to wavelength (nm).
"""
return 1e7 / w
def lambda2wn(w):
"""Convert wavelength (nm) to wavenumber (cm-1).
"""
return 1e7 / w
def getWnIndex(wn, wn0):
"""Get the index into an array of wavenumbers wn with wavenumber closest to wn0. Use this function for :py:func:`getRamanSpectrum`.
"""
if np.amin(wn) > wn0 or np.amax(wn) < wn0:
print "Warning: wn0 lies outside of wn."
return np.argmin(np.absolute(wn - wn0))
def getAxes(tmax, dt):
"""Create time and frequency axes for the resonance Raman calculations.
:param float tmax: Endpoint for time domain calculation (fs). This value should be high enough to capture the full dephasing.
:param float dt: Increment of time axis (fs). This value should be small enough to capture the highest vibronic feature in the excited state.
:returns: Time axis (fs) and frequency axis (cm-1).
"""
t = np.arange(0, tmax + dt, dt)
numPoints = len(t)
wn = np.arange(numPoints) / (c0 * dt * numPoints)
return t, wn
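# Note (illustrative, derived from the definition above): the frequency axis has a
# resolution of 1/(c0*dt*N) cm-1 and extends up to roughly 1/(c0*dt) cm-1, so tmax
# mainly sets the spectral resolution while dt sets the spectral range.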
def molarExtinction2AbsCS(eSpctr, IOR):
"""Convert molar extinction (cm-1 / M) to molecular absorption cross section (A**2 / molec).
    See McHale, Resonance Raman Spectroscopy, Wiley, (2002), p. 545 or Myers & Mathies for details. The absorption cross section in solution has to be scaled by the index of refraction unless the molar extinction has already been corrected for it.
:param array eSpctr: Extinction spectrum in (cm-1 / M).
:param float IOR: Index of refraction of surrounding solvent / medium.
:returns: Absorption spectrum in units of (A**2 / molec.), same shape as eSpcrt.
"""
return 1e3 * np.log(10.0) * eSpctr / 6.0221e23 * 1e8 * 1e8 / IOR
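# Numerical example (illustrative): a molar extinction of 1e5 cm-1/M in a solvent with
# IOR = 1.33 gives 1e3 * ln(10) * 1e5 / 6.0221e23 * 1e16 / 1.33 ~ 2.9 A**2 / molec.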
def diff2absRamanCS(diffRaCS, rho):
"""Convert the differential Raman cross section (A**2/molec sr) to absolute Raman cross section in (A**2 / molec) for a given depolarization ratio rho.
:param float diffRaCS: Differential Raman cross section (A**2/molec sr).
:param float rho: Associated depolarization ratio of this Raman mode.
:returns: Absolute Raman cross section in (A**2 / molec).
"""
return 8.0 * np.pi / 3.0 * (1.0 + 2.0 * rho) / (1.0 + rho) * diffRaCS
def getRamanSpectrum(wn, iEL, RMg, nquanta, sigmaR, dw=10.0, alpha=0):
"""
Convenience function to calculate the Raman spectrum. The spectrum is scattered power per infinitesimal frequency normalized to incident power times molecular density (cm-3) times path length (cm). See Myers, *Chem. Phys.* **180**, 215 (1994), Eq. 7 for details.
:param array wn: Wavenumber axis (Stokes shift, not electronic).
:param int iEL: Index into sigmaR corresponding to the pump energy of the laser.
:param array RMg: Ground state Raman frequencies
:param array nquanta: M x N array containing the quanta of the N possible Raman modes for the M Raman lines to calculate. Use :py:func:`numpy.identity` to just calculate the fundamentals. Possible values are 0, 1, 2.
:param array sigmaR: Array of M Raman cross sections that have been calculated by :py:func:`getCrossSections` (in A**2 / molec).
:param float dw: Phenomenological FWHM linewidth of the Raman lines in cm-1 (default = 10 cm-1).
:param float alpha: Line shape parameter to be used for the Raman spectrum:
- 1 = Gaussian
- 0 = Lorentzian (default)
:returns: Calculated Raman spectrum (same shape as wn).
"""
spectrum = np.zeros(len(wn))
if iEL < 0 or iEL >= len(sigmaR[0]):
print "Error: iEL is out of range!"
return spectrum
# iterate over all M modes
for i, nM in enumerate(nquanta):
# get frequency of this mode
wR = np.sum(nM * RMg)
# add Lorentzian part of lineshape
spectrum = spectrum + (1.0 - alpha) * sigmaR[i][iEL] * 1e-16 * (dw / (2.0 * np.pi * ((wn - wR)**2 + dw**2 / 4.0)))
# add Gaussian part of lineshape
spectrum = spectrum + alpha * sigmaR[i][iEL] * 1e-16 * ((2.0 * np.sqrt(np.log(2) / np.pi)) / dw * np.exp(-4.0 * np.log(2.0) * (wn - wR)**2 / dw**2))
return spectrum
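# Illustrative continuation of the module-level example (not part of the original
# code): once sigmaR has been obtained from getCrossSections(), the Stokes-shift
# spectrum for a given pump energy can be assembled as follows; the pump wavenumber
# of 20700 cm-1 is an arbitrary example value.
#
#     iEL = getWnIndex(wn, 20700.0)
#     spec = getRamanSpectrum(wn, iEL, RMg, nquanta, sigmaR, dw=10.0, alpha=0)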
# -----------------------------------------------------------------------------------------------------------------------------------
# time dependent overlap integrals with equal ground and excited state vibrational frequencies
# the t00 overlap does not contain the factors exp(-1j wVIB t) nor exp(-1j E0/hbar t) as these are taken care of when assembling the cross section
# Myers eqs. (37) - (39)
# Delta = displacement in dimensionless coordinates
# eVIB = vibrational frequency (cm-1)
# t = time axis in fs
def t00A(t, Delta, eVIB):
"""Time dependent overlap integral between vibrational ground states of electronic ground and excited state with equal ground and excited state vibrational frequencies.
:param array t: Time axis in (fs).
:param float Delta: Displacement of excited state potential energy surface along this vibrational coordinate in dimensionless coordinates.
:param float eVIB: Vibrational frequency (cm-1).
:returns: 0-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (37) - (39).
"""
# The 0-0 overlap does not contain the factors :math:`e^{-j w_{VIB} t}` nor :math:`e^{-j E_0 / \\hbar t}` as these are taken care of when assembling the cross section.
return np.exp(-Delta**2 / 2.0 * (1.0 - np.exp(-1j * eVIB / hbar * t)))
def t10A(t, Delta, eVIB):
"""Time dependent overlap integral between vibrational ground and first excited state of electronic ground and excited state with equal ground and excited state vibrational frequencies.
:param array t: Time axis in (fs).
:param float Delta: Displacement of excited state potential energy surface along this vibrational coordinate in dimensionless coordinates.
:param float eVIB: Vibrational frequency (cm-1).
:returns: 1-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (37) - (39).
"""
return Delta / np.sqrt(2) * (np.exp(-1j * eVIB / hbar * t) - 1.0) # * t00A(t, Delta, eVIB)
def t20A(t, Delta, eVIB):
"""Time dependent overlap integral between vibrational ground and second excited state of electronic ground and excited state with equal ground and excited state vibrational frequencies.
:param array t: Time axis in (fs).
:param float Delta: Displacement of excited state potential energy surface along this vibrational coordinate in dimensionless coordinates.
:param float eVIB: Vibrational frequency (cm-1).
:returns: 2-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (37) - (39).
"""
return Delta**2 / (2 * np.sqrt(2)) * (np.exp(-1j * eVIB / hbar * t) - 1.0)**2 # * t00A(t, Delta, eVIB)
# -------------------------------------------------------------------------------------------------------------------------------------------------
# same with different frequency in ground and excited state
# Myers eqs. (42) - (44)
# Delta = displacement in dimensionless coordinates
# eg = ground state vibrational frequency (cm-1)
# ee = excited state vibrational frequency (cm-1)
# t = time axis in fs
def t00B(t, Delta, eg, ee):
"""Time dependent overlap integral between vibrational ground states of electronic ground and excited state with different ground and excited state vibrational frequencies.
:param array t: Time axis in (fs).
:param float Delta: Displacement of excited state potential energy surface along this vibrational coordinate in dimensionless coordinates.
:param float eg: Vibrational frequency in the ground state (cm-1).
:param float ee: Vibrational frequency in the excited state (cm-1).
:returns: 0-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (42) - (44).
"""
wg = eg / hbar
we = ee / hbar
swe = np.sin(we * t)
cwe = np.cos(we * t)
pt = we / wg * Delta * swe
qt = Delta * (1 - cwe)
# the log reduces to 0.5 * eg / hbar * t when eg = ee
# this is the factor that is taken out in the t00A case, as it cancels with the exp in the integral later on
# however, np.log returns values such that -pi < arg(log(..)) < pi
gt = 1j / 2.0 * np.log(1j * wg / we * swe + cwe) + pt * (qt - Delta) / 2.0 # skip -E0 t / hbar
# gt = gt + wg * t / 2.0 # add +w t / 2 using ground state frequency as this compensates the -w t / 2.0 term coming from the FFT
# add the following term to recover t00A for eg = ee
gt = gt - 1j / 2.0 * np.log(1j * np.sin(wg * t) + np.cos(wg * t))
at = -0.5 * 1j * (1j * cwe - (we / wg) * swe) / (1j * (wg / we) * swe + cwe)
a = at + 0.5
pp = pt - 2.0 * 1j * at * qt
gp = 1j * at * qt**2 - pt * qt + gt
return a**(-0.5) * np.exp(-pp**2 / (4.0 * a)) * np.exp(1j * gp)
def t10B(t, Delta, eg, ee):
"""Time dependent overlap integral between vibrational ground and first excited state of electronic ground and excited state with different ground and excited state vibrational frequencies.
:param array t: Time axis in (fs).
:param float Delta: Displacement of excited state potential energy surface along this vibrational coordinate in dimensionless coordinates.
:param float eg: Vibrational frequency in the ground state (cm-1).
:param float ee: Vibrational frequency in the excited state (cm-1).
:returns: 1-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (42) - (44).
"""
wg = eg / hbar
we = ee / hbar
swe = np.sin(we * t)
cwe = np.cos(we * t)
pt = we / wg * Delta * swe
qt = Delta * (1 - cwe)
at = -0.5 * 1j * (1j * cwe - (we / wg) * swe) / (1j * (wg / we) * swe + cwe)
a = at + 0.5
pp = pt - 2.0 * 1j * at * qt
return 2**(-0.5) * pp / (1j * a) # * t00B(t, Delta, eg, ee)
def t20B(t, Delta, eg, ee):
"""Time dependent overlap integral between vibrational ground and second excited state of electronic ground and excited state with different ground and excited state vibrational frequencies.
:param array t: Time axis in (fs).
:param float Delta: Displacement of excited state potential energy surface along this vibrational coordinate in dimensionless coordinates.
:param float eg: Vibrational frequency in the ground state (cm-1).
:param float ee: Vibrational frequency in the excited state (cm-1).
:returns: 2-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (42) - (44).
"""
wg = eg / hbar
we = ee / hbar
swe = np.sin(we * t)
cwe = np.cos(we * t)
pt = we / wg * Delta * swe
qt = Delta * (1 - cwe)
at = -0.5 * 1j * (1j * cwe - (we / wg) * swe) / (1j * (wg / we) * swe + cwe)
a = at + 0.5
pp = pt - 2.0 * 1j * at * qt
return -8**(-0.5) * (pp**2 / a**2 + 2. * (1. - 1. / a)) # * t00B(t, Delta, eg, ee)
# ----------------------------------------------------------------------------------------------------------------------------------
# same for linear dissociative excited state surfaces
# Myers eqs. (52) - (54)
# beta = slope of potential energy surface (dV / dq) in cm-1 (q is dimensionless coordinate)
# eVIB = vibrational frequency (cm-1)
def t00D(t, beta, eVIB):
"""Time dependent overlap integral between vibrational ground states of electronic ground and excited state with a linear dissociative excited state surface along this vibrational coordinate.
:param array t: Time axis in (fs).
:param float beta: Slope of excited state potential energy surface (dV / dq) in (cm-1) (q is dimensionless coordinate).
:param float eVIB: Vibrational frequency (cm-1).
:returns: 0-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (52) - (54).
"""
tmp = (1.0 + 1j * eVIB / hbar * t / 2.0)**(-0.5) * np.exp(-beta**2 * (6 * t**2 + 1j * eVIB / hbar * t**3) / (24 * hbar**2))
    tmp = tmp * np.exp(1j * eVIB / hbar * t / 2.0) # add this term to compensate for the -1j w t / 2 term coming from the FFT
return tmp
def t10D(t, beta, eVIB):
"""Time dependent overlap integral between vibrational ground and first excited state of electronic ground and excited state with a linear dissociative excited state surface along this vibrational coordinate.
:param array t: Time axis in (fs).
:param float beta: Slope of excited state potential energy surface (dV / dq) in (cm-1) (q is dimensionless coordinate).
:param float eVIB: Vibrational frequency (cm-1).
:returns: 1-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (52) - (54).
"""
return -1j * 2**(-0.5) * (beta * t / hbar) # * t00D(t, beta, eVIB)
def t20D(t, beta, eVIB):
"""Time dependent overlap integral between vibrational ground and second excited state of electronic ground and excited state with a linear dissociative excited state surface along this vibrational coordinate.
:param array t: Time axis in (fs).
:param float beta: Slope of excited state potential energy surface (dV / dq) in (cm-1) (q is dimensionless coordinate).
:param float eVIB: Vibrational frequency (cm-1).
:returns: 2-0 overlap integral as function of time (same shape as t).
.. seealso:: Myers, Eqs. (52) - (54).
"""
return -2**(-0.5) * (beta**2 * t**2 / (2.0 * hbar**2) - 1j * eVIB / hbar * t / (2.0 + 1j * eVIB / hbar * t)) # * t00D(t, beta, eVIB)
# ---------------------------------------------------------------------------------------------------------------------------------
def getOverlaps(t, D, RMg, RMe, nquanta):
"""Calculate the time dependent overlap integrals / Franck-Condon factors :math:`<i|i(t)>_k` and :math:`<f|i(t)>_k`.
.. versionchanged:: 10-07-2015
Format of return value changed.
:param array t: Time axis in (fs).
:param array D: Array of N normalized displacements of excited state surfaces (deltas), or slope of linear dissociative excited state surface.
:param array RMg: N Raman ground state frequencies (cm-1).
:param array RMe: N Raman excited state frequencies (cm-1) or -1 if excited state surface is dissociative.
:param array nquanta: M x N array containing the quanta of the N possible Raman modes for the M Raman lines to calculate. Use :py:func:`numpy.identity` to just calculate the fundamentals. Possible values are 0 (no excitation), 1 (fundamental), 2 (first overtone).
:returns: M + 2 - dimensional array containing the Rayleigh, fluorescence and M Raman overlaps.
"""
ovlps = []
N = len(D)
M = nquanta.shape[0]
    # Franck-Condon factors <i|i(t)>_k and <f|i(t)>_k
FC0 = []
FC0p = []
FC1 = []
FC2 = []
for i in range(N):
if(RMg[i] == RMe[i]):
FC0.append(t00A(t, D[i], RMg[i]))
FC0p.append(FC0[-1]) # fluorescence overlap is identical to absorption overlap when frequencies are equal
FC1.append(t10A(t, D[i], RMg[i]))
FC2.append(t20A(t, D[i], RMg[i]))
elif(RMe[i] == -1):
FC0.append(t00D(t, D[i], RMg[i]))
FC0p.append(np.zeros(len(t))) # fluorescence is negligible from dissociative surface
FC1.append(t10D(t, D[i], RMg[i]))
FC2.append(t20D(t, D[i], RMg[i]))
else:
FC0.append(t00B(t, D[i], RMg[i], RMe[i]))
FC0p.append(t00B(t, D[i], RMe[i], RMg[i])) # fluorescence overlap has excited state and ground state Raman frequencies switched
FC1.append(t10B(t, D[i], RMg[i], RMe[i]))
FC2.append(t20B(t, D[i], RMg[i], RMe[i]))
# go to numpy array..
FC0 = np.array(FC0)
FC0p = np.array(FC0p)
FC1 = np.array(FC1)
FC2 = np.array(FC2)
# Rayleigh / absorption overlap
oabs = 1.0 + 0.0 * 1j # reuse this term for the raman overlaps
for i in range(N):
oabs = oabs * FC0[i]
ovlps.append(oabs)
# fluorescence overlap
o = 1.0 + 0.0 * 1j
for i in range(N):
o = o * FC0p[i]
ovlps.append(o)
# actual Raman overlaps
for j in range(M):
o = 1.0 * oabs # all raman modes are based on this product and additional terms given by the excited modes
for i in range(N):
if(nquanta[j][i] == 1):
o = o * FC1[i]
elif(nquanta[j][i] == 2):
o = o * FC2[i]
ovlps.append(o)
return ovlps
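# --- Hedged usage sketch (not part of the original module) ------------------------------------------------------------------------
# Two-mode system: the first mode keeps its frequency in the excited state, the second is treated as dissociative (-1).
# All numbers are illustrative assumptions, not fitted parameters.
def _example_overlaps():
    import numpy as np
    t = np.arange(0.0, 500.0, 0.5)           # time axis (fs)
    D = np.array([0.9, 0.4])                 # dimensionless displacements / slope
    RMg = np.array([1200.0, 1600.0])         # ground state frequencies (cm-1)
    RMe = np.array([1200.0, -1.0])           # equal frequency / dissociative
    nquanta = np.identity(2, dtype=int)      # fundamentals only
    return getOverlaps(t, D, RMg, RMe, nquanta)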
# ---------------------------------------------------------------------------------------------------------------------------------
def getZeroZeroEnergy(t, E0):
"""Calculate the oscillation term in the time domain due to the electronic zero-zero energy E0.
:param array t: Time axis (fs).
:param float E0: Difference between excited and ground state vibrational ground state energies, *zero-zero energy* (cm-1).
"""
return np.exp(-1j * E0 / hbar * t)
# -----------------------------------------------------------------------------------------------------------------------------
# Calculate the damping terms as function of time t.
def getHomogeneousDamping(t, Gamma, alpha=0, lmbda=0):
"""Calculates the damping term arising from the homogeneous linewidth of the electronic transition. Offers phenomenological support for Stokes shift.
.. note:: Added phenomenological Stokes shift to input parameters on 10-12-2015. See for example *New J Phys* **11**, 015001 (2009), Eqs. (1) and (2).
:param array t: Time axis (fs).
    :param float Gamma: Decay rate according to :math:`1 / \\tau` in (cm-1), where :math:`\\tau` is the exponential dephasing time.
:param float alpha: Line shape parameter:
- 1 = Gaussian
- 0 = Lorentzian
:param float lmbda: Phenomenological Stokes shift (cm-1) which is added as imaginary part to g(t). Compared to the Brownian oscillator models, lmbda **is** the observed Stokes shift. (default = 0)
    :returns: Damping term in the time domain, :math:`e^{-g(t) - i \\lambda t / 2 \\hbar}`.
"""
g = alpha * (Gamma**2 / hbar**2 * t**2) + (1 - alpha) * (Gamma / hbar * t) + 1j * lmbda / 2.0 * t / hbar
return np.exp(-g)
def getKuboDamping(t, Delta, Lambda):
"""Calculates the damping term using Kubo's *stochastic model*. This model describes the broadening, but does not yield solvent induced Stokes shifts.
:param array t: Time axis (fs).
:param float Delta: Magnitude of solvent energy gap fluctuations (cm-1). This parameter also controls the effective line shape:
- Delta >> Lambda = Lorentzian
- Delta << Lambda = Gaussian
:param float Lambda: Effective frequency of solvent fluctuations (cm-1).
:returns: Damping term in the time domain, :math:`e^{-g(t)}`.
.. seealso:: Myers, *J. Raman. Spectrosc.* **28**, 389 (1997)
"""
return np.exp(-(Delta / Lambda)**2 * (np.exp(-Lambda / hbar * t) + Lambda / hbar * t - 1.0))
def getBrownianDamping(t, kappa, T, egamma, cutoff=1e-6):
"""Calculate the damping term using Mukamel's Brownian oscillator model based on Myers Fortran code. The real part of g(t) leads to a Gaussian broadening of the spectra, while the imaginary part leads to a solvent induced Stokes shift.
:param array t: Time axis (fs).
:param float kappa: Lineshape parameter:
- kappa >> 1 = Lorentzian,
- kappa << 1 = Gaussian.
:param float T: Temperature in K.
:param float egamma: Electronic homogeneous linewidth (**FWHM**, cm-1).
:param float cutoff: Cutoff for sum over Brownian oscillators. Typically between 1e-6 (default) and 1e-8. Check for convergence by re-running with different values.
:returns: Damping term in the time domain, :math:`e^{-g(t)}`.
.. seealso:: Myers, *J. Raman. Spectrosc.* **28**, 389 (1997)
"""
temp = np.absolute(T)
# ----------------------------------------------------------
# 1: derive Mukamel's parameters from kappa, temp and egamma
# I do not have a reference for this part - it's taken from Myers fortran code
# Boltzmann beta
beta = 1.0 / (kB * temp) # 1/cm-1
# some 'a' parameter (this comes from Myers Fortran program)
a = (2.355 + 1.76 * kappa) / (1.0 + 0.85 * kappa + 0.88 * kappa**2)
# these are Mukamel's parameters in Myers, J. Raman. Spec. 28, 389 (1997), eqs. (35) to (38)
Lambda = kappa * egamma / a # cm-1
lmbda = beta * (Lambda / kappa)**2 / 2.0 # cm-1
# ----------------------------------------------------------
# 2: calculate the sum over n Brownian oscillators
vs = np.zeros(len(t)) # this is the sum over the n oscillators as function of time in (cm-1)**-3
n = 0
while(True):
n = n + 1
vn = 2.0 * np.pi * n / beta # cm-1
vinc = (np.exp(-vn / hbar * t) + vn / hbar * t - 1) / (vn * (vn**2 - Lambda**2))
vs = vs + vinc
if(np.amax(np.absolute(vinc[1:] / vs[1:])) < cutoff): # the first element of vs is always 0
break
# ----------------------------------------------------------
# 3: calculate the damping function g(t)
gexp = np.exp(-Lambda / hbar * t) + Lambda / hbar * t - 1.0 # dimensionless
greal = (lmbda / Lambda) / np.tan(beta * Lambda / 2.0) * gexp # dimensionless
greal = greal + 4.0 * lmbda * Lambda / beta * vs # dimensionless
gimag = -(lmbda / Lambda) * gexp # dimensionless
g = greal + 1j * gimag # dimensionless
return np.exp(-g)
def getBrownianDamping2(t, lmbda, Lambda, T=298.0, cutoff=1e-6):
"""Calculate pure electronic dephasing due to interaction with solvent using frictionally overdamped Brownian oscillator model.
The real part of g(t) leads to a Gaussian broadening of the spectra, while the imaginary part leads to a solvent induced Stokes shift.
:param array t: Time axis in fs.
:param float lmbda: Solvent contribution to reorganization energy (cm-1).
:param float Lambda: Inverse of characteristic time scale for solvent fluctuations (fs-1).
:param float T: Temperature (K, default = 298 K).
:param float cutoff: Cutoff value for summation over brownian oscillators (default 1e-6).
:returns: Damping term in the time domain, :math:`e^{-g(t)}`.
.. seealso:: This implementation is taken from Kulinowksi, *J Phys Chem* **99**, 9017 (1995), Eqs. (10a) to (10d).
"""
beta = 1.0 / (kB * np.absolute(T))
lmb = lmbda / hbar # convert to fs-1
# calculate real part as sum over oscillators
gR = 0.0
i = 1.0
while(1):
nun = 2.0 * np.pi / (hbar * beta) * i # frequency of ith oscillator
dg = (np.exp(-nun * t) + nun * t - 1.0) / (nun * (nun**2 - Lambda**2))
gR = gR + dg
i = i + 1.0
        if np.sum(np.absolute(dg)) / np.sum(np.absolute(gR)) < cutoff:
break
gR = gR * 4.0 * lmb * Lambda / (hbar * beta)
    gR = gR + (lmb / Lambda) / np.tan(hbar * beta * Lambda / 2.0) * (np.exp(-Lambda * t) + Lambda * t - 1.0)
# calculate imaginary part = Stokes shift
gI = -(lmb / Lambda) * (np.exp(-Lambda * t) - 1.0)
# assemble
g = gR + 1j * gI # dimensionless
return np.exp(-g)
def getBrownianDampingSlowMod(t, lmbda, T=298.0):
"""Calculate pure electronic dephasing due to interaction with solvent using frictionally overdamped Brownian oscillator model in the high-temperature and slow-modulation limit.
The real part of g(t) leads to a Gaussian broadening of the spectra, while the imaginary part leads to a solvent induced Stokes shift.
:param array t: Time axis in fs.
:param float lmbda: Solvent contribution to reorganization energy (cm-1).
:param float T: Temperature (K, default = 298 K).
:returns: Damping term in the time domain, :math:`e^{-g(t)}`.
.. seealso:: This implementation is taken from Kulinowksi, *J Phys Chem* **99**, 9017 (1995), Eq. (11).
"""
lmb = lmbda / hbar # convert to fs-1
return np.exp(-(lmb * kB * np.absolute(T) * t**2 / hbar + 1j * lmb * t))
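# --- Hedged usage sketch (not part of the original module) ------------------------------------------------------------------------
# Evaluates the three simplest damping models on a common time axis; the linewidth / fluctuation parameters are
# illustrative assumptions only.
def _example_damping_terms():
    import numpy as np
    t = np.arange(0.0, 200.0, 0.5)                                # fs
    homogeneous = getHomogeneousDamping(t, Gamma=100.0, alpha=0)  # Lorentzian dephasing
    kubo = getKuboDamping(t, Delta=150.0, Lambda=50.0)            # stochastic model
    slow_mod = getBrownianDampingSlowMod(t, lmbda=300.0)          # slow-modulation Brownian oscillator
    return homogeneous, kubo, slow_mod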
# ---------------------------------------------------------------------------------------------------------------------------------
#
def applyInhomogeneousBroadening(wn, y, sig, alpha=1):
"""Convolute a spectrum with a Gaussian/Lorentzian to account for inhomogeneous broadening.
:param array wn: Frequency axis in same units as sig (cm-1).
:param array y: Input spectrum, same shape as wn.
:param float sig: Width of convolution function in same units as x (standard deviation of Gaussian distribution). Must not be zero.
:param float alpha: Lineshape parameter:
- 1 = Gaussian,
- 0 = Lorentzian.
:returns: Convoluted spectrum (same shape as y).
"""
ck = alpha / (sig * np.sqrt(2 * np.pi)) * np.exp(-(wn - (wn[-1] + wn[0]) / 2.0)**2 / (2.0 * sig**2))
ck += (1 - alpha) * sig / (np.pi * ((wn - (wn[-1] + wn[0]) / 2)**2 + sig**2))
# np.convolve uses a sum, whereas the function we want uses an integral; wn[1] - wn[0] is dwn
return (wn[1] - wn[0]) * np.convolve(y, ck, 'same')
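# --- Hedged usage sketch (not part of the original module) ------------------------------------------------------------------------
# Convolutes a single stick line at 2000 cm-1 with a 50 cm-1 Gaussian; purely illustrative values.
def _example_inhomogeneous_broadening():
    import numpy as np
    wn = np.linspace(0.0, 4000.0, 2001)
    stick = np.zeros_like(wn)
    stick[1000] = 1.0                       # sharp line at 2000 cm-1
    return applyInhomogeneousBroadening(wn, stick, 50.0, alpha=1)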
# --------------------------------------------------------------------------------------------------------------------------------
def prefA(eEL, M, IOR, dt):
"""Return the prefactor for the absorption cross section calculation in (A**2 / molec).
:param array eEL: Laser excitation energy in (cm-1). May also be a single float value.
:param float M: Electronic transition dipole length in (A).
:param float IOR: Index of refraction of surrounding solvent / medium.
:param float dt: Time increment used for integration (fs).
:returns: Prefactor for absorption cross section calculation.
.. seealso:: Myers, Eq. (35).
"""
# to convert from esu to SI divide by 4 pi eps0
    # the factor 1 / 2 arises from numpy's normalization of rfft, which is chosen to match the amplitude of fft,
    # so the rfft is not completely identical to the half-sided FT integral
return 5.7579e-6 * M**2 * eEL * dt / IOR / 2.0
# -------------------------------------------------------------------------------------------------------------------------------
def prefR(eEL, M, eR, dt):
"""Return the prefactor for the Raman excitation profile calculation (A**2 / molec).
:param array eEL: Laser excitation energies in (cm-1). Can also be a single floating point value.
:param float M: Electronic transition dipole moment in (A).
:param float eR: Stokes shift of the Raman line in (cm-1).
:param float dt: Time increment for the integration (fs).
:returns: The prefactor for the Raman excitation profile calculation.
.. seealso:: Myers, Eq. (34) and following text.
"""
# get energy of stokes shifted photons
eES = eEL - eR
# the 1e-6 is for fs instead of ps in the integral and is consistent with Myers fortran code (it is different however from the 1e4 factor in Valley & Hoffman code!!)
# to convert from esu to SI divide by (4 pi eps0)**2
return 2.0831e-20 * 1e-6 * M**4 * eES**3 * eEL * dt**2
# --------------------------------------------------------------------------------------------------------------------------------
def prefF(eEF, M, IOR, dt):
"""Return the prefactor for the fluorescence efficiency calculation (unitless). See :py:func:`getCrossSections` for more details.
:param array eEF: Fluorescence energy in (cm-1). May also be a single float value.
:param float M: Electronic transition dipole length in (A).
:param float IOR: Index of refraction of surrounding solvent / medium.
:param float dt: Time increment used for integration (fs).
:returns: Prefactor for fluorescence efficiency calculation.
.. seealso:: Myers, *Chem. Phys.* **180**, 215 (1994), Eqs. (6) and (26).
"""
# to convert from esu to SI divide by 4 pi eps0
    # the factor 1 / 2 arises from numpy's normalization of rfft, which is chosen to match the amplitude of fft,
    # so the rfft is not completely identical to the half-sided FT integral
return 3.6656e-22 * IOR * M**2 * eEF**3 * dt / 2.0
# ----------------------------------------------------------------------------------------------------------------------------
def getCrossSections(t, wn, E0, ovlps, sshift, M, IOR, damp=1, sig=0, ialpha=1):
"""Calculate the absorption and Raman cross-sections and the fluorescence efficiency. The latter is a unitless quantity which may be used
to calculate the fluorescence rate (=rate of spontaneous emission) by integrating over the frequency axis (see Myers, *Chem. Phys.* **180**, 215 (1994) Eq. 6 and discussion).
.. note:: Changed shape of input parameters and shape of return values on 10-07-2015.
:param array t: Time axis in (fs). This axis is used for the calculation of the zero-zero energy term in the time domain.
:param array wn: Wavenumber axis in (cm-1). Same shape as t.
    :param float E0: Zero-zero energy (cm-1). This function then calculates the time domain part using `getZeroZeroEnergy`.
:param array ovlps: M + 2 Absorption, fluorescence and Raman overlap integrals.
    :param array sshift: Vibrational frequencies of the M Raman modes to calculate (cm-1).
:param float M: Electronic transition dipole length (A).
:param float IOR: Index of refraction of surrounding medium / solvent.
:param array damp: Damping function in the time domain. Same shape as t. Set to 1 if no damping is used (default).
:param float sig: Linewidth for inhomogeneous damping (standard deviation of Gaussian), set to zero if not used (default).
:param float ialpha: Lineshape parameter for inhomogeneous damping:
- 1 = Gaussian (default),
- 0 = Lorentzian.
    :returns: Absorption (sigmaA), M Raman cross sections (sigmaR[M]), both in A**2 / molec., and fluorescence efficiency spectrum, kF (arrays have same shape as wn); all as function of excitation wavenumber.
"""
Npoints = len(wn)
dt = t[1] - t[0]
    # calculate zero-zero time domain part
tdpart = getZeroZeroEnergy(t, E0)
# absorption cross section - using the half sided FT (equivalent to rfft)
tmp = np.real(Npoints * np.fft.irfft(ovlps[0] * tdpart * damp, Npoints))
if(sig > 0):
tmp = applyInhomogeneousBroadening(wn, tmp, sig, ialpha)
sigmaA = prefA(wn, M, IOR, dt) * tmp
# fluorescence rate / intensity - using half sided FT - similar to absorption
# in order to account for the sign change, the zero-zero energy time domain part and the damping term had to be separated;
# use the tdpart conjugated and change irfft by hfft to get the factor exp(-1j w t)
# numpy does not normalize the forward FFT, so no factor Npoints
tmp = np.real(np.fft.hfft(ovlps[1] * np.conjugate(tdpart) * damp, Npoints))
if(sig > 0):
tmp = applyInhomogeneousBroadening(wn, tmp, sig, ialpha)
kF = prefF(wn, M, IOR, dt) * tmp
# Raman cross sections - using a standard FT
sigmaR = []
for i, ovlp in enumerate(ovlps[2:]): # iterate over all lines
tmp = np.absolute(Npoints * np.fft.ifft(ovlp * tdpart * damp, Npoints))**2 # use again the inverse transform to get "exp(1j w t)"
if(sig > 0):
tmp = applyInhomogeneousBroadening(wn, tmp, sig, ialpha)
sigmaR.append(prefR(wn, M, sshift[i], dt) * tmp)
return sigmaA, sigmaR, kF
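# --- Hedged end-to-end sketch (not part of the original module) --------------------------------------------------------------------
# Chains getOverlaps, a damping term and getCrossSections for a single displaced mode. The construction of the
# wavenumber axis as the FFT conjugate of t (wavenumber step = 1 / (N dt c) with c in cm/fs) is an assumption;
# check the conventions used elsewhere in this package before relying on absolute energies.
def _example_cross_sections():
    import numpy as np
    N = 4096
    dt = 0.5                                      # integration time step (fs)
    t = np.arange(N) * dt
    wn = np.arange(N) / (N * dt * 2.99792458e-5)  # cm-1, assumed FFT-conjugate axis to t
    D = np.array([0.9])                           # dimensionless displacement
    RMg = np.array([1350.0])                      # ground state frequency (cm-1)
    RMe = np.array([1350.0])                      # same frequency in the excited state
    nquanta = np.identity(1, dtype=int)
    ovlps = getOverlaps(t, D, RMg, RMe, nquanta)
    damp = getHomogeneousDamping(t, 120.0)        # 120 cm-1 Lorentzian dephasing
    return getCrossSections(t, wn, 20000.0, ovlps, RMg, 2.0, 1.33, damp=damp)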
| ddietze/FSRStools | rraman/__init__.py | Python | gpl-3.0 | 36,173 |
# Copyright (c) 2008 testtools developers. See LICENSE for details.
"""Test results and related things."""
__metaclass__ = type
__all__ = [
'ExtendedToOriginalDecorator',
'MultiTestResult',
'TestResult',
'ThreadsafeForwardingResult',
]
import datetime
import sys
import unittest
from testtools.compat import all, _format_exc_info, str_is_unicode, _u
# From http://docs.python.org/library/datetime.html
_ZERO = datetime.timedelta(0)
# A UTC class.
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return _ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return _ZERO
utc = UTC()
class TestResult(unittest.TestResult):
"""Subclass of unittest.TestResult extending the protocol for flexability.
This test result supports an experimental protocol for providing additional
    data in test outcomes. All the outcome methods take an optional dict
'details'. If supplied any other detail parameters like 'err' or 'reason'
should not be provided. The details dict is a mapping from names to
MIME content objects (see testtools.content). This permits attaching
tracebacks, log files, or even large objects like databases that were
part of the test fixture. Until this API is accepted into upstream
Python it is considered experimental: it may be replaced at any point
by a newer version more in line with upstream Python. Compatibility would
be aimed for in this case, but may not be possible.
:ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
"""
def __init__(self):
# startTestRun resets all attributes, and older clients don't know to
# call startTestRun, so it is called once here.
# Because subclasses may reasonably not expect this, we call the
# specific version we want to run.
TestResult.startTestRun(self)
def addExpectedFailure(self, test, err=None, details=None):
"""Called when a test has failed in an expected manner.
        Like with addSuccess and addError, stopTest should still be called.
        :param test: The test that failed in an expected manner.
:param err: The exc_info of the error that was raised.
:return: None
"""
# This is the python 2.7 implementation
self.expectedFailures.append(
(test, self._err_details_to_string(test, err, details)))
def addError(self, test, err=None, details=None):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
"""
self.errors.append((test,
self._err_details_to_string(test, err, details)))
def addFailure(self, test, err=None, details=None):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
"""
self.failures.append((test,
self._err_details_to_string(test, err, details)))
def addSkip(self, test, reason=None, details=None):
"""Called when a test has been skipped rather than running.
        Like with addSuccess and addError, stopTest should still be called.
This must be called by the TestCase. 'addError' and 'addFailure' will
not call addSkip, since they have no assumptions about the kind of
errors that a test can raise.
:param test: The test that has been skipped.
:param reason: The reason for the test being skipped. For instance,
u"pyGL is not available".
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
:return: None
"""
if reason is None:
reason = details.get('reason')
if reason is None:
reason = 'No reason given'
else:
reason = ''.join(reason.iter_text())
skip_list = self.skip_reasons.setdefault(reason, [])
skip_list.append(test)
def addSuccess(self, test, details=None):
"""Called when a test succeeded."""
def addUnexpectedSuccess(self, test, details=None):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"""Has this result been successful so far?
If there have been any errors, failures or unexpected successes,
return False. Otherwise, return True.
Note: This differs from standard unittest in that we consider
unexpected successes to be equivalent to failures, rather than
successes.
"""
return not (self.errors or self.failures or self.unexpectedSuccesses)
if str_is_unicode:
# Python 3 and IronPython strings are unicode, use parent class method
_exc_info_to_unicode = unittest.TestResult._exc_info_to_string
else:
# For Python 2, need to decode components of traceback according to
# their source, so can't use traceback.format_exception
# Here follows a little deep magic to copy the existing method and
# replace the formatter with one that returns unicode instead
from types import FunctionType as __F, ModuleType as __M
__f = unittest.TestResult._exc_info_to_string.im_func
__g = dict(__f.func_globals)
__m = __M("__fake_traceback")
__m.format_exception = _format_exc_info
__g["traceback"] = __m
_exc_info_to_unicode = __F(__f.func_code, __g, "_exc_info_to_unicode")
del __F, __M, __f, __g, __m
def _err_details_to_string(self, test, err=None, details=None):
"""Convert an error in exc_info form or a contents dict to a string."""
if err is not None:
return self._exc_info_to_unicode(err, test)
return _details_to_str(details, special='traceback')
def _now(self):
"""Return the current 'test time'.
If the time() method has not been called, this is equivalent to
datetime.now(), otherwise its the last supplied datestamp given to the
time() method.
"""
if self.__now is None:
return datetime.datetime.now(utc)
else:
return self.__now
def startTestRun(self):
"""Called before a test run starts.
New in Python 2.7. The testtools version resets the result to a
pristine condition ready for use in another test run. Note that this
is different from Python 2.7's startTestRun, which does nothing.
"""
super(TestResult, self).__init__()
self.skip_reasons = {}
self.__now = None
# -- Start: As per python 2.7 --
self.expectedFailures = []
self.unexpectedSuccesses = []
# -- End: As per python 2.7 --
def stopTestRun(self):
"""Called after a test run completes
New in python 2.7
"""
def time(self, a_datetime):
"""Provide a timestamp to represent the current time.
This is useful when test activity is time delayed, or happening
concurrently and getting the system time between API calls will not
accurately represent the duration of tests (or the whole run).
Calling time() sets the datetime used by the TestResult object.
Time is permitted to go backwards when using this call.
:param a_datetime: A datetime.datetime object with TZ information or
None to reset the TestResult to gathering time from the system.
"""
self.__now = a_datetime
def done(self):
"""Called when the test runner is done.
deprecated in favour of stopTestRun.
"""
class MultiTestResult(TestResult):
"""A test result that dispatches to many test results."""
def __init__(self, *results):
TestResult.__init__(self)
self._results = list(map(ExtendedToOriginalDecorator, results))
def __repr__(self):
return '<%s (%s)>' % (
self.__class__.__name__, ', '.join(map(repr, self._results)))
def _dispatch(self, message, *args, **kwargs):
return tuple(
getattr(result, message)(*args, **kwargs)
for result in self._results)
def startTest(self, test):
return self._dispatch('startTest', test)
def stopTest(self, test):
return self._dispatch('stopTest', test)
def addError(self, test, error=None, details=None):
return self._dispatch('addError', test, error, details=details)
def addExpectedFailure(self, test, err=None, details=None):
return self._dispatch(
'addExpectedFailure', test, err, details=details)
def addFailure(self, test, err=None, details=None):
return self._dispatch('addFailure', test, err, details=details)
def addSkip(self, test, reason=None, details=None):
return self._dispatch('addSkip', test, reason, details=details)
def addSuccess(self, test, details=None):
return self._dispatch('addSuccess', test, details=details)
def addUnexpectedSuccess(self, test, details=None):
return self._dispatch('addUnexpectedSuccess', test, details=details)
def startTestRun(self):
return self._dispatch('startTestRun')
def stopTestRun(self):
return self._dispatch('stopTestRun')
def time(self, a_datetime):
return self._dispatch('time', a_datetime)
def done(self):
return self._dispatch('done')
def wasSuccessful(self):
"""Was this result successful?
Only returns True if every constituent result was successful.
"""
return all(self._dispatch('wasSuccessful'))
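# Hedged usage sketch (not part of the original module): fans one stream of
# result events out to two independent TestResult objects.
def _example_multi_result():
    first, second = TestResult(), TestResult()
    multi = MultiTestResult(first, second)
    multi.startTestRun()
    multi.stopTestRun()
    return first.wasSuccessful(), second.wasSuccessful()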
class TextTestResult(TestResult):
"""A TestResult which outputs activity to a text stream."""
def __init__(self, stream):
"""Construct a TextTestResult writing to stream."""
super(TextTestResult, self).__init__()
self.stream = stream
self.sep1 = '=' * 70 + '\n'
self.sep2 = '-' * 70 + '\n'
def _delta_to_float(self, a_timedelta):
return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
a_timedelta.microseconds / 1000000.0)
def _show_list(self, label, error_list):
for test, output in error_list:
self.stream.write(self.sep1)
self.stream.write("%s: %s\n" % (label, test.id()))
self.stream.write(self.sep2)
self.stream.write(output)
def startTestRun(self):
super(TextTestResult, self).startTestRun()
self.__start = self._now()
self.stream.write("Tests running...\n")
def stopTestRun(self):
if self.testsRun != 1:
plural = 's'
else:
plural = ''
stop = self._now()
self._show_list('ERROR', self.errors)
self._show_list('FAIL', self.failures)
for test in self.unexpectedSuccesses:
self.stream.write(
"%sUNEXPECTED SUCCESS: %s\n%s" % (
self.sep1, test.id(), self.sep2))
self.stream.write("\nRan %d test%s in %.3fs\n" %
(self.testsRun, plural,
self._delta_to_float(stop - self.__start)))
if self.wasSuccessful():
self.stream.write("OK\n")
else:
self.stream.write("FAILED (")
details = []
details.append("failures=%d" % (
sum(map(len, (
self.failures, self.errors, self.unexpectedSuccesses)))))
self.stream.write(", ".join(details))
self.stream.write(")\n")
super(TextTestResult, self).stopTestRun()
class ThreadsafeForwardingResult(TestResult):
"""A TestResult which ensures the target does not receive mixed up calls.
This is used when receiving test results from multiple sources, and batches
up all the activity for a single test into a thread-safe batch where all
other ThreadsafeForwardingResult objects sharing the same semaphore will be
locked out.
Typical use of ThreadsafeForwardingResult involves creating one
ThreadsafeForwardingResult per thread in a ConcurrentTestSuite. These
forward to the TestResult that the ConcurrentTestSuite run method was
called with.
target.done() is called once for each ThreadsafeForwardingResult that
forwards to the same target. If the target's done() takes special action,
care should be taken to accommodate this.
"""
def __init__(self, target, semaphore):
"""Create a ThreadsafeForwardingResult forwarding to target.
:param target: A TestResult.
:param semaphore: A threading.Semaphore with limit 1.
"""
TestResult.__init__(self)
self.result = ExtendedToOriginalDecorator(target)
self.semaphore = semaphore
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.result)
def _add_result_with_semaphore(self, method, test, *args, **kwargs):
self.semaphore.acquire()
try:
self.result.time(self._test_start)
self.result.startTest(test)
self.result.time(self._now())
try:
method(test, *args, **kwargs)
finally:
self.result.stopTest(test)
finally:
self.semaphore.release()
def addError(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addError,
test, err, details=details)
def addExpectedFailure(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addExpectedFailure,
test, err, details=details)
def addFailure(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addFailure,
test, err, details=details)
def addSkip(self, test, reason=None, details=None):
self._add_result_with_semaphore(self.result.addSkip,
test, reason, details=details)
def addSuccess(self, test, details=None):
self._add_result_with_semaphore(self.result.addSuccess,
test, details=details)
def addUnexpectedSuccess(self, test, details=None):
self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
test, details=details)
def startTestRun(self):
self.semaphore.acquire()
try:
self.result.startTestRun()
finally:
self.semaphore.release()
def stopTestRun(self):
self.semaphore.acquire()
try:
self.result.stopTestRun()
finally:
self.semaphore.release()
def done(self):
self.semaphore.acquire()
try:
self.result.done()
finally:
self.semaphore.release()
def startTest(self, test):
self._test_start = self._now()
super(ThreadsafeForwardingResult, self).startTest(test)
def wasSuccessful(self):
return self.result.wasSuccessful()
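# Hedged usage sketch (not part of the original module): forwards a batched
# test outcome to a shared target guarded by a semaphore, as one would do from
# worker threads in a ConcurrentTestSuite.
def _example_threadsafe_forwarding():
    import threading
    import unittest

    class _ExampleCase(unittest.TestCase):
        def runTest(self):
            pass

    target = TestResult()
    forwarder = ThreadsafeForwardingResult(target, threading.Semaphore(1))
    case = _ExampleCase()
    forwarder.startTest(case)    # remembers the start time for the batch
    forwarder.addSuccess(case)   # start/outcome/stop are forwarded atomically
    return target.wasSuccessful()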
class ExtendedToOriginalDecorator(object):
"""Permit new TestResult API code to degrade gracefully with old results.
This decorates an existing TestResult and converts missing outcomes
such as addSkip to older outcomes such as addSuccess. It also supports
the extended details protocol. In all cases the most recent protocol
is attempted first, and fallbacks only occur when the decorated result
does not support the newer style of calling.
"""
def __init__(self, decorated):
self.decorated = decorated
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.decorated)
def __getattr__(self, name):
return getattr(self.decorated, name)
def addError(self, test, err=None, details=None):
self._check_args(err, details)
if details is not None:
try:
return self.decorated.addError(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return self.decorated.addError(test, err)
def addExpectedFailure(self, test, err=None, details=None):
self._check_args(err, details)
addExpectedFailure = getattr(
self.decorated, 'addExpectedFailure', None)
if addExpectedFailure is None:
return self.addSuccess(test)
if details is not None:
try:
return addExpectedFailure(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return addExpectedFailure(test, err)
def addFailure(self, test, err=None, details=None):
self._check_args(err, details)
if details is not None:
try:
return self.decorated.addFailure(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return self.decorated.addFailure(test, err)
def addSkip(self, test, reason=None, details=None):
self._check_args(reason, details)
addSkip = getattr(self.decorated, 'addSkip', None)
if addSkip is None:
return self.decorated.addSuccess(test)
if details is not None:
try:
return addSkip(test, details=details)
except TypeError:
# extract the reason if it's available
try:
reason = ''.join(details['reason'].iter_text())
except KeyError:
reason = _details_to_str(details)
return addSkip(test, reason)
def addUnexpectedSuccess(self, test, details=None):
outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
if outcome is None:
try:
test.fail("")
except test.failureException:
return self.addFailure(test, sys.exc_info())
if details is not None:
try:
return outcome(test, details=details)
except TypeError:
pass
return outcome(test)
def addSuccess(self, test, details=None):
if details is not None:
try:
return self.decorated.addSuccess(test, details=details)
except TypeError:
pass
return self.decorated.addSuccess(test)
def _check_args(self, err, details):
param_count = 0
if err is not None:
param_count += 1
if details is not None:
param_count += 1
if param_count != 1:
raise ValueError("Must pass only one of err '%s' and details '%s"
% (err, details))
def _details_to_exc_info(self, details):
"""Convert a details dict to an exc_info tuple."""
return (
_StringException,
_StringException(_details_to_str(details, special='traceback')),
None)
def done(self):
try:
return self.decorated.done()
except AttributeError:
return
def progress(self, offset, whence):
method = getattr(self.decorated, 'progress', None)
if method is None:
return
return method(offset, whence)
@property
def shouldStop(self):
return self.decorated.shouldStop
def startTest(self, test):
return self.decorated.startTest(test)
def startTestRun(self):
try:
return self.decorated.startTestRun()
except AttributeError:
return
def stop(self):
return self.decorated.stop()
def stopTest(self, test):
return self.decorated.stopTest(test)
def stopTestRun(self):
try:
return self.decorated.stopTestRun()
except AttributeError:
return
def tags(self, new_tags, gone_tags):
method = getattr(self.decorated, 'tags', None)
if method is None:
return
return method(new_tags, gone_tags)
def time(self, a_datetime):
method = getattr(self.decorated, 'time', None)
if method is None:
return
return method(a_datetime)
def wasSuccessful(self):
return self.decorated.wasSuccessful()
class _StringException(Exception):
"""An exception made from an arbitrary string."""
if not str_is_unicode:
def __init__(self, string):
if type(string) is not unicode:
raise TypeError("_StringException expects unicode, got %r" %
(string,))
Exception.__init__(self, string)
def __str__(self):
return self.args[0].encode("utf-8")
def __unicode__(self):
return self.args[0]
# For 3.0 and above the default __str__ is fine, so we don't define one.
def __hash__(self):
return id(self)
def __eq__(self, other):
try:
return self.args == other.args
except AttributeError:
return False
def _format_text_attachment(name, text):
if '\n' in text:
return "%s: {{{\n%s\n}}}\n" % (name, text)
return "%s: {{{%s}}}" % (name, text)
def _details_to_str(details, special=None):
"""Convert a details dict to a string.
:param details: A dictionary mapping short names to ``Content`` objects.
:param special: If specified, an attachment that should have special
attention drawn to it. The primary attachment. Normally it's the
traceback that caused the test to fail.
:return: A formatted string that can be included in text test results.
"""
empty_attachments = []
binary_attachments = []
text_attachments = []
special_content = None
# sorted is for testing, may want to remove that and use a dict
# subclass with defined order for items instead.
for key, content in sorted(details.items()):
if content.content_type.type != 'text':
binary_attachments.append((key, content.content_type))
continue
text = _u('').join(content.iter_text()).strip()
if not text:
empty_attachments.append(key)
continue
# We want the 'special' attachment to be at the bottom.
if key == special:
special_content = '%s\n' % (text,)
continue
text_attachments.append(_format_text_attachment(key, text))
if text_attachments and not text_attachments[-1].endswith('\n'):
text_attachments.append('')
if special_content:
text_attachments.append(special_content)
lines = []
if binary_attachments:
lines.append('Binary content:\n')
for name, content_type in binary_attachments:
lines.append(' %s (%s)\n' % (name, content_type))
if empty_attachments:
lines.append('Empty attachments:\n')
for name in empty_attachments:
lines.append(' %s\n' % (name,))
if (binary_attachments or empty_attachments) and text_attachments:
lines.append('\n')
lines.append('\n'.join(text_attachments))
return _u('').join(lines)
| lauria/Samba4 | lib/testtools/testtools/testresult/real.py | Python | gpl-3.0 | 23,481 |
import abc
import subprocess
import logging
from observables import BLOperator, MCObservable
from data import BLDataChannel, GIDataChannel
import util
class Channel(metaclass=abc.ABCMeta):
ISOSPIN_MAP = {
'singlet': "0",
'doublet': "1h",
'triplet': "1",
'quartet': "3h",
'quintet': "2",
'sextet': "5h"
}
def __init__(self, *, particle_type=None, isospin, strangeness=None, laph_query="laph_query",
sigmond_query="sigmond_query"):
self.particle_type = particle_type
self.strangeness = strangeness
self.isospin = isospin
self.laph_query = laph_query
self.sigmond_query = sigmond_query
# @ADH - I think I am going to have the DataHandler deal with these in the future
self.raw_data_channels = list()
@staticmethod
def initialize(*, data_file, laph_query="laph_query", sigmond_query="sigmond_query",
is_basic_laph=True):
if is_basic_laph:
query_result = subprocess.check_output([laph_query, '-i', data_file]).decode()
laph_xml = util.queryToXML(query_result)
operator = BLOperator.createFromXML(laph_xml.find(".//Operator"))
if 'special' in data_file.split('/'):
return SpecialChannel(particle_type=operator.particle_type, isospin=operator.isospin,
strangeness=operator.strangeness, flavor=operator.flavor,
laph_query=laph_query, sigmond_query=sigmond_query)
elif operator.psq > 0:
return MovingChannel(particle_type=operator.particle_type, isospin=operator.isospin,
strangeness=operator.strangeness, psq=operator.psq,
lg_irrep=operator.lg_irrep, laph_query=laph_query,
sigmond_query=sigmond_query)
else:
return AtRestChannel(particle_type=operator.particle_type, isospin=operator.isospin,
strangeness=operator.strangeness, lg_irrep=operator.lg_irrep,
laph_query=laph_query, sigmond_query=sigmond_query)
else:
query_result = subprocess.check_output([sigmond_query, '-k', data_file]).decode()
try:
records = query_result.split('Record')
observable = MCObservable.createFromXML(util.queryToXML(records[1]))
if observable.psq > 0:
return MovingChannel(isospin=observable.isospin, psq=observable.psq,
lg_irrep=observable.lg_irrep, laph_query=laph_query,
sigmond_query=sigmond_query)
else:
return AtRestChannel(isospin=observable.isospin, lg_irrep=observable.lg_irrep,
laph_query=laph_query, sigmond_query=sigmond_query)
except IndexError:
logging.warning("%s contains no records", data_file)
except AttributeError:
logging.warning("%s contains Observables", data_file)
return None
def addRawDataChannel(self, path, is_basic_laph=True):
if is_basic_laph:
self.raw_data_channels.append(BLDataChannel(path, self.laph_query))
else:
self.raw_data_channels.append(GIDataChannel(path, self.sigmond_query))
@property
@abc.abstractmethod
def channel_string(self):
pass
@property
def is_special(self):
return isinstance(self, SpecialChannel)
@property
def is_atrest(self):
return isinstance(self, AtRestChannel)
@property
def is_moving(self):
return isinstance(self, MovingChannel)
def __hash__(self):
return hash(self.__repr__())
def __str__(self):
return self.channel_string
# @ADH - Should be checking that 'other' is an instance of an object
# derived from Channel. I'm not sure how to best do that right now.
# So, this will suffice for the moment.
def __eq__(self, other):
return self.__repr__() == other.__repr__()
def __ne__(self, other):
return self.__repr__() != other.__repr__()
def __lt__(self, other):
return self.__repr__() < other.__repr__()
def __gt__(self, other):
return self.__repr__() > other.__repr__()
def __le__(self, other):
return self.__repr__() <= other.__repr__()
def __ge__(self, other):
return self.__repr__() >= other.__repr__()
class SpecialChannel(Channel):
def __init__(self, *, particle_type, isospin, strangeness, flavor, laph_query="laph_query",
sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.flavor = flavor
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B"
elif self.particle_type == "fermion":
particle_type = "F"
strangeness = str(self.strangeness).replace('-', 'm')
return "{p_type}_{flavor}_I{isospin}_S{strangeness}_special".format(
p_type=particle_type, flavor=self.flavor, isospin=self.ISOSPIN_MAP[self.isospin],
strangeness=strangeness)
def __repr__(self):
return "SP_{}".format(self.channel_string)
class AtRestChannel(Channel):
def __init__(self, *, particle_type=None, isospin, strangeness=None, lg_irrep,
laph_query="laph_query", sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.psq = 0
self.lg_irrep = lg_irrep
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B_"
elif self.particle_type == "fermion":
particle_type = "F_"
else:
particle_type = ""
if self.strangeness is not None:
strangeness = "S{}_".format(self.strangeness).replace('-', 'm')
else:
strangeness = ""
return "{p_type}I{isospin}_{strangeness}P0_{irrep}".format(
p_type=particle_type, isospin=self.ISOSPIN_MAP[self.isospin], strangeness=strangeness,
irrep=self.lg_irrep)
def __repr__(self):
return "AR_{}".format(self.channel_string)
class MovingChannel(Channel):
def __init__(self, *, particle_type=None, isospin, strangeness=None, psq, lg_irrep,
laph_query="laph_query", sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.psq = psq
self.lg_irrep = lg_irrep
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B_"
elif self.particle_type == "fermion":
particle_type = "F_"
else:
particle_type = ""
if self.strangeness is not None:
strangeness = "S{}_".format(self.strangeness).replace('-', 'm')
else:
strangeness = ""
return "{p_type}I{isospin}_{strangeness}PSQ{psq}_{irrep}".format(
p_type=particle_type, isospin=self.ISOSPIN_MAP[self.isospin], strangeness=strangeness,
psq=self.psq, irrep=self.lg_irrep)
def __repr__(self):
return "MV_{}".format(self.channel_string)
| andrewhanlon/QCD_scripts | sigmond/channel.py | Python | gpl-3.0 | 7,175 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.data import flt, nowdate, getdate, cint
class MoneyTransfere(Document):
def on_submit(self):
self.validate_transfere()
def validate(self):
self.get_dummy_accounts()
def get_dummy_accounts(self):
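		# Look up the inter-company clearing accounts; the Arabic account names read
		# "حساب استلام من" ("receiving account from") and "حساب ارسال الي" ("sending account to").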
dummy_to = frappe.db.get_values("Account", {"name": "حساب استلام من"+" - "+self.from_company + " - "+self.abbr_to,
"company": self.to_company,
"parent_account":"حساب استلام من"+" - "+self.abbr_to })
self.dummy_to=dummy_to[0][0]
dummy_from = frappe.db.get_values("Account", {"name": "حساب ارسال الي"+" - "+self.to_company + " - "+self.abbr,
"company": self.from_company,
"parent_account":"حساب ارسال"+" - "+self.abbr })
self.dummy_from=dummy_from[0][0]
def before_cancel(self):
pe = frappe.get_value("Payment Entry", filters = {"transfere_reference": self.name}, fieldname = "name")
if pe:
pe_doc = frappe.get_doc("Payment Entry", pe)
pe_doc.cancel()
je = frappe.get_value("Journal Entry Account", filters = {"reference_name": self.name}, fieldname = "parent")
if je:
je_doc = frappe.get_doc("Journal Entry", je)
je_doc.cancel()
def validate_transfere(self):
if self.from_company != self.to_company:
# sending_account = "حساب ارسال الى " + self.to_company
# receiving_account = "حساب استلام من " + self.from_company
# self.add_account_for_company(sending_account, self.to_company, "Liability")
# self.add_account_for_company(receiving_account, self.from_company, "Expense")
self.add_payment_entry(self.from_account, self.dummy_from, self.from_company)
self.add_journal_entry(self.to_account,self.dummy_to, self.to_company)
else:
self.add_payment_entry(self.from_account, self.to_account, self.from_company)
def add_account_for_company(self, account, company, r_type):
pass
# pacc_name = ""
# if r_type == "Expense":
# pacc_name = "حساب ارسال - E"
# elif r_type == "Liability":
# pacc_name = "حساب استقبال - o"
# # if not frappe.db.exists("Account", pacc_name):
# # pacc = frappe.new_doc("Account")
# # pacc.account_name = pacc_name
# # pacc.root_type = r_type
# # pacc.is_group = 1
# # pacc.parent_account = ""
# # pacc.company = company
# # pacc.flags.ignore_validate = True
# # pacc.insert()
# if not frappe.db.exists("Account", account):
# acc = frappe.new_doc("Account")
# acc.account_name = account
# acc.company = company
# acc.parent_account = pacc_name
# acc.is_group = 0
# acc.insert()
def add_payment_entry(self, paid_from, paid_to, company):
pe = frappe.new_doc("Payment Entry")
pe.payment_type = "Internal Transfer"
pe.company = company
pe.paid_from = paid_from
pe.paid_to = paid_to
pe.paid_amount = self.transfered_amount
pe.received_amount = self.transfered_amount
pe.posting_date = nowdate()
pe.mode_of_payment = self.mode_of_payment
pe.transfere_reference = self.name
pe.insert()
pe.submit()
# pe.setup_party_account_field()
# pe.set_missing_values()
# pe.set_exchange_rate()
# pe.set_amounts()
# self.assertEquals(pe.difference_amount, 500)
# pe.append("deductions", {
# "account": "_Test Exchange Gain/Loss - _TC",
# "cost_center": "_Test Cost Center - _TC",
# "amount": 500
# })
def add_journal_entry(self, account1, account2, company):
default_cost = frappe.get_value("Company", filters = {"name":company}, fieldname = "cost_center")
jv = frappe.new_doc("Journal Entry")
jv.posting_date = nowdate()
jv.company = company
jv.voucher_type = "Opening Entry"
jv.set("accounts", [
{
"account": account2,
"credit_in_account_currency": self.transfered_amount,
"cost_center": default_cost,
"reference_type": "Money Transfere",
"reference_name": self.name
}, {
"account": account1,
"debit_in_account_currency": self.transfered_amount,
"cost_center": default_cost,
"reference_type": "Money Transfere",
"reference_name": self.name
}
])
jv.insert()
jv.submit()
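# Hedged usage sketch (not part of the original app): creating a transfer from server-side code.
# Field values are placeholders; the company abbreviation fields (abbr, abbr_to) are assumed to be
# fetched automatically from the linked companies by the doctype definition.
def _example_money_transfer():
	doc = frappe.get_doc({
		"doctype": "Money Transfere",
		"from_company": "Company A",
		"to_company": "Company B",
		"from_account": "Cash - A",
		"to_account": "Cash - B",
		"transfered_amount": 1000,
		"mode_of_payment": "Cash",
	})
	doc.insert()
	doc.submit()  # on_submit runs validate_transfere and creates the ledger entries
	return doc.name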
| ahmadRagheb/goldenHR | erpnext/accounts/doctype/money_transfere/money_transfere.py | Python | gpl-3.0 | 4,294 |
import numpy as np
def min_max_model(power, use, battery_capacity):
"""
    Minimal maximum battery model (obsolete).
:param power: Pandas TimeSeries, total power from renewable system
:param use: float, unit W fixed load of the power system
:param battery_capacity: float, unit Wh battery capacity
:return: list, energy history in battery
"""
power = power.tolist()
energy = 0
energy_history = []
for p in power:
energy = min(battery_capacity, max(0, energy + (p - use) * 1))
energy_history.append(energy)
return energy_history
def soc_model_fixed_load(
power,
use,
battery_capacity,
depth_of_discharge=1,
discharge_rate=0.005,
battery_eff=0.9,
discharge_eff=0.8,
):
"""
Battery state of charge model with fixed load. (Obsolete)
:param power: Pandas TimeSeries of total power from renewable system
:param use: float unit W fixed load of the power system
:param battery_capacity: float unit Wh battery capacity
:param depth_of_discharge: float 0 to 1 maximum allowed discharge depth
:param discharge_rate: self discharge rate
:param battery_eff: optional 0 to 1 battery energy store efficiency default 0.9
:param discharge_eff: battery discharge efficiency 0 to 1 default 0.8
:return: tuple SOC: state of charge, energy history: E in battery,
unmet_history: unmet energy history, waste_history: waste energy history
"""
DOD = depth_of_discharge
power = power.tolist()
use_history = []
waste_history = []
unmet_history = []
energy_history = []
energy = 0
for p in power:
if p >= use:
use_history.append(use)
unmet_history.append(0)
energy_new = energy * (1 - discharge_rate) + (p - use) * battery_eff
if energy_new < battery_capacity:
energy = energy_new # battery energy got update
waste_history.append(0)
else:
waste_history.append(p - use)
energy = energy
elif p < use:
energy_new = energy * (1 - discharge_rate) + (p - use) / discharge_eff
if energy_new > (1 - DOD) * battery_capacity:
energy = energy_new
unmet_history.append(0)
waste_history.append(0)
use_history.append(use)
elif energy * (1 - discharge_rate) + p * battery_eff < battery_capacity:
energy = energy * (1 - discharge_rate) + p * battery_eff
unmet_history.append(use - p)
use_history.append(0)
waste_history.append(0)
else:
unmet_history.append(use - p)
use_history.append(0)
waste_history.append(p)
energy = energy
energy_history.append(energy)
if battery_capacity == 0:
SOC = np.array(energy_history)
else:
SOC = np.array(energy_history) / battery_capacity
return SOC, energy_history, unmet_history, waste_history, use_history
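# Hedged usage sketch (not part of the original module): one synthetic day of hourly generation
# against a constant 50 W load; all numbers are illustrative assumptions.
def _example_soc_fixed_load():
    import numpy as np
    import pandas as pd
    power = pd.Series(np.clip(300.0 * np.sin(np.linspace(0.0, 2.0 * np.pi, 24)), 0.0, None))
    SOC, energy, unmet, waste, used = soc_model_fixed_load(power, 50.0, 1000.0)
    return SOC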
class Battery:
"""
A simple finite state based energy flow battery model.
"""
def __init__(self, capacity, config={}):
"""
Initialise the battery with a given capacity and configuration.
:param capacity: float, unit Wh
:param config: options including DOD, depth of discharge; sigma, self-discharge rate; eta_in, charge efficiency;
        eta_out, discharge efficiency; init_charge, percentage of the battery pre-charge; where all values shall be between 0
and 1
"""
self.capacity = capacity
self.config = config
self.set_parameters()
def set_parameters(self):
"""
Setup the parameters using the config file, options including DOD, depth of discharge; sigma, self-discharge rate;
eta_in, charge efficiency; eta_out, discharge efficiency; init_charge, percentage of the battery pre-charge;
        where all values shall be between 0 and 1.
"""
try:
self.depth_of_discharge = self.config['simulation']['battery']['DOD']
self.discharge_rate = self.config['simulation']['battery']['sigma']
self.battery_eff = self.config['simulation']['battery']['eta_in']
self.discharge_eff = self.config['simulation']['battery']['eta_out']
self.init_charge = self.config['simulation']['battery']['B0']
except KeyError:
print('Parameter is not found in config file, default values are used.')
self.depth_of_discharge = 1
self.discharge_rate = 0.005
self.battery_eff = 0.9
self.discharge_eff = 0.8
self.init_charge = 1
def run(self, power, use):
"""
Run the battery model with a list of power generation and usage.
:param power: list, power generation unit in W
:param use: list, power usage unit in W
:return: None
"""
DOD = self.depth_of_discharge
battery_capacity = self.capacity
discharge_rate = self.discharge_rate
discharge_eff = self.discharge_eff
battery_eff = self.battery_eff
use_history = []
waste_history = []
unmet_history = []
energy_history = []
SOC = []
energy = self.init_charge * self.capacity
for p, u in zip(power, use):
if p >= u:
use_history.append(u)
unmet_history.append(0)
energy_new = energy * (1 - discharge_rate) + (p - u) * battery_eff
if energy_new < battery_capacity:
                    energy = energy_new # battery energy gets updated
waste_history.append(0)
else:
waste_history.append(p - u)
energy = energy
elif p < u:
energy_new = energy * (1 - discharge_rate) + (p - u) / discharge_eff
if energy_new > (1 - DOD) * battery_capacity:
energy = energy_new
unmet_history.append(0)
waste_history.append(0)
use_history.append(u)
elif energy * (1 - discharge_rate) + p * battery_eff < battery_capacity:
energy = energy * (1 - discharge_rate) + p * battery_eff
unmet_history.append(u - p)
use_history.append(0)
waste_history.append(0)
else:
unmet_history.append(u - p)
use_history.append(0)
waste_history.append(p)
energy = energy
energy_history.append(energy)
SOC.append(energy / battery_capacity)
self.SOC = SOC
self.energy_history = energy_history
self.unmet_history = unmet_history
self.waste_history = waste_history
self.use_history = use_history
def battery_history(self):
"""
Return the history of the battery.
:return: np array, the SOC, energy in the battery, unmet power supply, wasted power and the supplied power unit in W
"""
history = np.vstack(
(
np.array(self.SOC),
np.array(self.energy_history),
np.array(self.unmet_history),
np.array(self.waste_history),
np.array(self.use_history),
)
)
return history
def lost_power_supply_probability(self):
"""
Return the lost power supply probability (LPSP) using the battery history.
:return: float, LPSP
"""
LPSP = 1 - self.unmet_history.count(0) / len(self.energy_history)
return LPSP
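# Illustrative usage sketch (added for clarity, not part of the original module): the
# Battery class takes equal-length lists of generation and load in W; capacity is in Wh.
#
#     battery = Battery(100)  # no config given, so the default parameters are used
#     battery.run([15.0, 5.0, 20.0], [10.0, 10.0, 10.0])
#     soc, energy, unmet, waste, used = battery.battery_history()
#     print(battery.lost_power_supply_probability())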
class Battery_managed:
"""
    Battery_managed is the base class for the demand-load controllable battery model.
"""
def __init__(self, capacity, config={}):
"""
:param capacity: float, unit Wh
        :param config: options including DOD, depth of discharge; sigma, self-discharge rate; eta_in, charge efficiency;
            eta_out, discharge efficiency; init_charge (config key 'B0'), initial charge as a fraction of capacity;
            all values shall be between 0 and 1
"""
self.capacity = capacity
self.config = config
self.set_parameters()
self.init_history()
self.init_simulation()
self.status = []
self.states_list = []
def set_parameters(self):
"""
        Set up the parameters from the config file, options including DOD, depth of discharge; sigma, self-discharge rate;
        eta_in, charge efficiency; eta_out, discharge efficiency; init_charge, initial charge as a fraction of capacity;
        all values shall be between 0 and 1.
"""
try:
self.depth_of_discharge = self.config['simulation']['battery']['DOD']
self.discharge_rate = self.config['simulation']['battery']['sigma']
self.battery_eff = self.config['simulation']['battery']['eta_in']
self.discharge_eff = self.config['simulation']['battery']['eta_out']
self.init_charge = self.config['simulation']['battery']['B0']
self.DOD = self.depth_of_discharge
except KeyError:
            print('Parameters not found in the config file, default values are used.')
self.depth_of_discharge = 1
self.discharge_rate = 0.005
self.battery_eff = 0.9
self.discharge_eff = 0.8
self.init_charge = 1
self.DOD = self.depth_of_discharge
def reset(self):
"""
Reset the battery state to the start of simulation.
:return:
"""
self.init_history()
self.init_simulation()
def init_simulation(self):
self.energy = self.init_charge * self.capacity
def init_history(self):
self.supply_history = []
self.waste_history = []
self.unmet_history = []
self.battery_energy_history = []
self.SOC = []
def step(self, plan, generated, gym = False):
"""
Run the finite state battery model on one time step.
:param plan: float, planned power usage in W
:param generated: float, power generation unit in W
        :param gym: optional, set True when used in OpenAI Gym mode
:return: float, the supplied power in W
"""
if gym == True:
plan = plan[0][0]
if generated >= plan:
self.supply_history.append(plan)
self.unmet_history.append(0)
energy_new = self.energy * (1 - self.discharge_rate) + (generated - plan) * self.battery_eff
if energy_new < self.capacity:
                self.energy = energy_new # battery energy gets updated
self.waste_history.append(0)
                self.status.append("""Demand can be met by generation and the battery is not full.
                    Supply {demand}, charge {diff}.""".format(demand=plan, diff=generated - plan)
)
self.state = 'charge'
else:
self.waste_history.append(generated - plan - (self.capacity - self.energy))
self.energy = self.capacity
                self.status.append("""Demand can be met by generation, but the battery is already full.
                    Supply {demand}, charge the battery to full, waste {diff}.""".format(
demand=plan, diff=generated - plan)
)
self.state = 'float'
elif generated < plan:
energy_new = self.energy * (1 - self.discharge_rate) + (generated - plan) / self.discharge_eff
if energy_new > (1 - self.DOD) * self.capacity:
self.energy = energy_new
self.unmet_history.append(0)
self.waste_history.append(0)
self.supply_history.append(plan)
                self.status.append("""Demand cannot be met by generation; the energy in the battery can make up the difference.
                    Supply {demand} by discharging the battery""".format(demand=plan))
self.state = 'discharge'
elif self.energy * (1 - self.discharge_rate) + generated * self.battery_eff < self.capacity:
self.energy = self.energy * (1 - self.discharge_rate) + generated * self.battery_eff
self.unmet_history.append(plan - generated)
self.supply_history.append(0)
self.waste_history.append(0)
                self.status.append("""Demand cannot be met by generation and the energy in the battery cannot make up the difference.
                    Charge {diff} to the battery to avoid waste""".format(diff=generated))
self.state = 'unmet'
else:
self.unmet_history.append(plan - generated)
self.supply_history.append(0)
                charge = self.capacity - self.energy
                self.waste_history.append(generated - charge)
                self.energy = self.capacity
                self.status.append("""Demand cannot be met by generation and the energy in the battery cannot make up the difference.
                    Charge {diff} to fill the battery""".format(diff=charge))
self.state = 'unmet'
self.states_list.append(self.state)
self.battery_energy_history.append(self.energy)
self.SOC.append(self.energy / self.capacity)
self.supply = self.supply_history[-1]
return self.supply
def history(self):
"""
Get the history of the managed battery.
:return: np array including the history of the battery: SOC, battery energy, unmet and wasted energy, supplied power
"""
battery_history = np.vstack(
(
np.array(self.SOC),
np.array(self.battery_energy_history),
np.array(self.unmet_history),
np.array(self.waste_history),
np.array(self.supply_history),
)
)
return battery_history
def observation(self):
"""
        Return the current observation of the battery state.
        :return: dict with current_energy (Wh) and usable_capacity (Wh)
"""
battery_state = {
'current_energy': self.energy,
'usable_capacity': self.DOD * self.capacity,
}
return battery_state
def story_board(self):
"""
        For use by explainable AI in the power management system.
        :return: list of str, the recorded status messages of the battery
"""
return self.status
def lost_power_supply_probability(self):
"""
Get the lost power supply probability of the managed battery after run.
:return: float, LPSP
"""
LPSP = 1 - self.unmet_history.count(0) / len(self.SOC)
return LPSP
def copy(self):
"""
Make a copy of battery model.
:return: Copied version of battery with same capacity and configuration
"""
return Battery_managed(self.capacity, self.config)
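# Illustrative usage sketch (added for clarity, not part of the original module): the
# managed battery is driven one step at a time, which is what lets a controller (or an
# OpenAI Gym agent) adapt the planned load to the battery observation at each time step.
# The 0.2 threshold below is an arbitrary example value.
#
#     battery = Battery_managed(100)
#     for generated in [15.0, 5.0, 20.0]:
#         obs = battery.observation()
#         plan = 10.0 if obs['current_energy'] > 0.2 * obs['usable_capacity'] else 5.0
#         supplied = battery.step(plan, generated)
#     history = battery.history()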
class Soc_model_variable_load:
"""
Obsolete basic class.
"""
def __init__(self, battery, power, load):
self.battery = battery
self.battery.run(power, load)
def get_lost_power_supply_probability(self):
return self.battery.lost_power_supply_probability()
def get_battery_history(self):
return self.battery.battery_history()
def get_quality_performance_index(self):
pass
def soc_model_variable_load(
power,
use,
battery_capacity,
depth_of_discharge=1,
discharge_rate=0.005,
battery_eff=0.9,
discharge_eff=0.8,
):
"""
    Battery state of charge model with variable load.
:param power: Pandas TimeSeries of total power from renewable system
    :param use: Pandas TimeSeries of the load power in W
:param battery_capacity: float unit Wh battery capacity
:param depth_of_discharge: float 0 to 1 maximum allowed discharge depth
:param discharge_rate: self discharge rate
:param battery_eff: optional 0 to 1 battery energy store efficiency default 0.9
:param discharge_eff: battery discharge efficiency 0 to 1 default 0.8
    :return: tuple of SOC (state of charge), energy_history (energy stored in the battery),
        unmet_history (unmet energy), waste_history (wasted energy) and use_history (supplied load)
"""
DOD = depth_of_discharge
power = power.tolist()
use = use.tolist()
use_history = []
waste_history = []
unmet_history = []
energy_history = []
energy = 0
for p, u in zip(power, use):
if p >= u:
use_history.append(u)
unmet_history.append(0)
energy_new = energy * (1 - discharge_rate) + (p - u) * battery_eff
if energy_new < battery_capacity:
                energy = energy_new # battery energy gets updated
waste_history.append(0)
else:
waste_history.append(p - u)
energy = energy
elif p < u:
energy_new = energy * (1 - discharge_rate) + (p - u) / discharge_eff
if energy_new > (1 - DOD) * battery_capacity:
energy = energy_new
unmet_history.append(0)
waste_history.append(0)
                use_history.append(u)
elif energy * (1 - discharge_rate) + p * battery_eff < battery_capacity:
energy = energy * (1 - discharge_rate) + p * battery_eff
unmet_history.append(u - p)
use_history.append(0)
waste_history.append(0)
else:
unmet_history.append(u - p)
use_history.append(0)
waste_history.append(p)
energy = energy
energy_history.append(energy)
if battery_capacity == 0:
SOC = np.array(energy_history)
else:
SOC = np.array(energy_history) / battery_capacity
return SOC, energy_history, unmet_history, waste_history, use_history
if __name__ == '__main__':
b1 = Battery(10)
b1.run([1, 1, 1], [1, 1, 1])
b1.run([1, 1, 1], [10, 10, 10])
print(b1.lost_power_supply_probability())
| tsaoyu/D3HRE | D3HRE/core/battery_models.py | Python | gpl-3.0 | 18,409 |
import re
class HeadingsParser():
"""
    The HeadingsParser parses the document for headings.
    NOT YET: converts headings to raw LaTeX headings in the correct way, so that they can be referenced later.
    See https://www.sharelatex.com/learn/Sections_and_chapters for info about the levels"""
def __init__(self):
super().__init__()
self.title = None
self.subtitle = None
self.heading = []
# regexes
self.title_start_marker_regex = re.compile(r'[=]{3,}')
self.title_end_marker_regex = re.compile(r'[=]{3,}')
self.title_content_regex = re.compile(
r'''
^ # beginning of line
[ ] # one whitespace
[A-Za-z0-9äöüÄÖÜ]+ # alphanumerical string, no whitespace
(?P<title>[A-Za-z0-9äöüÄÖÜ ]+) # alphanumerical string, whitespace ok
[A-Za-z0-9äöüÄÖÜ]+ # alphanumerical string, no whitespace
[ ] # one whitespace
$ # end of line
''', re.VERBOSE|re.UNICODE
)
self.subtitle_start_marker_regex = re.compile(r'[-]{3,}')
self.subtitle_end_marker_regex = re.compile(r'[-]{3,}')
self.subtitle_content_regex = re.compile(
r'''
^ # beginning of line
[ ] # one whitespace
[A-Za-z0-9äöüÄÖÜ]+ # alphanumerical string, no whitespace
(?P<subtitle>[A-Za-z0-9äöüÄÖÜ ]+) # alphanumerical string, whitespace ok
[A-Za-z0-9äöüÄÖÜ]+ # alphanumerical string, no whitespace
[ ] # one whitespace
$ # end of line
''', re.VERBOSE|re.UNICODE
)
# Headings cannot begin with whitespace
self.h_content_regex = re.compile(
r'''
^ # beginning of line
[A-Za-z0-9äöüÄÖÜß(] # alphanum
[A-Za-z0-9äöüÄÖÜß,() -]* # alphanum or space
[A-Za-z0-9äöüÄÖÜß)] # alphanum
$ # end of line
''', re.VERBOSE|re.UNICODE
)
# chapter
self.h1_underlining_regex = re.compile(r'[=]{3,}')
# section
self.h2_underlining_regex = re.compile(r'[-]{3,}')
# subsection
self.h3_underlining_regex = re.compile(r'[~]{3,}')
# subsubsection
self.h4_underlining_regex = re.compile(r'[\^]{3,}')
# paragraph
self.h5_underlining_regex = re.compile(r'[*]{3,}')
# subparagraph
self.h6_underlining_regex = re.compile(r'[.]{3,}')
def parse(self, rst_file_content):
self.title = self.find_title(rst_file_content)
        self.subtitle = self.find_subtitle(rst_file_content)
return self.find_heading_labels(rst_file_content)
def find_title(self, rst_file_content):
print('looking for title ...')
title = None
for lineno, line in enumerate(rst_file_content):
previous_line = ""
if lineno > 0:
previous_line = rst_file_content[lineno - 1]
next_line = ""
if lineno < len(rst_file_content) - 1:
next_line = rst_file_content[lineno + 1]
# title
if (
self.title_start_marker_regex.match(previous_line) and
self.title_end_marker_regex.match(next_line) and
(
len(self.title_start_marker_regex.match(previous_line).group()) ==
len(self.title_end_marker_regex.match(next_line).group())
) and
self.title_content_regex.match(line) and
not title
):
title = self.title_content_regex.match(line).group('title')
print('title is:|', title, '|', sep='')
break
if not title: print('Could not find title in document.')
return title
def find_subtitle(self, rst_file_content):
print('looking for subtitle ...')
subtitle = None
for lineno, line in enumerate(rst_file_content):
previous_line = ""
if lineno > 0:
previous_line = rst_file_content[lineno - 1]
next_line = ""
if lineno < len(rst_file_content) - 1:
next_line = rst_file_content[lineno + 1]
if (
self.subtitle_start_marker_regex.match(previous_line) and
self.subtitle_end_marker_regex.match(next_line) and
(
len(self.subtitle_start_marker_regex.match(previous_line).group()) ==
len(self.subtitle_end_marker_regex.match(next_line).group())
) and
self.subtitle_content_regex.match(line) and
not subtitle
):
subtitle = self.subtitle_content_regex.match(line).group('subtitle')
print('subtitle is:|', subtitle, '|', sep='')
break
if not subtitle: print('Could not find subtitle in document.')
return subtitle
def find_heading_labels(self, rst_file_content):
print('looking for headings ...')
headings_dict = {}
# heading_labels = []
for lineno, line in enumerate(rst_file_content):
# print('current line:', lineno)
# print('current line:', line)
# if line.startswith("Schlussfolgerungen"):
# print('current line:', line)
previous_line = ""
if lineno > 0:
previous_line = rst_file_content[lineno - 1]
next_line = ""
if lineno < len(rst_file_content) - 1:
next_line = rst_file_content[lineno + 1]
# headings level 1
# print('looking for h1 ...')
if (
(previous_line.isspace() or previous_line == '') and
self.h_content_regex.match(line) and
self.h1_underlining_regex.match(next_line) and
len(self.h_content_regex.match(line).group()) == len(self.h1_underlining_regex.match(next_line).group())
):
print('found a h1:', line)
print('replacing chapter heading')
headings_dict[line] = self.heading_to_label(line, 'chapter')
# heading_labels.append(self.heading_to_label(line, 'chapter'))
rst_file_content[lineno] = ':raw-latex:`\chapter{' + line + '}`'
rst_file_content[lineno + 1] = ':raw-latex:`\label{' + self.heading_to_label(line, 'chapter') + '}`'
# headings level 2
# print('looking for h2 ...')
if (
(previous_line.isspace() or previous_line == '') and
self.h_content_regex.match(line) and
self.h2_underlining_regex.match(next_line) and
len(self.h_content_regex.match(line).group()) == len(self.h2_underlining_regex.match(next_line).group())
):
print('found a h2:', line)
headings_dict[line] = self.heading_to_label(line, 'section')
# heading_labels.append(self.heading_to_label(line, 'section'))
rst_file_content[lineno] = ':raw-latex:`\section{' + line + '}`'
rst_file_content[lineno + 1] = ':raw-latex:`\label{' + self.heading_to_label(line, 'section') + '}`'
# headings level 3
# print('looking for h3 ...')
if (
(previous_line.isspace() or previous_line == '') and
self.h_content_regex.match(line) and
self.h3_underlining_regex.match(next_line) and
len(self.h_content_regex.match(line).group()) == len(self.h3_underlining_regex.match(next_line).group())
):
print('found a h3:', line)
# heading_labels.append(self.heading_to_label(line, 'subsection'))
headings_dict[line] = self.heading_to_label(line, 'subsection')
rst_file_content[lineno] = ':raw-latex:`\subsection{' + line + '}`'
rst_file_content[lineno + 1] = ':raw-latex:`\label{' + self.heading_to_label(line, 'subsection') + '}`'
# headings level 4
# print('looking for h4 ...')
if (
(previous_line.isspace() or previous_line == '') and
self.h_content_regex.match(line) and
self.h4_underlining_regex.match(next_line) and
len(self.h_content_regex.match(line).group()) == len(self.h4_underlining_regex.match(next_line).group())
):
print('found a h4:', line)
# heading_labels.append(self.heading_to_label(line, 'subsubsection'))
headings_dict[line] = self.heading_to_label(line, 'subsubsection')
rst_file_content[lineno] = ':raw-latex:`\subsubsection{' + line + '}`'
rst_file_content[lineno + 1] = ':raw-latex:`\label{' + self.heading_to_label(line, 'subsubsection') + '}`'
# headings level 5
# print('looking for h5 ...')
if (
(previous_line.isspace() or previous_line == '') and
self.h_content_regex.match(line) and
self.h5_underlining_regex.match(next_line) and
len(self.h_content_regex.match(line).group()) == len(self.h5_underlining_regex.match(next_line).group())
):
print('found a h5:', line)
# heading_labels.append(self.heading_to_label(line, 'paragraph'))
headings_dict[line] = self.heading_to_label(line, 'paragraph')
rst_file_content[lineno] = ':raw-latex:`\paragraph{' + line + '}`'
rst_file_content[lineno + 1] = ':raw-latex:`\label{' + self.heading_to_label(line, 'paragraph') + '}`'
# headings level 6
# print('looking for h6 ...')
if (
(previous_line.isspace() or previous_line == '') and
self.h_content_regex.match(line) and
self.h6_underlining_regex.match(next_line) and
len(self.h_content_regex.match(line).group()) == len(self.h6_underlining_regex.match(next_line).group())
):
print('found a h6:', line)
# heading_labels.append(self.heading_to_label(line, 'subparagraph'))
headings_dict[line] = self.heading_to_label(line, 'subparagraph')
rst_file_content[lineno] = ':raw-latex:`\subparagraph{' + line + '}`'
rst_file_content[lineno + 1] = ':raw-latex:`\label{' + self.heading_to_label(line, 'subparagraph') + '}`'
return headings_dict
def heading_to_label(self, heading_text, level):
heading_text = heading_text.lower()
replaced_chars = {
' ': '-',
'(': '',
')': ''
}
for key,value in replaced_chars.items():
heading_text = heading_text.replace(key, value)
return '{0}:{1}'.format(level, heading_text)
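    # Illustrative example (added for clarity, not part of the original class) of the label
    # convention produced by heading_to_label():
    #
    #     heading_to_label('Results and Discussion (Part 1)', 'section')
    #     # -> 'section:results-and-discussion-part-1'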
# self.chapter_delimiter_regex = re.compile(r'={3,}') # =============
# self.section_delimiter_regex = re.compile(r'-{3,}') # -------------
# self.subsection_delimiter_regex = re.compile(r'~{3,}') # ~~~~~~~~~~~~~
# self.subsubsection_delimiter_regex = re.compile(r'\^{3,}') # ^^^^^^^^^^^^^
# self.heading_text_regex = re.compile(
# r'''
# ^
# \s*
# (?P<title_text>
# [a-zA-Z0-9]
# [a-zA-Z0-9_ -]*
# [a-zA-Z0-9]
# )
# \s*
# $''',
# re.VERBOSE)
# self.heading_keys = []
# def parse_headings(self, rst_file_content):
# for lineno, line in enumerate(rst_file_content):
#
# # search for title
# if self.title_delimiter_regex.search(line) is not None:
# if (lineno >= 2):
# if (
# self.title_delimiter_regex.search(rst_file_content[lineno - 2]) is not None and
# self.heading_text_regex.search(rst_file_content[lineno - 1]) is not None
# ):
# title_text = self.heading_text_regex.findall(rst_file_content[lineno - 1])[0].strip()
# self.heading_keys.append(re.sub('\s+', '-', title_text.lower()))
# print('[DEBUG:HEADINGS]', self.heading_keys)
# print('[DEBUG:HEADINGS] !!! found a title in the document:', title_text, sep='')
#
# # TODO: elif subtitle
| ZelphirKaltstahl/rst-internal-links-to-raw-latex | RSTInternalLinks/HeadingsParser.py | Python | gpl-3.0 | 13,092 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <[email protected]> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
'''
Provides schema and insert queries for the practitioners table,
which holds information about the practitioners (dentists, hygienists, etc.)
'''
from lib_openmolar.common.db_orm import InsertableRecord
TABLENAME = "practitioners"
class DemoGenerator(object):
def __init__(self, database=None):
self.length = 4
self.record = InsertableRecord(database, TABLENAME)
self.record.remove(self.record.indexOf("time_stamp"))
def demo_queries(self):
'''
return a list of queries to populate a demo database
'''
## practitioner 1
self.record.setValue('user_id', 1)
self.record.setValue('type',"dentist")
self.record.setValue('status', "active")
self.record.setValue('modified_by', "demo_installer")
yield self.record.insert_query
self.record.clearValues()
## practitioner 2
self.record.setValue('user_id', 2)
self.record.setValue('type',"dentist")
self.record.setValue('status', "active")
self.record.setValue('modified_by', "demo_installer")
yield self.record.insert_query
self.record.clearValues()
## practitioner 3
self.record.setValue('user_id', 3)
self.record.setValue('type',"dentist")
self.record.setValue('speciality', 'Orthodontics')
self.record.setValue('status', "active")
self.record.setValue('modified_by', "demo_installer")
yield self.record.insert_query
self.record.clearValues()
## practitioner 4
self.record.setValue('user_id', 4)
self.record.setValue('type',"hygienist")
self.record.setValue('status', "active")
self.record.setValue('modified_by', "demo_installer")
yield self.record.insert_query
if __name__ == "__main__":
from lib_openmolar.admin.connect import DemoAdminConnection
sc = DemoAdminConnection()
sc.connect()
builder = DemoGenerator(sc)
print builder.demo_queries()
| rowinggolfer/openmolar2 | src/lib_openmolar/admin/db_orm/admin_practitioners.py | Python | gpl-3.0 | 3,483 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
import os
import traceback
from pysollib.mygettext import _
from pysollib.settings import TITLE
from pysollib.settings import VERSION
from pysollib.settings import TOOLKIT, USE_TILE
from pysollib.settings import DEBUG
from pysollib.mfxutil import print_err
if TOOLKIT == 'tk':
if USE_TILE:
from pysollib.tile import ttk
def init_tile(app, top):
# load available themes
d = os.path.join(app.dataloader.dir, 'themes')
if os.path.isdir(d):
top.tk.eval('global auto_path; lappend auto_path {%s}' % d)
for t in os.listdir(d):
if os.path.exists(os.path.join(d, t, 'pkgIndex.tcl')):
try:
top.tk.eval('package require ttk::theme::'+t)
# print 'load theme:', t
except Exception:
traceback.print_exc()
pass
def set_theme(app, top, theme):
# set theme
style = ttk.Style(top)
try:
style.theme_use(theme)
except Exception:
print_err(_('invalid theme name: ') + theme)
style.theme_use(app.opt.default_tile_theme)
def get_font_name(font):
# create font name
# i.e. "helvetica 12" -> ("helvetica", 12, "roman", "normal")
if (TOOLKIT == 'kivy'):
return "helvetica 12"
from six.moves.tkinter_font import Font
font_name = None
try:
f = Font(font=font)
except Exception:
print_err(_('invalid font name: ') + font)
if DEBUG:
traceback.print_exc()
else:
fa = f.actual()
font_name = (fa['family'],
fa['size'],
fa['slant'],
fa['weight'])
return font_name
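# Illustrative sketch (added for clarity, not part of the original module); it assumes an
# initialized Tk root window, since tkinter_font.Font needs a running Tk interpreter:
#
#     import six.moves.tkinter as tkinter
#     root = tkinter.Tk()
#     print(get_font_name("helvetica 12"))  # expected: ("helvetica", 12, "roman", "normal")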
def base_init_root_window(root, app):
# root.wm_group(root)
root.wm_title(TITLE + ' ' + VERSION)
root.wm_iconname(TITLE + ' ' + VERSION)
# set minsize
sw, sh = (root.winfo_screenwidth(), root.winfo_screenheight())
if sw < 640 or sh < 480:
root.wm_minsize(400, 300)
else:
root.wm_minsize(520, 360)
if TOOLKIT == 'gtk':
pass
if TOOLKIT == 'kivy':
pass
elif USE_TILE:
theme = app.opt.tile_theme
init_tile(app, root)
set_theme(app, root, theme)
else:
pass
class BaseTkSettings:
canvas_padding = (0, 0)
horizontal_toolbar_padding = (0, 0)
vertical_toolbar_padding = (0, 1)
toolbar_button_padding = (2, 2)
toolbar_label_padding = (4, 4)
if USE_TILE:
toolbar_relief = 'flat'
toolbar_borderwidth = 0
else:
toolbar_relief = 'raised'
toolbar_button_relief = 'flat'
toolbar_separator_relief = 'sunken'
toolbar_borderwidth = 1
toolbar_button_borderwidth = 1
| jimsize/PySolFC | pysollib/winsystems/common.py | Python | gpl-3.0 | 3,751 |
#! /usr/bin/env python
import sys
g = {}
n = {}
for line in sys.stdin:
(n1, n2, p, q, t, tg, x) = line.strip().split(' ')
t = int(t)
x = float(x)
key = ' '.join((n1,n2,p,q))
if not key in n:
n[key] = 0
g[key] = 0
n[key] += t
g[key] += x*t
for key in n:
print key, n[key], g[key]/n[key]
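# Illustrative note (added for clarity, not part of the original script): each stdin line
# is expected to look like "n1 n2 p q t tg x", e.g.
#
#     10 20 0.5 0.5 100 foo 1.25
#     10 20 0.5 0.5 300 foo 1.75
#
# which is aggregated by the key "10 20 0.5 0.5" into a total weight and weighted mean of x:
#
#     10 20 0.5 0.5 400 1.625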
| vbeffara/Simulations | tools/massage-box.py | Python | gpl-3.0 | 341 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import redirect
from django.contrib.auth.views import logout as original_logout
from loginas import settings as la_settings
from loginas.utils import restore_original_login
def logout(request, next_page=None, template_name='registration/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME, extra_context=None):
"""
    This can replace your default logout view. In your settings, do:
from django.core.urlresolvers import reverse_lazy
LOGOUT_URL = reverse_lazy('logout')
"""
original_session = request.session.get(la_settings.USER_SESSION_FLAG)
if original_session:
restore_original_login(request)
return redirect(la_settings.LOGOUT_REDIRECT)
else:
return original_logout(request, next_page, template_name, redirect_field_name, extra_context)
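# Illustrative wiring sketch (an assumption, not part of the original module): this view is
# typically routed in place of the stock logout view, e.g. in a urls.py such as:
#
#     from django.conf.urls import url
#     from autenticazione.viste import logout
#
#     urlpatterns = [
#         url(r'^logout/$', logout, name='logout'),
#     ]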
| CroceRossaItaliana/jorvik | autenticazione/viste.py | Python | gpl-3.0 | 986 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# list_files.py
# Copyright (C) 2015 Fracpete (pythonwekawrapper at gmail dot com)
import traceback
import tempfile
import weka.core.jvm as jvm
from weka.flow.control import Flow
from weka.flow.source import ListFiles
from weka.flow.sink import Console
def main():
"""
Just runs some example code.
"""
# setup the flow
flow = Flow(name="list files")
# flow.print_help()
listfiles = ListFiles()
listfiles.config["dir"] = str(tempfile.gettempdir())
listfiles.config["list_files"] = True
listfiles.config["list_dirs"] = False
listfiles.config["recursive"] = False
listfiles.config["regexp"] = ".*r.*"
# listfiles.print_help()
flow.actors.append(listfiles)
console = Console()
console.config["prefix"] = "Match: "
# console.print_help()
flow.actors.append(console)
# run the flow
msg = flow.setup()
if msg is None:
print("\n" + flow.tree + "\n")
msg = flow.execute()
if msg is not None:
print("Error executing flow:\n" + msg)
else:
print("Error setting up flow:\n" + msg)
flow.wrapup()
flow.cleanup()
if __name__ == "__main__":
try:
jvm.start()
main()
except Exception, e:
print(traceback.format_exc())
finally:
jvm.stop()
| fracpete/python-weka-wrapper-examples | src/wekaexamples/flow/list_file.py | Python | gpl-3.0 | 1,948 |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class SeriesActors(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
SeriesActors - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'data': 'list[SeriesActorsData]'
}
self.attribute_map = {
'data': 'data'
}
self._data = None
@property
def data(self):
"""
Gets the data of this SeriesActors.
:return: The data of this SeriesActors.
:rtype: list[SeriesActorsData]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this SeriesActors.
:param data: The data of this SeriesActors.
:type: list[SeriesActorsData]
"""
self._data = data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
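    # Illustrative usage sketch (added for clarity, not part of the generated code);
    # `fetched_actor_list` is a hypothetical list[SeriesActorsData] obtained from the API:
    #
    #     actors = SeriesActors()
    #     actors.data = fetched_actor_list
    #     payload = actors.to_dict()  # {'data': [...]}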
| FireBladeNooT/Medusa_1_6 | lib/tvdbapiv2/models/series_actors.py | Python | gpl-3.0 | 3,002 |
#!/usr/bin/python3
import os
import sys
import subprocess
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lutris.util.wineregistry import WineRegistry
PREFIXES_PATH = os.path.expanduser("~/Games/wine/prefixes")
def get_registries():
registries = []
directories = os.listdir(PREFIXES_PATH)
directories.append(os.path.expanduser("~/.wine"))
for prefix in directories:
for path in os.listdir(os.path.join(PREFIXES_PATH, prefix)):
if path.endswith(".reg"):
registries.append(os.path.join(PREFIXES_PATH, prefix, path))
return registries
def check_registry(registry_path):
with open(registry_path, 'r') as registry_file:
original_content = registry_file.read()
try:
registry = WineRegistry(registry_path)
except:
sys.stderr.write("Error parsing {}\n".format(registry_path))
raise
content = registry.render()
if content != original_content:
wrong_path = os.path.join(os.path.dirname(__file__), 'error.reg')
with open(wrong_path, 'w') as wrong_reg:
wrong_reg.write(content)
print("Content of parsed registry doesn't match: {}".format(registry_path))
subprocess.call(["meld", registry_path, wrong_path])
sys.exit(2)
registries = get_registries()
for registry in registries:
check_registry(registry)
print("All {} registry files validated!".format(len(registries)))
| RobLoach/lutris | tests/check_prefixes.py | Python | gpl-3.0 | 1,465 |
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'okru',
'netutv',
'rapidvideo'
]
list_quality = ['default']
host = "http://www.anitoonstv.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Anime", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Series Animadas", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Pokemon", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
if 'Novedades' in item.title:
patron_cat = '<div class="activos"><h3>(.+?)<\/h2><\/a><\/div>'
patron = '<a href="(.+?)"><h2><span>(.+?)<\/span>'
else:
patron_cat = '<li><a href=.+?>'
patron_cat += str(item.title)
patron_cat += '<\/a><div>(.+?)<\/div><\/li>'
patron = "<a href='(.+?)'>(.+?)<\/a>"
data = scrapertools.find_single_match(data, patron_cat)
matches = scrapertools.find_multiple_matches(data, patron)
for link, name in matches:
if "Novedades" in item.title:
url = link
title = name.capitalize()
else:
url = host + link
title = name
if ":" in title:
cad = title.split(":")
show = cad[0]
else:
if "(" in title:
cad = title.split("(")
if "Super" in title:
show = cad[1]
show = show.replace(")", "")
else:
show = cad[0]
else:
show = title
if "&" in show:
cad = title.split("xy")
show = cad[0]
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=show, action="episodios", show=show,
context=context1))
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)
show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
scrapedplot = scrapertools.find_single_match(data, '<span>Descripcion.+?<\/span>(.+?)<br>')
i = 0
temp = 0
for link, cap, name in matches:
if int(cap) == 1:
temp = temp + 1
if int(cap) < 10:
cap = "0" + cap
season = temp
episode = int(cap)
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
date = name
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# title = str(temp)+"x"+cap+" "+name
url = host + "/" + link
if "NO DISPONIBLE" not in name:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, thumbnail=scrapedthumbnail,
plot=scrapedplot, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
data_vid = scrapertools.find_single_match(data1, '<div class="videos">(.+?)<\/div><div .+?>')
# name = scrapertools.find_single_match(data,'<span>Titulo.+?<\/span>([^<]+)<br>')
scrapedplot = scrapertools.find_single_match(data, '<br><span>Descrip.+?<\/span>([^<]+)<br>')
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla:
if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ")
server = server.lower().strip()
if "ok" == server:
server = 'okru'
if "netu" == server:
continue
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item)
return itemlist
def play(item):
logger.info()
itemlist = []
    # Look for the video on the given server ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
        # ...if we do not find it, search on all available servers
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail))
return itemlist
| pitunti/alfaPitunti | plugin.video.alfa/channels/anitoonstv.py | Python | gpl-3.0 | 6,699 |
#!/usr/bin/env python3
import argparse
import logging
import string
# Quiet scapy
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy import volatile # noqa: E402
from scapy import sendrecv # noqa: E402
from scapy import config # noqa: E402
from scapy.layers import l2 # noqa: E402
from scapy.layers import inet # noqa: E402
from scapy.layers import dhcp # noqa: E402
# Configuration requires these imports to properly initialize
from scapy import route # noqa: E402, F401
from scapy import route6 # noqa: E402, F401
def dhcp_flood(**kwargs):
iface = kwargs["interface"]
count = kwargs["count"]
unique_hexdigits = str.encode("".join(set(string.hexdigits.lower())))
packet = (
l2.Ether(dst="ff:ff:ff:ff:ff:ff") /
inet.IP(src="0.0.0.0", dst="255.255.255.255") /
inet.UDP(sport=68, dport=67) /
dhcp.BOOTP(chaddr=volatile.RandString(12, unique_hexdigits)) /
dhcp.DHCP(options=[("message-type", "discover"), "end"])
)
sendrecv.sendp(
packet,
iface=iface,
count=count
)
def print_dhcp_response(response):
print("Source: {}".format(response[l2.Ether].src))
print("Destination: {}".format(response[l2.Ether].dst))
for option in response[dhcp.DHCP].options:
if isinstance(option, tuple):
option, *values = option
else:
# For some reason some options are strings instead of tuples
option, *values = option, None
if option in ["end", "pad"]:
break
output = "Option: {} -> {}".format(option, values)
if option == "message-type" and len(values) == 1:
dhcp_type = dhcp.DHCPTypes.get(values[0], "unknown")
output = "{} ({})".format(output, dhcp_type)
print(output)
def dhcp_sniff(**kwargs):
sendrecv.sniff(filter="udp and (port 67 or 68)", prn=print_dhcp_response)
def parse_args():
p = argparse.ArgumentParser(description='''
All your IPs are belong to us.
''', formatter_class=argparse.RawTextHelpFormatter)
p.add_argument(
'-i',
'--interface',
action='store',
default=config.conf.iface,
help='network interface to use'
)
subparsers = p.add_subparsers(dest='command')
subparsers.required = True
flood = subparsers.add_parser('flood')
flood.add_argument(
'-c',
'--count',
action='store',
default=10,
type=int,
help='number of addresses to consume'
)
subparsers.add_parser('sniff')
args = p.parse_args()
return args
def main():
args = parse_args()
dispatch = {
"flood": dhcp_flood,
"sniff": dhcp_sniff,
}
dispatch[args.command](**vars(args))
if __name__ == "__main__":
main()
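    # Illustrative invocations (added for clarity, not part of the original script); both
    # typically require root privileges because scapy sends raw packets:
    #
    #     sudo ./dhcpwn.py --interface eth0 flood --count 50
    #     sudo ./dhcpwn.py --interface eth0 sniff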
| mschwager/dhcpwn | dhcpwn.py | Python | gpl-3.0 | 2,829 |
# -*- coding: utf-8 -*-
import os
import pickle
import random
import time
import urllib
try:
import xbmc, xbmcgui
except:
pass
from platformcode import config, logger
LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default='')
from servers import torrent as torr
lt, e, e1, e2 = torr.import_libtorrent(LIBTORRENT_PATH)
from cache import Cache
from dispatcher import Dispatcher
from file import File
from handler import Handler
from monitor import Monitor
from resume_data import ResumeData
from server import Server
try:
BUFFER = int(config.get_setting("bt_buffer", server="torrent", default="50"))
except:
BUFFER = 50
config.set_setting("bt_buffer", "50", server="torrent")
DOWNLOAD_PATH = config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath"))
BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True)
RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True)
msg_header = 'Alfa BT Cliente Torrent'
class Client(object):
INITIAL_TRACKERS = ['udp://tracker.openbittorrent.com:80',
'udp://tracker.istole.it:80',
'udp://open.demonii.com:80',
'udp://tracker.coppersurfer.tk:80',
'udp://tracker.leechers-paradise.org:6969',
'udp://exodus.desync.com:6969',
'udp://tracker.publicbt.com:80',
'http://tracker.torrentbay.to:6969/announce',
'http://tracker.pow7.com/announce',
'udp://tracker.ccc.de:80/announce',
'udp://open.demonii.com:1337',
'http://9.rarbg.com:2710/announce',
'http://bt.careland.com.cn:6969/announce',
'http://explodie.org:6969/announce',
'http://mgtracker.org:2710/announce',
'http://tracker.best-torrents.net:6969/announce',
'http://tracker.tfile.me/announce',
'http://tracker1.wasabii.com.tw:6969/announce',
'udp://9.rarbg.com:2710/announce',
'udp://9.rarbg.me:2710/announce',
'udp://coppersurfer.tk:6969/announce',
'http://www.spanishtracker.com:2710/announce',
'http://www.todotorrents.com:2710/announce'
] ### Added some trackers from MCT
VIDEO_EXTS = {'.avi': 'video/x-msvideo', '.mp4': 'video/mp4', '.mkv': 'video/x-matroska',
'.m4v': 'video/mp4', '.mov': 'video/quicktime', '.mpg': 'video/mpeg', '.ogv': 'video/ogg',
'.ogg': 'video/ogg', '.webm': 'video/webm', '.ts': 'video/mp2t', '.3gp': 'video/3gpp',
'.rar': 'video/unrar'}
def __init__(self, url=None, port=None, ip=None, auto_shutdown=True, wait_time=20, timeout=5, auto_delete=True,
temp_path=None, is_playing_fnc=None, print_status=False):
# server
if port:
self.port = port
else:
self.port = random.randint(8000, 8099)
if ip:
self.ip = ip
else:
self.ip = "127.0.0.1"
self.server = Server((self.ip, self.port), Handler, client=self)
# Options
if temp_path:
self.temp_path = temp_path
else:
self.temp_path = DOWNLOAD_PATH
self.is_playing_fnc = is_playing_fnc
self.timeout = timeout
self.auto_delete = auto_delete
self.wait_time = wait_time
self.auto_shutdown = auto_shutdown
self.buffer_size = BUFFER
self.first_pieces_priorize = BUFFER
self.last_pieces_priorize = 5
self.state_file = "state"
try:
self.torrent_paramss = {'save_path': self.temp_path, 'storage_mode': lt.storage_mode_t.storage_mode_allocate}
except Exception, e:
try:
do = xbmcgui.Dialog()
e = e1 or e2
do.ok('ERROR en el cliente BT Libtorrent', 'Módulo no encontrado o imcompatible con el dispositivo.',
'Reporte el fallo adjuntando un "log".', str(e))
except:
pass
return
# State
self.has_meta = False
self.meta = None
self.start_time = None
self.last_connect = 0
self.connected = False
self.closed = False
self.file = None
self.files = None
self._th = None
self.seleccion = 0
self.index = 0
# Sesion
self._cache = Cache(self.temp_path)
self._ses = lt.session()
#self._ses.listen_on(0, 0) ### ALFA: it blocks repro of some .torrents
        # Load the saved state file (if it exists)
""" ### ALFA: it blocks repro of some .torrents
if os.path.exists(os.path.join(self.temp_path, self.state_file)):
try:
f = open(os.path.join(self.temp_path, self.state_file), "rb")
state = pickle.load(f)
self._ses.load_state(state)
f.close()
except:
pass
"""
self._start_services()
# Monitor & Dispatcher
self._monitor = Monitor(self)
if print_status:
self._monitor.add_listener(self.print_status)
self._monitor.add_listener(self._check_meta)
self._monitor.add_listener(self.save_state)
self._monitor.add_listener(self.priorize_start_file)
self._monitor.add_listener(self.announce_torrent)
if self.auto_shutdown:
self._monitor.add_listener(self._auto_shutdown)
self._dispatcher = Dispatcher(self)
self._dispatcher.add_listener(self._update_ready_pieces)
        # Start from the given URL
if url:
self.start_url(url)
def set_speed_limits(self, download=0, upload=0):
"""
        Function in charge of setting limits on the download or upload speed
"""
if isinstance(download, int) and download > 0:
self._th.set_download_limit(download * 1024)
if isinstance(upload, int) and download > 0:
self._th.set_upload_limit(upload * 1024)
def get_play_list(self):
"""
        Function in charge of generating the playlist
"""
        # Wait for the metadata
while not self.has_meta:
time.sleep(1)
        # Check that there are video files
if self.files:
if len(self.files) > 1:
return "http://" + self.ip + ":" + str(self.port) + "/playlist.pls"
else:
return "http://" + self.ip + ":" + str(self.port) + "/" + urllib.quote(self.files[0].path)
def get_files(self):
"""
        Function in charge of generating the file listing
"""
        # Wait for the metadata
while not self.has_meta:
time.sleep(1)
files = []
        # Check that there are video files
if self.files:
            # Build the dict with the files
for file in self.files:
n = file.path
u = "http://" + self.ip + ":" + str(self.port) + "/" + urllib.quote(n)
s = file.size
files.append({"name": n, "url": u, "size": s})
return files
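        # Illustrative sketch (added for clarity, not part of the original class) of the
        # structure returned by get_files(); host, port and sizes are example values:
        #
        #     [{'name': 'Some.Show.S01E01.mkv',
        #       'url': 'http://127.0.0.1:8042/Some.Show.S01E01.mkv',
        #       'size': 734003200}]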
def _find_files(self, files, search=None):
"""
        Function in charge of finding the playable files in the torrent
"""
self.total_size = 0
        # Keep the files whose extension is in the list
videos = filter(lambda f: self.VIDEO_EXTS.has_key(os.path.splitext(f.path)[1]), files)
if not videos:
raise Exception('No video files in torrent')
for v in videos:
self.total_size += v.size ### ALFA
videos[videos.index(v)].index = files.index(v)
return videos
def set_file(self, f):
"""
        Function in charge of selecting the file we are going to serve and, therefore, prioritizing its download
"""
        # Select the file we are going to serve
fmap = self.meta.map_file(f.index, 0, 1)
self.file = File(f.path, self.temp_path, f.index, f.size, fmap, self.meta.piece_length(), self)
if self.seleccion < 0: ### ALFA
self.file.first_piece = 0 ### ALFA
self.file.last_piece = self.meta.num_pieces() ### ALFA
self.file.size = self.total_size ### ALFA
self.prioritize_file()
def prioritize_piece(self, pc, idx):
"""
        Function in charge of prioritizing a given piece
"""
piece_duration = 1000
min_deadline = 2000
dl = idx * piece_duration + min_deadline
""" ### ALFA
try:
self._th.set_piece_deadline(pc, dl, lt.deadline_flags.alert_when_available)
except:
pass
"""
if idx == 0:
tail_pieces = 9
            # Pieces before the first one are deactivated
if (self.file.last_piece - pc) > tail_pieces:
for i in xrange(self.file.first_piece, pc):
self._th.piece_priority(i, 0)
self._th.reset_piece_deadline(i)
            # Pieces after the first one are activated
for i in xrange(pc + 1, self.file.last_piece + 1):
#self._th.piece_priority(i, 0)
self._th.piece_priority(i, 1)
def prioritize_file(self):
"""
        Function in charge of prioritizing the pieces belonging to the file selected in set_file()
"""
priorities = []
for i in xrange(self.meta.num_pieces()):
if i >= self.file.first_piece and i <= self.file.last_piece:
priorities.append(1)
else:
if self.index < 0:
priorities.append(1) ### ALFA
else:
priorities.append(0) ### ALFA
self._th.prioritize_pieces(priorities)
x = 0
for i, _set in enumerate(self._th.piece_priorities()):
if _set > 0: x += 1
#logger.info("***** Nº Pieza: %s: %s" % (i, str(_set)))
logger.info("***** Piezas %s : Activas: %s" % (str(i+1), str(x)))
logger.info("***** first_piece %s : last_piece: %s" % (str(self.file.first_piece), str(self.file.last_piece)))
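        # Illustrative note (an interpretation of the code above, not from the original
        # source): for a torrent with 100 pieces where the selected file spans pieces 40-80,
        # prioritize_file() sets the priority of pieces 0-39 and 81-99 to 0 and of pieces
        # 40-80 to 1, unless the user chose "all files" (self.index < 0), in which case
        # every piece keeps priority 1.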
def download_torrent(self, url):
"""
        Function in charge of downloading a .torrent file
"""
from core import httptools
data = httptools.downloadpage(url).data
return data
def start_url(self, uri):
"""
        Function in charge of starting the torrent download from the URL; it accepts:
        - a URL pointing to a .torrent file
        - a magnet URL
        - a local .torrent file
"""
if self._th:
raise Exception('Torrent is already started')
if uri.startswith('http://') or uri.startswith('https://'):
torrent_data = self.download_torrent(uri)
info = lt.torrent_info(lt.bdecode(torrent_data))
tp = {'ti': info}
resume_data = self._cache.get_resume(info_hash=str(info.info_hash()))
if resume_data:
tp['resume_data'] = resume_data
elif uri.startswith('magnet:'):
tp = {'url': uri}
resume_data = self._cache.get_resume(info_hash=Cache.hash_from_magnet(uri))
if resume_data:
tp['resume_data'] = resume_data
elif os.path.isfile(uri):
if os.access(uri, os.R_OK):
info = lt.torrent_info(uri)
tp = {'ti': info}
resume_data = self._cache.get_resume(info_hash=str(info.info_hash()))
if resume_data:
tp['resume_data'] = resume_data
else:
raise ValueError('Invalid torrent path %s' % uri)
else:
raise ValueError("Invalid torrent %s" % uri)
tp.update(self.torrent_paramss)
self._th = self._ses.add_torrent(tp)
for tr in self.INITIAL_TRACKERS:
self._th.add_tracker({'url': tr})
self._th.set_sequential_download(True)
self._th.force_reannounce()
self._th.force_dht_announce()
self._monitor.start()
self._dispatcher.do_start(self._th, self._ses)
self.server.run()
def stop(self):
"""
        Function in charge of stopping the torrent and exiting
"""
self._dispatcher.stop()
self._dispatcher.join()
self._monitor.stop()
self.server.stop()
self._dispatcher.stop()
if self._ses:
self._ses.pause()
if self._th:
self.save_resume()
self.save_state()
self._stop_services()
self._ses.remove_torrent(self._th, self.auto_delete)
del self._ses
self.closed = True
def pause(self):
"""
        Function in charge of pausing the torrent
"""
self._ses.pause()
def _start_services(self):
"""
        Function in charge of starting the libtorrent services: dht, lsd, upnp, natpmp
"""
self._ses.add_dht_router("router.bittorrent.com", 6881)
self._ses.add_dht_router("router.bitcomet.com", 554)
self._ses.add_dht_router("router.utorrent.com", 6881)
self._ses.add_dht_router("dht.transmissionbt.com",6881) ### from MCT
self._ses.start_dht()
self._ses.start_lsd()
self._ses.start_upnp()
self._ses.start_natpmp()
def _stop_services(self):
"""
        Function in charge of stopping the libtorrent services: dht, lsd, upnp, natpmp
"""
self._ses.stop_natpmp()
self._ses.stop_upnp()
self._ses.stop_lsd()
self._ses.stop_dht()
def save_resume(self):
"""
        Function in charge of saving the metadata so a download can be resumed more quickly
"""
if self._th.need_save_resume_data() and self._th.is_valid() and self.meta:
r = ResumeData(self)
start = time.time()
while (time.time() - start) <= 5:
if r.data or r.failed:
break
time.sleep(0.1)
if r.data:
self._cache.save_resume(self.unique_file_id, lt.bencode(r.data))
@property
def status(self):
"""
        Function in charge of returning the torrent status
"""
if self._th:
s = self._th.status()
# Download Rate
s._download_rate = s.download_rate / 1024
            # File progress
if self.file:
pieces = s.pieces[self.file.first_piece:self.file.last_piece] ### ALFA
progress = float(sum(pieces)) / len(pieces)
s.pieces_len = len(pieces) ### ALFA
s.pieces_sum = sum(pieces) ### ALFA
#logger.info('***** Estado piezas: %s' % pieces)
else:
progress = 0
s.pieces_len = 0 ### ALFA
s.pieces_sum = 0 ### ALFA
s.progress_file = progress * 100
            # File size
s.file_name = '' ### ALFA
s.seleccion = '' ### ALFA
if self.file:
s.seleccion = self.seleccion ### ALFA
s.file_name = self.file.path ### ALFA
s.file_size = self.file.size / 1048576.0
else:
s.file_size = 0
            # Buffer status
            if self.file and self.file.cursor: # With an active connection: available data vs player position
percent = len(self.file.cursor.cache)
percent = percent * 100 / self.buffer_size
s.buffer = int(percent)
            elif self.file: # Without an active connection: pre-buffer before starting
                # The pre-buffer consists of two parts:
                # 1. Buffer at the start of the file so the player can start without stuttering
                # 2. Buffer at the end of the file (for some files the player inspects the end of the file before starting)
bp = []
                # The start-buffer size is the buffer size minus the end-buffer size
first_pieces_priorize = self.buffer_size - self.last_pieces_priorize
                # Check which parts of the start buffer are available
for x in range(first_pieces_priorize):
if self._th.have_piece(self.file.first_piece + x):
bp.append(True)
else:
bp.append(False)
                # Check which parts of the end buffer are available
for x in range(self.last_pieces_priorize):
if self._th.have_piece(self.file.last_piece - x):
bp.append(True)
else:
bp.append(False)
s.buffer = int(sum(bp) * 100 / self.buffer_size)
            else: # If no file is selected: there is no buffer
s.buffer = 0
            # Time remaining before closing when the timeout is active
if self.auto_shutdown:
if self.connected:
if self.timeout:
s.timeout = int(self.timeout - (time.time() - self.last_connect - 1))
if self.file and self.file.cursor:
s.timeout = self.timeout
if s.timeout < 0: s.timeout = "Cerrando"
else:
s.timeout = "---"
else:
if self.start_time and self.wait_time:
s.timeout = int(self.wait_time - (time.time() - self.start_time - 1))
if s.timeout < 0: s.timeout = "Cerrando"
else:
s.timeout = "---"
else:
s.timeout = "Off"
            # Download state
STATE_STR = ['En cola', 'Comprobando', 'Descargando metadata', \
'Descargando', 'Finalizado', 'Seeding', 'Allocating', 'Comprobando fastresume']
s.str_state = STATE_STR[s.state]
            # DHT state
if self._ses.dht_state() is not None:
s.dht_state = "On"
s.dht_nodes = self._ses.status().dht_nodes
else:
s.dht_state = "Off"
s.dht_nodes = 0
            # Number of trackers
s.trackers = len(self._th.trackers())
            # Peer sources
s.dht_peers = 0
s.trk_peers = 0
s.pex_peers = 0
s.lsd_peers = 0
for peer in self._th.get_peer_info():
if peer.source & 1:
s.trk_peers += 1
if peer.source & 2:
s.dht_peers += 1
if peer.source & 4:
s.pex_peers += 1
if peer.source & 8:
s.lsd_peers += 1
return s
"""
    Services:
    - These functions run automatically every so often in another thread.
    - They are executed while the torrent is active; some of them can be disabled
    by the configuration, for example writing to the log
"""
def _auto_shutdown(self, *args, **kwargs):
"""
        Service in charge of automatically shutting down the server
"""
if self.file and self.file.cursor:
self.last_connect = time.time()
self.connected = True
if self.is_playing_fnc and self.is_playing_fnc():
self.last_connect = time.time()
self.connected = True
if self.auto_shutdown:
            # shutdown because the player was closed
if self.connected and self.is_playing_fnc and not self.is_playing_fnc():
if time.time() - self.last_connect - 1 > self.timeout:
self.stop()
            # shutdown because no connection was ever made
if (not self.file or not self.file.cursor) and self.start_time and self.wait_time and not self.connected:
if time.time() - self.start_time - 1 > self.wait_time:
self.stop()
            # shutdown after the last connection
if (not self.file or not self.file.cursor) and self.timeout and self.connected and not self.is_playing_fnc:
if time.time() - self.last_connect - 1 > self.timeout:
self.stop()
def announce_torrent(self):
"""
        Service in charge of announcing the torrent
"""
self._th.force_reannounce()
self._th.force_dht_announce()
def save_state(self):
"""
        Service in charge of saving the session state
"""
state = self._ses.save_state()
f = open(os.path.join(self.temp_path, self.state_file), 'wb')
pickle.dump(state, f)
f.close()
def _update_ready_pieces(self, alert_type, alert):
"""
        Service in charge of notifying that a piece is available
"""
if alert_type == 'read_piece_alert' and self.file:
self.file.update_piece(alert.piece, alert.buffer)
def _check_meta(self):
"""
        Service in charge of checking whether the metadata has been downloaded
"""
if self.status.state >= 3 and self.status.state <= 5 and not self.has_meta:
            # Save the metadata
self.meta = self._th.get_torrent_info()
            # Get the file list from the metadata
fs = self.meta.files()
if isinstance(fs, list):
files = fs
else:
files = [fs.at(i) for i in xrange(fs.num_files())]
# Store the list of files
self.files = self._find_files(files)
# If there are several videos (and no RAR), pick one video or "all"
lista = []
seleccion = 0
for file in self.files:
if '.rar' in str(file.path):
seleccion = -9
lista += [os.path.split(str(file.path))[1]]
if len(lista) > 1 and seleccion >= 0:
d = xbmcgui.Dialog()
seleccion = d.select(msg_header + ": Selecciona el vídeo, o 'Cancelar' para todos", lista)
if seleccion < 0:
index = 0
self.index = seleccion
else:
index = seleccion
self.index = self.files[index].index
self.seleccion = seleccion
# Mark the first file as active
self.set_file(self.files[index])
# Consider the download started
self.start_time = time.time()
# Save the .torrent in the cache
self._cache.file_complete(self._th.get_torrent_info())
self.has_meta = True
def priorize_start_file(self):
'''
Service in charge of prioritizing the beginning and end of the file when there is no connection
'''
if self.file and not self.file.cursor:
num_start_pieces = self.buffer_size - self.last_pieces_priorize # Number of pieces to prioritize at the start
num_end_pieces = self.last_pieces_priorize # Number of pieces to prioritize at the end
pieces_count = 0
# Prioritize the last pieces
for y in range(self.file.last_piece - num_end_pieces, self.file.last_piece + 1):
if not self._th.have_piece(y):
self.prioritize_piece(y, pieces_count)
pieces_count += 1
# Prioritize the first pieces
for y in range(self.file.first_piece, self.file.last_piece + 1):
if not self._th.have_piece(y):
if pieces_count == self.buffer_size:
break
self.prioritize_piece(y, pieces_count)
pieces_count += 1
def print_status(self):
'''
Service in charge of logging the download status
'''
s = self.status ### ALFA
if self.seleccion >= 0:
archivo = self.seleccion + 1
else:
archivo = self.seleccion
logger.info(
'%.2f%% of %.1fMB %s | %.1f kB/s | #%s %d%% | AutoClose: %s | S: %d(%d) P: %d(%d) | TRK: %d DHT: %d PEX: %d LSD %d | DHT:%s (%d) | Trackers: %d | Pieces: %d (%d)' % \
(s.progress_file, s.file_size, s.str_state, s._download_rate, archivo, s.buffer, s.timeout, s.num_seeds, \
s.num_complete, s.num_peers, s.num_incomplete, s.trk_peers, s.dht_peers, s.pex_peers, s.lsd_peers,
s.dht_state, s.dht_nodes, s.trackers, s.pieces_sum, s.pieces_len)) ### ALFA
| alfa-jor/addon | plugin.video.alfa/lib/btserver/client.py | Python | gpl-3.0 | 26,890 |
# Copyright (C) 2012 Aaron Krebs [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from django.views.generic.simple import direct_to_template
from django.contrib.auth import views as auth_views
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from registration.views import register
urlpatterns = patterns('',
# urls for simple one-step registration
url(r'^register/$',
register,
{'backend': 'registration.backends.simple.SimpleBackend',
'template_name': 'registration/registration_form.hamlpy',
},
name='registration_register'
),
url(r'^register/closed/$',
direct_to_template,
{'template': 'registration/registration_closed.hamlpy'},
name='registration_disallowed'
),
url(r'^login/$',
auth_views.login,
{'template_name': 'registration/login.hamlpy'},
name='auth_login'
),
url(r'^logout/$',
auth_views.logout,
{'template_name': 'registration/logout.hamlpy'},
name='auth_logout'
),
url(r'^password/change/$',
auth_views.password_change,
{'template_name': 'registration/password_change_form.hamlpy',
# ugh, this is tied to the namespace; needs to be namespace-agnostic
# since the namespace is determined by the importing app
# TODO: see Issue #1
'post_change_redirect': reverse_lazy('registration:auth_password_change_done')
},
name='auth_password_change'
),
url(r'^password/change/done/$',
auth_views.password_change_done,
{'template_name': 'registration/password_change_done.hamlpy'},
name='auth_password_change_done'
),
url(r'^password/reset/$',
auth_views.password_reset,
{'template_name': 'registration/password_reset_form.hamlpy',
# same issue as above
'post_reset_redirect': reverse_lazy('registration:auth_password_reset_done'),
'email_template_name': 'registration/password_reset_email.hamlpy',
'subject_template_name': 'registration/password_reset_subject.hamlpy',
},
name='auth_password_reset'
),
url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
auth_views.password_reset_confirm,
{'template_name': 'registration/password_reset_confirm.hamlpy',
# same issue as above
'post_reset_redirect': reverse_lazy('registration:auth_password_reset_complete'),
},
name='auth_password_reset_confirm'
),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
{'template_name': 'registration/password_reset_complete.hamlpy'},
name='auth_password_reset_complete'
),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
{'template_name': 'registration/password_reset_done.hamlpy'},
name='auth_password_reset_done'
),
)
| a-krebs/finances | finances/django_registration/urls.py | Python | gpl-3.0 | 3,598 |
############################################
# [config.py]
# CONFIGURATION SETTINGS FOR A PARTICULAR METER
#
#
# Set the long-form name of this meter
name = "*PEAK only"
#
# [Do not remove or uncomment the following line]
Cs={}
############################################
############################################
# STRUCTURE PARAMETERS
#
# Parameters subject to conscious control by the poet. Kiparsky & Hanson (1996)
# call these "formally independent of phonological structure." By contrast,
# "realization parameters"--e.g., the size of a metrical position, which positions
# are regulated, and other constraints--"determine the way the structure is
# linguistically manifested, and are dependent on the prosodic givens of language."
#
#
####
# [Number of feet in a line]
#
#Cs['number_feet!=2'] = 1 # require dimeter
#Cs['number_feet!=3'] = 1 # require trimeter
#Cs['number_feet!=4'] = 1 # require tetrameter
#Cs['number_feet!=5'] = 1 # require pentameter
#Cs['number_feet!=6'] = 1 # require hexameter
#Cs['number_feet!=7'] = 1 # require heptameter
#
#
####
# [Headedness of the line]
#
#Cs['headedness!=falling'] = 1 # require a falling rhythm (e.g. trochaic, dactylic)
#Cs['headedness!=rising'] = 1 # require a rising rhythm (e.g., iambic, anapestic)
#
############################################
############################################
# REALIZATION PARAMETERS
#
# All subsequent constraints can be seen as "realization parameters."
# See note to "structure parameters" above for more information.
#
#############################################
# METRICAL PARSING: POSITION SIZE
#
# Select how many syllables are at least *possible* in strong or weak positions
# cf. Kiparsky & Hanson's "position size" parameter ("Parametric Theory" 1996)
#
#
######
# [Maximum position size]
#
# The maximum number of syllables allowed in strong metrical positions (i.e. "s")
maxS=2
#
# The maximum number of syllables allowed in weak metrical positions (i.e. "w")
maxW=2
#
#
######
# [Minimum position size]
#
# (Recommended) Positions are at minimum one syllable in size
splitheavies=0
#
# (Unrecommended) Allow positions to be as small as a single mora
# i.e. (a split heavy syllable can straddle two metrical positions)
#splitheavies=1
############################################
############################################
# METRICAL PARSING: METRICAL CONSTRAINTS
#
# Here you can configure the constraints used by the metrical parser.
# Each constraint is expressed in the form:
# Cs['(constraint name)']=(constraint weight)
# Constraint weights do not affect harmonic bounding (i.e. which parses
# survive as possibilities), but they do affect how those possibilities
# are sorted to select the "best" parse.
#
#
######
# [Constraints regulating the 'STRENGTH' of a syllable]
#
# A syllable is strong if it is a peak in a polysyllabic word:
# the syllables in 'liberty', stressed-unstressed-unstressed,
# are, in terms of *strength*, strong-weak-neutral, because
# the first syllable is more stressed than its neighbor;
# the second syllable less stressed; and the third equally stressed.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any weak syllables ("troughs"):
#Cs['strength.s=>-u']=1
#
# A weak metrical position may not contain any strong syllables ("peaks"):
# [Kiparsky and Hanson believe this is Shakespeare's meter]
Cs['strength.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one strong syllable:
#Cs['strength.s=>p']=3
#
# A weak metrical position should contain at least one weak syllable:
#Cs['strength.w=>u']=3
#
#
#
######
# [Constraints regulating the STRESS of a syllable]
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any unstressed syllables:
# [Kiparsky and Hanson believe this is Hopkins' meter]
#Cs['stress.s=>-u']=1
#
# A weak metrical position should not contain any stressed syllables:
#Cs['stress.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one stressed syllable:
#Cs['stress.s=>p']=2
#
# A weak metrical position must contain at least one unstressed syllable;
#Cs['stress.w=>u']=2
#
#
#
######
# [Constraints regulating the WEIGHT of a syllable]
#
# The weight of a syllable is its "quantity": short or long.
# These constraints are designed for "quantitative verse",
# as for example in classical Latin and Greek poetry.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any light syllables:
#Cs['weight.s=>-u']=2
#
# A weak metrical position should not contain any heavy syllables:
#Cs['weight.w=>-p']=2
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one heavy syllable:
#Cs['weight.s=>p']=2
#
# A weak metrical position must contain at least one light syllable;
#Cs['weight.w=>u']=2
#
#
#
######
# [Constraints regulating what's permissible as a DISYLLABIC metrical position]
# [(with thanks to Sam Bowman, who programmed many of these constraints)]
#
###
# [Based on weight:]
#
# A disyllabic metrical position should not contain more than a minimal foot:
# i.e. W-resolution requires first syllable to be light and unstressed.
Cs['footmin-w-resolution']=1
#
#
# A disyllabic metrical position should not contain more than a minimal foot:
# (i.e. allowed positions are syllables weighted light-light or light-heavy)
#Cs['footmin-noHX']=1000
#
#
# A disyllabic STRONG metrical position should not contain more than a minimal foot:
# (i.e. allowed positions are syllables weighted light-light or light-heavy)
#Cs['footmin-s-noHX']=1
#
# A disyllabic metrical position should be syllables weighted light-light:
#Cs['footmin-noLH-noHX']=1
#
###
# [Categorical:]
#
# A metrical position should not contain more than one syllable:
# [use to discourage disyllabic positions]
#Cs['footmin-none']=1
#
# A strong metrical position should not contain more than one syllable:
#Cs['footmin-no-s']=1
#
# A weak metrical position should not contain more than one syllable:
#Cs['footmin-no-w']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *first* or *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions,
# or an initial "extrametrical" syllable]
#Cs['footmin-none-unless-in-first-two-positions']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions]
#Cs['footmin-none-unless-in-second-position']=1
#
# A strong metrical position should not contain more than one syllable,
# *unless* it is preceded by a disyllabic *weak* metrical position:
# [use to implement the metrical pattern described by Derek Attridge,
# in The Rhythms of English Poetry (1982), and commented on by Bruce Hayes
# in his review of the book in Language 60.1 (1984).
# e.g. Shakespeare's "when.your|SWEET.IS|ue.your|SWEET.FORM|should|BEAR"
# [this implementation is different in that it only takes into account
# double-weak beats *preceding* -- due to the way in which the parser
# throws away bounded parses as it goes, it might not be possible for now
# to write a constraint referencing future positions]
#Cs['footmin-no-s-unless-preceded-by-ww']=10
# [The version that does reference future positions; but appears to be unstable]:
#Cs['attridge-ss-not-by-ww']=10
#
###
# [For disyllabic positions crossing a word boundary...
# (i.e. having two syllables, each from a different word)...
#
# ...allow only F-resolutions:
# (both words must be function words and be in a weak metrical position)
Cs['footmin-f-resolution']=1
#
# ...it should never cross a word boundary to begin with:
#Cs['footmin-wordbound']=1000
#
# ...both words should be function words:
#Cs['footmin-wordbound-bothnotfw']=1
#
# ...at least one word should be a function word:
#Cs['footmin-wordbound-neitherfw']=1
#
# ...the left-hand syllable should be a function-word:
#Cs['footmin-wordbound-leftfw']=1
#
# ...the right-hand syllable should be a function word:
#Cs['footmin-wordbound-rightfw']=1
#
# ...neither word should be a monosyllable:
#Cs['footmin-wordbound-nomono']=1
#
# ...neither word should be a LEXICAL monosyllable
# (i.e. function words and polysyllabic words ok)
#Cs['footmin-wordbound-lexmono']=1
###
# [Miscellaneous constraints relating to disyllabic positions]
#
# A disyllabic metrical position may contain a strong syllable
# of a lexical word only if the syllable is (i) light and
# (ii) followed within the same position by an unstressed
# syllable normally belonging to the same word.
# [written by Sam Bowman]
#Cs['footmin-strongconstraint']=1
#
# The final metrical position of the line should not be 'ww'
# [use to encourage "...LI|ber|TY" rather than "...LI|ber.ty"]
#Cs['posthoc-no-final-ww']=2
#
# The final metrical position of the line should not be 'w' or 'ww'
#Cs['posthoc-no-final-w']=2
#
# A line should have all 'ww' or all 'w':
# It works by:
# Nw = Number of weak positions in the line
# Mw = Maximum number of occurrences of 'w' metrical position
# Mww = Maximum number of occurrences of 'ww' metrical position
# M = Whichever is bigger, Mw or Mww
# V = Nw - M
# Violation Score = V * [Weight]
# [use to encourage consistency of meter across line]
# [feel free to make this a decimal number, like 0.25]
#Cs['posthoc-standardize-weakpos']=1
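# A worked example of the computation above (hypothetical parse, for illustration
# only): a line whose weak positions are realized as [w, ww, w, w, ww] gives
# Nw = 5, Mw = 3, Mww = 2, so M = 3 and V = 5 - 3 = 2; with a weight of 1 the
# violation score would be 2, and with 0.25 it would be 0.5.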
#
#
#
######
# [MISCELLANEOUS constraints]
#
# A function word can fall only in a weak position:
#Cs['functiontow']=2
#
# An initial syllable must be in a weak position:
#Cs['initialstrong']=2
#
# The first metrical position will not be evaluated
# for any of the strength/stress/weight correspondence constraints:
# [set to 1 to be true]
#Cs['extrametrical-first-pos']=1
#
# The first two metrical positions will not be evaluated
# for any of the strength/stress/weight correspondence constraints:
# [set to 1 to be true]
Cs['skip_initial_foot']=1
#
# A word should not be an elision [use to discourage elisions]:
#Cs['word-elision']=1
#
# A weak metrical position should not contain any syllables
# that are stressed and heavy: [Meter of Finnish "Kalevala"]
#Cs['kalevala.w=>-p']=1
#
# A strong metrical position should not contain any syllables
# that are stressed and light: [Meter of Finnish "Kalevala"]
#Cs['kalevala.s=>-u']=1
############################################
| quadrismegistus/prosodic | meters/strength_and_resolution.py | Python | gpl-3.0 | 10,457 |
#!/usr/bin/env python
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import grdab
class qa_measure_processing_rate(gr_unittest.TestCase):
"""
@brief QA for measure processing rate sink.
This class implements a test bench to verify the corresponding C++ class.
"""
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_measure_processing_rate(self):
src = blocks.null_source(gr.sizeof_gr_complex)
throttle = blocks.throttle(gr.sizeof_gr_complex, 1000000)
head = blocks.head(gr.sizeof_gr_complex, 200000)
sink = grdab.measure_processing_rate(gr.sizeof_gr_complex,100000)
self.tb.connect(src, throttle, head, sink)
self.tb.run()
rate = sink.processing_rate()
assert(rate > 900000 and rate < 1100000)
def test_002_measure_processing_rate(self):
src = blocks.null_source(gr.sizeof_char)
throttle = blocks.throttle(gr.sizeof_char, 10000000)
head = blocks.head(gr.sizeof_char, 1000000)
sink = grdab.measure_processing_rate(gr.sizeof_char,1000000)
self.tb.connect(src, throttle, head, sink)
self.tb.run()
rate = sink.processing_rate()
assert(rate > 8000000 and rate < 12000000)
if __name__ == '__main__':
gr_unittest.main()
| andrmuel/gr-dab | python/qa/qa_measure_processing_rate.py | Python | gpl-3.0 | 1,233 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import unittest as ut
import espressomd
import espressomd.electrostatics
import espressomd.interactions
from espressomd import drude_helpers
class Drude(ut.TestCase):
@ut.skipIf(not espressomd.has_features("P3M", "THOLE", "LANGEVIN_PER_PARTICLE"), "Test needs P3M, THOLE and LANGEVIN_PER_PARTICLE")
def test(self):
"""
Sets up a BMIM PF6 pair separated in y-direction with fixed cores.
Adds the Drude particles and related features (intramolecular exclusion bonds, Thole screening)
via helper functions.
Calculates the induced dipole moment and the diagonals of the polarization tensor
and compares against reference results, which were reproduced with LAMMPS.
"""
box_l = 50
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.seed = system.cell_system.get_state()['n_nodes'] * [12]
np.random.seed(12)
#Reference Results, reproduced with LAMMPS
#Dipole Moments
ref_mu0_pf6 = [0.00177594, 0.16480996, -0.01605161]
ref_mu0_c1 = [0.00076652, 0.15238767, 0.00135291]
ref_mu0_c2 = [-0.00020222, 0.11084197, 0.00135842]
ref_mu0_c3 = [0.00059177, 0.23949626, -0.05238468]
ref_mu0_bmim = [0.00115606, 0.5027259, -0.04967335]
#Polarisation Tensor diagonals
ref_pol_pf6 = [
4.5535698335873445, 4.7558611769477697, 4.5546580162000554]
ref_pol_bmim = [
13.126868394164262, 14.392582501485913, 16.824150151623762]
#TIMESTEP
fs_to_md_time = 1.0e-2
time_step_fs = 0.5
time_step_ns = time_step_fs * 1e-6
dt = time_step_fs * fs_to_md_time
#COM TEMPERATURE
#Global thermostat temperature, for com and langevin.
#LangevinPerParticle temperature is set to 0 for drude and core to properly account for com forces.
# That way, the Langevin thermostat can still be used for non-Drude
# particles
SI_temperature = 300.0
gamma_com = 1.0
kb_kjmol = 0.0083145
temperature_com = SI_temperature * kb_kjmol
# COULOMB PREFACTOR (elementary charge)^2 / (4*pi*epsilon_0) in
# Angstrom * kJ/mol
coulomb_prefactor = 1.67101e5 * kb_kjmol
#POLARIZATION
#polarization = 1.0 #In (Angstrom^3)_CGS
# alpha_SI = 4*Pi*eps_0 alpha_CGS;
# 4*Pi*epsilon_0*Angstrom^3/((elementary charge)^2*Angstrom^2*N_A/kJ)
conv_pol_CGS_SI = 7.197586e-4
#alpha = conv_pol_CGS_SI*args.polarization
#DRUDE/TOTAL MASS
#lamoureux03 used values 0.1-0.8 g/mol for drude mass
mass_drude = 0.8
mass_tot = 100.0
mass_core = mass_tot - mass_drude
mass_red_drude = mass_drude * mass_core / mass_tot
#SPRING CONSTANT DRUDE
#Used 1000kcal/mol/A^2 from lamoureux03a table 1 p 3031
k_drude = 4184.0
# in kJ/mol/A^2
T_spring = 2.0 * np.pi * np.sqrt(mass_drude / k_drude)
#T_spring_fs = T_spring/fs_to_md_time
#Period of free oscillation: T_spring = 2Pi/w; w = sqrt(k_d/m_d)
#TEMP DRUDE
# Used T* = 1K from lamoureux03a p 3031 (2) 'Cold drude oscillators
# regime'
SI_temperature_drude = 1.0
temperature_drude = SI_temperature_drude * kb_kjmol
#GAMMA DRUDE
#Thermostat relaxation time should be similar to T_spring
gamma_drude = mass_red_drude / T_spring
system.cell_system.skin = 0.4
system.time_step = dt
#Forcefield
types = {"PF6": 0, "BMIM_C1": 1, "BMIM_C2": 2, "BMIM_C3":
3, "BMIM_COM": 4, "PF6_D": 5, "BMIM_C1_D": 6, "BMIM_C2_D": 7, "BMIM_C3_D": 8}
charges = {"PF6": -0.78, "BMIM_C1": 0.4374,
"BMIM_C2": 0.1578, "BMIM_C3": 0.1848, "BMIM_COM": 0}
polarizations = {"PF6": 4.653, "BMIM_C1":
5.693, "BMIM_C2": 2.103, "BMIM_C3": 7.409}
masses = {"PF6": 144.96, "BMIM_C1": 67.07,
"BMIM_C2": 15.04, "BMIM_C3": 57.12, "BMIM_COM": 0}
masses["BMIM_COM"] = masses["BMIM_C1"] + \
masses["BMIM_C2"] + masses["BMIM_C3"]
box_center = 0.5 * np.array(3 * [box_l])
system.min_global_cut = 3.5
#Place Particles
dmol = 5.0
#Test Anion
pos_pf6 = box_center + np.array([0, dmol, 0])
system.part.add(id=0, type=types["PF6"], pos=pos_pf6, q=charges[
"PF6"], mass=masses["PF6"], fix=[1, 1, 1])
pos_com = box_center - np.array([0, dmol, 0])
system.part.add(id=2, type=types["BMIM_C1"], pos=pos_com + [
0, -0.527, 1.365], q=charges["BMIM_C1"], mass=masses["BMIM_C1"], fix=[1, 1, 1])
system.part.add(id=4, type=types["BMIM_C2"], pos=pos_com + [
0, 1.641, 2.987], q=charges["BMIM_C2"], mass=masses["BMIM_C2"], fix=[1, 1, 1])
system.part.add(id=6, type=types["BMIM_C3"], pos=pos_com + [
0, 0.187, -2.389], q=charges["BMIM_C3"], mass=masses["BMIM_C3"], fix=[1, 1, 1])
system.thermostat.set_langevin(kT=temperature_com, gamma=gamma_com)
p3m = espressomd.electrostatics.P3M(
prefactor=coulomb_prefactor, accuracy=1e-4, mesh=[18, 18, 18], cao=5)
system.actors.add(p3m)
#Drude related Bonds
thermalized_dist_bond = espressomd.interactions.ThermalizedBond(
temp_com=temperature_com, gamma_com=gamma_com, temp_distance=temperature_drude, gamma_distance=gamma_drude, r_cut=1.0)
harmonic_bond = espressomd.interactions.HarmonicBond(
k=k_drude, r_0=0.0, r_cut=1.0)
system.bonded_inter.add(thermalized_dist_bond)
system.bonded_inter.add(harmonic_bond)
drude_helpers.add_drude_particle_to_core(system, harmonic_bond, thermalized_dist_bond, system.part[
0], 1, types["PF6_D"], polarizations["PF6"], mass_drude, coulomb_prefactor, 2.0)
drude_helpers.add_drude_particle_to_core(system, harmonic_bond, thermalized_dist_bond, system.part[
2], 3, types["BMIM_C1_D"], polarizations["BMIM_C1"], mass_drude, coulomb_prefactor, 2.0)
drude_helpers.add_drude_particle_to_core(system, harmonic_bond, thermalized_dist_bond, system.part[
4], 5, types["BMIM_C2_D"], polarizations["BMIM_C2"], mass_drude, coulomb_prefactor, 2.0)
drude_helpers.add_drude_particle_to_core(system, harmonic_bond, thermalized_dist_bond, system.part[
6], 7, types["BMIM_C3_D"], polarizations["BMIM_C3"], mass_drude, coulomb_prefactor, 2.0)
#Setup and add Drude-Core SR exclusion bonds
drude_helpers.setup_and_add_drude_exclusion_bonds(system)
#Setup intramol SR exclusion bonds once
drude_helpers.setup_intramol_exclusion_bonds(
system, [6, 7, 8], [1, 2, 3], [charges["BMIM_C1"], charges["BMIM_C2"], charges["BMIM_C3"]])
#Add bonds per molecule
drude_helpers.add_intramol_exclusion_bonds(
system, [3, 5, 7], [2, 4, 6])
#Thole
drude_helpers.add_all_thole(system)
def dipole_moment(id_core, id_drude):
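# Induced dipole of one Drude pair: the Drude charge times the
# core-to-Drude displacement vector (in e*Angstrom here, converted to
# Debye later via eA_to_Debye).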
pc = system.part[id_core]
pd = system.part[id_drude]
v = pd.pos - pc.pos
return pd.q * v
def measure_dipole_moments():
dm_pf6 = []
dm_C1 = []
dm_C2 = []
dm_C3 = []
system.integrator.run(115)
for i in range(100):
system.integrator.run(1)
dm_pf6.append(dipole_moment(0, 1))
dm_C1.append(dipole_moment(2, 3))
dm_C2.append(dipole_moment(4, 5))
dm_C3.append(dipole_moment(6, 7))
dm_pf6_m = np.mean(dm_pf6, axis=0)
dm_C1_m = np.mean(dm_C1, axis=0)
dm_C2_m = np.mean(dm_C2, axis=0)
dm_C3_m = np.mean(dm_C3, axis=0)
dm_sum_bmim = dm_C1_m + dm_C2_m + dm_C3_m
res = dm_pf6_m, dm_C1_m, dm_C2_m, dm_C3_m, dm_sum_bmim
return res
def setElectricField(E):
E = np.array(E)
for p in system.part:
p.ext_force = p.q * E
def calc_pol(mu0, muE, E):
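# Finite-difference estimate of the polarizability, alpha = (mu(E) - mu(0)) / E,
# converted back to CGS Angstrom^3 via conv_pol_CGS_SI so it can be compared
# with the reference values above.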
pol = (muE - mu0) / E / conv_pol_CGS_SI
return pol
def measure_pol(Es, dim):
E = [0.0, 0.0, 0.0]
E[dim] = Es
setElectricField(E)
mux_pf6, mux_c1, mux_c2, mux_c3, mux_bmim = measure_dipole_moments(
)
return calc_pol(mu0_pf6[dim], mux_pf6[dim], Es), calc_pol(mu0_bmim[dim], mux_bmim[dim], Es)
mu0_pf6, mu0_c1, mu0_c2, mu0_c3, mu0_bmim = measure_dipole_moments()
eA_to_Debye = 4.8032047
atol = 1e-2
rtol = 1e-2
np.testing.assert_allclose(
ref_mu0_pf6, eA_to_Debye * mu0_pf6, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_mu0_c1, eA_to_Debye * mu0_c1, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_mu0_c2, eA_to_Debye * mu0_c2, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_mu0_c3, eA_to_Debye * mu0_c3, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_mu0_bmim, eA_to_Debye * mu0_bmim, atol=atol, rtol=rtol)
pol_pf6 = []
pol_bmim = []
Efield = 96.48536 # = 1 V/A in kJ / (Avogadro Number) / Angstrom / elementary charge
res = measure_pol(Efield, 0)
pol_pf6.append(res[0])
pol_bmim.append(res[1])
res = measure_pol(Efield, 1)
pol_pf6.append(res[0])
pol_bmim.append(res[1])
res = measure_pol(Efield, 2)
pol_pf6.append(res[0])
pol_bmim.append(res[1])
np.testing.assert_allclose(
ref_pol_pf6, pol_pf6, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_pol_bmim, pol_bmim, atol=atol, rtol=rtol)
if __name__ == "__main__":
ut.main()
| hmenke/espresso | testsuite/python/drude.py | Python | gpl-3.0 | 10,864 |