# -*- coding: utf-8 -*-
import diaper
import fauxfactory
import pytest
from mgmtsystem import exceptions
from cfme.common.vm import VM
from cfme.configure.configuration import VMAnalysisProfile
from cfme.control.explorer import (
VMCompliancePolicy, VMCondition, PolicyProfile)
from cfme.web_ui import flash, toolbar
from fixtures.pytest_store import store
from utils import testgen, version
from utils.appliance import Appliance, ApplianceException, provision_appliance
from utils.log import logger
from utils.update import update
from utils.wait import wait_for
from urlparse import urlparse
from cfme import test_requirements
PREFIX = "test_compliance_"
pytestmark = [
# TODO: Problems with fleecing configuration - revisit later
pytest.mark.ignore_stream("upstream"),
pytest.mark.meta(server_roles=["+automate", "+smartstate", "+smartproxy"]),
pytest.mark.uncollectif(lambda provider: provider.type in {"scvmm"}),
pytest.mark.tier(3),
test_requirements.control
]
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.infra_providers(
metafunc, required_fields=["vm_analysis"])
testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")
def wait_for_ssa_enabled():
wait_for(
lambda: not toolbar.is_greyed('Configuration', 'Perform SmartState Analysis'),
delay=10, handle_exception=True, num_sec=600, fail_func=lambda: toolbar.select("Reload"))
@pytest.yield_fixture(scope="module")
def compliance_vm(request, provider):
try:
ip_addr = urlparse(store.base_url).hostname
appl_name = provider.mgmt.get_vm_name_from_ip(ip_addr)
appliance = Appliance(provider.key, appl_name)
logger.info(
"The tested appliance (%s) is already on this provider (%s) so reusing it.",
appl_name, provider.key)
try:
appliance.configure_fleecing()
except (EOFError, ApplianceException) as e:
# If something was happening, restart and wait for the UI to reappear to prevent errors
appliance.ipapp.reboot()
pytest.skip(
"Error during appliance configuration. Skipping:\n{}: {}".format(
type(e).__name__, str(e)))
vm = VM.factory(appl_name, provider)
except exceptions.VMNotFoundViaIP:
logger.info("Provisioning a new appliance on provider %s.", provider.key)
appliance = provision_appliance(
vm_name_prefix=PREFIX + "host_",
version=str(version.current_version()),
provider_name=provider.key)
request.addfinalizer(lambda: diaper(appliance.destroy))
try:
appliance.configure(setup_fleece=True)
except (EOFError, ApplianceException) as e: # Add known exceptions as needed.
pytest.skip(
"Error during appliance configuration. Skipping:\n{}: {}".format(
type(e).__name__, str(e)))
vm = VM.factory(appliance.vm_name, provider)
if provider.type in {"rhevm"}:
request.addfinalizer(appliance.remove_rhev_direct_lun_disk)
# Do the final touches
with appliance.ipapp(browser_steal=True) as appl:
appl.set_session_timeout(86400)
provider.refresh_provider_relationships()
vm.wait_to_appear()
vm.load_details()
wait_for_ssa_enabled()
yield vm
@pytest.yield_fixture(scope="module")
def analysis_profile(compliance_vm):
ap = VMAnalysisProfile(
name="default", description="ap-desc", files=[],
categories=["check_software"])
if ap.exists:
ap.delete()
with ap:
yield ap
@pytest.fixture(scope="module")
def fleecing_vm(
request, compliance_vm, provider, analysis_profile):
logger.info("Provisioning an appliance for fleecing on %s", provider.key)
# TODO: When we get something smaller, use it!
appliance = provision_appliance(
vm_name_prefix=PREFIX + "for_fleece_",
version=str(version.current_version()),
provider_name=provider.key)
request.addfinalizer(lambda: diaper(appliance.destroy))
logger.info("Appliance %s provisioned", appliance.vm_name)
vm = VM.factory(appliance.vm_name, provider)
provider.refresh_provider_relationships()
vm.wait_to_appear()
return vm
def do_scan(vm, additional_item_check=None):
if vm.rediscover_if_analysis_data_present():
# policy profile assignment is lost so reassign
vm.assign_policy_profiles(*vm.assigned_policy_profiles)
def _scan():
return vm.get_detail(properties=("Lifecycle", "Last Analyzed")).lower()
original = _scan()
if additional_item_check is not None:
original_item = vm.get_detail(properties=additional_item_check)
vm.smartstate_scan(cancel=False, from_details=True)
flash.assert_message_contain(version.pick({
version.LOWEST: "Smart State Analysis initiated",
"5.5": "Analysis initiated for 1 VM and Instance from the CFME Database"}))
logger.info("Scan initiated")
wait_for(
lambda: _scan() != original,
num_sec=600, delay=5, fail_func=lambda: toolbar.select("Reload"))
if additional_item_check is not None:
wait_for(
lambda: vm.get_detail(properties=additional_item_check) != original_item,
num_sec=120, delay=5, fail_func=lambda: toolbar.select("Reload"))
logger.info("Scan finished")
def test_check_package_presence(request, fleecing_vm, ssh_client, analysis_profile):
"""This test checks compliance by presence of a certain cfme-appliance package which is expected
to be present on an appliance."""
# TODO: If we step out from provisioning a full appliance for fleecing, this might need revisit
condition = VMCondition(
"Compliance testing condition {}".format(fauxfactory.gen_alphanumeric(8)),
expression=("fill_find(field=VM and Instance.Guest Applications : Name, "
"skey=STARTS WITH, value=cfme-appliance, check=Check Count, ckey= = , cvalue=1)")
)
request.addfinalizer(lambda: diaper(condition.delete))
policy = VMCompliancePolicy("Compliance {}".format(fauxfactory.gen_alphanumeric(8)))
request.addfinalizer(lambda: diaper(policy.delete))
policy.create()
policy.assign_conditions(condition)
profile = PolicyProfile(
"Compliance PP {}".format(fauxfactory.gen_alphanumeric(8)),
policies=[policy]
)
request.addfinalizer(lambda: diaper(profile.delete))
profile.create()
fleecing_vm.assign_policy_profiles(profile.description)
request.addfinalizer(lambda: fleecing_vm.unassign_policy_profiles(profile.description))
with update(analysis_profile):
analysis_profile.categories = [
"check_services", "check_accounts", "check_software", "check_vmconfig", "check_system"]
do_scan(fleecing_vm)
assert fleecing_vm.check_compliance_and_wait()
def test_check_files(request, fleecing_vm, ssh_client, analysis_profile):
"""This test checks presence and contents of a certain file. Due to caching, an existing file
is checked.
"""
check_file_name = "/etc/sudo.conf"
check_file_contents = "sudoers_policy" # The file contains: `Plugin sudoers_policy sudoers.so`
condition = VMCondition(
"Compliance testing condition {}".format(fauxfactory.gen_alphanumeric(8)),
expression=("fill_find(VM and Instance.Files : Name, "
"=, {}, Check Any, Contents, INCLUDES, {})".format(
check_file_name, check_file_contents))
)
request.addfinalizer(lambda: diaper(condition.delete))
policy = VMCompliancePolicy("Compliance {}".format(fauxfactory.gen_alphanumeric(8)))
request.addfinalizer(lambda: diaper(policy.delete))
policy.create()
policy.assign_conditions(condition)
profile = PolicyProfile(
"Compliance PP {}".format(fauxfactory.gen_alphanumeric(8)),
policies=[policy]
)
request.addfinalizer(lambda: diaper(profile.delete))
profile.create()
fleecing_vm.assign_policy_profiles(profile.description)
request.addfinalizer(lambda: fleecing_vm.unassign_policy_profiles(profile.description))
with update(analysis_profile):
analysis_profile.files = [(check_file_name, True)]
analysis_profile.categories = [
"check_services", "check_accounts", "check_software", "check_vmconfig", "check_system"]
do_scan(fleecing_vm, ("Configuration", "Files"))
assert fleecing_vm.check_compliance_and_wait()
# ==== kzvyahin/cfme_tests | cfme/tests/control/test_compliance.py | Python | gpl-2.0 | 8,549 bytes ====
# Licensed under the GPLv3 - see LICENSE
"""
Definitions for VLBI Mark5B Headers.
Implements a Mark5BHeader class used to store header words, and decode/encode
the information therein.
For the specification, see
http://www.haystack.edu/tech/vlbi/mark5/docs/Mark%205B%20users%20manual.pdf
"""
import numpy as np
import astropy.units as u
from astropy.time import Time
from ..vlbi_base.header import HeaderParser, VLBIHeaderBase, four_word_struct
from ..vlbi_base.utils import bcd_decode, bcd_encode, CRC
__all__ = ['CRC16', 'crc16', 'Mark5BHeader']
CRC16 = 0x18005
"""CRC polynomial used for Mark 5B Headers, as a check on the time code.
x^16 + x^15 + x^2 + 1, i.e., 0x18005.
See page 11 of http://www.haystack.mit.edu/tech/vlbi/mark5/docs/230.3.pdf
(defined there for VLBA headers).
This is also CRC-16-IBM mentioned in
https://en.wikipedia.org/wiki/Cyclic_redundancy_check
"""
crc16 = CRC(CRC16)
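# A minimal usage sketch (hedged; it assumes the CRC helper behaves exactly as
# it is used in Mark5BHeader.update below): crc16 maps an array of message bits
# to the array of 16 check bits appended by the encoder.
#
#   import numpy as np
#   bits = np.array([int(b) for b in '{:012b}'.format(0x123)], dtype=np.uint8)
#   check = crc16(bits)  # array of 16 bits
#   crc_value = int(''.join('{:1d}'.format(c) for c in check), base=2)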
class Mark5BHeader(VLBIHeaderBase):
"""Decoder/encoder of a Mark5B Frame Header.
See page 15 of
http://www.haystack.edu/tech/vlbi/mark5/docs/Mark%205B%20users%20manual.pdf
Parameters
----------
words : tuple of int, or None
Four 32-bit unsigned int header words. If `None`, set to a tuple of
zeros for later initialisation.
kday : int or None
Explicit thousands of MJD of the observation time (needed to remove
ambiguity in the Mark 5B time stamp). Can instead pass an approximate
``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 500 days of the observation time, used to infer
the full MJD. Used only if ``kday`` is not given.
verify : bool, optional
Whether to do basic verification of integrity. Default: `True`.
Returns
-------
header : `Mark5BHeader`
"""
_header_parser = HeaderParser(
(('sync_pattern', (0, 0, 32, 0xABADDEED)),
('user', (1, 16, 16)),
('internal_tvg', (1, 15, 1)),
('frame_nr', (1, 0, 15)),
('bcd_jday', (2, 20, 12)),
('bcd_seconds', (2, 0, 20)),
('bcd_fraction', (3, 16, 16)),
('crc', (3, 0, 16))))
_sync_pattern = _header_parser.defaults['sync_pattern']
_struct = four_word_struct
_properties = ('payload_nbytes', 'frame_nbytes', 'kday', 'jday', 'seconds',
'fraction', 'time')
"""Properties accessible/usable in initialisation."""
kday = None
_payload_nbytes = 10000 # 2500 words
def __init__(self, words, kday=None, ref_time=None, verify=True, **kwargs):
super().__init__(words, verify=False, **kwargs)
if kday is not None:
self.kday = kday
elif ref_time is not None:
self.infer_kday(ref_time)
if verify:
self.verify()
def verify(self):
"""Verify header integrity."""
assert len(self.words) == 4
assert self['sync_pattern'] == self._sync_pattern
assert self.kday is None or (33000 < self.kday < 400000)
if self.kday is not None:
assert self.kday % 1000 == 0, "kday must be thousands of MJD."
def copy(self, **kwargs):
return super().copy(kday=self.kday, **kwargs)
@classmethod
def fromvalues(cls, *, verify=True, **kwargs):
"""Initialise a header from parsed values.
Here, the parsed values must be given as keyword arguments, i.e., for
any ``header = cls(<data>)``, ``cls.fromvalues(**header) == header``.
However, unlike for the :meth:`Mark5BHeader.fromkeys` class method,
data can also be set using arguments named after methods, such as
``jday`` and ``seconds``.
Given defaults:
sync_pattern : 0xABADDEED
Values set by other keyword arguments (if present):
bcd_jday : from ``jday`` or ``time``
bcd_seconds : from ``seconds`` or ``time``
bcd_fraction : from ``fraction`` or ``time`` (may need ``frame_rate``)
frame_nr : from ``time`` (may need ``frame_rate``)
"""
time = kwargs.pop('time', None)
frame_rate = kwargs.pop('frame_rate', None)
# Pop verify and pass on False so verify happens after time is set.
self = super().fromvalues(verify=False, **kwargs)
if time is not None:
self.set_time(time, frame_rate=frame_rate)
self.update() # Recalculate CRC.
if verify:
self.verify()
return self
def update(self, *, crc=None, verify=True, **kwargs):
"""Update the header by setting keywords or properties.
Here, any keywords matching header keys are applied first, and any
remaining ones are used to set header properties, in the order set
by the class (in ``_properties``).
Parameters
----------
crc : int or None, optional
If `None` (default), recalculate the CRC after updating.
verify : bool, optional
If `True` (default), verify integrity after updating.
**kwargs
Arguments used to set keywords and properties.
"""
if crc is not None:
return super().update(verify=verify, crc=crc, **kwargs)
super().update(verify=False, **kwargs)
# Do not use words 2 & 3 directly, so that this works also if part
# of a VDIF header, where the time information is in words 7 & 8.
stream = '{:012b}{:020b}{:016b}'.format(self['bcd_jday'],
self['bcd_seconds'],
self['bcd_fraction'])
stream = np.array([int(b) for b in stream], dtype=np.uint8)
crc = crc16(stream)
self['crc'] = int(''.join(['{:1d}'.format(c) for c in crc]), base=2)
if verify:
self.verify()
def infer_kday(self, ref_time):
"""Uses a reference time to set a header's ``kday``.
Parameters
----------
ref_time : `~astropy.time.Time`
Reference time within 500 days of the observation time.
"""
self.kday = np.round(ref_time.mjd - self.jday, decimals=-3).astype(int)
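    # Worked example with hypothetical values: if jday == 125 and the reference
    # time is at MJD 57345, then np.round(57345 - 125, decimals=-3) == 57000,
    # so the full day number becomes kday + jday == 57125.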
@property
def payload_nbytes(self):
"""Size of the payload in bytes."""
return self._payload_nbytes # Hardcoded in class definition.
@payload_nbytes.setter
def payload_nbytes(self, payload_nbytes):
if payload_nbytes != self._payload_nbytes: # 2500 words.
raise ValueError("Mark 5B payload has a fixed size of 10000 bytes "
"(2500 words).")
@property
def frame_nbytes(self):
"""Size of the frame in bytes."""
return self.nbytes + self.payload_nbytes
@frame_nbytes.setter
def frame_nbytes(self, frame_nbytes):
if frame_nbytes != self.nbytes + self.payload_nbytes:
raise ValueError("Mark 5B frame has a fixed size of 10016 bytes "
"(4 header words plus 2500 payload words).")
@property
def jday(self):
"""Last three digits of MJD (decoded from 'bcd_jday')."""
return bcd_decode(self['bcd_jday'])
@jday.setter
def jday(self, jday):
self['bcd_jday'] = bcd_encode(jday)
@property
def seconds(self):
"""Integer seconds on day (decoded from 'bcd_seconds')."""
return bcd_decode(self['bcd_seconds'])
@seconds.setter
def seconds(self, seconds):
self['bcd_seconds'] = bcd_encode(seconds)
@property
def fraction(self):
"""Fractional seconds (decoded from 'bcd_fraction').
The fraction is stored to 0.1 ms accuracy. Following mark5access, this
is "unrounded" to give the exact time of the start of the frame for any
        total bit rate below 512 Mbps. For rates above this value, it is no
        longer guaranteed that subsequent frames have unique fractions.
Note to the above: since a Mark5B frame contains 80000 bits, the total
bit rate for which times can be unique would in principle be 800 Mbps.
However, standard VLBI only uses bit rates that are powers of 2 in MHz.
"""
ns = bcd_decode(self['bcd_fraction']) * 100000
# "Unround" the nanoseconds, and turn into fractional seconds.
return (156250 * ((ns + 156249) // 156250)) / 1e9
@fraction.setter
def fraction(self, fraction):
ns = round(fraction * 1.e9)
# From inspecting sample files, the fraction appears to be truncated,
# not rounded.
fraction = int(ns / 100000)
self['bcd_fraction'] = bcd_encode(fraction)
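    # Worked example of the "unrounding" above (hypothetical header value):
    # a decoded fraction of 0.1 ms gives ns == 100000, which is bumped up to
    # 156250 * ((100000 + 156249) // 156250) == 156250 ns, i.e. 0.00015625 s.
    # 156250 ns is one frame period at 512 Mbps (80000 bits per frame), hence
    # the bit-rate limit quoted in the docstrings.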
def get_time(self, frame_rate=None):
"""Convert year, BCD time code to Time object.
Calculate time using `jday`, `seconds`, and `fraction` properties
(which reflect the bcd-encoded 'bcd_jday', 'bcd_seconds' and
'bcd_fraction' header items), plus `kday` from the initialisation. See
http://www.haystack.edu/tech/vlbi/mark5/docs/Mark%205B%20users%20manual.pdf
Note that some non-compliant files do not have 'bcd_fraction' set.
For those, the time can still be calculated using the header's
'frame_nr' by passing in a frame rate.
Furthermore, fractional seconds are stored only to 0.1 ms accuracy.
In the code, this is "unrounded" to give the exact time of the start
of the frame for any total bit rate below 512 Mbps. For higher rates,
it is no longer guaranteed that subsequent frames have unique
`fraction`, and one should pass in an explicit frame rate instead.
Parameters
----------
frame_rate : `~astropy.units.Quantity`, optional
Used to calculate the fractional second from the frame number
instead of from the header's `fraction`.
Returns
-------
`~astropy.time.Time`
"""
frame_nr = self['frame_nr']
if frame_nr == 0:
fraction = 0.
elif frame_rate is None:
fraction = self.fraction
if fraction == 0.:
raise ValueError('header does not provide correct fractional '
'second (it is zero for non-zero frame '
'number). Please pass in a frame_rate.')
else:
fraction = (frame_nr / frame_rate).to_value(u.s)
return Time(self.kday + self.jday, (self.seconds + fraction) / 86400,
format='mjd', scale='utc', precision=9)
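    # A hedged round-trip sketch (hypothetical values): with kday=57000,
    # jday=125, seconds=3600 and fraction=0.25, get_time() returns
    # Time(57125, 3600.25 / 86400, format='mjd', scale='utc', precision=9).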
def set_time(self, time, frame_rate=None):
"""
Convert Time object to BCD timestamp elements and 'frame_nr'.
For non-integer seconds, the frame number will be calculated if not
given explicitly. Doing so requires the frame rate.
Parameters
----------
time : `~astropy.time.Time`
The time to use for this header.
frame_rate : `~astropy.units.Quantity`, optional
For calculating 'frame_nr' from the fractional seconds.
"""
self.kday = int(time.mjd // 1000) * 1000
self.jday = int(time.mjd - self.kday)
seconds = time - Time(self.kday + self.jday, format='mjd')
int_sec = int(seconds.sec)
fraction = seconds - int_sec * u.s
# Round to nearest ns to handle timestamp difference errors.
if abs(fraction) < 1. * u.ns:
frame_nr = 0
frac_sec = 0.
elif abs(1. * u.s - fraction) < 1. * u.ns:
int_sec += 1
frame_nr = 0
frac_sec = 0.
else:
if frame_rate is None:
raise ValueError("cannot calculate frame rate. Pass it "
"in explicitly.")
frame_nr = int(round((fraction * frame_rate).to(u.one).value))
fraction = frame_nr / frame_rate
if abs(fraction - 1. * u.s) < 1. * u.ns:
int_sec += 1
frame_nr = 0
frac_sec = 0.
else:
frac_sec = fraction.to(u.s).value
self.seconds = int_sec
self.fraction = frac_sec
self['frame_nr'] = frame_nr
time = property(get_time, set_time)
# ==== cczhu/baseband | baseband/mark5b/header.py | Python | gpl-3.0 | 12,216 bytes ====
# -*- coding: utf-8 -*-
__all__ = ['const']
# ==== Suwings/Yeinw | src/Suwings/__init__.py | Python | gpl-3.0 | 46 bytes ====
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *************************************************************************** #
# #
# Version 0.1 - 2013-08-01 #
# Copyright (C) 2013 Marco Crippa #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #
# #
# *************************************************************************** #
#http://www.youtube.com/watch?v=4xLtXCm-Q2o
from gimpfu import *
def valencia( img, draw ):
current_f=pdb.gimp_context_get_foreground()
img.disable_undo()
    pdb.gimp_selection_all(img)
sel_size=pdb.gimp_selection_bounds(img)
w=sel_size[3]-sel_size[1]
h=sel_size[4]-sel_size[2]
pdb.gimp_selection_clear(img)
lg=pdb.gimp_layer_group_new(img)
pdb.gimp_image_insert_layer(img, lg, None, 0)
pdb.gimp_item_set_name(lg,"Valencia Effect")
#copy image
drawCopy2=pdb.gimp_layer_new_from_drawable(draw,img)
pdb.gimp_image_insert_layer(img, drawCopy2, lg, 0)
pdb.gimp_item_set_name(drawCopy2,"valenciaBG")
#copy image
drawCopy=pdb.gimp_layer_new_from_drawable(draw,img)
pdb.gimp_image_insert_layer(img, drawCopy, lg, 0)
pdb.gimp_item_set_name(drawCopy,"valenciaColor")
    pdb.gimp_layer_set_mode(drawCopy,3) # 3 = MULTIPLY layer mode
    pdb.gimp_selection_all(img)
pdb.gimp_context_set_foreground((246,221,173))
pdb.gimp_edit_fill(drawCopy,0)
pdb.gimp_selection_clear(img)
#merge layer
m=pdb.gimp_image_merge_down(img,drawCopy,1)
#adjust curves colors
pdb.gimp_curves_spline(m, 0, 8, (0,50, 75,110, 175,220, 255,255) )
#adjust levels colors
pdb.gimp_levels(m, 3, 0, 255, 1.0, 126, 255)
#add white border
Wborder = pdb.gimp_layer_new(img,w,h,1,"whiteBorder",100.0,0)
pdb.gimp_image_insert_layer(img, Wborder, lg, 0)
pdb.gimp_image_set_active_layer(img,Wborder)
pdb.gimp_context_set_foreground((255,255,255))
pdb.gimp_image_select_rectangle(img, 0, 0,0, w,h )
dimBorder=int( (w/100)*2 )
if dimBorder<5:
dimBorder=5
pdb.gimp_selection_shrink(img, dimBorder)
pdb.gimp_selection_invert(img)
pdb.gimp_edit_fill(Wborder,0)
pdb.gimp_selection_clear(img)
#add black border
Bborder = pdb.gimp_layer_new(img,w,h,1,"blackBorder",100.0,0)
pdb.gimp_image_insert_layer(img, Bborder, lg, 0)
pdb.gimp_image_set_active_layer(img,Bborder)
pdb.gimp_context_set_foreground((0,0,0))
pdb.gimp_image_select_rectangle(img, 0, 0,0, w,h )
dimBorder=int( (w/100)*2 )
if dimBorder<5:
dimBorder=5
pdb.gimp_selection_shrink(img, dimBorder)
pdb.gimp_selection_invert(img)
pdb.gimp_edit_fill(Bborder,0)
pdb.gimp_selection_clear(img)
img.enable_undo()
register( "gimp_instagram_valencia",
"Add Instagram Valencia effect",
"Add Instagram Valencia effect",
"Marco Crippa",
"(©) 2013 Marco Crippa",
"2013-01-08",
"<Image>/Filters/Instagram/Valencia",
'RGB*',
[],
'',
valencia)
main()
# ==== lenonr/dev_xfce | Auto_Config/base/.gimp-2.8/plug-ins/gimp_instagram_valencia.py | Python | gpl-3.0 | 4,118 bytes ====
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-04 21:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('contacts', models.ManyToManyField(to='management.Contact')),
],
options={
'ordering': ('name',),
},
),
]
# ==== charityscience/csh-sms | management/migrations/0001_initial.py | Python | gpl-3.0 | 1,061 bytes ====
import re
import requests
import lxml.html
import scrapelib
from pupa.scrape import Person, Scraper
abbr = {"D": "Democratic", "R": "Republican"}
class MIPersonScraper(Scraper):
def scrape(self, chamber=None, session=None):
if chamber == "upper":
yield from self.scrape_upper(chamber)
elif chamber == "lower":
yield from self.scrape_lower(chamber)
else:
yield from self.scrape_upper(chamber)
yield from self.scrape_lower(chamber)
def scrape_lower(self, chamber):
url = "http://www.house.mi.gov/mhrpublic/frmRepList.aspx"
table = ["website", "district", "name", "party", "location", "phone", "email"]
data = self.get(url).text
doc = lxml.html.fromstring(data)
# skip two rows at top
for row in doc.xpath('//table[@id="grvRepInfo"]/*'):
tds = row.xpath(".//td")
if len(tds) == 0:
continue
metainf = {}
for i in range(0, len(table)):
metainf[table[i]] = tds[i]
district = str(int(metainf["district"].text_content().strip()))
party = metainf["party"].text_content().strip()
phone = metainf["phone"].text_content().strip()
email = metainf["email"].text_content().strip()
name = metainf["name"].text_content().strip()
if name == "Vacant" or re.match(r"^District \d{1,3}$", name):
self.warning(
"District {} appears vacant, and will be skipped".format(district)
)
continue
leg_url = metainf["website"].xpath("./a")[0].attrib["href"]
office = metainf["location"].text_content().strip()
office = re.sub(
" HOB",
" Anderson House Office Building\n124 North Capitol Avenue\nLansing, MI 48933",
office,
)
office = re.sub(" CB", " State Capitol Building\nLansing, MI 48909", office)
try:
photo_url = self.get_photo_url(leg_url)[0]
except (scrapelib.HTTPError, IndexError):
photo_url = ""
self.warning("no photo url for %s", name)
person = Person(
name=name,
district=district,
party=abbr[party],
primary_org="lower",
image=photo_url,
)
person.add_link(leg_url)
person.add_source(leg_url)
person.add_contact_detail(
type="address", value=office, note="Capitol Office"
)
person.add_contact_detail(type="voice", value=phone, note="Capitol Office")
person.add_contact_detail(type="email", value=email, note="Capitol Office")
yield person
def scrape_upper(self, chamber):
url = "http://www.senate.michigan.gov/senatorinfo_list.html"
url_to_append = "http://www.senate.michigan.gov/_images/"
data = self.get(url).text
doc = lxml.html.fromstring(data)
doc.make_links_absolute(url)
for row in doc.xpath('//table[not(@class="calendar")]//tr')[1:]:
if len(row) != 7:
continue
# party, dist, member, office_phone, office_fax, office_loc
party, dist, member, contact, phone, fax, loc = row.getchildren()
if (
party.text_content().strip() == ""
or "Lieutenant Governor" in member.text_content()
):
continue
party = abbr[party.text]
district = dist.text_content().strip()
name = member.text_content().strip()
name = re.sub(r"\s+", " ", name)
surname = re.split(", | ", name)
name = " ".join(name.split(", ")[::-1])
surname[0] = re.sub("[']", "", surname[0])
try:
self.head(url_to_append + surname[0] + ".png")
photo_url = url_to_append + surname[0] + ".png"
except scrapelib.HTTPError:
try:
self.head(url_to_append + surname[0] + ".jpg")
photo_url = url_to_append + surname[0] + ".jpg"
except scrapelib.HTTPError:
photo_url = ""
if name == "Vacant":
self.info("district %s is vacant", district)
continue
leg_url = member.xpath("a/@href")[0]
office_phone = phone.text
office_fax = fax.text
office_loc = loc.text
office_loc = re.sub(
" Farnum Bldg",
" Farnum Office Building\n125 West Allegan Street\nLansing, MI 48933",
office_loc,
)
office_loc = re.sub(
" Capitol Bldg",
" State Capitol Building\nLansing, MI 48909",
office_loc,
)
# email addresses aren't on the list page anymore but they
# are on the page linked off "Contact Me"
# data has a typo in a row
email = None
contact_url = [
a for a in row.xpath(".//a") if a.text in ("Contact Me", "Conact Me")
][0].get("href")
try:
contact_html = self.get(contact_url).text
contact_doc = lxml.html.fromstring(contact_html)
header_email = contact_doc.xpath("//a[@class='header_email']")
if header_email:
email = header_email[0].text
else:
# not using the most common template, but maybe they
# dropped their email on the page somewhere
links = contact_doc.xpath("//a") or []
text_email = [
a for a in links if "mailto:" in (a.get("href") or "")
]
if text_email:
email = text_email[0].text
except requests.exceptions.TooManyRedirects:
self.warning("Contact Link Not Working for %s" % name)
person = Person(
name=name,
district=district,
party=party,
primary_org="upper",
image=photo_url,
)
person.add_link(leg_url)
person.add_source(leg_url)
person.add_contact_detail(
type="address", value=office_loc, note="Capitol Office"
)
person.add_contact_detail(
type="voice", value=office_phone, note="Capitol Office"
)
person.add_contact_detail(
type="fax", value=office_fax, note="Capitol Office"
)
if email:
person.add_contact_detail(
type="email", value=email, note="Capitol Office"
)
yield person
def get_photo_url(self, url):
data = self.get(url).text
doc = lxml.html.fromstring(data)
doc.make_links_absolute(url)
return doc.xpath(
'//div[contains(@class, "headshotTop")]//img/@src'
) + doc.xpath( # housedems.com
'//div[contains(@class, "widget_sp_image")]//img/@src'
) # gophouse.org
# ==== openstates/openstates | openstates/mi/people.py | Python | gpl-3.0 | 7,404 bytes ====
#!/usr/bin/env python
"""
@file generator_blpulse.py
@brief Bandlimited pulse generation
@author gm
@copyright gm 2016
This file is part of SoundTailor
SoundTailor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SoundTailor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SoundTailor. If not, see <http://www.gnu.org/licenses/>.
"""
'''
Note that all of this mimics C++ code for testing/prototyping purposes.
Hence it may not seem very "pythonic"; it is not intended to be.
'''
from bandlimited_impulse import BLPostFilter
from generator_blsawtooth import BLSawtooth
from generators_common import GeneratorInterface, IncrementAndWrap
class BLPulse(GeneratorInterface):
"""
Implements a band limited variable pulse width signal generator
"""
def __init__(self, sampling_rate, with_postfilter=True):
super(BLPulse, self).__init__(sampling_rate)
self._gen1 = BLSawtooth(sampling_rate, False)
self._gen2 = BLSawtooth(sampling_rate, False)
if with_postfilter:
self._post_filter = BLPostFilter()
def SetPhase(self, phase):
self._gen1.SetPhase(phase)
self._gen2.SetPhase(phase)
def SetFrequency(self, frequency):
self._gen1.SetFrequency(frequency)
self._gen2.SetFrequency(frequency)
def SetPulseWidth(self, pulse_width):
phase1 = self._gen1.ProcessSample()
self._gen1.SetPhase(phase1)
offset = pulse_width * 1.0
self._gen2.SetPhase(IncrementAndWrap(phase1, offset))
self._update = True
self.ProcessSample()
def ProcessSample(self):
out1 = self._gen1.ProcessSample()
out2 = self._gen2.ProcessSample()
out = 0.5 * (out1 - out2)
if hasattr(self, '_post_filter'):
return self._post_filter.process_sample(out)
else:
return out
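# A minimal usage sketch mirroring the __main__ block below: the pulse train is
# obtained by subtracting two band-limited sawtooths whose phases differ by the
# pulse width.
#
#   gen = BLPulse(48000)
#   gen.SetPulseWidth(0.25)  # 25% duty cycle
#   gen.SetFrequency(440.0)
#   samples = [gen.ProcessSample() for _ in range(1024)]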
if __name__ == "__main__":
import numpy
import pylab
from utilities import GetPredictedLength, GenerateSquareData, GetMetadata, PrintMetadata, WriteWav
sampling_freq = 48000
# Prime, as close as possible to the upper bound of 4kHz
freq = 3989.0
length = GetPredictedLength(freq / sampling_freq, 8)
pulse_width = 0.5
# Change phase
generated_data = numpy.zeros(length)
ref_data = numpy.zeros(length)
nopostfilter_data = numpy.zeros(length)
generator_ref = BLPulse(sampling_freq)
generator_ref.SetPulseWidth(pulse_width)
generator_ref.SetFrequency(freq)
generator_nopostfilter = BLPulse(sampling_freq, False)
generator_nopostfilter.SetPulseWidth(pulse_width)
generator_nopostfilter.SetFrequency(freq)
for idx in range(length):
ref_data[idx] = generator_ref.ProcessSample()
nopostfilter_data[idx] = generator_nopostfilter.ProcessSample()
generator_left = BLPulse(sampling_freq)
generator_left.SetPulseWidth(pulse_width)
generator_left.SetFrequency(freq)
for idx in range(length / 2):
generated_data[idx] = generator_left.ProcessSample()
generator_right = BLPulse(sampling_freq)
generator_right.SetPhase(generated_data[length / 2 - 1])
generator_right.SetPulseWidth(pulse_width)
generator_right.SetFrequency(freq)
generator_right.ProcessSample()
for idx in range(length / 2, length):
generated_data[idx] = generator_right.ProcessSample()
print(PrintMetadata(GetMetadata(ref_data)))
# pylab.plot(generator_ref._table, label = "table")
pylab.plot(ref_data, label = "pulse")
pylab.plot(generated_data, label = "pieces_data")
# pylab.plot(nopostfilter_data, label="pulse_nopf")
pylab.legend()
pylab.show()
WriteWav(ref_data, "bl_pulse", sampling_freq)
# ==== G4m4/soundtailor | scripts/generator_blpulse.py | Python | gpl-3.0 | 4,122 bytes ====
#!/usr/bin/env Django_py3
"""mysite_tue URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
# ==== gmf1045/Python_Sample_codes | mysite_tue/urls.py | Python | gpl-3.0 | 794 bytes ====
"""
Omeka API script to find Word/Office documents and add HTML renditions of them to items.
Requires word2html to be in a directory parallel to this one or on the Python path.
"""
from omekaclient import OmekaClient
from omekautils import get_omeka_config
from omekautils import create_stream_logger
from sys import stdout
import argparse
import json
import os
import tempfile
#Hacky stuff as this is a one off
import sys
#Change this to point to where you downloaded word2html from the WordDown project
sys.path.append('../jischtml5/tools/commandline')
import word2html
logger = create_stream_logger('converting', stdout)
config = get_omeka_config()
parser = argparse.ArgumentParser()
parser.add_argument('-k', '--key', default=None, help='Omeka API Key')
parser.add_argument('-u', '--api_url', default=None, help='Omeka API Endpoint URL (hint, ends in /api)')
parser.add_argument('-d', '--delete_html', action='store_true', help='Delete html docs')
parser.add_argument('-n', '--do_not_convert', action='store_true', help='Do not convert')
args = vars(parser.parse_args())
endpoint = args['api_url'] if args['api_url'] is not None else config['api_url']
apikey = args['key'] if args['key'] is not None else config['key']
omeka_client = OmekaClient(endpoint.encode("utf-8"), logger, apikey)
resp, cont = omeka_client.get("items")
items = json.loads(cont)
temp_dir = tempfile.mkdtemp()
os.chmod(temp_dir, 0o2770) # Group rwx permissions plus the setgid bit
num_docs_found = 0
num_html_uploaded = 0
num_html_deleted = 0
for item in items:
logger.info('Looking at %s', item['id'])
#First pass - delete HTML if required
if args['delete_html']:
for f in omeka_client.get_files_for_item(item['id']):
fname = f['original_filename']
name, ext = os.path.splitext(fname)
if ext.lower() == ".html":
logger.info("Deleting html file: %s", f['id'])
num_html_deleted += 1
omeka_client.delete('files', f['id'])
#Second pass do the conversion if required
if not args['do_not_convert']:
for f in omeka_client.get_files_for_item(item['id']):
fname = f['original_filename']
name, ext = os.path.splitext(fname)
if ext.lower() in [".docx", ".doc", ".odt", ".rtf"]:
num_docs_found += 1
res, data = omeka_client.get_file(f['file_urls']['original'])
download_file = os.path.join(temp_dir, fname)
out = open(download_file, 'wb')
out.write(data)
out.close()
logger.info("Converting office doc file %s to HTML", f['id'])
out_dir, x = os.path.split(download_file)
html_file = os.path.join(temp_dir, name + ".html")
word2html.convert(download_file, html_file , True, True, False)
if omeka_client.post_file_from_filename(html_file, item['id']):
num_html_uploaded += 1
logger.info("Uploaded %s successfully", f['id'])
logger.info("********************")
logger.info("SUMMARY:")
logger.info("Deleted %s HTML", num_html_deleted)
logger.info("Docs found: %s", num_docs_found)
logger.info("HTML files converted and added: %s", num_html_uploaded)
if num_docs_found == num_html_uploaded:
logger.info("No errors detected")
else:
logger.error("Number of docs does not match number of HTML files uploaded")
# ==== ptsefton/omeka-python-utils | convert_word_docs_to_html.py | Python | gpl-3.0 | 3,529 bytes ====
import sys
import os
import itk
import numpy as np
if len(sys.argv) < 5 or len(sys.argv) > 6:
print("Usage: " + sys.argv[0] + "inputImage outputFOLDER numberOfIterations conductance [timeStep=0.0625]")
sys.exit(1)
print("Anisotropic Denoising %s" % sys.argv[1])
input_filename = sys.argv[1]
basename = os.path.basename(input_filename)
filename = os.path.splitext(basename)[0]
output_folder = sys.argv[2]
output_extension = "nrrd"
iters = int(sys.argv[3])
conductance = float(sys.argv[4])
time_step = 1.0/(2.0**4) # 0.0625
if len(sys.argv) == 6:
time_step = float(sys.argv[5])
output_filename = os.path.join(output_folder, filename +
"_AnisDenoise_t" + str(time_step) +
"_N" + str(iters) +
"_c" + str(conductance) +
"." + output_extension)
PixelType = itk.F
Dimension = 3
ImageType = itk.Image[PixelType, Dimension]
reader = itk.ImageFileReader[ImageType].New(FileName=input_filename)
reader.Update()
denoiser = itk.CurvatureAnisotropicDiffusionImageFilter.New(Input=reader.GetOutput())
# Apply the parsed command-line parameters to the filter.
denoiser.SetNumberOfIterations(iters)
denoiser.SetTimeStep(time_step)
denoiser.SetConductanceParameter(conductance)
denoiser.Update()
itk.ImageFileWriter.New(Input=denoiser.GetOutput(), FileName=output_filename).Update()
from subprocess import Popen
testDriver = "/home/phc/tmp/IsotropicWaveletsTestDriver"
pin = Popen([testDriver, 'runViewImage', input_filename, 'input'])
pout = Popen([testDriver, 'runViewImage', output_filename, 'denoised'])
# ==== phcerdan/ITKfilters | scripts-python/denoise_input.py | Python | gpl-3.0 | 1,451 bytes ====
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from weblate.trans.models import Change
from weblate.lang.models import Language
from weblate.trans.views.helper import (
get_translation, get_subproject, get_project
)
class ChangesFeed(Feed):
"""Generic RSS feed for Weblate changes."""
def get_object(self, request):
return request.user
def title(self):
return _('Recent changes in %s') % settings.SITE_TITLE
def description(self):
return _('All recent changes made using Weblate in %s.') % (
settings.SITE_TITLE
)
def link(self):
return reverse('home')
def items(self, obj):
return Change.objects.last_changes(obj)[:10]
def item_title(self, item):
return item.get_action_display()
def item_description(self, item):
return str(item)
def item_author_name(self, item):
return item.get_user_display(False)
def item_pubdate(self, item):
return item.timestamp
class TranslationChangesFeed(ChangesFeed):
"""RSS feed for changes in translation."""
# Arguments number differs from overridden method
# pylint: disable=W0221
def get_object(self, request, project, subproject, lang):
return get_translation(request, project, subproject, lang)
def title(self, obj):
return _('Recent changes in %s') % obj
def description(self, obj):
return _('All recent changes made using Weblate in %s.') % obj
def link(self, obj):
return obj.get_absolute_url()
def items(self, obj):
return Change.objects.filter(
translation=obj
)[:10]
class SubProjectChangesFeed(TranslationChangesFeed):
"""RSS feed for changes in subproject."""
# Arguments number differs from overridden method
# pylint: disable=W0221
def get_object(self, request, project, subproject):
return get_subproject(request, project, subproject)
def items(self, obj):
return Change.objects.filter(
translation__subproject=obj
)[:10]
class ProjectChangesFeed(TranslationChangesFeed):
"""RSS feed for changes in project."""
# Arguments number differs from overridden method
# pylint: disable=W0221
def get_object(self, request, project):
return get_project(request, project)
def items(self, obj):
return Change.objects.filter(
translation__subproject__project=obj
)[:10]
class LanguageChangesFeed(TranslationChangesFeed):
"""RSS feed for changes in language."""
# Arguments number differs from overridden method
# pylint: disable=W0221
def get_object(self, request, lang):
return get_object_or_404(Language, code=lang)
def items(self, obj):
return Change.objects.filter(
translation__language=obj
)[:10]
# ==== lem9/weblate | weblate/trans/feeds.py | Python | gpl-3.0 | 3,830 bytes ====
# -*- coding: utf-8 -*-
import re

from santaclara_editor.santaclara_lang.tags import Tag
class Table(list):
def __init__(self,style=""):
list.__init__(self)
self.style=style
def html(self):
S=u"<center><table"
if self.style:
S+=u' class="'+self.style+'"'
S+=u">\n"
for row in self:
S+=row.html()
S+=u"</table></center>\n"
return(S)
class Row(list):
def __init__(self):
list.__init__(self)
self.prop=u""
def html(self):
S=u"<tr"
if self.prop: S+=u" "+self.prop
S+=u">\n"
for cell in self:
S+=cell.html()
S+=u"</tr>\n"
return(S)
def set_properties(self,txt):
txt=txt[1:-1]
token=txt.split(",")
p=[]
for s in token:
x=s.split("=")
if len(x)==1:
p.append(s)
continue
p.append(x[0]+'="'+x[1]+'"')
self.prop=" ".join(p)
class Cell(unicode):
def __new__(cls,txt=u""):
self=unicode.__new__(Cell,txt)
self.prop=u""
self.td=u"td"
return(self)
def html(self):
S=u"<"+self.td
if self.prop: S+=u" "+self.prop
S+=u">\n"
S+=self+u"</"+self.td+">\n"
return(S)
def set_properties(self,txt):
txt=txt[1:-1]
token=txt.split(",")
p=[]
for s in token:
if s==u"th":
self.td="th"
continue
x=s.split("=")
if len(x)==1:
p.append(s)
continue
p.append(x[0]+'="'+x[1]+'"')
self.prop=" ".join(p)
def __add__(self,other):
res=unicode.__add__(self,other)
ret=Cell(res)
ret.prop=self.prop
ret.td=self.td
return ret
def __mod__(self,other):
res=unicode.__mod__(self,other)
ret=Cell(res)
ret.prop=self.prop
ret.td=self.td
return ret
def __mul__(self,other):
res=unicode.__mul__(self,other)
ret=Cell(res)
ret.prop=self.prop
ret.td=self.td
return ret
def __rmod__(self,other):
res=unicode.__rmod__(self,other)
ret=Cell(res)
ret.prop=self.prop
ret.td=self.td
return ret
def __rmul__(self,other):
res=unicode.__rmul__(self,other)
ret=Cell(res)
ret.prop=self.prop
ret.td=self.td
return ret
class TableTag(Tag):
def __init__(self,lang,padre):
Tag.__init__(self,lang,padre,inline=False)
lab=r'[a-zA-Z0-9,=:]+'
prop_td=r'\('+lab+r'\)'
prop_tr=r'\{'+lab+r'\}'
txt=r'[^\r\n\|:]+|\r?\n'
sep=r':|\|'
regexp=r'('+sep+'|'+prop_td+'|'+prop_tr+'|'+txt+')'
self.tokenizer=re.compile(regexp)
self.re_prop_td=re.compile(prop_td)
self.re_prop_tr=re.compile(prop_tr)
def output(self,autoescape,outtype="html"):
txt=Tag.output(self,autoescape,outtype)
txt=txt.replace(u"\:",u'§a§')
txt=txt.replace(u'\|',u'§b§')
txt=txt.replace(u'\{',u'§c§')
txt=txt.replace(u'\}',u'§d§')
txt=txt.replace(u'\(',u'§e§')
txt=txt.replace(u'\)',u'§f§')
txt=self.filter(txt)
txt=txt.replace(u'§a§',u":")
txt=txt.replace(u'§b§',u'|')
txt=txt.replace(u'§c§',u'{')
txt=txt.replace(u'§d§',u'}')
txt=txt.replace(u'§e§',u'(')
txt=txt.replace(u'§f§',u')')
return(txt)
def filter(self,txt):
L=self.tokenizer.findall(txt)
if self.args:
T=Table(self.args[0])
else:
T=Table()
cell=Cell()
row=Row()
for t in L:
if t==":":
row.append(cell)
cell=Cell()
continue
if t=="|":
row.append(cell)
T.append(row)
cell=Cell()
row=Row()
continue
if self.re_prop_td.match(t):
cell.set_properties(t)
continue
if self.re_prop_tr.match(t):
row.set_properties(t)
continue
cell+=t
if cell:
row.append(cell)
T.append(row)
elif row:
T.append(row)
return(T.html())
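# A hedged example of the markup this filter accepts, derived from the
# tokenizer above: ':' separates cells, '|' separates rows, '(...)' holds cell
# properties (e.g. '(th)' marks a header cell) and '{...}' holds row
# properties. So the (hypothetical) input
#
#   (th)Name:(th)Age|{class=odd}Ann:42
#
# renders as a two-by-two table with a header row followed by one data row.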
# ==== chiara-paci/santaclara-editor | santaclara_editor/santaclara_lang/tables.py | Python | gpl-3.0 | 4,455 bytes ====
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Profection dialog.
"""
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
__all__ = ['ProfectionDialog']
class ProfectionDialog(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
tr = self.tr
self.setWindowTitle(tr('Add Degrees'))
layout = QGridLayout(self)
self.setLayout(layout)
# input value
layout.addWidget(QLabel(tr('Value')), 0, 0)
self.valueEdit = QDoubleSpinBox(self)
self.valueEdit.setRange(-360, 360)
self.valueEdit.setSuffix(tr('\xb0', 'Degrees'))
self.valueEdit.setDecimals(6)
self.valueEdit.setButtonSymbols(QAbstractSpinBox.PlusMinus)
self.valueEdit.setValue(30)
layout.addWidget(self.valueEdit, 0, 1)
# profection mode
self.profMode = QCheckBox(tr('Profection'), self)
self.connect(self.profMode, SIGNAL('stateChanged(int)'),
self.setProfMode)
layout.addWidget(self.profMode, 1, 0)
# profection unit
self.profUnit = QComboBox(self)
self.profUnit.setEditable(False)
self.profUnit.setDisabled(True)
units = [tr('Per year'), tr('Per day'), tr('Per hour')]
self.profUnit.addItems(units)
layout.addWidget(self.profUnit, 1, 1)
# datetime
layout.addWidget(QLabel(tr('DateTime')), 2, 0)
self.datetimeEdit = QDateTimeEdit(QDateTime.currentDateTime(), self)
self.datetimeEdit.setCalendarPopup(True)
self.datetimeEdit.setDisplayFormat(tr('yyyy-MM-dd hh:mm:ss',
'Datetime format'))
self.datetimeEdit.setMinimumDateTime(QDateTime(-5400, 1, 1, 0, 0))
self.datetimeEdit.setMaximumDateTime(QDateTime(5400, 1, 1, 0, 0))
self.datetimeEdit.setButtonSymbols(QAbstractSpinBox.PlusMinus)
self.datetimeEdit.setDisabled(True)
layout.addWidget(self.datetimeEdit, 2, 1)
# buttons
buttonsLayout = QHBoxLayout()
layout.addLayout(buttonsLayout, 3, 0, 1, 2)
cancelButton = QPushButton(tr('Cancel'), self)
self.connect(cancelButton, SIGNAL('clicked()'), self.reject)
buttonsLayout.addWidget(cancelButton)
okButton = QPushButton(tr('Ok'), self)
okButton.setDefault(True)
self.connect(okButton, SIGNAL('clicked()'), self.accept)
buttonsLayout.addWidget(okButton)
def setProfMode(self, i):
"""Enable/disable profection."""
if self.profMode.isChecked():
self.profUnit.setEnabled(True)
self.datetimeEdit.setEnabled(True)
else:
self.profUnit.setDisabled(True)
self.datetimeEdit.setDisabled(True)
def exec_(self):
"""Return ok, value, profection, profection unit, datetime."""
ok = QDialog.exec_(self)
if ok:
ret = (QDialog.Accepted, self.valueEdit.value(),
self.profMode.isChecked(), self.profUnit.currentIndex(),
self.datetimeEdit.dateTime().toPyDateTime())
return ret
else:
return QDialog.Rejected, 0, False, -1, 0
def main():
app = QApplication(sys.argv)
main = ProfectionDialog()
main.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
# End.
# ==== astrorigin/oroboros | oroboros/gui/profectiondialog.py | Python | gpl-3.0 | 2,889 bytes ====
# Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'double_vector.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert.cwrap import CWrapper
from ert.util import UTIL_LIB, VectorTemplate
class DoubleVector(VectorTemplate):
default_format = "%8.4f"
def __init__(self, default_value=0, initial_size=0):
super(DoubleVector, self).__init__(default_value, initial_size)
cwrapper = CWrapper(UTIL_LIB)
CWrapper.registerObjectType("double_vector", DoubleVector)
DoubleVector.cNamespace().alloc = cwrapper.prototype("c_void_p double_vector_alloc( int , double )")
DoubleVector.cNamespace().alloc_copy = cwrapper.prototype("double_vector_obj double_vector_alloc_copy( double_vector )")
DoubleVector.cNamespace().strided_copy = cwrapper.prototype("double_vector_obj double_vector_alloc_strided_copy( double_vector , int , int , int)")
DoubleVector.cNamespace().free = cwrapper.prototype("void double_vector_free( double_vector )")
DoubleVector.cNamespace().iget = cwrapper.prototype("double double_vector_iget( double_vector , int )")
DoubleVector.cNamespace().safe_iget = cwrapper.prototype("double double_vector_safe_iget(double_vector , int )")
DoubleVector.cNamespace().iset = cwrapper.prototype("double double_vector_iset( double_vector , int , double)")
DoubleVector.cNamespace().size = cwrapper.prototype("int double_vector_size( double_vector )")
DoubleVector.cNamespace().append = cwrapper.prototype("void double_vector_append( double_vector , double )")
DoubleVector.cNamespace().idel_block = cwrapper.prototype("void double_vector_idel_block( double_vector , int , int )")
DoubleVector.cNamespace().pop = cwrapper.prototype("double double_vector_pop( double_vector )")
DoubleVector.cNamespace().idel = cwrapper.prototype("void double_vector_idel( double_vector , int )")
DoubleVector.cNamespace().lshift = cwrapper.prototype("void double_vector_lshift( double_vector , int )")
DoubleVector.cNamespace().rshift = cwrapper.prototype("void double_vector_rshift( double_vector , int )")
DoubleVector.cNamespace().insert = cwrapper.prototype("void double_vector_insert( double_vector , int , double)")
DoubleVector.cNamespace().fprintf = cwrapper.prototype("void double_vector_fprintf( double_vector , FILE , char* , char*)")
DoubleVector.cNamespace().sort = cwrapper.prototype("void double_vector_sort( double_vector )")
DoubleVector.cNamespace().rsort = cwrapper.prototype("void double_vector_rsort( double_vector )")
DoubleVector.cNamespace().reset = cwrapper.prototype("void double_vector_reset( double_vector )")
DoubleVector.cNamespace().get_read_only = cwrapper.prototype("bool double_vector_get_read_only( double_vector )")
DoubleVector.cNamespace().set_read_only = cwrapper.prototype("void double_vector_set_read_only( double_vector , bool )")
DoubleVector.cNamespace().get_max = cwrapper.prototype("double double_vector_get_max( double_vector )")
DoubleVector.cNamespace().get_min = cwrapper.prototype("double double_vector_get_min( double_vector )")
DoubleVector.cNamespace().get_max_index = cwrapper.prototype("int double_vector_get_max_index( double_vector , bool)")
DoubleVector.cNamespace().get_min_index = cwrapper.prototype("int double_vector_get_min_index( double_vector , bool)")
DoubleVector.cNamespace().shift = cwrapper.prototype("void double_vector_shift( double_vector , double )")
DoubleVector.cNamespace().scale = cwrapper.prototype("void double_vector_scale( double_vector , double )")
DoubleVector.cNamespace().div = cwrapper.prototype("void double_vector_div( double_vector , double )")
DoubleVector.cNamespace().inplace_add = cwrapper.prototype("void double_vector_inplace_add( double_vector , double_vector )")
DoubleVector.cNamespace().inplace_mul = cwrapper.prototype("void double_vector_inplace_mul( double_vector , double_vector )")
DoubleVector.cNamespace().assign = cwrapper.prototype("void double_vector_set_all( double_vector , double)")
DoubleVector.cNamespace().memcpy = cwrapper.prototype("void double_vector_memcpy(double_vector , double_vector )")
DoubleVector.cNamespace().set_default = cwrapper.prototype("void double_vector_set_default( double_vector , double)")
DoubleVector.cNamespace().get_default = cwrapper.prototype("double double_vector_get_default( double_vector )")
DoubleVector.cNamespace().element_size = cwrapper.prototype("int double_vector_element_size( double_vector )")
DoubleVector.cNamespace().permute = cwrapper.prototype("void double_vector_permute(double_vector, permutation_vector)")
DoubleVector.cNamespace().sort_perm = cwrapper.prototype("permutation_vector_obj double_vector_alloc_sort_perm(double_vector)")
DoubleVector.cNamespace().rsort_perm = cwrapper.prototype("permutation_vector_obj double_vector_alloc_rsort_perm(double_vector)")
DoubleVector.cNamespace().contains = cwrapper.prototype("bool double_vector_contains(double_vector, double)")
DoubleVector.cNamespace().select_unique = cwrapper.prototype("void double_vector_select_unique(double_vector)")
DoubleVector.cNamespace().element_sum = cwrapper.prototype("double double_vector_sum(double_vector)")
DoubleVector.cNamespace().get_data_ptr = cwrapper.prototype("double* double_vector_get_ptr(double_vector)")
DoubleVector.cNamespace().count_equal = cwrapper.prototype("int double_vector_count_equal(double_vector, double)")
# ==== iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert/util/double_vector.py | Python | gpl-3.0 | 6,272 bytes ====
#
# Copyright (C) 2011-2017 Ary Pablo Batista <[email protected]>, Pablo Barenbaum <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import re
import colorsys
import webbrowser
import tempfile
import atexit
import shutil
from .parse import *
def python_major_version():
return sys.version_info[0]
if python_major_version() < 3:
from StringIO import StringIO
else:
from io import StringIO
try:
import hashlib as md5
except ImportError:
import md5
#### Various utility functions
VERSION = 0, 1, 5
def version_number():
return '%s.%s.%s' % VERSION
class SourceException(Exception):
def __init__(self, msg, area):
self.msg = msg
self.area = area
def __repr__(self):
s = ''
if self.area:
s += '\n%s\n' % (self.area,)
s += '%s\n' % (indent(self.msg),)
return s
def error_type(self):
return 'Error'
class StaticException(SourceException):
pass
class DynamicException(SourceException):
pass
def trim(x):
return x.strip(' \t\r\n')
_blanks = re.compile('[ \t\r\n]+')
def trim_blanks(x):
return trim(_blanks.sub(' ', x))
def nonempty(x):
return x != ''
def read_file(fn):
f = open(fn, 'r')
c = f.read()
f.close()
return c
## set
def set_new(xs=[]):
d = {}
for x in xs: d[x] = 1
return d
def set_add(s, x):
s[x] = 1
def set_add_change(s, x):
if x not in s:
s[x] = 1
return True
return False
def set_extend(s1, s2):
for k, v in s2.items():
s1[k] = v
def set_remove(s1, s2):
for k, v in s2.items():
if k in s1:
del s1[k]
## tokset
## Set with an associated token.
## When joining two sets, it always keeps
## the token that occurs first in the input.
tokset_key = lambda tree: tree.pos_begin.start
tokset_new = set_new
def tokset_new_from_dict(d):
return d
def tokset_empty(s):
return len(s) == 0
def tokset_extend_change(s1, s2):
change = False
for k, v in s2.items():
if k not in s1:
s1[k] = v
change = True
elif tokset_key(v) < tokset_key(s1[k]):
s1[k] = v
return change
tokset_extend = tokset_extend_change
def tokset_union(s1, s2):
res = {}
for k, v in s1.items():
res[k] = v
tokset_extend_change(res, s2)
return res
def tokset_difference(s1, s2):
res = {}
for k, v in s1.items():
if k not in s2:
res[k] = v
return res
def seq_sorted(xs, key=lambda x: x):
if python_major_version() < 3:
xs = [(key(x), x) for x in xs]
xs.sort(lambda a, b: cmp(a[0], b[0]))
return [xy[1] for xy in xs]
else:
return list(sorted(xs, key=key))
def seq_reversed(xs):
ys = []
for x in xs:
ys.insert(0, x)
return ys
def seq_no_repeats(xs):
res = []
for x in xs:
if x not in res:
res.append(x)
return res
def seq_insert(xs, idx, ys):
assert idx >= 0
for y in ys:
xs.insert(idx, y)
idx += 1
def is_int(x):
if not x:
return False
if not isinstance(x, str):
return False
if len(x) == 0:
return False
for c in x:
if c not in '0123456789':
return False
return True
##
def dict_min_value(xs, key=lambda x: x):
for x in seq_sorted(xs.values(), key):
return x
def indent(msg, n=4):
return '\n'.join([n * ' ' + m for m in msg.split('\n')])
def expand_tabs(x):
return x.replace('\t', ' ')
def std_warn(x):
sys.stderr.write(repr(x))
def show_string(s):
s = s[1:-1]
r = ''
conv = {
'a': '\a',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
'v': '\v',
'\\': '\\',
'\"': '\"'
}
i = 0
while i < len(s):
if s[i] == '\\' and i + 1 < len(s):
c = s[i + 1]
r += conv.get(c, c)
i += 2
else:
r += s[i]
i += 1
return r
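# Example: show_string(r'"a\tb\n"') strips the surrounding quotes and expands
# the escapes, returning 'a\tb\n' with a literal tab and newline character.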
##
def md5sum(s):
return md5.md5(s).hexdigest()
##
def hsv(h, s, v):
r, g, b = colorsys.hsv_to_rgb(h, s, v)
return '#%.2x%.2x%.2x' % (int(r * 255), int(g * 255), int(b * 255))
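# Examples: hsv(0, 1, 1) -> '#ff0000' (pure red); hsv(1.0 / 3, 1, 1) -> '#00ff00'.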
## Parser for option switches
def default_options(option_switches):
opt = {}
for o in option_switches:
o = o.split(' ')
sw = o[0][2:]
if sw[:3] == 'no-':
neg = True
sw = sw[3:]
else:
neg = False
if len(o) == 1:
opt[sw] = neg
else:
opt[sw] = []
return opt
def parse_options(option_switches, args, max_args=None):
arguments = []
opt = default_options(option_switches)
i = 1
n = len(args)
while i < len(args):
o = None
for oi in option_switches:
oi = oi.split(' ')
if oi[0] == args[i]:
o = oi
break
if o is None:
if len(arguments) == max_args:
return False
arguments.append(args[i])
i += 1
continue
sw = o[0][2:]
if len(o) == 1:
if sw[:3] == 'no-':
neg = True
sw = sw[3:]
else:
neg = False
opt[sw] = not neg
i += 1
else:
k = 1
i += 1
while k < len(o):
if i >= n: return False
opt[sw].append(args[i])
i += 1
k += 1
return arguments, opt
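## Hedged usage sketch for the option parser above. Switch specs are
## strings such as '--no-color' (boolean; the 'no-' prefix makes the
## default True) or '--output FILE' (collects one argument); args
## follows the sys.argv convention, args[0] being the program name.
def _parse_options_demo():
    switches = ['--no-color', '--output FILE']
    result = parse_options(switches, ['prog', '--output', 'out.txt', 'in.txt'])
    assert result is not False
    arguments, opt = result
    assert arguments == ['in.txt']
    assert opt['color'] is True          # default for a 'no-' switch
    assert opt['output'] == ['out.txt']  # argument switches collect lists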
##
TEMP_HTML_DIR = None
def temp_html_dir():
global TEMP_HTML_DIR
if TEMP_HTML_DIR is None:
TEMP_HTML_DIR = tempfile.mkdtemp()
atexit.register(_temp_html_dir_cleanup)
return TEMP_HTML_DIR
def _temp_html_dir_cleanup():
if TEMP_HTML_DIR is not None:
shutil.rmtree(TEMP_HTML_DIR)
def open_html(fn):
webbrowser.open(fn, autoraise=True)
def open_temp_html(prefix, contents):
fn = os.path.join(temp_html_dir(), prefix + '.html')
f = open(fn, 'w')
f.write(contents)
f.close()
open_html(fn)
##
def read_stripped_lines(f):
def next_line():
while True:
l = f.readline()
if l == '' or l.strip(' \t\r\n') == '%%':
return False
l = l.strip(' \t\r\n').split('#')[0].strip(' \t\r\n')
if l != '':
return re.sub('[ \t]+', ' ', l)
lines = []
while True:
l = next_line()
if not l:
break
lines.append(l)
return lines
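## Example: for the input lines 'a   b # comment', '%%', 'ignored',
## read_stripped_lines(f) returns ['a b']: '#' starts a comment, runs of
## blanks are collapsed, and a line consisting of '%%' ends the section.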
# Reading one char
class _Getch:
"""Gets a single character from standard input. Does not echo to the screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
getch = _Getch()
def clear():
os.system('cls' if os.name == 'nt' else 'clear')
| arypbatista/gobspy | gobspy/lang/utils/__init__.py | Python | gpl-3.0 | 8,234 |
#!/usr/bin/python3
import sys
import os
import inspect
import visa
import matplotlib.pyplot as pyplot
import skrf as rf
#############################
# Importing local libraries #
#############################
import instr_tests as instr
try:
vna = instr.instruments.vna.AgilentE5061B("10.0.18.54")
except Exception:
sys.stdout.write("\nUnable to reach the vector network analyzer (Agilent E5061B) through the " +
"network. Exiting...\n\n")
exit()
sys.stdout.write("\nRunning test...\n\n")
#vna.freq_range(100000,1e9)
#vna.set_center_frequency(500e6)
#vna.set_span(0)
vna.set_data_format("SLOG")
vna.write("SENS1:CORR:STAT 1")
vna.freq_range(5e6,550e6)
slog = (vna.get_slog_data())
freq = vna.get_frequency_data()
pyplot.plot(freq,slog[1])
pyplot.savefig('wave1.png')
#zone_freq = [100e3, 200e6, 450e6, 550e6, 800e6, 1e9]
#zone_type = [-1, 0, 1, 0, -1]
#zone_mag = [-60, 0, -60, 0,-40]
#s21 = vna.get_s21_data()
#print(instr.functions.vna.mag_lvl_test(freq,s21, zone_freq, zone_type, zone_mag))
#print(vna.get_reflection_impedance())
#vna.save_csv("SLOG_s11")
vna.save_s1p("S","dB", "test_file")
ntwk = rf.Network('test_file.s1p')
ntwk.plot_s_smith()
pyplot.savefig('smith.png')
pyplot.close()
ntwk.plot_s_complex()
pyplot.savefig('complex.png')
pyplot.close()
ntwk.plot_s_db()
pyplot.savefig('db.png')
print("\nok")
| lnls-dig/instr_tests | instr_tests/examples/example_vna.py | Python | gpl-3.0 | 1,360 |
""" This is a test of using SandboxStoreClient in the WMS
In order to run this test we need the following DBs installed:
- SandboxMetadataDB
And the following services should also be on:
- SandboxStore
And a SandboxSE should be configured, something like:
SandboxStore
{
LocalSE = FedericoSandboxSE
Port = 9196
BasePath = /home/toffo/Rumenta/
Authorization
{
Default = authenticated
FileTransfer
{
Default = all
}
}
}
A user proxy is also needed to submit,
and the Framework/ProxyManager need to be running with a such user proxy already uploaded.
Suggestion: for local testing, run this with::
python -m pytest -c ../pytest.ini -vv tests/Integration/WorkloadManagementSystem/Test_SandboxStoreClient.py
"""
import DIRAC
DIRAC.initialize() # Initialize configuration
from DIRAC.tests.Utilities.utils import find_all
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.WorkloadManagementSystem.DB.SandboxMetadataDB import SandboxMetadataDB
gLogger.setLevel("DEBUG")
def test_SSCChain():
"""full test of functionalities"""
ssc = SandboxStoreClient()
smDB = SandboxMetadataDB()
exeScriptLocation = find_all("exe-script.py", "..", "/DIRAC/tests/Integration")[0]
fileList = [exeScriptLocation]
res = ssc.uploadFilesAsSandbox(fileList)
assert res["OK"] is True, res["Message"]
# SEPFN = res['Value'].split( '|' )[1]
res = ssc.uploadFilesAsSandboxForJob(fileList, 1, "Input")
assert res["OK"] is True, res["Message"]
res = ssc.downloadSandboxForJob(1, "Input") # to run this we need the RSS on
print(res) # for debug...
assert res["OK"] is True, res["Message"]
# only ones needing the DB
res = smDB.getUnusedSandboxes()
print(res)
assert res["OK"] is True, res["Message"]
# smDB.getSandboxId(SEName, SEPFN, requesterName, requesterGroup)
# # cleaning
# res = smDB.deleteSandboxes(SBIdList)
# assert res['OK'] is True
def test_SandboxMetadataDB():
smDB = SandboxMetadataDB()
seNameToUse = "ProductionSandboxSE"
sbPath = "/sb/pfn/1.tar.bz2"
assignTo = {"adminusername": "dirac_admin"}
res = smDB.registerAndGetSandbox(
"adminusername", "/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser", "dirac_admin", seNameToUse, sbPath, 123
)
assert res["OK"], res["Message"]
sbURL = f"SB:{seNameToUse}|{sbPath}"
assignTo = dict([(key, [(sbURL, assignTo[key])]) for key in assignTo])
res = smDB.assignSandboxesToEntities(assignTo, "adminusername", "dirac_admin", "enSetup")
assert res["OK"], res["Message"]
| DIRACGrid/DIRAC | tests/Integration/WorkloadManagementSystem/Test_SandboxStoreClient.py | Python | gpl-3.0 | 2,767 |
# gus.mb, an open source flow solver.
# Copyright (C) 2016 Hiromasa Kato <hiromasa at gmail.com>
#
# This file is part of gus.mb.
#
# gus.mb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gus.mb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
# $Id: Pre.py 320 2014-08-25 02:28:50Z kato $
import wx
import vtk
from vtk.wx.wxVTKRenderWindow import *
from wxVTKRenderWindowInteractor import *
import numpy as n
import CGNSFile
import Roster
from SceneObject import SceneObject
from PatchSelectionPanel import *
class MainFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "Albastru Pre", size = (1024, 768))
splitter = wx.SplitterWindow(self, wx.ID_ANY)
# Scene
self.vtkPanel = wxVTKRenderWindowInteractor(splitter, -1)
ren = vtk.vtkRenderer()
ren.SetBackground(0.9, 0.9, 1.0);
self.vtkPanel.GetRenderWindow().AddRenderer(ren)
Roster.Renderer = ren
Roster.RenderWindow = self.vtkPanel.GetRenderWindow()
# Patch list
self.patchPanel = PatchSelectionPanel(splitter)
splitter.SplitVertically(self.patchPanel, self.vtkPanel, 200)
class App(wx.App):
MenuData = [
["File", -1, [
["&New", -1, None],
["&Open", -1, None],
["&Save", -1, None],
["Save &as", -1, None],
["&Quit", -1, None],
]
],
["Edit", -1, [
["Dummy", -1, None],
]
],
]
def OnInit(self):
frame = MainFrame(None)
menuBar = wx.MenuBar()
self._InstallMenu(menuBar, self.MenuData)
frame.SetMenuBar(menuBar)
tb = frame.CreateToolBar((wx.TB_HORIZONTAL | wx.NO_BORDER))
new_bmp = wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_TOOLBAR)
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR)
save_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_TOOLBAR)
tb.AddLabelTool(wx.NewId(), "New", new_bmp)
tb.AddLabelTool(wx.NewId(), "Open", open_bmp)
tb.AddLabelTool(wx.NewId(), "Save", save_bmp)
tb.Realize()
self.SetTopWindow(frame)
frame.Show()
return True
def _InstallMenu(self, parent, data):
for name, actionID, action in data:
if isinstance(action, list):
menu = wx.Menu()
self._InstallMenu(menu, action)
parent.Append(menu, name)
else:
parent.Append(actionID, name)
def CanonizeRange(r):
rr = list(r)
for i, j in [[0, 3], [1, 4], [2, 5]]:
if rr[i] > rr[j]:
rr[i], rr[j] = rr[j], rr[i]
return rr
def PickEventCallback(obj, event):
actor = obj.GetActor()
if actor:
picked = Roster.FindPatchByActor(actor)
patchName = picked[1]
print patchName
Roster.UnselectAllPatches()
Roster.SelectPatch(patchName)
def RenamePatch(patch):
i = 2
while True:
candidateName = "%s_%d" % (patch["Name"], i)
if Roster.FindPatchByName(candidateName) == None:
print "Duplicate patch %s was renamed to %s" % (patch["Name"], candidateName)
patch["Name"] = candidateName
break
i += 1
def CoarsenPatchMesh(xyz):
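    # Keeps roughly every 10th grid point per direction; e.g. an xyz of
    # shape (3, 100, 50, 20) comes out as (3, 10, 10, 10).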
shape = xyz.shape
shapeCoarse = list(shape)
stepI = max(1, shape[1] / 10)
stepJ = max(1, shape[2] / 10)
stepK = max(1, shape[3] / 10)
xyzCoarse = xyz[:, ::stepI, ::stepJ, ::stepK]
print "Coarsened:", xyz.shape, " -> ", xyzCoarse.shape
return xyzCoarse
def Main(cgnsMeshFileName):
app = App()
#app.MainLoop()
#return
style = vtk.vtkInteractorStyleTrackballCamera()
Roster.RenderWindow.GetInteractor().SetInteractorStyle(style)
"""
picker = vtk.vtkPropPicker()
picker.AddObserver("EndPickEvent", PickEventCallback)
Roster.RenderWindow.GetInteractor().SetPicker(picker)
"""
axesActor = vtk.vtkAxesActor()
markerWidget = vtk.vtkOrientationMarkerWidget()
markerWidget.SetOrientationMarker(axesActor)
markerWidget.SetInteractor(Roster.RenderWindow.GetInteractor())
if cgnsMeshFileName != None:
cgnsf = CGNSFile.CGNSFile(cgnsMeshFileName)
zones = cgnsf.ReadZones()
Roster.Zones = zones
#ren = vtk.vtkRenderer()
ren = Roster.Renderer
B = 1
for zone in zones:
xyz = cgnsf.ReadZoneCoord(B, zone["Zone"])
for boco in zone["Bocos"]:
if Roster.FindPatchByName(boco["Name"]) != None:
RenamePatch(boco)
r = CanonizeRange(boco["Range"])
xyz2 = xyz[:, r[0] - 1:r[3], r[1] - 1:r[4], r[2] - 1:r[5]]
xyzCoarse = CoarsenPatchMesh(xyz2)
so = SceneObject(xyzCoarse)
actors = so.ActorSurface, so.ActorOutline
actors[0].GetProperty().SetColor(1.0, 1.0, 1.0)
for actor in actors:
ren.AddActor(actor)
#Roster.Patches.append([zone, boco, actors])
Roster.RegisterPatch(zone, boco, actors)
if True:
for c1to1 in zone["1to1s"]:
if not c1to1.IsPeriodic():
continue
if Roster.FindPatchByName(c1to1["Name"]) != None:
RenamePatch(c1to1)
"""
for i in range(2,100):
candidateName = "%s_%d" % (c1to1["Name"], i)
if Roster.FindPatchByName(candidateName) == None:
print "Duplicate patch %s was renamed to %s" % (c1to1["Name"], candidateName)
c1to1["Name"] = candidateName
break
"""
r = CanonizeRange(c1to1["Range"])
xyz2 = xyz[:, r[0] - 1:r[3], r[1] - 1:r[4], r[2] - 1:r[5]]
xyzCoarse = CoarsenPatchMesh(xyz2)
so = SceneObject(xyzCoarse)
actors = so.ActorSurface, so.ActorOutline
actors[0].GetProperty().SetColor(0.0, 0.0, 1.0)
for actor in actors:
ren.AddActor(actor)
Roster.RegisterPatch(zone, c1to1, actors)
del xyz
markerWidget.SetEnabled(True)
markerWidget.InteractiveOn()
Roster.RosterModified()
app.MainLoop()
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
Main(sys.argv[1])
else:
Main(None)
| butakun/gus.mb | IO/Pre.py | Python | gpl-3.0 | 6,058 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import shutil
from django.test import TestCase
from weblate.trans.ssh import get_host_keys, create_ssh_wrapper, ssh_file
from weblate.trans.tests.utils import get_test_file
from weblate.trans.tests import OverrideSettings
from weblate.trans.data import check_data_writable
from weblate import appsettings
TEST_HOSTS = get_test_file('known_hosts')
class SSHTest(TestCase):
'''
    Tests for SSH key handling and the SSH wrapper script.
'''
@OverrideSettings(DATA_DIR=OverrideSettings.TEMP_DIR)
def test_parse(self):
check_data_writable()
shutil.copy(TEST_HOSTS, os.path.join(appsettings.DATA_DIR, 'ssh'))
hosts = get_host_keys()
self.assertEqual(len(hosts), 50)
@OverrideSettings(DATA_DIR=OverrideSettings.TEMP_DIR)
def test_create_ssh_wrapper(self):
check_data_writable()
filename = os.path.join(
appsettings.DATA_DIR, 'ssh', 'ssh-weblate-wrapper'
)
create_ssh_wrapper()
with open(filename, 'r') as handle:
data = handle.read()
self.assertTrue(ssh_file('known_hosts') in data)
self.assertTrue(ssh_file('id_rsa') in data)
self.assertTrue(appsettings.DATA_DIR in data)
self.assertTrue(
os.access(filename, os.X_OK)
)
| miyataken999/weblate | weblate/trans/tests/test_ssh.py | Python | gpl-3.0 | 2,079 |
from endpoint.auth import AuthMiddleware
from endpoint.data import application as data_app
from endpoint.events import application as events_app
from endpoint.page import application as page_app
from os import environ
from sys import exit
from werkzeug.wsgi import DispatcherMiddleware
import logging
logging.basicConfig(level=logging.INFO)
try:
endpoint_url = environ['ENDPOINT_URL']
except KeyError as err:
logging.error("You'll need to specify a %s environment variable." % str(err))
exit(2)
else:
application = AuthMiddleware(DispatcherMiddleware(page_app, {
'%s' % endpoint_url: data_app,
'%s/0.3/event' % endpoint_url: events_app
}))
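    # Resulting routing, with '/endpoint' as an illustrative ENDPOINT_URL
    # (DispatcherMiddleware dispatches on the longest matching mount):
    #   /endpoint/0.3/event/... -> events_app
    #   /endpoint/...           -> data_app
    #   everything else         -> page_app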
| aquametalabs/aquameta | experimental/uwsgi-endpoint/endpoint/wsgi.py | Python | gpl-3.0 | 682 |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import topology.data as td
def run_coffee_mug_pca_example():
X, y = td.coffee_mug(bottom_label=0, side_label=0, handle_label=1)
c = ['r' if l else 'b' for l in y]
# Nontrivial rotation around the x-axis
angle = np.pi / 4.0
rotation_matrix = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
X = rotation_matrix.dot(X.T).T
# Perform PCA 3D down to 2D
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X_pca[:,0], X_pca[:,1], c=c)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
plt.savefig('images/coffee_mug_pca.png')
plt.show()
def run_pail_pca_example():
X, y = td.pail(bottom_label=0, side_label=0, handle_label=1)
c = ['r' if l else 'b' for l in y]
# Nontrivial rotation around the x-axis
angle = np.pi / 4.0
rotation_matrix = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
X = rotation_matrix.dot(X.T).T
# Perform PCA 3D down to 2D:
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X_pca[:,0], X_pca[:,1], c=c)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
plt.savefig('images/pail_pca.png')
plt.show()
if __name__ == '__main__':
run_coffee_mug_pca_example()
run_pail_pca_example()
| nonabelian/tda_dionysus | scripts/pca_demo.py | Python | gpl-3.0 | 1,605 |
#!/usr/bin/python -u
# -*- coding: utf-8 -*-
'''
Module for generating environments for experiments over Yahoo finance data
using the BEST and TOPK operators
'''
import csv
import os
from yfimport import TRANSACTION_FILE
# Experiment parameters
RAN = 'range'
SLI = 'slide'
OPE = 'operator'
TOPK = 'top'
BEST = 'best'
ALGORITHM = 'algorithm'
# Result fields
RUNTIME = 'runtime'
MEMORY = 'memory'
# List of ranges
RANGE_LIST = [2, 3, 4, 5, 6]
# Default range
RANGE_DEFAULT = 4
# List of slides
SLIDE_LIST = [1, 2, 3, 4]
# Default slide
SLIDE_DEFAULT = 1
# List of operator
OPERATOR_LIST = [BEST, TOPK]
# Default operator
OPERATOR_DEFAULT = BEST
# Top-k variation (-1 for best operator)
TOPK_LIST = [1, 35, 70, 140, 280]
# List of algorithms
ALGORITHM_LIST = ['inc_ancestors', 'inc_graph', 'inc_partition', 'partition']
# Directories
MAIN_DIR = 'streampref'
DATA_DIR = MAIN_DIR + os.sep + 'data'
DETAILS_DIR = MAIN_DIR + os.sep + 'details'
SUMMARY_DIR = MAIN_DIR + os.sep + 'summary'
RESULT_DIR = MAIN_DIR + os.sep + 'result'
QUERY_DIR = MAIN_DIR + os.sep + 'queries'
ENV_DIR = MAIN_DIR + os.sep + 'env'
DIR_LIST = [MAIN_DIR, DATA_DIR, DETAILS_DIR, SUMMARY_DIR, RESULT_DIR,
QUERY_DIR, ENV_DIR]
# Yahoo imported file
DATA_FILE = DATA_DIR + os.sep + 'transaction.csv'
# Number or experiment runs
RUN_COUNT = 5
# Command for experiment run
RUN_COMMAND = \
"streampref -r'|' -p {alg} -e {env} -d {det} -m {ite}"
# Command for calculation of confidence interval
CONFINTERVAL_COMMAND = \
'confinterval.py -i {inf} -o {outf} -k {keyf}'
# Command for sort stream file download
SORT_COMMAND = \
'cat ' + TRANSACTION_FILE + ' | sort -g > ' + DATA_FILE
# Default registration of tables and streams
REGISTER_DEFAULT = '''
REGISTER STREAM transactions (symbol STRING, sector STRING, country STRING,
price FLOAT, volume INTEGER, method INTEGER, rate FLOAT)
INPUT '{dfile}';
REGISTER QUERY preferred_stocks
INPUT '{qdir}/{qfile}.cql'
;
'''
# Default query
QUERY_DEFAULT = '''
SELECT {topk} *
FROM transactions [RANGE {ran} SECOND, SLIDE {sli} SECOND]
ACCORDING TO PREFERENCES
IF sector = 'Basic Materials' THEN rate < 0.25 BETTER rate >= 0.25
[method, symbol, price]
AND
IF sector = 'Technology' THEN rate < 0.35 BETTER rate >= 0.35
[method, symbol, price]
AND
IF rate >= 0.35 THEN country = 'Brazil' BETTER country = 'France'
[symbol, price]
AND
IF rate >= 0.35 THEN volume > 1000 BETTER volume <= 1000
[symbol, price]
;
'''
def gen_env_file(experiment_conf):
'''
Generate environment file for range and slide
'''
exp_id = get_experiment_id(experiment_conf)
text = REGISTER_DEFAULT.format(qdir=QUERY_DIR,
qfile=exp_id,
dfile=DATA_FILE)
filename = ENV_DIR + os.sep + get_experiment_id(experiment_conf) + '.env'
out_file = open(filename, 'w')
out_file.write(text)
out_file.close()
def gen_query_file(experiment_conf):
'''
Generate query file for range and slide
'''
topk_option = ''
if experiment_conf[OPE] == TOPK:
topk_option = 'TOPK(' + str(experiment_conf[TOPK]) + ')'
text = QUERY_DEFAULT.format(topk=topk_option,
ran=experiment_conf[RAN],
sli=experiment_conf[SLI])
filename = QUERY_DIR + os.sep + get_experiment_id(experiment_conf) + '.cql'
out_file = open(filename, 'w')
out_file.write(text)
out_file.close()
def gen_files(experiment_list):
'''
Generate all files (queries and environments)
'''
# Copy imported data file and sort by timestamp
os.system(SORT_COMMAND)
if not os.path.isfile(DATA_FILE):
        print 'Error copying data file\n' + \
            'Make sure that the import tool was executed'
# Generate query files
for exp_conf in experiment_list:
gen_query_file(exp_conf)
# Generate environment files
for exp_conf in experiment_list:
gen_env_file(exp_conf)
def get_experiment_id(experiment_conf):
'''
Return the ID of an experiment
'''
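    # Illustrative examples of the generated id:
    #   {RAN: 4, SLI: 1, OPE: BEST}           -> 'range4slide1best'
    #   {RAN: 4, SLI: 1, OPE: TOPK, TOPK: 35} -> 'range4slide1top35'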
operation = 'best'
if experiment_conf[OPE] == TOPK:
operation = TOPK + str(experiment_conf[TOPK])
return RAN + str(experiment_conf[RAN]) + \
SLI + str(experiment_conf[SLI]) + operation
def get_detail_file(algorithm, experiment_id, count):
'''
Get filename for experiment details
'''
return DETAILS_DIR + os.sep + algorithm + '-' + \
experiment_id + '.' + str(count) + '.csv'
def run(experiment_conf, count, algorithm, iterations):
'''
Run experiment for range and slide
'''
exp_id = get_experiment_id(experiment_conf)
detail_file = get_detail_file(algorithm, exp_id, count)
env_file = ENV_DIR + os.sep + exp_id + '.env'
if not os.path.isfile(detail_file):
command = RUN_COMMAND.format(alg=algorithm, env=env_file,
det=detail_file, ite=iterations)
print command
os.system(command)
def get_max_iteration():
'''
Get maximum iteration of data file
'''
from yfimport import read_csv_file, TRANSACTION_HEADER, TS
rec_list = read_csv_file(DATA_FILE, TRANSACTION_HEADER)
last_rec = rec_list[-1]
return int(last_rec[TS])
def run_experiments(experiment_list):
'''
Run all experiments
'''
iterations = get_max_iteration()
for count in range(RUN_COUNT):
for exp_conf in experiment_list:
for alg in ALGORITHM_LIST:
run(exp_conf, count + 1, alg, iterations)
def summarize_all():
'''
Summarize all results
'''
# Summarize experiments for BEST operator
variation = {}
variation[RAN] = RANGE_LIST
variation[SLI] = SLIDE_LIST
default_values = {RAN: RANGE_DEFAULT, SLI: SLIDE_DEFAULT, OPE: BEST}
for parameter in variation:
summarize(parameter, variation[parameter], default_values)
# Summarize experiments for TOPK operator
variation = {TOPK: TOPK_LIST}
default_values = {RAN: RANGE_DEFAULT, SLI: SLIDE_DEFAULT, OPE: TOPK}
for parameter in variation:
summarize(parameter, variation[parameter], default_values)
def write_file(filename, record_list, key_field):
'''
Write record_list to file
'''
if len(record_list):
field_list = [field for field in record_list[0].keys()
if field != key_field]
field_list.sort()
field_list.insert(0, key_field)
output_file = open(filename, 'w')
writer = csv.DictWriter(output_file, field_list)
header = {field: field for field in field_list}
writer.writerow(header)
for rec in record_list:
writer.writerow(rec)
output_file.close()
def summarize(parameter, value_list, default_values):
'''
    Summarize experiments for the variation of the given parameter
'''
time_list = []
mem_list = []
exp_conf = default_values.copy()
for value in value_list:
exp_conf[parameter] = value
for rcount in range(RUN_COUNT):
time_rec = {parameter: value}
mem_rec = {parameter: value}
for alg in ALGORITHM_LIST:
dfile = get_detail_file(alg, get_experiment_id(exp_conf),
rcount + 1)
runtime, memory = get_summaries(dfile)
time_rec[alg] = runtime
mem_rec[alg] = memory
time_list.append(time_rec)
mem_list.append(mem_rec)
fname = SUMMARY_DIR + os.sep + 'runtime_' + parameter + '.csv'
write_file(fname, time_list, parameter)
fname = SUMMARY_DIR + os.sep + 'memory_' + parameter + '.csv'
write_file(fname, mem_list, parameter)
def get_summaries(detail_file):
'''
    Read a detail file and return (total runtime, average memory)
'''
if not os.path.isfile(detail_file):
        print 'File does not exist: ' + detail_file
return (float('NaN'), float('NaN'))
in_file = open(detail_file, 'r')
reader = csv.DictReader(in_file, skipinitialspace=True)
sum_time = 0.0
sum_memory = 0.0
count = 0
for rec in reader:
sum_time += float(rec[RUNTIME])
sum_memory += float(rec[MEMORY])
count += 1
in_file.close()
return (sum_time, sum_memory / count)
def create_directories():
'''
Create default directories if they do not exists
'''
for directory in DIR_LIST:
if not os.path.exists(directory):
os.mkdir(directory)
def add_experiment(experiment_list, experiment):
'''
Add an experiment into experiment list
'''
if experiment not in experiment_list:
experiment_list.append(experiment.copy())
def gen_experiment_list():
'''
Generate the list of experiments
'''
exp_list = []
# Default parameters configuration (for BEST operator)
def_conf = {RAN: RANGE_DEFAULT, SLI: SLIDE_DEFAULT, OPE: BEST}
# Attributes number variation (no deletions)
for range_val in RANGE_LIST:
conf = def_conf.copy()
conf[RAN] = range_val
add_experiment(exp_list, conf)
for slide_val in SLIDE_LIST:
conf = def_conf.copy()
conf[SLI] = slide_val
add_experiment(exp_list, conf)
# Default parameters configuration (for TOPK operator)
def_conf = {RAN: RANGE_DEFAULT, SLI: SLIDE_DEFAULT, OPE: TOPK}
for topk_value in TOPK_LIST:
conf = def_conf.copy()
conf[TOPK] = topk_value
add_experiment(exp_list, conf)
return exp_list
def confidence_interval(parameter, in_file, out_file):
'''
Calculate final result with confidence interval
'''
if not os.path.isfile(in_file):
        print 'File does not exist: ' + in_file
return
command = CONFINTERVAL_COMMAND.format(inf=in_file, outf=out_file,
keyf=parameter)
print command
os.system(command)
if not os.path.isfile(out_file):
print 'Output file not found: ' + out_file
print "Check if 'confinterval.py' is in path"
def confidence_interval_all():
'''
Calculate confidence interval for all summarized results
'''
# Deletions and insertions
par_list = [RAN, SLI, TOPK]
for parameter in par_list:
in_file = SUMMARY_DIR + os.sep + 'runtime_' + parameter + '.csv'
out_file = RESULT_DIR + os.sep + 'runtime_' + parameter + '.csv'
confidence_interval(parameter, in_file, out_file)
in_file = SUMMARY_DIR + os.sep + 'memory_' + parameter + '.csv'
out_file = RESULT_DIR + os.sep + 'memory_' + parameter + '.csv'
confidence_interval(parameter, in_file, out_file)
def get_arguments(print_help=False):
'''
Get arguments
'''
import argparse
parser = argparse.ArgumentParser('YFRun')
parser.add_argument('-g', '--gen', action="store_true",
default=False,
help='Generate files')
parser.add_argument('-r', '--run', action="store_true",
default=False,
help='Run experiments')
parser.add_argument('-s', '--summarize', action="store_true",
default=False,
help='Summarize results')
if print_help:
parser.print_help()
args = parser.parse_args()
return args
def main():
'''
Main routine
'''
args = get_arguments()
csv.register_dialect('table', delimiter='|', skipinitialspace=True)
create_directories()
exp_list = gen_experiment_list()
if args.gen:
print 'Generating files'
gen_files(exp_list)
elif args.run:
print 'Running experiments'
run_experiments(exp_list)
elif args.summarize:
print 'Summarizing results'
summarize_all()
print 'Calculating confidence intervals'
confidence_interval_all()
else:
get_arguments(True)
if __name__ == '__main__':
main()
| ribeiromarcos/yfimport | pref.py | Python | gpl-3.0 | 11,968 |
import pytest
from datetime import datetime
from nextgisweb.audit.util import es_index
def one(es, index):
result = es.search(index=index, body={"query": {"match_all": {}}}, size=1)
return result["hits"]["hits"][0]["_source"]
@pytest.fixture(scope='module', autouse=True)
def skip_without_es(ngw_env):
if not hasattr(ngw_env.audit, 'es'):
pytest.skip("Elasticsearch is not available")
yield
@pytest.fixture(scope="module")
def index(ngw_env):
return es_index(datetime.now())
@pytest.fixture(autouse=True, scope="function")
def delete_index(ngw_env, index):
yield
ngw_env.audit.es.indices.delete(index)
@pytest.mark.parametrize("method", ["GET", "POST", "PUT", "DELETE"])
def test_audit_request_method(method, index, ngw_env, ngw_webtest_app):
getattr(ngw_webtest_app, method.lower())("/api/resource/0", expect_errors=True)
ngw_env.audit.es.indices.refresh(index=index)
assert one(ngw_env.audit.es, index)["request"]["method"] == method
@pytest.mark.parametrize("path", ["/api/resource/0", "/resource/0"])
def test_audit_request_path(path, index, ngw_env, ngw_webtest_app):
ngw_webtest_app.get(path, expect_errors=True)
ngw_env.audit.es.indices.refresh(index=index)
assert one(ngw_env.audit.es, index)["request"]["path"] == path
def test_audit_user(index, ngw_env, ngw_webtest_app):
ngw_webtest_app.get("/api/resource/0", expect_errors=True)
ngw_env.audit.es.indices.refresh(index=index)
assert one(ngw_env.audit.es, index)["user"]["id"] == 1
assert one(ngw_env.audit.es, index)["user"]["keyname"] == "guest"
assert one(ngw_env.audit.es, index)["user"]["display_name"] == "Guest"
@pytest.mark.parametrize("path, route_name", [
("/api/resource/0", "resource.item"),
("/resource/0", "resource.show"),
("/admin", None),
])
def test_audit_response_route_name(path, route_name, index, ngw_env, ngw_webtest_app):
ngw_webtest_app.get(path, expect_errors=True)
ngw_env.audit.es.indices.refresh(index=index)
assert one(ngw_env.audit.es, index)["response"].get("route_name") == route_name
@pytest.mark.parametrize("path", ["/api/resource/0", "/api/resource/-1"])
def test_audit_response_status_code(path, index, ngw_env, ngw_webtest_app):
response = ngw_webtest_app.get(path, expect_errors=True)
ngw_env.audit.es.indices.refresh(index=index)
assert one(ngw_env.audit.es, index)["response"]["status_code"] == response.status_code
| nextgis/nextgisweb | nextgisweb/audit/test/test_audit.py | Python | gpl-3.0 | 2,453 |
from flask_script import Manager,prompt_bool,prompt
from harrier.core import db
import harrier.model as model
manager = Manager(usage="Perform database operations")
@manager.command
def drop():
"Drops all database tables"
if prompt_bool("**DANGER AREA** Are you sure you want to proceed?"):
if prompt_bool("You are about to delete the ENTIRE database. Are you sure?"):
db.drop_all()
@manager.command
def create():
"Creates database tables"
if prompt_bool("**DANGER AREA** Are you sure you want to proceed?"):
if prompt_bool("You are about to create the ENTIRE database from scratch. Are you sure?"):
db.create_all()
@manager.command
def rebuild():
"Rebuild database tables"
if prompt_bool("**DANGER AREA** Are you sure you want to proceed?"):
if prompt_bool("You are about to rebuild the ENTIRE database from scratch. Are you sure?"):
db.drop_all()
db.create_all()
| ubccr/harrier | harrier/manage/database.py | Python | gpl-3.0 | 965 |
import h5py
import numpy as np
import pylab as pl
from matplotlib.mlab import PCA
from mpl_toolkits.mplot3d import Axes3D
name = "koendata.h5"
file = h5py.File(name, 'r')
def dataProcessing(h5file, rat, date):
    dataset = h5file[rat][date]["valueMatrix"]
    data = np.array(dataset)
pca = PCA(data)
print pca.fracs[0], pca.fracs[1], pca.fracs[2], pca.fracs[3]
pl.close('all')
fig1 = pl.figure()
ax = Axes3D(fig1)
ax.scatter(pca.Y[::1,0], pca.Y[::1,1], pca.Y[::1,2], 'bo')
ax.set_xlim([-10,20])
ax.set_ylim([-15,15])
ax.set_zlim([-15,10])
pl.savefig("3D_" +rat + "_" + date+".png")
pl.close('all')
pl.xlim([-10,20])
pl.ylim([-15,15])
pl.scatter(pca.Y[::1,0], pca.Y[::1,1])
pl.savefig("2D_" + rat + "_" + date + ".png")
for rat in file.keys():
for date in file[rat]:
try:
dataProcessing(file, rat, date)
except ValueError:
print "Probably NaN"
| CINPLA/bigdata | koen.py | Python | gpl-3.0 | 964 |
from util import hook, user, database
import os
import sys
import re
import json
import time
import subprocess
@hook.command(autohelp=False, adminonly=True)
def gadmins(inp, notice=None, bot=None):
"admins -- Lists bot's global admins."
if bot.config["admins"]:
notice("Admins are: %s." % ", ".join(bot.config["admins"]))
else:
notice("There are no users with global admin powers.")
return
@hook.command(adminonly=True)
def gadmin(inp, notice=None, bot=None, config=None, db=None):
"gadmin <add|del> <nick|host> -- Make <nick|host> an global admin." \
"(you can delete multiple admins at once)"
inp = inp.lower()
command = inp.split()[0]
targets = inp.split()[1:]
if 'add' in command:
for target in targets:
target = user.get_hostmask(target,db)
if target in bot.config["admins"]:
notice("%s is already a global admin." % target)
else:
notice("%s is now a global admin." % target)
bot.config["admins"].append(target)
bot.config["admins"].sort()
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
return
elif 'del' in command:
for target in targets:
if target in bot.config["admins"]:
notice("%s is no longer a global admin." % target)
bot.config["admins"].remove(target)
bot.config["admins"].sort()
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
else:
notice("%s is not a global admin." % target)
return
################################
### Ignore/Unignore Commands ###
@hook.command(permissions=["op_lock", "op"], adminonly=True, autohelp=False)
def gignored(inp, notice=None, bot=None, chan=None, db=None):
"""ignored [channel]-- Lists ignored channels/nicks/hosts."""
if bot.config["ignored"]:
notice("Global ignores are: %s." % ", ".join(bot.config["ignored"]))
else:
notice("There are no global ignores.")
return
@hook.command(permissions=["op_lock", "op"], adminonly=True, autohelp=False)
def gignore(inp, notice=None, bot=None, chan=None, db=None):
"""gignore <nick|host> -- Makes the bot ignore nick|host."""
ignorelist = bot.config["ignored"]
targets = inp.split()
for target in targets:
target = user.get_hostmask(target,db)
if (user.is_globaladmin(target,db,bot)):
notice("[Global]: {} is an admin and cannot be ignored.".format(inp))
else:
if ignorelist and target in ignorelist:
notice("[Global]: {} is already ignored.".format(target))
else:
bot.config["ignored"].append(target)
bot.config["ignored"].sort()
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
notice("[Global]: {} has been ignored.".format(target))
return
# if ignorelist and target in ignorelist:
# notice(u"[{}]: {} is already ignored.".format(chan, target))
# else:
# ignorelist = '{} {}'.format(target,ignorelist)
# database.set(db,'channels','ignored',ignorelist,'chan',chan)
# notice(u"[{}]: {} has been ignored.".format(chan,target))
# return
@hook.command(permissions=["op_lock", "op"], adminonly=True, autohelp=False)
def gunignore(inp, notice=None, bot=None, chan=None, db=None):
"""unignore [channel] <nick|host> -- Makes the bot listen to <nick|host>."""
ignorelist = bot.config["ignored"]
targets = inp.split()
for target in targets:
target = user.get_hostmask(target,db)
if ignorelist and target in ignorelist:
bot.config["ignored"].remove(target)
bot.config["ignored"].sort()
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
notice("[Global]: {} has been unignored.".format(target))
else:
notice("[Global]: {} is not ignored.".format(target))
return
@hook.command("quit", autohelp=False, permissions=["botcontrol"], adminonly=True)
@hook.command(autohelp=False, permissions=["botcontrol"],adminonly=True)
def stop(inp, nick=None, conn=None):
"""stop [reason] -- Kills the bot with [reason] as its quit message."""
if inp:
conn.cmd("QUIT", ["Killed by {} ({})".format(nick, inp)])
else:
conn.cmd("QUIT", ["Killed by {}.".format(nick)])
time.sleep(5)
os.execl("./bot", "bot", "stop")
@hook.command(autohelp=False, permissions=["botcontrol"], adminonly=True)
def restart(inp, nick=None, conn=None, bot=None):
"""restart [reason] -- Restarts the bot with [reason] as its quit message."""
for botcon in bot.conns:
if inp:
bot.conns[botcon].cmd("QUIT", ["Restarted by {} ({})".format(nick, inp)])
else:
bot.conns[botcon].cmd("QUIT", ["Restarted by {}.".format(nick)])
time.sleep(5)
#os.execl("./bot", "bot", "restart")
args = sys.argv[:]
args.insert(0, sys.executable)
os.execv(sys.executable, args)
@hook.command(autohelp=False, permissions=["botcontrol"], adminonly=True)
def clearlogs(inp, input=None):
"""clearlogs -- Clears the bots log(s)."""
subprocess.call(["./bot", "clear"])
@hook.command(autohelp=False, permissions=["botcontrol"], adminonly=True)
def join(inp, conn=None, notice=None, bot=None):
"""join <channel> -- Joins <channel>."""
for target in inp.split(" "):
if not target.startswith("#"):
target = "#{}".format(target)
notice("Attempting to join {}...".format(target))
conn.join(target)
channellist = bot.config["connections"][conn.name]["channels"]
if not target.lower() in channellist:
channellist.append(target.lower())
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
return
@hook.command(autohelp=False, permissions=["botcontrol"], adminonly=True)
def part(inp, conn=None, chan=None, notice=None, bot=None):
"""part <channel> -- Leaves <channel>.
If [channel] is blank the bot will leave the
channel the command was used in."""
if inp: targets = inp
else: targets = chan
channellist = bot.config["connections"][conn.name]["channels"]
for target in targets.split(" "):
if not target.startswith("#"):
target = "#{}".format(target)
if target in conn.channels:
notice("Attempting to leave {}...".format(target))
conn.part(target)
channellist.remove(target.lower().strip())
print('Deleted {} from channel list.'.format(target))
else:
notice("Not in {}!".format(target))
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
return
@hook.command(autohelp=False, permissions=["botcontrol"], adminonly=True)
def cycle(inp, conn=None, chan=None, notice=None):
"""cycle <channel> -- Cycles <channel>.
If [channel] is blank the bot will cycle the
channel the command was used in."""
if inp:
target = inp
else:
target = chan
notice("Attempting to cycle {}...".format(target))
conn.part(target)
conn.join(target)
return
@hook.command(permissions=["botcontrol"], adminonly=True)
def nick(inp, notice=None, conn=None):
"""nick <nick> -- Changes the bots nickname to <nick>."""
    if not re.match(r"^[A-Za-z0-9_|.\-\[\]]*$", inp.lower()):
notice("Invalid username!")
return
notice("Attempting to change nick to \"{}\"...".format(inp))
conn.set_nick(inp)
return
@hook.command(permissions=["botcontrol"], adminonly=True)
def raw(inp, conn=None, notice=None):
"""raw <command> -- Sends a RAW IRC command."""
notice("Raw command sent.")
conn.send(inp)
@hook.command(permissions=["botcontrol"], adminonly=True)
def say(inp, conn=None, chan=None):
"""say [channel] <message> -- Makes the bot say <message> in [channel].
If [channel] is blank the bot will say the <message> in the channel
the command was used in."""
inp = inp.split(" ")
if inp[0][0] == "#":
message = " ".join(inp[1:])
out = "PRIVMSG {} :{}".format(inp[0], message)
else:
message = " ".join(inp[0:])
out = "PRIVMSG {} :{}".format(chan, message)
conn.send(out)
@hook.command(adminonly=True)
def msg(inp, conn=None, chan=None, notice=None):
"msg <user> <message> -- Sends a Message."
    parts = inp.split(None, 1)
    user = parts[0]
    message = parts[1].strip() if len(parts) > 1 else ''
out = "PRIVMSG %s :%s" % (user, message)
conn.send(out)
@hook.command("act", permissions=["botcontrol"], adminonly=True)
@hook.command(permissions=["botcontrol"], adminonly=True)
def me(inp, conn=None, chan=None):
"""me [channel] <action> -- Makes the bot act out <action> in [channel].
If [channel] is blank the bot will act the <action> in the channel the
command was used in."""
inp = inp.split(" ")
if inp[0][0] == "#":
message = ""
for x in inp[1:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG {} :\x01ACTION {}\x01".format(inp[0], message)
else:
message = ""
for x in inp[0:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG {} :\x01ACTION {}\x01".format(chan, message)
conn.send(out)
@hook.command(channeladminonly=True)
def set(inp, conn=None, chan=None, db=None, notice=None):
"set <field> <nick> <value> -- Admin override for setting database values. " \
"Example: set location infinity 80210 - " \
"set lastfm infinity spookieboogie"
inpsplit = inp.split(" ")
    if len(inpsplit) == 2:
field = inp.split(" ")[0].strip()
value = inp.split(" ")[1].strip()
if 'voteban' in field or \
'votekick' in field:
database.set(db,'channels',field, value,'chan',chan)
notice("Set {} to {}.".format(field, value))
return
elif len(inpsplit) >= 3:
field = inp.split(" ")[0].strip()
nick = inp.split(" ")[1].strip()
value = inp.replace(field,'').replace(nick,'').strip()
if field and nick and value:
if 'del' in value or 'none' in value: value = ''
if 'location' in field or \
'fines' in field or\
'lastfm' in field or \
'desktop' in field or \
'battlestation' in field or\
'birthday' in field or\
'waifu' in field or\
'greeting' in field or\
'snapchat' in field:
#if type(value) is list: value = value[0]
                if value.lower() == 'none': database.set(db,'users',field, '','nick',nick)
else: database.set(db,'users',field, value,'nick',nick)
notice("Set {} for {} to {}.".format(field, nick, value))
return
notice("Could not set {}.".format(field))
return
@hook.command(adminonly=True, autohelp=False)
def db(inp,db=None):
split = inp.split(' ')
action = split[0]
if "init" in action:
result = db.execute("create table if not exists users(nick primary key, host, location, greeting, lastfm, fines, battlestation, desktop, horoscope, version)")
db.commit()
return result
elif "addcol" in action:
table = split[1]
col = split[2]
if table is not None and col is not None:
db.execute("ALTER TABLE {} ADD COLUMN {}".format(table,col))
            db.commit()
return "Added Column"
| bytebit-ch/uguubot | plugins/core_admin_global.py | Python | gpl-3.0 | 11,753 |
"""
com_dht11.py v1.0.0
Author: Bruno DELATTRE
Date: 02/10/2016
"""
import time
from dal import dal_dht11
from lib import com_logger
from lib.driver import com_gpio
class DHT11Result:
ERR_NO_ERROR = 0
ERR_MISSING_DATA = 1
ERR_CRC = 2
error_code = ERR_NO_ERROR
temperature = -1
humidity = -1
def __init__(self, error_code, temperature, humidity):
self.error_code = error_code
self.temperature = temperature
self.humidity = humidity
class DHT11:
__pin = 0
def __init__(self, pin):
self.__pin = pin
self.gpio = com_gpio.GPIODialog('DTH11')
self.gpio.setmodebcm()
    def __del__(self):
        self.gpio.cleanup()
def read(self, name, connection, cursor):
if self.gpio.importlib is not None:
dal = dal_dht11.DAL_DHT11(connection, cursor)
self.gpio.setup(self.__pin, self.gpio.OUT)
# send initial high
self.__send_and_sleep(self.gpio.HIGH, 0.05)
# pull down to low
self.__send_and_sleep(self.gpio.LOW, 0.02)
# change to input using pull up
self.gpio.setuppud(self.__pin, self.gpio.IN, self.gpio.PUD_UP)
# collect data into an array
data = self.__collect_input()
# parse lengths of all data pull up periods
pull_up_lengths = self.__parse_data_pull_up_lengths(data)
# if bit count mismatch, return error (4 byte data + 1 byte checksum)
if len(pull_up_lengths) != 40:
return DHT11Result(DHT11Result.ERR_MISSING_DATA, 0, 0)
# calculate bits from lengths of the pull up periods
bits = self.__calculate_bits(pull_up_lengths)
# we have the bits, calculate bytes
the_bytes = self.__bits_to_bytes(bits)
logger = com_logger.Logger('DHT11 ' + name)
# calculate checksum and check
checksum = self.__calculate_checksum(the_bytes)
if the_bytes[4] != checksum:
logger.debug('Checksum ERROR')
return DHT11Result(DHT11Result.ERR_CRC, 0, 0)
dal.set_dht11(name, str(the_bytes[2]), str(the_bytes[0]))
logger.info('Temperature:' + str(the_bytes[2]) + ' Humidity:' + str(the_bytes[0]))
# ok, we have valid data, return it
return DHT11Result(DHT11Result.ERR_NO_ERROR, the_bytes[2], the_bytes[0])
def __send_and_sleep(self, output, sleep):
self.gpio.setio(self.__pin, output)
time.sleep(sleep)
def __collect_input(self):
        # collect data until the signal stops changing
unchanged_count = 0
# this is used to determine where is the end of the data
max_unchanged_count = 100
last = -1
data = []
while True:
current = self.gpio.getio(self.__pin)
data.append(current)
if last != current:
unchanged_count = 0
last = current
else:
unchanged_count += 1
if unchanged_count > max_unchanged_count:
break
return data
def __parse_data_pull_up_lengths(self, data):
STATE_INIT_PULL_DOWN = 1
STATE_INIT_PULL_UP = 2
STATE_DATA_FIRST_PULL_DOWN = 3
STATE_DATA_PULL_UP = 4
STATE_DATA_PULL_DOWN = 5
state = STATE_INIT_PULL_DOWN
lengths = [] # will contain the lengths of data pull up periods
current_length = 0 # will contain the length of the previous period
for i in range(len(data)):
current = data[i]
current_length += 1
if state == STATE_INIT_PULL_DOWN:
if current == self.gpio.LOW:
# ok, we got the initial pull down
state = STATE_INIT_PULL_UP
continue
else:
continue
if state == STATE_INIT_PULL_UP:
if current == self.gpio.HIGH:
# ok, we got the initial pull up
state = STATE_DATA_FIRST_PULL_DOWN
continue
else:
continue
if state == STATE_DATA_FIRST_PULL_DOWN:
if current == self.gpio.LOW:
# we have the initial pull down, the next will be the data pull up
state = STATE_DATA_PULL_UP
continue
else:
continue
if state == STATE_DATA_PULL_UP:
if current == self.gpio.HIGH:
# data pulled up, the length of this pull up will determine whether it is 0 or 1
current_length = 0
state = STATE_DATA_PULL_DOWN
continue
else:
continue
if state == STATE_DATA_PULL_DOWN:
if current == self.gpio.LOW:
# pulled down, we store the length of the previous pull up period
lengths.append(current_length)
state = STATE_DATA_PULL_UP
continue
else:
continue
return lengths
def __calculate_bits(self, pull_up_lengths):
# find shortest and longest period
shortest_pull_up = 1000
longest_pull_up = 0
for i in range(0, len(pull_up_lengths)):
length = pull_up_lengths[i]
if length < shortest_pull_up:
shortest_pull_up = length
if length > longest_pull_up:
longest_pull_up = length
        # use the halfway point to determine whether a period is long or short
halfway = shortest_pull_up + (longest_pull_up - shortest_pull_up) / 2
bits = []
for i in range(0, len(pull_up_lengths)):
bit = False
if pull_up_lengths[i] > halfway:
bit = True
bits.append(bit)
return bits
def __bits_to_bytes(self, bits):
the_bytes = []
byte = 0
for i in range(0, len(bits)):
byte <<= 1
if bits[i]:
byte |= 1
else:
byte |= 0
if (i + 1) % 8 == 0:
the_bytes.append(byte)
byte = 0
return the_bytes
def __calculate_checksum(self, the_bytes):
return the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3] & 255
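# Hedged usage sketch, kept commented out since it needs real wiring and
# the project's DB handles; pin 4 and the sensor name are illustrative.
#
# sensor = DHT11(pin=4)
# result = sensor.read('dht11_kitchen', connection, cursor)
# if result.error_code == DHT11Result.ERR_NO_ERROR:
#     print('T={} H={}'.format(result.temperature, result.humidity))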
| delattreb/StratoBalloon | src/lib/driver/com_dht11.py | Python | gpl-3.0 | 6,901 |
"""
GNU Radio is a free & open-source software development toolkit that provides signal processing blocks to implement software radios. It can be used with readily-available low-cost external RF hardware to create software-defined radios, or without hardware in a simulation-like environment. It is widely used in hobbyist, academic and commercial environments to support both wireless communications research and real-world radio systems.
GNU Radio applications are primarily written using the Python programming language, while the supplied performance-critical signal-processing path is implemented in C++ using processor floating-point extensions, where available. Thus, the developer is able to implement real-time, high-throughput radio systems in a simple-to-use, rapid-application-development environment.
While not primarily a simulation tool, GNU Radio does support development of signal processing algorithms using pre-recorded or generated data, avoiding the need for actual RF hardware.
GNU Radio is licensed under the GNU General Public License (GPL) version 3. All of the code is copyright of the Free Software Foundation.
"""
# This file makes gnuradio a package
# The docstring will be associated with the top level of the package.
import os
# Check if the gnuradio package is installed or whether we're attempting to import it from
# the build directory.
path_ending = os.path.join('gnuradio-runtime', 'python', 'gnuradio', '__init__.py')
path = os.path.abspath(__file__)
if path.endswith('.pyc'):
path = path[:-1]
if path.endswith(path_ending):
    # We're importing it from the build directory.
build_path = os.path.join(path[:-len(path_ending)])
# Place these directories on __path__ so that their contents are
# part of the gnuradio package.
__path__.append(os.path.join(build_path, 'gr-utils', 'src', 'python'))
__path__.append(os.path.join(build_path, 'gr-blocks', 'python'))
__path__.append(os.path.join(build_path, 'gr-digital', 'python'))
__path__.append(os.path.join(build_path, 'gr-filter', 'python'))
__path__.append(os.path.join(build_path, 'gr-fft', 'python'))
__path__.append(os.path.join(build_path, 'gr-analog', 'python'))
__path__.append(os.path.join(build_path, 'gr-trellis', 'python'))
__path__.append(os.path.join(build_path, 'gr-wavelet', 'python'))
__path__.append(os.path.join(build_path, 'gr-audio', 'python'))
__path__.append(os.path.join(build_path, 'gr-qtgui', 'python'))
__path__.append(os.path.join(build_path, 'gr-wxgui', 'python'))
__path__.append(os.path.join(build_path, 'gr-atsc', 'python'))
__path__.append(os.path.join(build_path, 'gr-noaa', 'python'))
__path__.append(os.path.join(build_path, 'gr-pager', 'python'))
__path__.append(os.path.join(build_path, 'gr-video-sdl', 'python'))
__path__.append(os.path.join(build_path, 'gr-vocoder', 'python'))
__path__.append(os.path.join(build_path, 'gr-fcd', 'python'))
__path__.append(os.path.join(build_path, 'gr-comedi', 'python'))
__path__.append(os.path.join(build_path, 'gr-channels', 'python'))
__path__.append(os.path.join(build_path, 'gr-fec', 'python'))
__path__.append(os.path.join(build_path, 'gr-utils', 'python'))
__path__.append(os.path.join(build_path, 'gr-uhd', 'python'))
__path__.append(os.path.join(build_path, 'gr-zeromq', 'python'))
| ambikeshwar1991/gnuradio-3.7.4 | gnuradio-runtime/python/gnuradio/__init__.py | Python | gpl-3.0 | 3,362 |
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from indico.util.contextManager import ContextManager
import time
import pkg_resources
from persistent import Persistent
from hashlib import md5
from MaKaC.common.Counter import Counter
from MaKaC.common.utils import formatDateTime, parseDateTime
from MaKaC.common.timezoneUtils import getAdjustedDate, setAdjustedDate,\
datetimeToUnixTimeInt
from MaKaC.webinterface import wcomponents
from MaKaC.plugins import PluginsHolder
from MaKaC.errors import MaKaCError, NoReportError
from MaKaC.services.interface.rpc.common import ServiceError
from MaKaC.common.timezoneUtils import nowutc
from indico.core.logger import Logger
from MaKaC.common.indexes import IndexesHolder
from MaKaC.plugins.Collaboration.collaborationTools import CollaborationTools,\
MailTools
from MaKaC.plugins.Collaboration.urlHandlers import UHConfModifCollaboration
from indico.core.index import Catalog
from MaKaC.conference import Observer
from MaKaC.webinterface.common.tools import hasTags
from MaKaC.plugins.Collaboration import mail
from MaKaC.common.mail import GenericMailer
import os, inspect
from indico.modules.scheduler.client import Client
from indico.modules.scheduler.tasks import HTTPTask
from indico.util import json
from indico.util.date_time import now_utc
from MaKaC.common.fossilize import Fossilizable, fossilizes
from MaKaC.common.externalOperationsManager import ExternalOperationsManager
from BTrees.OOBTree import OOBTree
from MaKaC.plugins.Collaboration.fossils import ICSErrorBaseFossil, ICSSanitizationErrorFossil,\
ICSBookingBaseConfModifFossil, ICSBookingBaseIndexingFossil,\
ISpeakerWrapperBaseFossil
from MaKaC.conference import Contribution
class CSBookingManager(Persistent, Observer):
""" Class for managing the bookins of a meeting.
It will store the list of bookings. Adding / removing / editing bookings should be through this class.
"""
_shouldBeTitleNotified = True
_shouldBeDateChangeNotified = True
_shouldBeLocationChangeNotified = True
_shouldBeDeletionNotified = True
def __init__(self, conf):
""" Constructor for the CSBookingManager class.
conf: a Conference object. The meeting that owns this CSBookingManager.
"""
self._conf = conf
self._counter = Counter(1)
# a dict where the bookings will be stored. The key will be the booking id, the value a CSBookingBase object.
self._bookings = {}
# an index of bookings by type. The key will be a booking type (string), the value a list of booking id
self._bookingsByType = {}
        # an index of bookings to video services by event.uniqueId : video.uniqueId pairing.
self._bookingsToVideoServices = OOBTree()
# a list of ids with hidden bookings
self._hiddenBookings = set()
# an index of video services managers for each plugin. key: plugin name, value: list of users
self._managers = {}
# list of speaker wrapper for a conference
self._speakerWrapperList = []
self.updateSpeakerWrapperList()
# Send email to managers when Electronic Agreement accepted
self._notifyElectronicAgreementAnswer = True
def getOwner(self):
""" Returns the Conference (the meeting) that owns this CSBookingManager object.
"""
return self._conf
def isCSAllowed(self, user = None):
""" Returns if the associated event should display a Video Services tab
This can depend on the kind of event (meeting, lecture, conference), on the equipment of the room...
If a user is provided, we will take into account if the user can manage the plugin (for example,
an event manager cannot manage an admin-only plugin)
"""
pluginsPerEventType = CollaborationTools.getCollaborationPluginType().getOption("pluginsPerEventType").getValue()
if pluginsPerEventType:
for plugin in pluginsPerEventType[self._conf.getType()]:
if plugin.isActive() and (user is None or CollaborationTools.canUserManagePlugin(self._conf, plugin, user)):
return True
return False
def getAllowedPlugins(self):
""" Returns a list of allowed plugins (Plugin objects) for this event.
Only active plugins are returned.
This can depend on the kind of event (meeting, lecture, conference), on the equipment of the room...
"""
pluginsPerEventType = CollaborationTools.getCollaborationPluginType().getOption("pluginsPerEventType").getValue()
if pluginsPerEventType is not None:
allowedForThisEvent = pluginsPerEventType[self._conf.getType()]
return [plugin for plugin in allowedForThisEvent if plugin.isActive()]
def getBookingList(self, sorted = False, filterByType = None, notify = False, onlyPublic = False):
""" Returns a list of all the bookings.
If sorted = True, the list of bookings will be sorted by id.
If filterByType = None, all bookings are returned.
Otherwise, just those of the type "filterByType" if filterByType is a string,
or if it is a list of strings, those who have a type included in filterByType.
"""
if not hasattr(self, "_bookingsByType"): #TODO: remove when safe
self._bookingsByType = {}
if filterByType is not None:
if isinstance(filterByType, basestring):
keys = self._bookingsByType.get(filterByType, [])
elif isinstance(filterByType, list):
keys = []
for pluginName in filterByType:
keys.extend(self._bookingsByType.get(pluginName, []))
else:
raise ValueError('Unexpected filterByType type: {}'.format(type(filterByType)))
else:
keys = self._bookings.keys()
if onlyPublic and self.getHiddenBookings():
keys = set(keys)
keys = keys.difference(self.getHiddenBookings())
keys = list(keys)
if sorted:
keys.sort(key = lambda k: int(k))
bookingList = [self._bookings[k] for k in keys if not self._bookings[k].hasSessionOrContributionLink() or self._bookings[k].getLinkObject()]
#we notify all the bookings that they have been viewed. If a booking doesn't need to be viewed, nothing will happen
if notify:
for booking in bookingList:
if booking.needsToBeNotifiedOnView():
try:
booking._notifyOnView()
except Exception, e:
Logger.get('VideoServ').error("Exception while notifying to a booking that it is being viewed. Exception: " + str(e))
return bookingList
def getBooking(self, id):
""" Returns a booking given its id.
"""
return self._bookings.get(id,None)
def getSingleBooking(self, type, notify = False):
""" Returns the single booking of a plugin who only allows one booking.
type: a string with the name of the plugin
If the plugin actually allows multiple bookings, an exception will be thrown
If the plugin has no booking, None will be returned.
Otherwise the booking will be returned
"""
if CollaborationTools.getCSBookingClass(type)._allowMultiple:
raise CollaborationException("Plugin type " + str(type) + " is not a single-booking plugin")
blist = self._bookingsByType.get(type,[])
if blist:
booking = self._bookings[blist[0]]
if notify:
try:
booking._notifyOnView()
except Exception, e:
Logger.get('VideoServ').error("Exception while notifying to a booking that it is being viewed. Exception: " + str(e))
return booking
else:
return None
def getHiddenBookings(self):
if not hasattr(self, '_hiddenBookings'):
self._hiddenBookings = set()
return self._hiddenBookings
def hasBookings(self):
return len(self._bookings) > 0
def canCreateBooking(self, type):
""" Returns if it's possible to create a booking of this given type
"""
if not CollaborationTools.getCSBookingClass(type)._allowMultiple:
return len(self.getBookingList(filterByType = type)) == 0
return True
def checkVideoLink(self, bookingParams):
if bookingParams.get('videoLinkType',"") == "session":
sessSlotId = bookingParams.get("videoLinkSession","")
import re
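            # Linked slots arrive encoded as "s<sessionId>l<slotId>",
            # e.g. "s3l0" for slot 0 of session 3; the pattern below also
            # accepts 'a' in the session id.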
regExp = re.match(r"""(s[0-9a]*)(l[0-9]*)""", sessSlotId)
if not regExp:
raise CollaborationException(_('No session has been passed when the type is session.'))
sessionId = regExp.group(1)[1:]
slotId = regExp.group(2)[1:]
session = self._conf.getSessionById(sessionId)
if session is None:
raise CollaborationException(_('The session does not exist.'))
slot = session.getSlotById(slotId)
if slot is None:
                raise CollaborationException(_('The slot does not exist.'))
return slot.getUniqueId()
elif bookingParams.get('videoLinkType',"") == "contribution":
contId = bookingParams.get("videoLinkContribution","")
if contId == "":
raise CollaborationException(_('No contribution has been passed when the type is contribution.'))
cont = self._conf.getContributionById(contId)
if cont is None:
raise CollaborationException(_('The contribution does not exist.'))
return cont.getUniqueId()
return self._conf.getUniqueId()
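    # Illustrative bookingParams accepted by checkVideoLink (hypothetical ids; the
    # "s<sessionId>l<slotId>" format follows the regular expression used above):
    #
    #     {"videoLinkType": "session", "videoLinkSession": "s0l1"}
    #     {"videoLinkType": "contribution", "videoLinkContribution": "12"}
    #     {"videoLinkType": "event"}    # falls back to the event's unique id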
def addBooking(self, booking):
""" Adds an existing booking to the list of bookings.
booking: The existing booking to be added.
"""
booking.setId( self._getNewBookingId())
self._bookings[booking.getId()] = booking
self._bookingsByType.setdefault(booking.getType(),[]).append(booking.getId())
if booking.isHidden():
self.getHiddenBookings().add(booking.getId())
self._indexBooking(booking)
booking.index_instances()
self._notifyModification()
        # the unique id can be different for the new conference
booking.setLinkType({booking.getLinkType():ContextManager.get('clone.unique_id_map').get(booking.getLinkId(),"")})
if booking.hasSessionOrContributionLink():
linkObject = booking.getLinkObject()
bp=booking.getBookingParams()
if isinstance(linkObject, Contribution):
bp["videoLinkContribution"] = linkObject.getId()
else: #session
bp["videoLinkSession"] = linkObject.getId()
booking.setBookingParams(bp)
self.addVideoService(booking.getLinkId(), booking)
    def _createBooking(self, bookingType, bookingParams=None, operation="_create"):
        if bookingParams is None:
            bookingParams = {}
if self.canCreateBooking(bookingType):
uniqueId = self.checkVideoLink(bookingParams)
if (self.hasVideoService(uniqueId) and bookingParams.has_key("videoLinkType") and bookingParams.get("videoLinkType","") != "event"): # Restriction: 1 video service per session or contribution.
raise NoReportError(_('Only one video service per contribution or session is allowed.'))
newBooking = CollaborationTools.getCSBookingClass(bookingType)(bookingType, self._conf)
if bookingParams.has_key("videoLinkType"):
newBooking.setLinkType({bookingParams["videoLinkType"] : uniqueId})
error = newBooking.setBookingParams(bookingParams)
if isinstance(error, CSErrorBase):
return error
elif error:
raise CollaborationServiceException("Problem while creating a booking of type " + bookingType)
else:
newId = self._getNewBookingId()
newBooking.setId(newId)
createResult = getattr(newBooking, operation)()
if isinstance(createResult, CSErrorBase):
return createResult
else:
self._bookings[newId] = newBooking
self._bookingsByType.setdefault(bookingType,[]).append(newId)
if newBooking.isHidden():
self.getHiddenBookings().add(newId)
newBooking.index_instances()
self._indexBooking(newBooking)
self._notifyModification()
if uniqueId is not None: # if we're here and uniqueId has a value, register the video service.
self.addVideoService(uniqueId, newBooking)
if MailTools.needToSendEmails(bookingType):
newBooking._sendNotifications('new')
return newBooking
else:
#we raise an exception because the web interface should take care of this never actually happening
            raise CollaborationServiceException(bookingType + " only allows creating 1 booking per event")
    def createBooking(self, bookingType, bookingParams=None):
""" Adds a new booking to the list of bookings.
The id of the new booking is auto-generated incrementally.
After generating the booking, its "performBooking" method will be called.
bookingType: a String with the booking's plugin. Example: "DummyPlugin", "EVO"
bookingParams: a dictionary with the parameters necessary to create the booking.
"create the booking" usually means Indico deciding if the booking can take place.
if "startDate" and "endDate" are among the keys, they will be taken out of the dictionary.
"""
return self._createBooking(bookingType, bookingParams)
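    # Sketch (hypothetical handler): creating a booking; note that a CSErrorBase
    # instance is returned instead of a booking when the plugin reports a problem:
    #
    #     result = manager.createBooking("DummyPlugin", {"videoLinkType": "event"})
    #     if isinstance(result, CSErrorBase):
    #         reportToUser(result)    # reportToUser is a placeholder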
    def attachBooking(self, bookingType, bookingParams=None):
        """ Attaches an existing booking to the list of bookings.
            The checks and the params are the same as for creating a booking.
        """
        if bookingParams is None:
            bookingParams = {}
        for booking in self.getBookingList(filterByType=bookingType):
result = booking.checkAttachParams(bookingParams)
if isinstance(result, CSErrorBase):
return result
return self._createBooking(bookingType, bookingParams, "_attach")
def searchBookings(self, bookingType, user, query, offset=0, limit=None):
""" Adds a new booking to the list of bookings.
The id of the new booking is auto-generated incrementally.
After generating the booking, its "performBooking" method will be called.
bookingType: a String with the booking's plugin. Example: "DummyPlugin", "EVO"
bookingParams: a dictionary with the parameters necessary to create the booking.
"create the booking" usually means Indico deciding if the booking can take place.
if "startDate" and "endDate" are among the keys, they will be taken out of the dictionary.
"""
if CollaborationTools.hasOption(bookingType, "searchAllow") \
and CollaborationTools.getOptionValue(bookingType, "searchAllow"):
res = CollaborationTools.getCSBookingClass(bookingType)._search(user, query, offset, limit)
return {'results': res[0],
'offset': res[1]}
else:
raise CollaborationException("Plugin type " + str(bookingType) + " does not allow search.")
def _indexBooking(self, booking, index_names=None):
indexes = self._getIndexList(booking)
if index_names is not None:
ci = IndexesHolder().getById('collaboration')
all_indexes = list(ci.getIndex(index) for index in index_names)
indexes = list(index for index in all_indexes if index in indexes)
if booking.shouldBeIndexed():
for index in indexes:
index.indexBooking(booking)
def changeBooking(self, bookingId, bookingParams):
"""
Changes the bookingParams of a CSBookingBase object.
After updating the booking, its 'performBooking' method will be called.
bookingId: the id of the CSBookingBase object to change
bookingParams: a dictionary with the new parameters that will modify the booking
'modify the booking' can mean that maybe the booking will be rejected with the new parameters.
if 'startDate' and 'endDate' are among the keys, they will be taken out of the dictionary.
"""
booking = self.getBooking(bookingId)
oldStartDate = booking.getStartDate()
oldModificationDate = booking.getModificationDate()
oldBookingParams = booking.getBookingParams() #this is a copy so it's ok
booking.unindex_instances()
error = booking.setBookingParams(bookingParams)
if isinstance(error, CSSanitizationError):
return error
elif error:
CSBookingManager._rollbackChanges(booking, oldBookingParams, oldModificationDate)
if isinstance(error, CSErrorBase):
return error
raise CollaborationServiceException("Problem while modifying a booking of type " + booking.getType())
else:
modifyResult = booking._modify(oldBookingParams)
if isinstance(modifyResult, CSErrorBase):
CSBookingManager._rollbackChanges(booking, oldBookingParams, oldModificationDate)
return modifyResult
else:
modificationDate = now_utc()
booking.setModificationDate(modificationDate)
if booking.isHidden():
self.getHiddenBookings().add(booking.getId())
elif booking.getId() in self.getHiddenBookings():
self.getHiddenBookings().remove(booking.getId())
eventLinkUpdated = False
newLinkId = self.checkVideoLink(bookingParams)
if bookingParams.has_key("videoLinkType"):
oldLinkData = booking.getLinkIdDict()
oldLinkId = oldLinkData.values()[0]
# Details changed, we need to remove the association and re-create it
if not (oldLinkData.has_key(bookingParams.get('videoLinkType','')) and oldLinkId == newLinkId):
self.removeVideoSingleService(booking.getLinkId(), booking)
eventLinkUpdated = True
if eventLinkUpdated or (bookingParams.has_key("videoLinkType") and bookingParams.get("videoLinkType","") != "event"):
if self.hasVideoService(newLinkId, booking):
pass # No change in the event linking
elif newLinkId is not None:
if (self.hasVideoService(newLinkId) and bookingParams.has_key("videoLinkType") and bookingParams.get("videoLinkType","") != "event"): # Restriction: 1 video service per session or contribution.
raise NoReportError(_('Only one video service per contribution or session is allowed.'))
else:
self.addVideoService(newLinkId, booking)
if bookingParams.has_key("videoLinkType"):
booking.setLinkType({bookingParams['videoLinkType']: newLinkId})
else: # If it's still None, event linking has been completely removed.
booking.resetLinkParams()
self._changeStartDateInIndex(booking, oldStartDate, booking.getStartDate())
self._changeModificationDateInIndex(booking, oldModificationDate, modificationDate)
booking.index_instances()
if booking.hasAcceptReject():
if booking.getAcceptRejectStatus() is not None:
booking.clearAcceptRejectStatus()
self._addToPendingIndex(booking)
self._notifyModification()
if MailTools.needToSendEmails(booking.getType()):
booking._sendNotifications('modify')
return booking
@classmethod
def _rollbackChanges(cls, booking, oldBookingParams, oldModificationDate):
booking.setBookingParams(oldBookingParams)
booking.setModificationDate(oldModificationDate)
def _changeConfTitleInIndex(self, booking, oldTitle, newTitle):
if booking.shouldBeIndexed():
indexes = self._getIndexList(booking)
for index in indexes:
index.changeEventTitle(booking, oldTitle, newTitle)
def _changeStartDateInIndex(self, booking, oldStartDate, newStartDate):
if booking.shouldBeIndexed() and booking.hasStartDate():
indexes = self._getIndexList(booking)
for index in indexes:
index.changeStartDate(booking, oldStartDate, newStartDate)
def _changeModificationDateInIndex(self, booking, oldModificationDate, newModificationDate):
if booking.shouldBeIndexed():
indexes = self._getIndexList(booking)
for index in indexes:
index.changeModificationDate(booking, oldModificationDate, newModificationDate)
def _changeConfStartDateInIndex(self, booking, oldConfStartDate, newConfStartDate):
if booking.shouldBeIndexed() and oldConfStartDate is not None and newConfStartDate is not None:
indexes = self._getIndexList(booking)
for index in indexes:
index.changeConfStartDate(booking, oldConfStartDate, newConfStartDate)
def removeBooking(self, id):
""" Removes a booking given its id.
"""
booking = self.getBooking(id)
bookingType = booking.getType()
bookingLinkId = booking.getLinkId()
removeResult = booking._delete()
if isinstance(removeResult, CSErrorBase):
return removeResult
else:
del self._bookings[id]
self._bookingsByType[bookingType].remove(id)
if not self._bookingsByType[bookingType]:
del self._bookingsByType[bookingType]
if id in self.getHiddenBookings():
self.getHiddenBookings().remove(id)
# If there is an association to a session or contribution, remove it
if bookingLinkId is not None:
self.removeVideoSingleService(bookingLinkId, booking)
booking.unindex_instances()
self._unindexBooking(booking)
self._notifyModification()
if MailTools.needToSendEmails(booking.getType()):
booking._sendNotifications('remove')
return booking
def _unindexBooking(self, booking):
if booking.shouldBeIndexed() and not booking.keepForever():
indexes = self._getIndexList(booking)
for index in indexes:
index.unindexBooking(booking)
def startBooking(self, id):
booking = self._bookings[id]
if booking.canBeStarted():
booking._start()
return booking
else:
raise CollaborationException(_("Tried to start booking ") + str(id) + _(" of meeting ") + str(self._conf.getId()) + _(" but this booking cannot be started."))
def stopBooking(self, id):
booking = self._bookings[id]
if booking.canBeStopped():
booking._stop()
return booking
else:
raise CollaborationException(_("Tried to stop booking ") + str(id) + _(" of meeting ") + str(self._conf.getId()) + _(" but this booking cannot be stopped."))
def checkBookingStatus(self, id):
booking = self._bookings[id]
if booking.hasCheckStatus():
result = booking._checkStatus()
if isinstance(result, CSErrorBase):
return result
else:
return booking
else:
raise ServiceError(message=_("Tried to check status of booking ") + str(id) + _(" of meeting ") + str(self._conf.getId()) + _(" but this booking does not support the check status service."))
def acceptBooking(self, id, user = None):
booking = self._bookings[id]
if booking.hasAcceptReject():
if booking.getAcceptRejectStatus() is None:
self._removeFromPendingIndex(booking)
booking.accept(user)
return booking
else:
raise ServiceError(message=_("Tried to accept booking ") + str(id) + _(" of meeting ") + str(self._conf.getId()) + _(" but this booking cannot be accepted."))
def rejectBooking(self, id, reason):
booking = self._bookings[id]
if booking.hasAcceptReject():
if booking.getAcceptRejectStatus() is None:
self._removeFromPendingIndex(booking)
booking.reject(reason)
return booking
else:
raise ServiceError("ERR-COLL10", _("Tried to reject booking ") + str(id) + _(" of meeting ") + str(self._conf.getId()) + _(" but this booking cannot be rejected."))
def makeMeModeratorBooking(self, id, user):
booking = self._bookings[id]
bookingParams = booking.getBookingParams()
bookingParams["owner"] = user
return self.changeBooking(id,bookingParams)
def _addToPendingIndex(self, booking):
if booking.shouldBeIndexed():
indexes = self._getPendingIndexList(booking)
for index in indexes:
index.indexBooking(booking)
def _removeFromPendingIndex(self, booking):
if booking.shouldBeIndexed():
indexes = self._getPendingIndexList(booking)
for index in indexes:
index.unindexBooking(booking)
def _getNewBookingId(self):
return self._counter.newCount()
def _getIndexList(self, booking):
""" Returns a list of BookingsIndex objects where the booking should be indexed.
This list includes:
-an index of all bookings
-an index of bookings of the given type
-an index of all bookings in the category of the event
-an index of booking of the given type, in the category of the event
If the booking type declared common indexes:
-the common indexes
-the common indexes for the category of the event
If the booking is of the Accept/Reject type
-same indexes as above, but only for pending bookings
"""
collaborationIndex = IndexesHolder().getById("collaboration")
indexes = [collaborationIndex.getAllBookingsIndex(),
collaborationIndex.getIndex(booking.getType())]
for commonIndexName in booking.getCommonIndexes():
indexes.append(collaborationIndex.getIndex(commonIndexName))
if booking.hasAcceptReject() and booking.getAcceptRejectStatus() is None:
indexes.extend(self._getPendingIndexList(booking))
return indexes
def _getPendingIndexList(self, booking):
collaborationIndex = IndexesHolder().getById("collaboration")
indexes = [collaborationIndex.getIndex("all_pending"),
collaborationIndex.getIndex(booking.getType() + "_pending")]
for commonIndexName in booking.getCommonIndexes():
indexes.append(collaborationIndex.getIndex(commonIndexName + "_pending"))
return indexes
def getManagers(self):
if not hasattr(self, "_managers"):
self._managers = {}
return self._managers
def addPluginManager(self, plugin, user):
        #TODO: use .linkTo on the user. To be done when the list of roles of a user is actually needed for something...
self.getManagers().setdefault(plugin, []).append(user)
self._notifyModification()
def removePluginManager(self, plugin, user):
        #TODO: use .unlinkTo on the user. To be done when the list of roles of a user is actually needed for something...
if user in self.getManagers().setdefault(plugin,[]):
self.getManagers()[plugin].remove(user)
self._notifyModification()
def getVideoServicesManagers(self):
return self.getManagers().setdefault('all', [])
def isVideoServicesManager(self, user):
return user in self.getManagers().setdefault('all', [])
def getPluginManagers(self, plugin):
return self.getManagers().setdefault(plugin, [])
def isPluginManager(self, plugin, user):
return user in self.getManagers().setdefault(plugin, [])
def getAllManagers(self):
""" Returns a list with all the managers, no matter their type
The returned list is not ordered.
"""
managers = set()
for managerList in self.getManagers().itervalues():
managers = managers.union(managerList)
return list(managers)
def isPluginManagerOfAnyPlugin(self, user):
        #TODO: this method is not optimal. To be optimal, we should store an index somewhere
        #where the key is the user and the value is the list of plugins they manage.
        #This could be done with .getLinkTo, but we would need to change the .linkTo method to add extra information
        #(since we cannot create a role for each plugin).
if self.isVideoServicesManager(user):
return True
else:
for plugin in self.getManagers().iterkeys():
if self.isPluginManager(plugin, user):
return True
return False
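    # Sketch of the manager-role API above (hypothetical user value); the special
    # plugin key 'all' denotes Video Services managers:
    #
    #     manager.addPluginManager("EVO", someUser)    # manager of one plugin
    #     manager.isPluginManager("EVO", someUser)     # -> True
    #     manager.addPluginManager("all", someUser)
    #     manager.isVideoServicesManager(someUser)     # -> True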
def notifyTitleChange(self, oldTitle, newTitle):
""" Notifies the CSBookingManager that the title of the event (meeting) it's attached to has changed.
The CSBookingManager will reindex all its bookings in the event title index.
This method will be called by the event (meeting) object
"""
for booking in self.getBookingList():
try:
self._changeConfTitleInIndex(booking, oldTitle, newTitle)
except Exception, e:
Logger.get('VideoServ').exception("Exception while reindexing a booking in the event title index because its event's title changed: " + str(e))
def notifyInfoChange(self):
self.updateSpeakerWrapperList()
def notifyEventDateChanges(self, oldStartDate = None, newStartDate = None, oldEndDate = None, newEndDate = None):
""" Notifies the CSBookingManager that the start and / or end dates of the event it's attached to have changed.
The CSBookingManager will change the dates of all the bookings that want to be updated.
If there are problems (such as a booking not being able to be modified)
it will write a list of strings describing the problems in the 'dateChangeNotificationProblems' context variable.
(each string is produced by the _booking2NotifyProblem method).
This method will be called by the event (meeting) object.
"""
startDateChanged = oldStartDate is not None and newStartDate is not None and not oldStartDate == newStartDate
endDateChanged = oldEndDate is not None and newEndDate is not None and not oldEndDate == newEndDate
someDateChanged = startDateChanged or endDateChanged
Logger.get("VideoServ").info("""CSBookingManager: starting notifyEventDateChanges. Arguments: confId=%s, oldStartDate=%s, newStartDate=%s, oldEndDate=%s, newEndDate=%s""" %
(str(self._conf.getId()), str(oldStartDate), str(newStartDate), str(oldEndDate), str(newEndDate)))
if someDateChanged:
problems = []
for booking in self.getBookingList():
# booking "instances" provide higher granularity in search
booking.unindex_instances()
booking.index_instances()
if startDateChanged:
try:
self._changeConfStartDateInIndex(booking, oldStartDate, newStartDate)
except Exception, e:
Logger.get('VideoServ').error("Exception while reindexing a booking in the event start date index because its event's start date changed: " + str(e))
if booking.hasStartDate():
if booking.needsToBeNotifiedOfDateChanges():
Logger.get("VideoServ").info("""CSBookingManager: notifying date changes to booking %s of event %s""" %
(str(booking.getId()), str(self._conf.getId())))
oldBookingStartDate = booking.getStartDate()
oldBookingEndDate = booking.getEndDate()
oldBookingParams = booking.getBookingParams() #this is a copy so it's ok
if startDateChanged:
booking.setStartDate(oldBookingStartDate + (newStartDate - oldStartDate) )
if endDateChanged:
booking.setEndDate(oldBookingEndDate + (newEndDate - oldEndDate) )
rollback = False
modifyResult = None
try:
modifyResult = booking._modify(oldBookingParams)
if isinstance(modifyResult, CSErrorBase):
Logger.get('VideoServ').warning("""Error while changing the dates of booking %s of event %s after event dates changed: %s""" %
(str(booking.getId()), str(self._conf.getId()), modifyResult.getLogMessage()))
rollback = True
except Exception, e:
Logger.get('VideoServ').error("""Exception while changing the dates of booking %s of event %s after event dates changed: %s""" %
(str(booking.getId()), str(self._conf.getId()), str(e)))
rollback = True
if rollback:
booking.setStartDate(oldBookingStartDate)
booking.setEndDate(oldBookingEndDate)
problems.append(CSBookingManager._booking2NotifyProblem(booking, modifyResult))
elif startDateChanged:
self._changeStartDateInIndex(booking, oldBookingStartDate, booking.getStartDate())
if hasattr(booking, "notifyEventDateChanges"):
try:
booking.notifyEventDateChanges(oldStartDate, newStartDate, oldEndDate, newEndDate)
except Exception, e:
Logger.get('VideoServ').exception("Exception while notifying a plugin of an event date changed: " + str(e))
if problems:
ContextManager.get('dateChangeNotificationProblems')['Collaboration'] = [
'Some Video Services bookings could not be moved:',
problems,
'Go to [[' + str(UHConfModifCollaboration.getURL(self.getOwner(), secure = ContextManager.get('currentRH').use_https())) + ' the Video Services section]] to modify them yourself.'
]
def notifyTimezoneChange(self, oldTimezone, newTimezone):
""" Notifies the CSBookingManager that the timezone of the event it's attached to has changed.
The CSBookingManager will change the dates of all the bookings that want to be updated.
This method will be called by the event (Conference) object
"""
return []
def notifyLocationChange(self):
for booking in self.getBookingList():
if hasattr(booking, "notifyLocationChange"):
try:
booking.notifyLocationChange()
except Exception, e:
Logger.get('VideoServ').exception("Exception while notifying a plugin of a location change: " + str(e))
@classmethod
def _booking2NotifyProblem(cls, booking, modifyError):
""" Turns a booking into a string used to tell the user
why a date change of a booking triggered by the event's start or end date change
went bad.
"""
message = []
message.extend(["The dates of the ", booking.getType(), " booking"])
if booking.hasTitle():
message.extend([': "', booking._getTitle(), '" (', booking.getStartDateAsString(), ' - ', booking.getEndDateAsString(), ')'])
else:
message.extend([' ongoing from ', booking.getStartDateAsString(), ' to ', booking.getEndDateAsString(), ''])
message.append(' could not be changed.')
if modifyError and modifyError.getUserMessage():
message.extend([' Reason: ', modifyError.getUserMessage()])
return "".join(message)
def notifyDeletion(self):
""" Notifies the CSBookingManager that the Conference object it is attached to has been deleted.
The CSBookingManager will change the dates of all the bookings that want to be updated.
This method will be called by the event (Conference) object
"""
for booking in self.getBookingList():
try:
                # We only delete bookings linked to the event itself, not those linked to a Contribution or Session
if booking.getLinkType() and booking.getLinkType() != "event":
continue
removeResult = booking._delete()
if isinstance(removeResult, CSErrorBase):
Logger.get('VideoServ').warning("Error while deleting a booking of type %s after deleting an event: %s"%(booking.getType(), removeResult.getLogMessage() ))
booking.unindex_instances()
self._unindexBooking(booking)
except Exception, e:
Logger.get('VideoServ').exception("Exception while deleting a booking of type %s after deleting an event: %s" % (booking.getType(), str(e)))
def getEventDisplayPlugins(self, sorted = False):
""" Returns a list of names (strings) of plugins which have been configured
as showing bookings in the event display page, and which have bookings
already (or previously) created in the event.
(does not check if the bookings are hidden or not)
"""
pluginsWithEventDisplay = CollaborationTools.pluginsWithEventDisplay()
l = []
for pluginName in self._bookingsByType:
if pluginName in pluginsWithEventDisplay:
l.append(pluginName)
if sorted:
l.sort()
return l
    def createTestBooking(self, bookingParams=None):
        """ Creates a 'test' booking for performance tests.
            Avoids using any plugin except DummyPlugin.
        """
        if bookingParams is None:
            bookingParams = {}
from MaKaC.plugins.Collaboration.DummyPlugin.collaboration import CSBooking as DummyBooking
bookingType = 'DummyPlugin'
newBooking = DummyBooking(bookingType, self._conf)
error = newBooking.setBookingParams(bookingParams)
if error:
raise CollaborationServiceException("Problem while creating a test booking")
else:
newId = self._getNewBookingId()
newBooking.setId(newId)
createResult = newBooking._create()
if isinstance(createResult, CSErrorBase):
return createResult
else:
self._bookings[newId] = newBooking
self._bookingsByType.setdefault(bookingType,[]).append(newId)
if newBooking.isHidden():
self.getHiddenBookings().add(newId)
self._indexBooking(newBooking)
self._notifyModification()
return newBooking
def _notifyModification(self):
self._p_changed = 1
def getSortedContributionSpeaker(self, exclusive):
        ''' Builds a dictionary that classifies the contribution/speakers into those
            that are in the recording request, in the webcast request, or in both.
            bool: exclusive - if True, the three dicts (recording, webcast, both) will
                  have disjoint speaker lists (no repetition allowed):
                  if an element is present in 'both', it is removed from
                  'recording' and 'webcast'.
            returns d = { 'recording': {}, 'webcast' : {}, 'both': {} }
        '''
d = {}
recordingBooking = self.getSingleBooking("RecordingRequest")
webcastBooking = self.getSingleBooking("WebcastRequest")
d["recording"] = recordingBooking.getContributionSpeakerSingleBooking() if recordingBooking else {}
d["webcast"] = webcastBooking.getContributionSpeakerSingleBooking() if webcastBooking else {}
contributions = {}
        ''' Look for speaker intersections between the 'recording' and 'webcast' dicts
            and put them in the 'both' dict. Additionally, if 'exclusive' is True,
            any intersection found is also removed from the original dicts.
        '''
for cont in d["recording"].copy():
if cont in d["webcast"].copy():
# Check if same contribution/speaker in 'recording' and 'webcast'
intersection = set(d['recording'][cont]) & set(d['webcast'][cont])
if intersection:
contributions[cont] = list(intersection)
# if exclusive is True, and as we found same contribution/speaker,
# we delete them from 'recording' and 'webcast' dicts
if exclusive:
exclusion = set(d['recording'][cont]) ^ set(contributions[cont])
if not exclusion:
del d["recording"][cont]
else:
d["recording"][cont] = list(exclusion)
exclusion = set(d['webcast'][cont]) ^ set(contributions[cont])
if not exclusion:
del d["webcast"][cont]
else:
d["webcast"][cont] = list(exclusion)
d["both"] = contributions
return d
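    # Shape of the returned dict, sketched with hypothetical contribution ids and
    # speaker objects, for exclusive=True (speakers in 'both' are removed from the
    # other two dicts):
    #
    #     {'recording': {'3': [spkA]},
    #      'webcast':   {'5': [spkC]},
    #      'both':      {'3': [spkB]}}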
def getContributionSpeakerByType(self, requestType):
''' Return a plain dict of contribution/speaker according to the requestType
if the request type is 'both', we need to merge the lists
'''
d = self.getSortedContributionSpeaker(False) # We want non exclusive dict
if requestType == "recording":
return d['recording']
elif requestType == "webcast":
return d['webcast']
elif requestType == "both":
# We merge 'recording' and 'webcast'
m = dict(((cont, list(set(spks) | \
set(d['webcast'].get(cont, [])))) for cont, spks in d['recording'].iteritems()))
m.update(dict((cont, spks) for cont, spks in d['webcast'].iteritems() if cont not in m))
return m
else:
return {}
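    # Merge semantics for requestType == "both", sketched with hypothetical data:
    # the speaker lists of a contribution present in both dicts are unioned:
    #
    #     d['recording'] == {'3': [spkA]}, d['webcast'] == {'3': [spkB], '5': [spkC]}
    #     getContributionSpeakerByType("both") -> {'3': [spkA, spkB], '5': [spkC]}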
def updateSpeakerWrapperList(self, newList = False):
"""
        If the newList arg is True, don't check whether there is an existing speakerWrapperList
        and create a new one straight away. (Done to avoid loops.)
"""
SWList = []
contributions = self.getSortedContributionSpeaker(True)
        requestTypes = ['recording', 'webcast', 'both']
        for requestType in requestTypes:
            for cont in contributions[requestType]:
                for spk in contributions[requestType][cont]:
                    if newList:
                        sw = None
                    else:
                        sw = self.getSpeakerWrapperByUniqueId("%s.%s" % (cont, spk.getId()))
                    if sw:
                        if not sw.getObject().getEmail():
                            if sw.getStatus() not in [SpeakerStatusEnum.SIGNED,
                                                      SpeakerStatusEnum.FROMFILE,
                                                      SpeakerStatusEnum.REFUSED]:
                                sw.setStatus(SpeakerStatusEnum.NOEMAIL)
                        elif sw.getStatus() == SpeakerStatusEnum.NOEMAIL:
                            sw.setStatus(SpeakerStatusEnum.NOTSIGNED)
                        sw.setRequestType(requestType)
                        SWList.append(sw)
                    else:
                        newSw = SpeakerWrapper(spk, cont, requestType)
                        if not newSw.getObject().getEmail():
                            newSw.setStatus(SpeakerStatusEnum.NOEMAIL)
                        SWList.append(newSw)
self._speakerWrapperList = SWList
def getSpeakerWrapperList(self):
if not hasattr(self, "_speakerWrapperList"):#TODO: remove when safe
self.updateSpeakerWrapperList(True)
return self._speakerWrapperList
def getSpeakerWrapperByUniqueId(self, id):
if not hasattr(self, "_speakerWrapperList"):#TODO: remove when safe
self.updateSpeakerWrapperList(True)
for spkWrap in self._speakerWrapperList:
if spkWrap.getUniqueId() == id:
return spkWrap
return None
    def areSignatureCompleted(self):
        for spkWrap in self._speakerWrapperList:
            if spkWrap.getStatus() != SpeakerStatusEnum.FROMFILE and \
                    spkWrap.getStatus() != SpeakerStatusEnum.SIGNED:
                return False
        return True
def getSpeakerWrapperListByStatus(self, status):
        '''Return a list of SpeakerWrapper objects matching the given status.
        '''
        result = []
        for spkWrap in self._speakerWrapperList:
            if spkWrap.getStatus() == status:
                result.append(spkWrap)
        return result
def getSpeakerEmailByUniqueId(self, id, user):
''' Return the email of a speaker according to the uniqueId.
id: uniqueId of the speaker wrapper.
user: user object of the sender of the emails, in order to check the rights.
'''
canManageRequest = CollaborationTools.getRequestTypeUserCanManage(self._conf, user)
requestTypeAccepted = ""
if canManageRequest == "recording":
requestTypeAccepted = ["recording"]
elif canManageRequest == "webcast":
requestTypeAccepted = ["webcast"]
elif canManageRequest == "both":
requestTypeAccepted = ["recording", "webcast", "both"]
list = []
for spkWrap in self._speakerWrapperList:
if spkWrap.getUniqueId() == id and \
spkWrap.hasEmail() and spkWrap.getStatus() not in \
[SpeakerStatusEnum.SIGNED, SpeakerStatusEnum.FROMFILE] and \
spkWrap.getRequestType() in requestTypeAccepted:
list.append(spkWrap.getObject().getEmail())
return list
def addVideoService(self, uniqueId, videoService):
""" Adds a video service to Contribution / Session link in the tracking
dictionary in order {uniqueId : videoService}
"""
if self.getVideoServices().has_key(uniqueId):
self.getVideoServices()[uniqueId].append(videoService)
else:
self.getVideoServices()[uniqueId] = [videoService]
def removeVideoAllServices(self, uniqueId):
""" Removes all associations of Contributions / Sessions with video
services from the dictionary, key included.
"""
if not self.hasVideoService(uniqueId):
return None
del self.getVideoServices()[uniqueId]
def removeVideoSingleService(self, uniqueId, videoService):
""" Removes a specific video service from a specific contribution. As
the list of services is unordered, iterate through to match for
removal - performance cost therefore occurs here.
"""
if not self.hasVideoService(uniqueId):
return None
target = self.getVideoServicesById(uniqueId)
for service in target:
if service == videoService:
target.remove(service)
break
# There are no more entries, therefore remove the dictionary entry too.
if len(target) == 0:
self.removeVideoAllServices(uniqueId)
def getVideoServices(self):
""" Returns the OOBTree associating event unique IDs with the List
of video services associated.
"""
if not hasattr(self, "_bookingsToVideoServices"):
self._bookingsToVideoServices = OOBTree()
return self._bookingsToVideoServices
def getVideoServicesById(self, uniqueId):
""" Returns a list of video services associated with the uniqueId
for printing in event timetable. Returns None if no video services
are found.
"""
if not self.hasVideoService(uniqueId):
return None
return self.getVideoServices()[uniqueId]
def hasVideoService(self, uniqueId, service=None):
""" Returns True if the uniqueId of the Contribution or Session provided
has an entry in the self._bookingsToVideoServices dictionary, thusly
denoting the presence of linked bookings. Second parameter is for more
specific matching, i.e. returns True if unique ID is associated with
specific service.
"""
if service is None:
return self.getVideoServices().has_key(uniqueId)
if self.getVideoServices().has_key(uniqueId):
for serv in self.getVideoServicesById(uniqueId):
if serv == service:
return True
else:
return self.getVideoServices().has_key(uniqueId)
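    # Sketch (hypothetical uniqueId/booking) of the video-service tracking API above:
    #
    #     manager.addVideoService(uniqueId, booking)
    #     manager.hasVideoService(uniqueId)             # -> True
    #     manager.hasVideoService(uniqueId, booking)    # -> True (specific match)
    #     manager.removeVideoSingleService(uniqueId, booking)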
def isAnyRequestAccepted(self):
        '''
        Returns True if at least one of the recording and webcast requests
        has been accepted.
        '''
value = False
rr = self.getSingleBooking("RecordingRequest")
wr = self.getSingleBooking("WebcastRequest")
if rr:
value = rr.getAcceptRejectStatus()
if wr:
value = value or wr.getAcceptRejectStatus()
return value
def isContributionReadyToBePublished(self, contId):
if not hasattr(self, "_speakerWrapperList"):#TODO: remove when safe
self.updateSpeakerWrapperList(True)
exists = False
for spkWrap in self._speakerWrapperList:
if spkWrap.getContId() == contId:
exists = True
if spkWrap.getStatus() != SpeakerStatusEnum.SIGNED and \
spkWrap.getStatus() != SpeakerStatusEnum.FROMFILE:
return False
#The list has to have at least one spkWrap with the given contId
return exists
def notifyElectronicAgreementAnswer(self):
if not hasattr(self, "_notifyElectronicAgreementAnswer"):
self._notifyElectronicAgreementAnswer = True
return self._notifyElectronicAgreementAnswer
def setNotifyElectronicAgreementAnswer(self, notifyElectronicAgreementAnswer):
self._notifyElectronicAgreementAnswer = notifyElectronicAgreementAnswer
class CSBookingBase(Persistent, Fossilizable):
    """ Base class that represents a Collaboration Systems booking.
        Every Collaboration plugin will have to implement this class.
        In the base class are gathered all the functionalities / elements that are common for all plugins.
        A booking is Persistent (it inherits from Persistent) so it will be stored in the database.
        Also, every CSBookingBase object in the server will be mirrored by a Javascript object in the client, through "Pickling".
        Every class that implements the CSBookingBase has to declare the following class attributes:
            _hasStart : True if the plugin has a "start" concept. Otherwise, the "start" button will not appear, etc.
            _hasStop : True if the plugin has a "stop" concept. Otherwise, the "stop" button will not appear, etc.
            _hasConnect : True if the plugin has a "connect" concept. Otherwise, the "connect" button will not appear, etc.
            _hasCheckStatus: True if the plugin has a "check status" concept. Otherwise, the "check status" button will not appear, etc.
            _hasAcceptReject: True if the plugin has an "accept or reject" concept. Otherwise, the "accept" and "reject" buttons will not appear, etc.
            _requiresServerCallForStart : True if we should notify the server when the user presses the "start" button.
            _requiresServerCallForStop : True if we should notify the server when the user presses the "stop" button.
            _requiresClientCallForStart : True if the browser should execute some JS action when the user presses the "start" button.
            _requiresClientCallForStop : True if the browser should execute some JS action when the user presses the "stop" button.
            _needsBookingParamsCheck : True if the booking parameters should be checked after the booking is added / edited.
                If True, the _checkBookingParams method will be called by the setBookingParams method.
            _needsToBeNotifiedOnView: True if the booking object needs to be notified (through the "notifyOnView" method)
                when the user "sees" the booking, for example when returning the list of bookings.
            _canBeNotifiedOfEventDateChanges: True if bookings of this type should be able to be notified
                of their owner Event changing start date, end date or timezone.
            _allowMultiple: True if this booking type allows more than 1 booking per event.
            _keepForever: True if this booking has to be in the Video Services Overview indexes forever.
    """
    fossilizes(ICSBookingBaseConfModifFossil, ICSBookingBaseIndexingFossil)
_hasStart = False
_hasStop = False
_hasCheckStatus = False
_hasAcceptReject = False
_hasStartStopAll = False
_requiresServerCallForStart = False
_requiresServerCallForStop = False
_requiresClientCallForStart = False
_requiresClientCallForStop = False
_needsBookingParamsCheck = False
_needsToBeNotifiedOnView = False
_canBeNotifiedOfEventDateChanges = True
_allowMultiple = True
_shouldBeIndexed = True
_commonIndexes = []
_hasStartDate = True
_hasEventDisplay = False
_hasTitle = False
_adminOnly = False
_complexParameters = []
_linkVideoType = None
_linkVideoId = None
_keepForever = False
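    # A minimal plugin sketch (hypothetical; it follows the contract documented in the
    # class docstring -- real plugins also implement _create / _modify / _delete and
    # declare their _simpleParameters as {name: (type, default)} pairs):
    #
    #     class CSBooking(CSBookingBase):
    #         _hasStart = True
    #         _hasStop = True
    #         _allowMultiple = False
    #         _needsBookingParamsCheck = True
    #         _simpleParameters = {"roomName": (str, ""), "autoJoin": (bool, False)}
    #
    #         def _checkBookingParams(self):
    #             if not self._bookingParams["roomName"]:
    #                 raise CollaborationServiceException("roomName is mandatory")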
def __init__(self, bookingType, conf):
""" Constructor for the CSBookingBase class.
id: a string with the id of the booking
bookingType: a string with the type of the booking. Example: "DummyPlugin", "EVO"
conf: a Conference object to which this booking belongs (through the CSBookingManager object). The meeting of this booking.
startTime: TODO
endTime: TODO
Other attributes initialized by this constructor:
-_bookingParams: the parameters necessary to perform the booking.
The plugins will decide if the booking gets authorized or not depending on this.
Needs to be defined by the implementing class, as keys with empty values.
-_startingParams: the parameters necessary to start the booking.
They will be used on the client for the local start action.
Needs to be defined by the implementing class, as keys with empty values.
-_warning: A warning is a plugin-defined object, with information to show to the user when
the operation went well but we still have to show some info to the user.
-_permissionToStart : Even if the "start" button for a booking is able to be pushed, there may be cases where the booking should
not start. For example, if it's not the correct time yet.
In that case "permissionToStart" should be set to false so that the booking doesn't start.
-_permissionToStop: Same as permissionToStart. Sometimes the booking should not be allowed to stop even if the "stop" button is available.
"""
self._id = None
self._type = bookingType
self._plugin = CollaborationTools.getPlugin(self._type)
self._conf = conf
self._warning = None
self._creationDate = nowutc()
self._modificationDate = nowutc()
self._creationDateTimestamp = int(datetimeToUnixTimeInt(self._creationDate))
self._modificationDateTimestamp = int(datetimeToUnixTimeInt(self._modificationDate))
self._startDate = None
self._endDate = None
self._startDateTimestamp = None
self._endDateTimestamp = None
self._acceptRejectStatus = None #None = not yet accepted / rejected; True = accepted; False = rejected
self._rejectReason = ""
self._bookingParams = {}
self._canBeDeleted = True
self._permissionToStart = False
self._permissionToStop = False
self._needsToBeNotifiedOfDateChanges = self._canBeNotifiedOfEventDateChanges
self._hidden = False
self._play_status = None
setattr(self, "_" + bookingType + "Options", CollaborationTools.getPlugin(bookingType).getOptions())
        #NOTE: We should maybe notify the creation of a new booking, especially if it's a single
        #      booking, so that e.g. the requestType of the speaker wrappers can be updated...
def getId(self):
""" Returns the internal, per-conference id of the booking.
This attribute will be available in Javascript with the "id" identifier.
"""
return self._id
def setId(self, id):
""" Sets the internal, per-conference id of the booking
"""
self._id = id
def getUniqueId(self):
""" Returns an unique Id that identifies this booking server-wide.
Useful for ExternalOperationsManager
"""
return "%scsbook%s" % (self.getConference().getUniqueId(), self.getId())
def getType(self):
""" Returns the type of the booking, as a string: "EVO", "DummyPlugin"
This attribute will be available in Javascript with the "type" identifier.
"""
return self._type
def getConference(self):
""" Returns the owner of this CSBookingBase object, which is a Conference object representing the meeting.
"""
return self._conf
def setConference(self, conf):
""" Sets the owner of this CSBookingBase object, which is a Conference object representing the meeting.
"""
self._conf = conf
def getWarning(self):
""" Returns a warning attached to this booking.
A warning is a plugin-defined object, with information to show to the user when
the operation went well but we still have to show some info to the user.
To be overloaded by plugins.
"""
if not hasattr(self, '_warning'):
self._warning = None
return self._warning
def setWarning(self, warning):
""" Sets a warning attached to this booking.
A warning is a plugin-defined object, with information to show to the user when
the operation went well but we still have to show some info to the user.
To be overloaded by plugins.
"""
self._warning = warning
def getCreationDate(self):
""" Returns the date this booking was created, as a timezone localized datetime object
"""
if not hasattr(self, "_creationDate"): #TODO: remove when safe
self._creationDate = nowutc()
return self._creationDate
def getAdjustedCreationDate(self, tz=None):
""" Returns the booking creation date, adjusted to a given timezone.
If no timezone is provided, the event's timezone is used
"""
return getAdjustedDate(self.getCreationDate(), self.getConference(), tz)
def getCreationDateTimestamp(self):
if not hasattr(object, "_creationDateTimestamp"): #TODO: remove when safe
self._creationDateTimestamp = int(datetimeToUnixTimeInt(self._creationDate))
return self._creationDateTimestamp
def getModificationDate(self):
""" Returns the date this booking was modified last
"""
if not hasattr(self, "_modificationDate"): #TODO: remove when safe
self._modificationDate = nowutc()
return self._modificationDate
def getAdjustedModificationDate(self, tz=None):
""" Returns the booking last modification date, adjusted to a given timezone.
If no timezone is provided, the event's timezone is used
"""
return getAdjustedDate(self.getModificationDate(), self.getConference(), tz)
def getModificationDateTimestamp(self):
if not hasattr(object, "_modificationDateTimestamp"): #TODO: remove when safe
self._modificationDateTimestamp = int(datetimeToUnixTimeInt(self._modificationDate))
return self._modificationDateTimestamp
def setModificationDate(self, date):
""" Sets the date this booking was modified last
"""
self._modificationDate = date
if date:
self._modificationDateTimestamp = int(datetimeToUnixTimeInt(date))
else:
self._modificationDateTimestamp = None
def getBookingsOfSameType(self, sorted = False):
""" Returns a list of the bookings of the same type as this one (including this one)
sorted: if true, bookings will be sorted by id
"""
return Catalog.getIdx("cs_bookingmanager_conference").get(self._conf.getId()).getBookingList(sorted, self._type)
def getPlugin(self):
""" Returns the Plugin object associated to this booking.
"""
return self._plugin
def setPlugin(self, plugin):
""" Sets the Plugin object associated to this booking.
"""
self._plugin = plugin
def getPluginOptions(self):
""" Utility method that returns the plugin options for this booking's type of plugin
"""
return self._plugin.getOptions()
def getPluginOptionByName(self, optionName):
""" Utility method that returns a plugin option, given its name, for this booking's type of plugin
"""
return self.getPluginOptions()[optionName]
def getStartDate(self):
""" Returns the start date as an datetime object with timezone information (adjusted to the meeting's timezone)
"""
return self._startDate
def getAdjustedStartDate(self, tz=None):
""" Returns the booking start date, adjusted to a given timezone.
If no timezone is provided, the event's timezone is used
"""
if self.getStartDate():
return getAdjustedDate(self.getStartDate(), self.getConference(), tz)
else:
return None
def getStartDateTimestamp(self):
if not hasattr(object, "_startDateTimestamp"): #TODO: remove when safe
self._startDateTimestamp = int(datetimeToUnixTimeInt(self._startDate))
return self._startDateTimestamp
def setStartDateTimestamp(self, startDateTimestamp):
self._startDateTimestamp = startDateTimestamp
def getStartDateAsString(self):
""" Returns the start date as a string, expressed in the meeting's timezone
"""
        if self.getStartDate() is None:
return ""
else:
return formatDateTime(self.getAdjustedStartDate(), locale='en_US')
def setStartDate(self, startDate):
""" Sets the start date as an datetime object with timezone information (adjusted to the meeting's timezone)
"""
self._startDate = startDate
if startDate:
self._startDateTimestamp = int(datetimeToUnixTimeInt(startDate))
else:
self._startDateTimestamp = None
def setStartDateFromString(self, startDateString):
""" Sets the start date from a string. It is assumed that the date is expressed in the meeting's timezone
"""
if startDateString == "":
self.setStartDate(None)
else:
try:
self.setStartDate(setAdjustedDate(parseDateTime(startDateString), self._conf))
except ValueError:
raise CollaborationServiceException("startDate parameter (" + startDateString +" ) is in an incorrect format for booking with id: " + str(self._id))
def getEndDate(self):
""" Returns the end date as an datetime object with timezone information (adjusted to the meeting's timezone)
"""
return self._endDate
def isHappeningNow(self):
now = nowutc()
return self.getStartDate() < now and self.getEndDate() > now
def hasHappened(self):
now = nowutc()
return now > self.getEndDate()
def getAdjustedEndDate(self, tz=None):
""" Returns the booking end date, adjusted to a given timezone.
If no timezone is provided, the event's timezone is used
"""
return getAdjustedDate(self.getEndDate(), self.getConference(), tz)
def getEndDateTimestamp(self):
if not hasattr(object, "_endDateTimestamp"): #TODO: remove when safe
self._endDateTimestamp = int(datetimeToUnixTimeInt(self._endDate))
return self._endDateTimestamp
def setEndDateTimestamp(self, endDateTimestamp):
self._endDateTimestamp = endDateTimestamp
def getEndDateAsString(self):
""" Returns the start date as a string, expressed in the meeting's timezone
"""
if self.getEndDate() == None:
return ""
else:
return formatDateTime(self.getAdjustedEndDate(), locale='en_US')
def setEndDate(self, endDate):
""" Sets the start date as an datetime object with timezone information (adjusted to the meeting's timezone)
"""
self._endDate = endDate
if endDate:
self._endDateTimestamp = int(datetimeToUnixTimeInt(endDate))
else:
self._endDateTimestamp = None
def setEndDateFromString(self, endDateString):
""" Sets the start date from a string. It is assumed that the date is expressed in the meeting's timezone
"""
if endDateString == "":
self.setEndDate(None)
else:
try:
self.setEndDate(setAdjustedDate(parseDateTime(endDateString), self._conf))
except ValueError:
raise CollaborationServiceException("endDate parameter (" + endDateString +" ) is in an incorrect format for booking with id: " + str(self._id))
def getStatusMessage(self):
""" Returns the status message as a string.
This attribute will be available in Javascript with the "statusMessage"
"""
        status = self.getPlayStatus()
        if status is None:
            if self.isHappeningNow():
                return _("Ready to start!")
            elif self.hasHappened():
                return _("Already took place")
            else:
                return _("Booking created")
        elif status:
            return _("Conference started")
        else:
            return _("Conference stopped")
def getStatusClass(self):
""" Returns the status message CSS class as a string.
This attribute will be available in Javascript with the "statusClass"
"""
        if self.getPlayStatus() is None or self.hasHappened():
return "statusMessageOther"
else:
return "statusMessageOK"
def accept(self, user = None):
""" Sets this booking as accepted
"""
self._acceptRejectStatus = True
self._accept(user)
def reject(self, reason):
""" Sets this booking as rejected, and stores the reason
"""
self._acceptRejectStatus = False
self._rejectReason = reason
self._reject()
def clearAcceptRejectStatus(self):
""" Sets back the accept / reject status to None
"""
self._acceptRejectStatus = None
def getAcceptRejectStatus(self):
""" Returns the Accept/Reject status of the booking
This attribute will be available in Javascript with the "acceptRejectStatus"
Its value will be:
-None if the booking has not been accepted or rejected yet,
-True if it has been accepted,
-False if it has been rejected
"""
if not hasattr(self, "_acceptRejectStatus"):
self._acceptRejectStatus = None
return self._acceptRejectStatus
def getRejectReason(self):
""" Returns the rejection reason.
This attribute will be available in Javascript with the "rejectReason"
"""
if not hasattr(self, "_rejectReason"):
self._rejectReason = ""
return self._rejectReason
## methods relating to the linking of CSBooking objects to Contributions & Sessions
def hasSessionOrContributionLink(self):
return (self.isLinkedToContribution() or self.isLinkedToSession())
def isLinkedToSession(self):
return (self._linkVideoType == "session")
def isLinkedToContribution(self):
return (self._linkVideoType == "contribution")
def getLinkId(self):
""" Returns the unique ID of the Contribution or Session which this
object is associated with, completely agnostic of the link type.
Returns None if no association (default) found.
"""
return self._linkVideoId
def getLinkIdDict(self):
""" Returns a dictionary of structure linkType (session | contribution)
: unique ID of referenced object.
Returns None if no association is found.
"""
        linkId = self.getLinkId()
        if linkId is None:
            return None
        return {self._linkVideoType : linkId}
def getLinkType(self):
""" Returns a string denoting the link type, that is whether linked
to a session or contribution.
"""
return self._linkVideoType
def setLinkType(self, linkDict):
""" Accepts a dictionary of linkType: linkId """
# case of non-linked bookings
if linkDict is None:
return
self._linkVideoType = linkDict.keys()[0]
self._linkVideoId = linkDict.values()[0]
def resetLinkParams(self):
""" Removes all association with a Session or Contribution from this
CSBooking only.
"""
self._linkVideoType = self._linkVideoId = None
def getLocation(self):
return self._conf.getLocation().getName() if self._conf.getLocation() else ""
def getRoom(self):
return self._conf.getRoom().getName() if self._conf.getRoom() else ""
def getBookingParams(self):
""" Returns a dictionary with the booking params.
This attribute will be available in Javascript with the "bookingParams"
If self._bookingParams has not been set by the implementing class, an exception is thrown.
Support for "complex" parameters, that are not defined in the self._bookingParams dict, but can
be retrieved through getter methods.
            If a subclass defines a class attribute called _complexParameters (a list of strings),
            parameter names that are in this list will also be included in the returned dictionary.
            Their value will be retrieved by calling the corresponding getXXX methods.
            Example: _complexParameters = ["communityName", "accessPassword", "hasAccessPassword"] corresponds
            to the methods getCommunityName, getAccessPassword, getHasAccessPassword.
If you include a parameter in the _complexParameters list, you always have to implement the corresponding getter method.
"""
bookingParams = {}
for k, v in self.__class__._simpleParameters.iteritems():
if k in self._bookingParams:
value = self._bookingParams[k]
else:
value = v[1] #we use the default value
if v[0] is bool and value is True: #we assume it will be used in a single checkbox
value = ["yes"]
if value is not False: #we do not include False, it means the single checkbox is not checked
bookingParams[k] = value
if hasattr(self.__class__, "_complexParameters") and len(self.__class__._complexParameters) > 0:
getterMethods = dict(inspect.getmembers(self, lambda m: inspect.ismethod(m) and m.__name__.startswith('get')))
for paramName in self.__class__._complexParameters:
getMethodName = 'get' + paramName[0].upper() + paramName[1:]
if getMethodName in getterMethods:
bookingParams[paramName] = getterMethods[getMethodName]()
else:
raise CollaborationServiceException("Tried to retrieve complex parameter " + str(paramName) + " but the corresponding getter method " + getMethodName + " is not implemented")
bookingParams["startDate"] = self.getStartDateAsString()
bookingParams["endDate"] = self.getEndDateAsString()
if self.needsToBeNotifiedOfDateChanges():
bookingParams["notifyOnDateChanges"] = ["yes"]
if self.isHidden():
bookingParams["hidden"] = ["yes"]
return bookingParams
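    # Illustrative return value (hypothetical parameter names); per the convention
    # above, checkbox-like booleans are encoded as ["yes"] when True and omitted
    # when False:
    #
    #     {"roomName": "my room",
    #      "startDate": booking.getStartDateAsString(),
    #      "endDate": booking.getEndDateAsString(),
    #      "hidden": ["yes"]}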
def getBookingParamByName(self, paramName):
if paramName in self.__class__._simpleParameters:
if not paramName in self._bookingParams:
self._bookingParams[paramName] = self.__class__._simpleParameters[paramName][1]
return self._bookingParams[paramName]
elif hasattr(self.__class__, "_complexParameters") and paramName in self.__class__._complexParameters:
getterMethods = dict(inspect.getmembers(self, lambda m: inspect.ismethod(m) and m.__name__.startswith('get')))
getMethodName = 'get' + paramName[0].upper() + paramName[1:]
if getMethodName in getterMethods:
return getterMethods[getMethodName]()
else:
raise CollaborationServiceException("Tried to retrieve complex parameter " + str(paramName) + " but the corresponding getter method " + getMethodName + " is not implemented")
else:
raise CollaborationServiceException("Tried to retrieve parameter " + str(paramName) + " but this parameter does not exist")
def getContributionSpeakerSingleBooking(self):
        ''' Returns a dictionary with the contributions and their speakers that need to be recorded
e.g: {contId:[Spk1Object, Spk2Object, Spk3Object], cont2:[Spk1Object]}...
'''
request = {}
recordingTalksChoice = self.getBookingParams()["talks"] #either "all", "choose" or ""
listTalksToRecord = self.getBookingParams()["talkSelection"]
if self._conf.getType() == "simple_event":
request[self._conf.getId()] = []
for chair in self._conf.getChairList():
request[self._conf.getId()].append(chair)
else:
for cont in self._conf.getContributionList():
''' We select the contributions that respect the following conditions:
- They have Speakers assigned.
- They are scheduled. (to discuss...)
- They have been chosen for the recording request.
'''
if recordingTalksChoice != "choose" or cont.getId() in listTalksToRecord:
if cont.isScheduled():
request[cont.getId()] = []
for spk in cont.getSpeakerList():
request[cont.getId()].append(spk)
return request
def setBookingParams(self, params):
""" Sets new booking parameters.
params: a dict with key/value pairs with the new values for the booking parameters.
If the plugin's _needsBookingParamsCheck is True, the _checkBookingParams() method will be called.
            This function will return False if all the checks were OK or if there were no checks, and otherwise will throw
            an exception or return a CSErrorBase error.
Support for "complex" parameters, that are not defined in the self._bookingParams dict, but can
be set through setter methods.
            If a subclass defines a class attribute called _complexParameters (a list of strings),
parameter names that are in 'params' and also in this list will not be assigned directly,
but instead the inheriting class's setXXX method will be called.
Example: _complexParameters = ["communityName", "accessPassword", "hasAccessPassword"] corresponds
to methods setCommunityName, setAccessPassword, setHasAccessPassword.
Note: even if a parameter is in this list, you can decide not to implement its corresponding set
method if you never expect the parameter name to come up inside 'params'.
"""
sanitizeResult = self.sanitizeParams(params)
if sanitizeResult:
return sanitizeResult
self.setHidden(params.pop("hidden", False) == ["yes"])
self.setNeedsToBeNotifiedOfDateChanges(params.pop("notifyOnDateChanges", False) == ["yes"])
startDate = params.pop("startDate", None)
if startDate is not None:
self.setStartDateFromString(startDate)
endDate = params.pop("endDate", None)
if endDate is not None:
self.setEndDateFromString(endDate)
for k,v in params.iteritems():
if k in self.__class__._simpleParameters:
if self.__class__._simpleParameters[k][0]:
try:
v = self.__class__._simpleParameters[k][0](v)
except ValueError:
raise CollaborationServiceException("Tried to set value of parameter with name " + str(k) + ", recognized as a simple parameter of type" + str(self._simpleParameters[k]) + ", but the conversion failed")
self._bookingParams[k] = v
elif k in self.__class__._complexParameters:
setterMethods = dict(inspect.getmembers(self, lambda m: inspect.ismethod(m) and m.__name__.startswith('set')))
setMethodName = 'set' + k[0].upper() + k[1:]
if setMethodName in setterMethods:
setterMethods[setMethodName](v)
else:
raise CollaborationServiceException("Tried to set value of parameter with name " + str(k) + ", recognized as a complex parameter, but the corresponding setter method " + setMethodName + " is not implemented")
else:
raise CollaborationServiceException("Tried to set the value of a parameter with name " + str(k) + " that was not declared")
for k, v in self.__class__._simpleParameters.iteritems():
if not k in self._bookingParams:
self._bookingParams[k] = self.__class__._simpleParameters[k][1]
if self.needsBookingParamsCheck():
return self._checkBookingParams()
return False
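    # Sketch of a params dict accepted by setBookingParams (hypothetical names):
    # single-checkbox values arrive as ["yes"], and keys listed in _complexParameters
    # are routed to the corresponding setXXX methods:
    #
    #     booking.setBookingParams({"roomName": "my room",        # simple parameter
    #                               "accessPassword": "secret",   # complex -> setAccessPassword
    #                               "hidden": ["yes"]})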
def sanitizeParams(self, params):
""" Checks if the fields introduced into the booking / request form
have any kind of HTML or script tag.
"""
if not isinstance(params, dict):
raise CollaborationServiceException("Booking parameters are not a dictionary")
invalidFields = []
for k, v in params.iteritems():
if isinstance(v, basestring) and hasTags(v):
invalidFields.append(k)
if invalidFields:
return CSSanitizationError(invalidFields)
else:
return None
def _getTypeDisplayName(self):
return CollaborationTools.getXMLGenerator(self._type).getDisplayName()
def _getFirstLineInfo(self, tz):
return CollaborationTools.getXMLGenerator(self._type).getFirstLineInfo(self, tz)
def _getTitle(self):
if self.hasEventDisplay():
raise CollaborationException("Method _getTitle was not overriden for the plugin type " + str(self._type))
def _getInformationDisplay(self, tz):
templateClass = CollaborationTools.getTemplateClass(self.getType(), "WInformationDisplay")
if templateClass:
return templateClass(self, tz).getHTML()
else:
return None
def _getLaunchDisplayInfo(self):
""" To be overloaded by plugins
"""
return None
def _checkBookingParams(self):
""" To be overriden by inheriting classes.
Verifies that the booking parameters are correct. For example, that a numeric field is actually a number.
Otherwise, an exception should be thrown.
If there are no errors, the method should just return.
"""
if self.needsBookingParamsCheck():
raise CollaborationServiceException("Method _checkBookingParams was not overriden for the plugin type " + str(self._type))
def hasStart(self):
""" Returns if this booking belongs to a plugin who has a "start" concept.
This attribute will be available in Javascript with the "hasStart" attribute
"""
return self._hasStart
def hasStartStopAll(self):
""" Returns if this booking belongs to a plugin who has a "start" concept, and all of its bookings for a conference
can be started simultanously.
This attribute will be available in Javascript with the "hasStart" attribute
"""
return self._hasStartStopAll
def hasStop(self):
""" Returns if this booking belongs to a plugin who has a "stop" concept.
This attribute will be available in Javascript with the "hasStop" attribute
"""
return self._hasStop
def hasConnect(self):
""" Returns if this booking belongs to a plugin who has a "connect" concept.
This attribute will be available in Javascript with the "hasConnect" attribute
"""
if not hasattr(self, '_hasConnect'):
self._hasConnect = False
return self._hasConnect
def hasDisconnect(self):
""" Returns if this booking belongs to a plugin who has a "connect" concept.
This attribute will be available in Javascript with the "hasConnect" attribute
"""
if not hasattr(self, '_hasDisconnect'):
self._hasDisconnect = False
return self._hasDisconnect
def hasCheckStatus(self):
""" Returns if this booking belongs to a plugin who has a "check status" concept.
This attribute will be available in Javascript with the "hasCheckStatus" attribute
"""
return self._hasCheckStatus
def isLinkedToEquippedRoom(self):
return None
def hasAcceptReject(self):
""" Returns if this booking belongs to a plugin who has a "accept or reject" concept.
This attribute will be available in Javascript with the "hasAcceptReject" attribute
"""
return self._hasAcceptReject
def requiresServerCallForStart(self):
""" Returns if this booking belongs to a plugin who requires a server call when the start button is pressed.
This attribute will be available in Javascript with the "requiresServerCallForStart" attribute
"""
return self._requiresServerCallForStart
def requiresServerCallForStop(self):
""" Returns if this booking belongs to a plugin who requires a server call when the stop button is pressed.
This attribute will be available in Javascript with the "requiresServerCallForStop" attribute
"""
return self._requiresServerCallForStop
def requiresClientCallForStart(self):
""" Returns if this booking belongs to a plugin who requires a client call when the start button is pressed.
This attribute will be available in Javascript with the "requiresClientCallForStart" attribute
"""
return self._requiresClientCallForStart
def requiresClientCallForStop(self):
""" Returns if this booking belongs to a plugin who requires a client call when the stop button is pressed.
This attribute will be available in Javascript with the "requiresClientCallForStop" attribute
"""
return self._requiresClientCallForStop
def requiresClientCallForConnect(self):
""" Returns if this booking belongs to a plugin who requires a client call when the connect button is pressed.
This attribute will be available in Javascript with the "requiresClientCallForConnect" attribute
"""
if not hasattr(self, '_requiresClientCallForConnect'):
self._requiresClientCallForConnect = False
return self._requiresClientCallForConnect
def requiresClientCallForDisconnect(self):
""" Returns if this booking belongs to a plugin who requires a client call when the connect button is pressed.
This attribute will be available in Javascript with the "requiresClientCallForDisconnect" attribute
"""
if not hasattr(self, '_requiresClientCallForDisconnect'):
self._requiresClientCallForDisconnect = False
return self._requiresClientCallForDisconnect
def canBeDeleted(self):
""" Returns if this booking can be deleted, in the sense that the "Remove" button will be active and able to be pressed.
This attribute will be available in Javascript with the "canBeDeleted" attribute
"""
return self._canBeDeleted
def setCanBeDeleted(self, canBeDeleted):
""" Sets if this booking can be deleted, in the sense that the "Remove" button will be active and able to be pressed.
This attribute will be available in Javascript with the "canBeDeleted" attribute
"""
self._canBeDeleted = canBeDeleted
def canBeStarted(self):
""" Returns if this booking can be started, in the sense that the "Start" button will be active and able to be pressed.
This attribute will be available in Javascript with the "canBeStarted" attribute
"""
return self.isHappeningNow()
def canBeStopped(self):
""" Returns if this booking can be stopped, in the sense that the "Stop" button will be active and able to be pressed.
This attribute will be available in Javascript with the "canBeStopped" attribute
"""
return self.isHappeningNow()
def isPermittedToStart(self):
""" Returns if this booking is allowed to start, in the sense that it will be started after the "Start" button is pressed.
For example a booking should not be permitted to start before a given time, even if the button is active.
This attribute will be available in Javascript with the "isPermittedToStart" attribute
"""
return self._permissionToStart
def isPermittedToStop(self):
""" Returns if this booking is allowed to stop, in the sense that it will be started after the "Stop" button is pressed.
This attribute will be available in Javascript with the "isPermittedToStop" attribute
"""
return self._permissionToStop
def needsBookingParamsCheck(self):
""" Returns if this booking belongs to a plugin that needs to verify the booking parameters.
"""
return self._needsBookingParamsCheck
def needsToBeNotifiedOnView(self):
""" Returns if this booking needs to be notified when someone views it (for example when the list of bookings is returned)
"""
return self._needsToBeNotifiedOnView
def canBeNotifiedOfEventDateChanges(self):
""" Returns if bookings of this type should be able to be notified
of their owner Event changing start date, end date or timezone.
"""
return False
def needsToBeNotifiedOfDateChanges(self):
""" Returns if this booking in particular needs to be notified
of their owner Event changing start date, end date or timezone.
"""
return self._needsToBeNotifiedOfDateChanges
def setNeedsToBeNotifiedOfDateChanges(self, needsToBeNotifiedOfDateChanges):
""" Sets if this booking in particular needs to be notified
of their owner Event changing start date, end date or timezone.
"""
self._needsToBeNotifiedOfDateChanges = needsToBeNotifiedOfDateChanges
def isHidden(self):
""" Return if this booking is "hidden"
A hidden booking will not appear in display pages
"""
if not hasattr(self, '_hidden'):
self._hidden = False
return self._hidden
def setHidden(self, hidden):
""" Sets if this booking is "hidden"
A hidden booking will not appear in display pages
hidden: a Boolean
"""
self._hidden = hidden
def isAllowMultiple(self):
""" Returns if this booking belongs to a type that allows multiple bookings per event.
"""
return self._allowMultiple
def shouldBeIndexed(self):
""" Returns if bookings of this type should be indexed
"""
return self._shouldBeIndexed
def getCommonIndexes(self):
""" Returns a list of strings with the names of the
common (shared) indexes that bookings of this type want to
be included in.
"""
return self._commonIndexes
def index_instances(self):
"""
To be overloaded
"""
return
def unindex_instances(self):
"""
To be overloaded
"""
return
def index_talk(self, talk):
"""
To be overloaded
"""
return
def unindex_talk(self, talk):
"""
To be overloaded
"""
return
def getModificationURL(self):
return UHConfModifCollaboration.getURL(self.getConference(),
secure = ContextManager.get('currentRH').use_https(),
tab = CollaborationTools.getPluginTab(self.getPlugin()))
def hasStartDate(self):
""" Returns if bookings of this type have a start date
(they may only have creation / modification date)
"""
return self._hasStartDate
def hasTitle(self):
""" Returns if bookings of this type have a title
"""
return self._hasTitle
def hasEventDisplay(self):
""" Returns if the type of this booking should display something on
an event display page
"""
return self._hasEventDisplay
def keepForever(self):
""" Returns if this booking has to be in the Video Services Overview indexes forever
"""
return self._keepForever
def canBeDisplayed(self):
""" Returns if this booking can be displayed in the event page.
            By default it is True and the booking will be shown as "Active", but this can be overridden
"""
return True
def isAdminOnly(self):
""" Returns if this booking / this booking's plugin pages should only be displayed
to Server Admins, Video Service Admins, or the respective plugin admins.
"""
return self._adminOnly
def _create(self):
""" To be overriden by inheriting classes.
This method is called when a booking is created, after setting the booking parameters.
The plugin should decide if the booking is accepted or not.
Often this will involve communication with another entity, like an MCU for the multi-point H.323 plugin,
or a EVO HTTP server in the EVO case.
"""
raise CollaborationException("Method _create was not overriden for the plugin type " + str(self._type))
def _attach(self):
""" To be overriden by inheriting classes.
This method is called when a booking is attached, after setting the booking parameters.
The plugin should decide if the booking is accepted or not.
Often this will involve communication with another entity, like an MCU for the multi-point H.323 plugin,
or a EVO HTTP server in the EVO case.
"""
raise CollaborationException("Method _attach was not overriden for the plugin type " + str(self._type))
def _modify(self, oldBookingParams):
""" To be overriden by inheriting classes.
This method is called when a booking is modifying, after setting the booking parameters.
The plugin should decide if the booking is accepted or not.
Often this will involve communication with another entity, like an MCU for the multi-point H.323 plugin
or a EVO HTTP server in the EVO case.
A dictionary with the previous booking params is passed. This dictionary is the one obtained
by the method self.getBookingParams() before the new params input by the user are applied.
"""
raise CollaborationException("Method _modify was not overriden for the plugin type " + str(self._type))
def _start(self):
""" To be overriden by inheriting classes
This method is called when the user presses the "Start" button in a plugin who has a "Start" concept
and whose flag _requiresServerCallForStart is True.
Often this will involve communication with another entity.
"""
if self.hasStart():
raise CollaborationException("Method _start was not overriden for the plugin type " + str(self._type))
else:
pass
def _stop(self):
""" To be overriden by inheriting classes
This method is called when the user presses the "Stop" button in a plugin who has a "Stop" concept
and whose flag _requiresServerCallForStop is True.
Often this will involve communication with another entity.
"""
if self.hasStop():
raise CollaborationException("Method _stop was not overriden for the plugin type " + str(self._type))
else:
pass
def _checkStatus(self):
""" To be overriden by inheriting classes
This method is called when the user presses the "Check Status" button in a plugin who has a "check status" concept.
Often this will involve communication with another entity.
"""
if self.hasCheckStatus():
raise CollaborationException("Method _checkStatus was not overriden for the plugin type " + str(self._type))
else:
pass
def _accept(self, user = None):
""" To be overriden by inheriting classes
This method is called when a user with privileges presses the "Accept" button
in a plugin who has a "accept or reject" concept.
Often this will involve communication with another entity.
"""
if self.hasAcceptReject():
raise CollaborationException("Method _accept was not overriden for the plugin type " + str(self._type))
else:
pass
def _reject(self):
""" To be overriden by inheriting classes
This method is called when a user with privileges presses the "Reject" button
in a plugin who has a "accept or reject" concept.
Often this will involve communication with another entity.
"""
if self.hasAcceptReject():
raise CollaborationException("Method _reject was not overriden for the plugin type " + str(self._type))
else:
pass
def _notifyOnView(self):
""" To be overriden by inheriting classes
This method is called when a user "sees" a booking, for example when the list of bookings is displayed.
Maybe in this moment the booking wants to update its status.
"""
if self.needsToBeNotifiedOnView():
raise CollaborationException("Method _notifyOnView was not overriden for the plugin type " + str(self._type))
else:
pass
def _delete(self):
""" To be overriden by inheriting classes
This method is called whent he user removes a booking. Maybe the plugin will need to liberate
ressources that were allocated to it.
This method does not unregister the booking from the list of date change observer of the meeting
"""
raise CollaborationException("Method _delete was not overriden for the plugin type " + str(self._type))
def _sendNotifications(self, operation):
"""
Sends a mail, wrapping it with ExternalOperationsManager
"""
ExternalOperationsManager.execute(self, "sendMail_" + operation, self._sendMail, operation)
    def _sendMail(self, operation):
        notificationClasses = {'new': mail.NewBookingNotification,
                               'modify': mail.BookingModifiedNotification,
                               'remove': mail.BookingDeletedNotification}
        notificationClass = notificationClasses.get(operation)
        if notificationClass is None:
            return
        try:
            notification = notificationClass(self)
            GenericMailer.sendAndLog(notification, self._conf,
                                     self.getPlugin().getName())
        except Exception, e:
            Logger.get('VideoServ').error(
                """Could not send %s for booking with id %s of event with id %s, exception: %s""" %
                (notificationClass.__name__, self.getId(), self._conf.getId(), str(e)))
            raise
def getPlayStatus(self):
if not hasattr(self, '_play_status'):
self._play_status = None
return self._play_status
""" Methods relating to the certain plugin architectures whereby talk
selection is appropriate through the inheriting class' attributes.
"""
def hasTalkSelection(self):
""" Some plugin types select individual contributions stored as a list
of IDs in this parameter, returns param if this instance is one of them.
"""
return self._bookingParams.has_key('talkSelection')
def _getTalkSelection(self):
""" Returns the attribute if it is defined, None on error. """
if self.hasTalkSelection():
return self._bookingParams.get('talkSelection')
return None
def _hasTalkSelectionContent(self):
""" If the talkSelection attribute is present and it has a quantity of
items in its list greater than 0, individual talks have been chosen.
"""
ts = self._getTalkSelection()
if ts is None:
return False
return len(ts) > 0
def getTalkSelectionList(self):
""" Returns the resultant list if it is present and populated. None if
neither are true.
"""
if not self._hasTalkSelectionContent():
return None
return self._getTalkSelection()
def _hasTalks(self):
""" Returns the attribute if it is defined, None on error. """
return self._bookingParams.has_key('talks')
def isChooseTalkSelected(self):
""" Returns if the talks are choosen"""
if self._hasTalks():
return self._bookingParams.get('talks') == "choose"
else:
return False
def __cmp__(self, booking):
return cmp(self.getUniqueId(), booking.getUniqueId()) if booking else 1
def checkAttachParams(self, bookingParams):
return None
def notifyDeletion(self, obj):
""" To be overriden by inheriting classes
This method is called when the parent object has been deleted and some actions are needed.
"""
pass
class WCSTemplateBase(wcomponents.WTemplated):
""" Base class for Collaboration templates.
It stores the following attributes:
_conf : the corresponding Conference object.
_pluginName: the corresponding plugin ("EVO", "DummyPlugin", etc.).
_XXXOptions: a dictionary whose values are the options of the plugin called pluginName.
So, for example, if an EVO template inherits from this class, an attribute self._EVOOptions will be available.
This class also overloads the _setTPLFile method so that Indico knows where each plugin's *.tpl files are.
"""
def __init__(self, pluginId):
""" Constructor for the WCSTemplateBase class.
conf: a Conference object
plugin: the corresponding plugin
"""
self._plugin = CollaborationTools.getPlugin(pluginId)
self._pluginId = self._plugin.getId()
self._ph = PluginsHolder()
setattr(self, "_" + self._pluginId + "Options", self._plugin.getOptions())
def _setTPLFile(self, extension='tpl'):
tplDir = pkg_resources.resource_filename(self._plugin.getModule().__name__, "tpls")
fname = "%s.%s" % (self.tplId, extension)
self.tplFile = os.path.join(tplDir, fname)
hfile = self._getSpecificTPL(os.path.join(tplDir,self._pluginId,'chelp'), self.tplId,extension='wohl')
self.helpFile = os.path.join(tplDir,'chelp',hfile)
class WCSPageTemplateBase(WCSTemplateBase):
""" Base class for Collaboration templates for the create / modify booking form.
"""
def __init__(self, conf, pluginId, user):
WCSTemplateBase.__init__(self, pluginId)
self._conf = conf
self._user = user
class WJSBase(WCSTemplateBase):
""" Base class for Collaboration templates for Javascript code template.
It overloads _setTPLFile so that indico can find the Main.js, Extra.js and Indexing.js files.
"""
def __init__(self, conf, plugin, user):
WCSTemplateBase.__init__(self, plugin)
self._conf = conf
self._user = user
def _setTPLFile(self):
WCSTemplateBase._setTPLFile(self, extension='js')
self.helpFile = ''
class WCSCSSBase(WCSTemplateBase):
""" Base class for Collaboration templates for CSS code template
It overloads _setTPLFile so that indico can find the style.css files.
"""
def _setTPLFile(self):
tplDir = pkg_resources.resource_filename(self._plugin.getModule().__name__, "")
fname = "%s.css" % self.tplId
self.tplFile = os.path.join(tplDir, fname)
self.helpFile = ''
class CSErrorBase(Fossilizable):
fossilizes(ICSErrorBaseFossil)
""" When _create, _modify or _remove want to return an error,
they should return an error that inherits from this class
"""
def __init__(self):
pass
def getUserMessage(self):
""" To be overloaded.
Returns the string that will be shown to the user when this error will happen.
"""
raise CollaborationException("Method getUserMessage was not overriden for the a CSErrorBase object of class " + self.__class__.__name__)
def getLogMessage(self):
""" To be overloaded.
Returns the string that will be printed in Indico's log when this error will happen.
"""
raise CollaborationException("Method getLogMessage was not overriden for the a CSErrorBase object of class " + self.__class__.__name__)
class CSSanitizationError(CSErrorBase): #already Fossilizable
fossilizes(ICSSanitizationErrorFossil)
""" Class used to return which fields have a sanitization error (invalid html / script tags)
"""
def __init__(self, invalidFields):
self._invalidFields = invalidFields
def invalidFields(self):
return self._invalidFields
class CollaborationException(MaKaCError):
""" Error for the Collaboration System "core". Each plugin should declare their own EVOError, etc.
"""
def __init__(self, msg, area = 'Collaboration', inner = None):
MaKaCError.__init__(self, msg, area)
self._inner = inner
def getInner(self):
return self._inner
def __str__(self):
return MaKaCError.__str__(self) + '. Inner: ' + str(self._inner)
class CollaborationServiceException(ServiceError):
""" Error for the Collaboration System "core", for Service calls.
"""
def __init__(self, message, inner = None):
ServiceError.__init__(self, "ERR-COLL", message, inner)
class SpeakerStatusEnum:
(NOEMAIL, NOTSIGNED, SIGNED, FROMFILE, PENDING, REFUSED) = xrange(6)
class SpeakerWrapper(Persistent, Fossilizable):
fossilizes(ISpeakerWrapperBaseFossil)
def __init__(self, speaker, contId, requestType):
        self.status = SpeakerStatusEnum.NOEMAIL if not speaker.getEmail() else SpeakerStatusEnum.NOTSIGNED
self.speaker = speaker
self.contId = contId
self.requestType = requestType
self.reason = ""
self.localFile = None
self.dateAgreement = 0
self.ipSignature = None
self.modificationDate = nowutc()
self.uniqueIdHash = md5("%s.%s"%(time.time(), self.getUniqueId())).hexdigest()
def getUniqueId(self):
return "%s.%s"%(self.contId, self.speaker.getId())
def getUniqueIdHash(self):
# to remove once saved
if not hasattr(self, "uniqueIdHash"):#TODO: remove when safe
return md5(self.getUniqueId()).hexdigest()
else:
return self.uniqueIdHash
def getStatus(self):
return self.status
def setStatus(self, newStatus, ip=None):
try:
self.status = newStatus
if newStatus == SpeakerStatusEnum.SIGNED or newStatus == SpeakerStatusEnum.FROMFILE:
self.dateAgreement = now_utc()
if newStatus == SpeakerStatusEnum.SIGNED:
self.ipSignature = ip
except Exception, e:
Logger.get('VideoServ').error("Exception while changing the speaker status. Exception: " + str(e))
def getDateAgreementSigned(self):
if hasattr(self, "dateAgreement"):#TODO: remove when safe
return self.dateAgreement
return 0
def getIpAddressWhenSigned(self):
if hasattr(self, "ipSignature"):#TODO: remove when safe
return self.ipSignature
return None
def getRejectReason(self):
if hasattr(self, "reason"):#TODO: remove when safe
if self.status == SpeakerStatusEnum.REFUSED and hasattr(self, "reason"):
return self.reason
else:
return "This speaker has not refused the agreement."
else:
return "Information not available."
def setRejectReason(self, reason):
if hasattr(self, "reason"):#TODO: remove when safe
self.reason = reason
def getObject(self):
return self.speaker
def getContId(self):
return self.contId
def getRequestType(self):
if hasattr(self, "requestType"):#TODO: remove when safe
return self.requestType
return "NA"
    def setRequestType(self, reqType):
        self.requestType = reqType
def getSpeakerId(self):
return self.speaker.getId()
def getLocalFile(self):
        '''
        If it exists, return the path to the paper agreement.
        '''
if hasattr(self, "localFile"):#TODO: remove when safe
return self.localFile
def setLocalFile(self, localFile):
        '''
        Set the localFile of the paper agreement.
        '''
if hasattr(self, "localFile"):#TODO: remove when safe
self.localFile = localFile
def hasEmail(self):
if self.speaker.getEmail():
return True
return False
def getCategory(self):
return None
def getConference(self):
return self.speaker.getConference()
def getContribution(self):
# if the conference is a lecture, the getContribution will fail.
if self.getConference().getType() == "simple_event":
return None
else:
return self.speaker.getContribution()
def getSession(self):
return None
def getSubContribution(self):
return None
def getModificationDate(self):
if hasattr(self, "modificationDate"): # TODO: remove when safe
return self.modificationDate
return None
def setModificationDate(self):
if hasattr(self, "modificationDate"): # TODO: remove when safe
self.modificationDate = now_utc()
def getLocator(self):
return self.getContribution().getLocator()
def triggerNotification(self):
if self.getRequestType() in ('recording', 'webcast'):
self._triggerNotification(self.getRequestType())
elif self.getRequestType() == 'both':
self._triggerNotification('recording')
self._triggerNotification('webcast')
    def _triggerNotification(self, reqType):
        url = None
        if reqType == 'recording':
            url = CollaborationTools.getOptionValue('RecordingRequest', 'AgreementNotificationURL')
        elif reqType == 'webcast':
            url = CollaborationTools.getOptionValue('WebcastRequest', 'AgreementNotificationURL')
if not url:
return
signed = None
if self.getStatus() in (SpeakerStatusEnum.FROMFILE, SpeakerStatusEnum.SIGNED):
signed = True
elif self.getStatus() == SpeakerStatusEnum.REFUSED:
signed = False
spk = self.getObject()
payload = {
'confId': self.getConference().getId(),
'contrib': self.getContId(),
            'type': reqType,
'status': self.getStatus(),
'signed': signed,
'speaker': {
'id': spk.getId(),
'name': spk.getFullName(),
'email': spk.getEmail()
}
}
cl = Client()
cl.enqueue(HTTPTask(url, {'data': json.dumps(payload)}))
| pferreir/indico-backup | indico/MaKaC/plugins/Collaboration/base.py | Python | gpl-3.0 | 112,740 |
# -*- coding: utf-8 -*-
#===================================
# Japan standard area mesh code import plugin
#
#===================================
def classFactory(iface):
from .main import main
return main(iface) | karaGR/JpnStdMeshImport | __init__.py | Python | gpl-3.0 | 242 |
"""
FILENAME: controller.py
controller.py is the client and SUMO is the server
"""
""" DIRECTORIES & PATHS """
PORT = 8813
""" LIBRARIES """
import os, sys
import subprocess
import traci
import random
import pandas as pd
import numpy as np
import math
from numpy import random
import numpy.matlib
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from xml.dom import minidom
import heapq
import arrivalRateGen
import sklearn
from sklearn.cluster import KMeans
from sklearn import datasets
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import ggplot as gg
""" PARAMETERS """
secondsInDay = 24*60*60
secondsInHour = 60*60
totalDays = 1 # days to run simulation
alpha = 0.5 # learning rate
SL = "65546898" # ID of stoplight
# counters
currSod = 0
currPhaseID = 0
secsThisPhase = 0
# state objects and boolean helpers
phaseNum = 0
lastObjValue = 0
lastAction = 0
stepThru = 1
arrivalTracker = 0
waitingTime = 0
currState = 0
lastState = 0
# discretization parameters
numPhasesForAction = 4 # 8 including the yellow phases
numEdges = 4
numLanes = 8
numQueueSizeBuckets = 4
numwaitingBuckets = 4
hoursInDay = 24 #
numActions = 2 # 0 = stay in current phase; 1 = switch to yellow phase
secsPerInterval = 4
minPhaseTime = 4
maxPhaseTime = 36
yellowPhaseTime = 4
numStates = numPhasesForAction*(numQueueSizeBuckets*numwaitingBuckets)**numEdges
""" COLLECTIONS """
QValues = np.random.rand(numStates,2) # all state action pairs
QProbs = np.ones((numStates,2))/2 # initialize uniformly (equal probability for both actions)
QCounts = np.zeros((numStates, 2))
QAlphas = np.ones((numStates, 2))
# print 'QValues = ', QValues
# print 'QProbs = ', QProbs
# two lanes for each edge
listLanes = ['8949170_0', '8949170_1', \
'-164126513_0', '-164126513_1',\
'52016249_0', '52016249_1',\
'-164126511_0', '-164126511_1']
listEdges = ['8949170', '-164126513', '52016249', '-164126511']
tupEdges = ('8949170', '-164126513', '52016249', '-164126511')
# (south (palm), north (palm), west (arboretum), east (arboretum))
# pick the thresholds from small, medium, long-sized queues
# NOTE: the following override the values above; the queue-size and waiting-time
# discretizations are reduced to 3 buckets each here.
numPhasesForAction = 4 # 8 including the yellow phases
numEdges = 4
numLanes = 8
numQueueSizeBuckets = 3
numwaitingBuckets = 3
laneQueueTracker = {}
laneWaitingTracker = {}
laneNumVehiclesTracker = {}
# laneMeanSpeedTracker = {}
for lane in listLanes:
laneQueueTracker[lane] = 0
laneWaitingTracker[lane] = 0
laneNumVehiclesTracker[lane] = 0
# laneMeanSpeedTracker[lane] = 0
queueTracker = {}
waitingTracker = {}
numVehiclesTracker = {}
# meanSpeedTracker = {}
for edge in listEdges:
queueTracker[edge] = 0
waitingTracker[edge] = 0
numVehiclesTracker[edge] = 0
# meanSpeedTracker[edge] = 0
# queueBuckets = [3,6] # actually the boundaries of the buckets
# waitingBuckets = [35,120] # actually the boundaries of the buckets
# stateCols = ('phase', '8949170_q', '8949170_w', '-164126513_q', '-164126513_w',\
# '52016249_q', '52016249_w', '-164126511_q','-164126511_w')
# dfStateMapping = pd.DataFrame(columns=stateCols)
# for p in range(numPhasesForAction):
# print 'p = ', p
# for e1q in range(numQueueSizeBuckets):
# for e1w in range (numwaitingBuckets):
# for e2q in range(numQueueSizeBuckets):
# for e2w in range (numwaitingBuckets):
# for e3q in range(numQueueSizeBuckets):
# for e3w in range (numwaitingBuckets):
# for e4q in range(numQueueSizeBuckets):
# for e4w in range (numwaitingBuckets):
# df = pd.DataFrame([[p, e1q, e1w, e2q, e2w, e3q, e3w,e4q, e4w]], columns = stateCols)
# dfStateMapping = dfStateMapping.append(df, ignore_index=True)
# dfStateMapping['stateNum'] = dfStateMapping.index
# dfStateMapping.to_csv('dfStateMapping.csv')
dfStateMapping = pd.DataFrame.from_csv('dfStateMapping.csv')
cols = ('8949170_q', '8949170_w', '-164126513_q', '-164126513_w',\
'52016249_q', '52016249_w', '-164126511_q','-164126511_w')
dfQueueSizesWaitingTimes = pd.DataFrame(columns=cols)
""" HELPER FUNCTIONS """
def computeObjValue(queueTracker, waitingTracker):
currObjValue = 0
for key in listEdges:
        currObjValue -= ((1*queueTracker[key])**1.75 + (2*waitingTracker[key])**1.75) # waiting time weighted 2x before the 1.75 exponent
return currObjValue
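# Worked example for computeObjValue (illustrative numbers): with a queue of
# 4 vehicles and 2 minutes of cumulative waiting on a single edge, the edge's
# penalty is -(4**1.75 + (2*2)**1.75) ~= -22.6. The 1.75 exponent penalizes
# long queues and waits super-linearly, and waiting time is doubled before
# exponentiation, so it dominates as delays grow.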
# determine thresholds (bucket boundaries), e.g. the 0.75 percentiles of ALL queue sizes
def assignStateNum(phaseNum, queueTracker, waitingTracker, queueBuckets, waitingBuckets):
# assign each edge queue size to a corresponding bucket number
for i in queueTracker.keys():
assignedBucket = False
for j in range(len(queueBuckets)):
if not assignedBucket and queueTracker[i] <= queueBuckets[j]:
queueTracker[i] = j
assignedBucket = True
if not assignedBucket:
queueTracker[i] = len(queueBuckets)
for i in waitingTracker.keys():
assignedBucket = False
for j in range(len(waitingBuckets)):
if not assignedBucket and waitingTracker[i] < waitingBuckets[j]:
waitingTracker[i] = j
assignedBucket = True
if not assignedBucket:
waitingTracker[i] = len(waitingBuckets)
    # assign each edge waiting time to a corresponding bucket number
p = dfStateMapping['phase'] == phaseNum/2 # only 4 states where we are taking action
e1q = dfStateMapping['8949170_q'] == queueTracker['8949170']
e1w = dfStateMapping['8949170_w'] == waitingTracker['8949170']
e2q = dfStateMapping['-164126513_q'] == queueTracker['-164126513']
e2w = dfStateMapping['-164126513_w'] == waitingTracker['-164126513']
e3q = dfStateMapping['52016249_q'] == queueTracker['52016249']
e3w = dfStateMapping['52016249_w'] == waitingTracker['52016249']
e4q = dfStateMapping['-164126511_q'] == queueTracker['-164126511']
e4w = dfStateMapping['-164126511_w'] == waitingTracker['-164126511']
a = dfStateMapping['stateNum'][p & e1q & e1w & e2q & e2w & e3q & e3w & e4q & e4w]
# print 'a = ', a
# return a
# print 'a.dtype = ', a.dtype
return int(a.tolist()[0])
# lookup number in table; return number corresponding to state (not numerically significant)
# zz = assignStateNum(phaseNum, queueTracker, waitingTracker, queueBuckets, waitingBuckets)
# print zz
# print zz + 3
# queueTracker['8949170'] = 10
# print 'assignStateNum = ', assignStateNum(phaseNum, queueTracker, waitingTracker, queueBuckets, waitingBuckets)
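# A hedged sketch of an arithmetic alternative to the DataFrame lookup in
# assignStateNum. It assumes dfStateMapping's row ordering is the mixed-radix
# order of the commented-out nested loops above (phase slowest, then each
# edge's queue and waiting buckets), and that queueTracker/waitingTracker
# already hold bucket indices:
def assignStateNumFast(phaseNum, queueTracker, waitingTracker):
    idx = phaseNum / 2 # action phases 0,2,4,6 map to 0..3
    for edge in listEdges:
        idx = idx * numQueueSizeBuckets + queueTracker[edge]
        idx = idx * numwaitingBuckets + waitingTracker[edge]
    return int(idx)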
# Q-value update; gamma defaults to the discount factor used in
# learnDiscretization() below (0.95)
def updateQValues(lastState, lastAction, currState, reward, gamma=0.95):
    QCounts[lastState, lastAction] += 1
    QAlphas[lastState, lastAction] = 1.0/(QCounts[lastState, lastAction])
    # standard Q-learning update with a 1/n per-(state, action) learning rate:
    # Q(s,a) <- (1 - a_sa)*Q(s,a) + a_sa*(reward + gamma*max_a' Q(s',a'))
    QValues[lastState, lastAction] = (1 - QAlphas[lastState, lastAction])*QValues[lastState, lastAction] + QAlphas[lastState, lastAction]*(reward + gamma*np.max(QValues[currState,]))
def updateQProbs(lastState, lastAction, epsilon):
numerator = np.exp(QValues[lastState, ]/epsilon)
tempSum = np.sum(numerator)
denominator = np.array([tempSum, tempSum])
QProbs[lastState, ] = np.divide(numerator, denominator)
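# Note on updateQProbs: QProbs holds a Boltzmann (softmax) distribution over
# the two actions, P(a|s) = exp(Q(s,a)/epsilon) / sum_a' exp(Q(s,a')/epsilon),
# so "epsilon" acts as a temperature: large values flatten the distribution
# (exploration), small values concentrate it on the greedy action
# (exploitation). A minimal sketch of sampling an action from it:
def sampleAction(state):
    # returns 0 (stay in phase) or 1 (switch) according to QProbs
    return 0 if random.random() < QProbs[state, 0] else 1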
dfObjValsMaster = pd.DataFrame()
dfQueueTrackerMaster = pd.DataFrame()
dfWaitingTrackerMaster = pd.DataFrame()
dfActions = pd.DataFrame()
# make a dict of numpy arrays
stateData = {}
actionPhases = [0,2,4,6]
for i in range(24):
stateData[i] = {}
for j in range(len(actionPhases)):
stateData[i][actionPhases[j]] = np.array([])
global dictClusterObjects
dictClusterObjects = {}
global numClustersTracker
numClustersTracker = {}
for i in range(24): #hod
dictClusterObjects[i] = {}
numClustersTracker[i] = {}
for j in actionPhases:
dictClusterObjects[i][j] = None
numClustersTracker[i][j] = 0
# print 'dictClusterObjects = ', dictClusterObjects
# print 'numClustersTracker = ', numClustersTracker
global mapDiscreteStates
mapDiscreteStates = {}
global listMeanObjVals, listMedianObjVals, listMinObjVals
listMeanObjVals = []
listMedianObjVals = []
listMinObjVals = []
def learnDiscretization(daysToTrain):
# """ SIMULATION """
dynamic = 0
day = 0
totalDays = daysToTrain
# learning rates and discount factors
gamma = 0.95
# epsilons = [1, 0.99, 0.97, 0.93, 0.91, 0.89, 0.80, 0.75, 0.70, 0.6, 0.5, 0.45, 0.3, 0.20, 0.18, 0.17, 0.15, 0.10, 0.08, 0.05, 0.04, 0.02, 0.01, 0.01]
# print 'len(epsilons) = ', len(epsilons)
for day in range(totalDays): # range(len(epsilons)+1): #len(alphas)
# generate the random route schedule for the day
arrivalRateGen.writeRoutes(day+1)
sumoProcess = subprocess.Popen(['sumo-gui.exe', "-c", "palm.sumocfg", \
"--remote-port", str(PORT)], stdout=sys.stdout, stderr=sys.stderr)
# sumoProcess = subprocess.Popen(['sumo.exe', "-c", "palm.sumocfg", "--fcd-output", \
# "out.fcd.xml", "--tripinfo-output", "out.trip.xml", "--summary", "out.summary.xml", "--queue-output", "out.queue.xml", "--remote-port", str(PORT)], stdout=sys.stdout, stderr=sys.stderr)
traci.init(PORT)
dfObjVals = pd.DataFrame()
dfQueueTracker = pd.DataFrame()
dfWaitingTracker = pd.DataFrame()
action = 0 # number of seconds over minimum that we decided to take in 5 buckets (0,1,2,4)
lastAction = 0
hod = 0
currSod = 0
epsilon = 1 # TODO - change epsilon dynamically?
currPhaseID = 0
secsThisPhase = 0
while currSod < secondsInDay:
if currPhaseID == int(traci.trafficlights.getPhase(SL)) and currSod != 0: # if phase HAS NOT changed
secsThisPhase += 1 # increase the seconds in the currentPhase
else: # IF THE PHASE HAS CHANGED
secsThisPhase = 0
currPhaseID = int(traci.trafficlights.getPhase(SL))
# STORE INFORMATION TO DETERMINE IF ITS TIME TO MAKE A DECISION
# ARRAY TO MAP STATE:
# (2) Hour of day (24)
# (1) Light phase for decision (4) getPhase
# (2) Num stopped cars X 4 getLastStepHaltingNumber
# (3) Num vehicles in lane getLastStepVehicleNumber
# (4) Cum waiting time x 4 getWaitingTime
# (5) Last step mean speed X 4 getLastStepMeanSpeed
if currPhaseID%2 == 0 and secsThisPhase == 0: # only collecting data when we come to the end of a yellow phase
#============ HOD
if hod != currSod/secondsInHour:
hod = int(currSod/secondsInHour)
print 'observation day = ', day
print 'hod = ', hod
# print 'len(stateData[h][1]) = ', len(stateData[hod][1])
# print 'int(sum(np.std(stateData[h][a], axis = 0))) = ', int(sum(np.std(stateData[hod][1], axis = 0)))
# print 'len(stateData[h][3]) = ', len(stateData[hod][3])
# print 'int(sum(np.std(stateData[h][a], axis = 0))) = ', int(sum(np.std(stateData[hod][3], axis = 0)))
# print 'len(stateData[h][5]) = ', len(stateData[hod][5])
# print 'int(sum(np.std(stateData[h][a], axis = 0))) = ', int(sum(np.std(stateData[hod][5], axis = 0)))
# print 'len(stateData[h][7]) = ', len(stateData[hod][7])
# print 'int(sum(np.std(stateData[h][a], axis = 0))) = ', int(sum(np.std(stateData[hod][7], axis = 0)))
#============ currPhaseID
#================= count halted vehicles (4 elements)
for lane in listLanes:
laneQueueTracker[lane] = traci.lane.getLastStepHaltingNumber(str(lane))
# laneQueueTracker[lane] = traci.lane.getLastStepVehicleNumber(str(lane))
for edge in queueTracker.keys():
queueTracker[edge] = laneQueueTracker[str(edge) + '_' + str(0)] + laneQueueTracker[str(edge) + '_' + str(1)]
# inherently, we assume balancing here
                # TODO - later change this to only track
# df = pd.DataFrame([[currSod, queueTracker['8949170'], queueTracker['-164126513'], queueTracker['52016249'], queueTracker['-164126511']]])
# dfQueueTracker = dfQueueTracker.append(df, ignore_index = True)
# ================ count vehicles in lane
for lane in listLanes:
laneNumVehiclesTracker[lane] = traci.lane.getLastStepVehicleNumber(str(lane))
for edge in numVehiclesTracker.keys():
numVehiclesTracker[edge] = laneNumVehiclesTracker[str(edge) + '_' + str(0)] + laneNumVehiclesTracker[str(edge) + '_' + str(1)]
# ================ cum waiting time in minutes
for lane in listLanes:
laneWaitingTracker[lane] = traci.lane.getWaitingTime(str(lane))/60
for edge in waitingTracker.keys():
waitingTracker[edge] = laneWaitingTracker[str(edge) + '_' + str(0)] + laneWaitingTracker[str(edge) + '_' + str(1)]
# df = pd.DataFrame([[currSod, waitingTracker['8949170'], waitingTracker['-164126513'], waitingTracker['52016249'], waitingTracker['-164126511']]])
# dfWaitingTracker = dfWaitingTracker.append(df, ignore_index = True)
# ================ mean speed
# for lane in listLanes:
# laneMeanSpeedTracker[lane] = traci.lane.getLastStepMeanSpeed(str(lane))
# for edge in meanSpeedTracker.keys():
# meanSpeedTracker[edge] = (laneMeanSpeedTracker[str(edge) + '_' + str(0)] + laneMeanSpeedTracker[str(edge) + '_' + str(1)])/2
# ============== CREATE A NEW ENTRY FOR OUR STATE TRACKER
stateDataEntry = []
for edge in listEdges:
stateDataEntry.append(queueTracker[edge])
for edge in listEdges:
stateDataEntry.append(numVehiclesTracker[edge])
for edge in listEdges:
stateDataEntry.append(waitingTracker[edge])
# for edge in listEdges:
# stateDataEntry.append(meanSpeedTracker[edge])
if len(stateData[hod][currPhaseID]) == 0:
stateData[hod][currPhaseID] = np.array(stateDataEntry)
else:
stateData[hod][currPhaseID] = np.vstack([stateData[hod][currPhaseID], stateDataEntry])
# TRACK OBJECTIVE FUNCTION
currObjValue = computeObjValue(queueTracker, waitingTracker)
df = pd.DataFrame([[currSod, currObjValue]]) # todo - fix so plot shows the second of the day
dfObjVals = dfObjVals.append(df, ignore_index=True)
# print 'currPhaseID = ', currPhaseID
# print 'secsThisPhase = ', secsThisPhase
# print 'currSod = ', currSod
# print 'hod = ', hod
# print 'queueTracker = ', queueTracker
# print 'waitingTracker = ', waitingTracker
# # we can make a decision
# if currPhaseID%2 == 0 and secsThisPhase%secsPerInterval == 0 and secsThisPhase >=4: # and currSod > 20000:
# # print 'currPhaseID = ', currPhaseID
# # print 'secsThisPhase = ', secsThisPhase
# # arrayObjVals = np.append(arrayObjVals, currObjValue)
# # arrayQueueSizes = np.append(arrayQueueSizes, queueTracker.values())
# # if day > 0:
# # dynamic = 1
# if dynamic:
# # CONTROL ACTION
# phaseNum = traci.trafficlights.getPhase(SL)
# currState = assignStateNum(phaseNum, queueTracker, waitingTracker, queueBuckets, waitingBuckets)
# # reward = objective value; we want it to be as close to zero as possible (will always be negative)
# reward = currObjValue - lastObjValue
# lastObjValue = currObjValue
# updateQValues(int(lastState), int(lastAction), int(currState), reward, alpha) # alpha controls whether we explore or exploit
# updateQProbs(int(lastState), int(lastAction), epsilon)
# # pick action
# unigen = random.random()
# if QProbs[currState,0] < unigen or secsThisPhase == 0:
# action = 0 # stay in this phase
# else:
# action = 1 # change phases; transition to the next yellow phase immediately
# # TODO - totally change the actions to be the number of seconds (# of 4-second time chunks we are choosing for the cycle)
# traci.trafficlights.setPhase(SL, (int(currPhaseID) + 1)%8)
# #TODO - tell the light how long to run for; not just whether or not to change
# # print 'int(traci.trafficlights.getPhase(SL)) = ', int(traci.trafficlights.getPhase(SL))
# df = pd.DataFrame([[currSod, secsThisPhase, currPhaseID, currState, lastState, action, currObjValue, lastObjValue, reward]]) # todo - fix so plot shows the second of the day
# dfActions = dfActions.append(df, ignore_index=True)
# lastState = currState
# lastAction = action
currSod += 1
# print traci.vehicle.getIDList()
traci.simulationStep()
# print stateData
# totalSize = 0
# for i in actionPhases:
# totalSize += len(stateData[0][i])
# print 'totalSize = ', totalSize
# for i in actionPhases:
# print 'np.std(stateData[0][i], axis = 0) = ', np.std(stateData[0][i], axis = 0)
# print 'sumvariability = ', sum(np.std(stateData[0][i], axis = 0))
# print 'np.std(stateData[0][i], axis = 0) = ', np.std(stateData[0][i], axis = 0)
# print 'sumstd = ', sum(np.std(stateData[0][i], axis = 0))
# totalSize += len(stateData[0][i])
traci.close() # TODO - fix; need to figure out how to plot multiple with different x-axes
print 'dfObjVals = ', dfObjVals
dfMean = dfObjVals.mean(axis = 0)
meanObjVal = dfMean[1]
dfMedian = dfObjVals.median(axis = 0)
medianObjVal = dfMedian[1]
# vMedian = dfMedian[1]
dfMin = dfObjVals.min(axis=0)
minObjVal = dfMin[1]
listMeanObjVals.append(meanObjVal)
listMedianObjVals.append(medianObjVal)
listMinObjVals.append(minObjVal)
    for h in range(hoursInDay):
        for a in actionPhases:
            # at least 1 cluster, so the KMeans fit below is always valid
            numClustersTracker[h][a] = max(1, int(sum(np.std(stateData[h][a], axis = 0))))
print 'h = ', h
print 'a = ', a
print 'numClustersTracker[h][a] = ', numClustersTracker[h][a]
dictClusterObjects[h][a] = KMeans(n_clusters = numClustersTracker[h][a])
dictClusterObjects[h][a].fit(stateData[h][a])
# result = dictClusterObjects[0][a].predict(stateData[0][a])
# print 'result = ', result
# print 'max(result) = ', max(result)
# print 'hod = ', hod
# print 'dictClusterObjects = ', dictClusterObjects
print 'numClustersTracker = ', numClustersTracker
totalClusters = 0
    for h in range(hoursInDay):
for a in actionPhases:
totalClusters += numClustersTracker[h][a]
print 'totalClusters = ', totalClusters
stateCounter = 0
    for h in range(hoursInDay):
mapDiscreteStates[h] = {}
for a in actionPhases:
mapDiscreteStates[h][a] = {}
for c in range(numClustersTracker[h][a]):
mapDiscreteStates[h][a][c] = stateCounter
stateCounter += 1
print 'stateCounter = ', stateCounter
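# After learnDiscretization() has run, a live observation can be mapped to a
# discrete state by predicting its cluster and looking it up (a sketch;
# "features" stands for an observation vector built like stateDataEntry):
#
# cluster = dictClusterObjects[hod][currPhaseID].predict([features])[0]
# state = mapDiscreteStates[hod][currPhaseID][cluster]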
def getMapDiscreteStates():
return mapDiscreteStates
def getInvMapDiscreteStates():
invMapDiscreteStates = {}
for h in range(hoursInDay):
for a in actionPhases:
for c in range(numClustersTracker[h][a]):
invMapDiscreteStates[mapDiscreteStates[h][a][c]] = {'hod':h, 'phase':a, 'num':c}
    print invMapDiscreteStates
return invMapDiscreteStates
def getDictClusterObjects():
return dictClusterObjects
def getNumClustersTracker():
return numClustersTracker
def plotClusterHistograms():
dfClusters = pd.DataFrame.from_dict(numClustersTracker, orient = 'index')
dfClusters.columns = ['phase 0', 'phase 2', 'phase 4', 'phase 6']
print dfClusters
dfClusters.plot(kind = 'bar', stacked = True)
plt.xlabel('hour of day')
plt.ylabel('number discrete states chosen')
plt.title('Discrete States Selected By K-Means Clustering for each (hour, phase)')
plt.show()
def plotQueueSizes():
pass
def plotWaitingTimes():
pass
def getBaselineMean():
return np.mean(listMeanObjVals)
def getBaselineMedian():
return np.mean(listMedianObjVals)
def getBaselineMin():
return np.mean(listMinObjVals)
# # print 'dfObjValsMaster = ', dfObjValsMaster
# # dfObjVals.columns = ['second', 'day ' + str(day) + '; eps = ' + str(epsilon)]
# # dfObjVals['day ' + str(day) + '; eps = ' + str(epsilon)] = \
# # pd.ewma(dfObjVals['day ' + str(day) + '; eps = ' + str(epsilon)], span=600)
# # dfObjVals.columns = ['second', 'day ' + str(day) + '; eps = ' + str(epsilon)]
# if day < 0:
# dfActions.columns = ['currSod', 'secsThisPhase', 'currPhaseID', 'currState', 'lastState', 'action', 'currObjValue', 'lastObjValue', 'reward']
# dfActions.to_csv('dfActions' + str(day) + '.csv')
# dfQueueTracker.columns = ['hour', 'south', 'north', 'west', 'east']
# dfQueueTracker['hour'] = dfQueueTracker['hour']/secondsInHour
# dfQueueTracker.to_csv('dfQueueTracker' + str(day) + '.csv')
# dfWaitingTracker.columns = ['hour','south', 'north', 'west', 'east']
# dfWaitingTracker['hour'] = dfWaitingTracker['hour']/secondsInHour
# dfWaitingTracker.to_csv('dfWaitingTracker' + str(day) + '.csv')
# dfObjVals.columns = ['second', 'day ' + str(day)]
# dfObjVals['day ' + str(day)] = \
# pd.ewma(dfObjVals['day ' + str(day)], span=1200)
# dfObjVals.columns = ['second', 'day ' + str(day)]
# dfObjVals.to_csv('dfObjVals' + str(day) + '.csv')
# # print 'dfObjVals = ', dfObjVals
# if day == 0:
# dfObjValsMaster = dfObjVals
# dfObjVals.columns = ['second', 'static policy']
# else:
# dfObjValsMaster = dfObjValsMaster.merge(dfObjVals, on = 'second')
# print 'QValues = ', QValues
# np.savetxt('QValues.txt',QValues)
# print 'QProbs = ', QProbs
# np.savetxt('QProbs.txt',QProbs)
# print 'QAlphas = ', QAlphas
# np.savetxt('QAlphas.txt', QAlphas)
# print 'QCounts = ', QCounts
# np.savetxt( 'QCounts.txt', QCounts)
# listEdges = ['south', 'north', 'west', 'east']
# dfQueueTracker.columns = ['hour', 'south', 'north', 'west', 'east']
# dfQueueTracker['hour'] = dfQueueTracker['hour']/secondsInHour
# dfQueueTracker.to_csv('dfQueueTracker.csv')
# for i in listEdges:
# dfQueueTracker[i] = pd.ewma(dfQueueTracker[i], span = 1200)
# dfQueueTracker.to_csv('dfWaitingTracker.csv')
# dfWaitingTracker.columns = ['hour','south', 'north', 'west', 'east']
# dfWaitingTracker['hour'] = dfWaitingTracker['hour']/secondsInHour
# for i in listEdges:
# dfWaitingTracker[i] = pd.ewma(dfWaitingTracker[i], span = 1200)
# dfWaitingTracker.to_csv('dfWaitingTracker.csv')
# dfObjValsMaster['second'] = dfObjValsMaster['second']/secondsInHour
# dfObjValsMaster.to_csv('dfObjValsMaster.csv')
# dfObjValsMaster.plot(x = 'second')
# plt.xlabel('hour')
# plt.title('Moving Average of Objective Function Value (Static Policy)')
# dfQueueTracker.plot(x = 'hour')
# plt.xlabel('hour')
# plt.title('Moving Average of Queue Tracker (Static Policy)')
# dfWaitingTracker.plot(x = 'hour')
# plt.xlabel('hour')
# plt.title('Moving Average Waiting Time By Edge (Static Policy)')
# plt.show() | JDGlick/sumo_reinforcement_learning | getDiscreteStates.py | Python | gpl-3.0 | 22,439 |
from datetime import date, time, datetime
def from_dict(self, values):
"""Merge in items in the values dict into our object if it's one of our columns
"""
json_eager_save = set(getattr(self, '_json_eager_save', []))
for c in self.__table__.columns:
if c.name in values:
setattr(self, c.name, values[c.name])
for c in json_eager_save:
if c in values and values[c] is not None:
attr = getattr(self, c)
attr_class = getattr(self.__class__, c).property.mapper.class_
if isinstance(attr, list):
for x in values[c]:
i = attr_class()
i.from_dict(x)
attr.append(i)
            else:
                # the outer check already guarantees c is in values and not None
                attr = attr_class()
                attr.from_dict(values[c])
                setattr(self, c, attr)
dthandler = lambda obj: obj.isoformat() if isinstance(obj, (datetime, date)) else obj
def to_dict(self):
"""
Converts all the properties of the object into a dict for use in json.
You can define the following in your class
_json_eager_load :
list of which child classes need to be eagerly loaded. This applies
to one-to-many relationships defined in SQLAlchemy classes.
_base_blacklist :
top level blacklist list of which properties not to include in JSON
_json_blacklist :
blacklist list of which properties not to include in JSON
:param request: Pyramid Request object
:type request: <Request>
:return: dictionary ready to be jsonified
:rtype: <dict>
"""
props = {}
# grab the json_eager_load set, if it exists
# use set for easy 'in' lookups
json_eager_load = set(getattr(self, '_json_eager_load', []))
# now load the property if it exists
# (does this issue too many SQL statements?)
for prop in json_eager_load:
getattr(self, prop, None)
#self.__dict__.get(prop, None)
# we make a copy because the dict will change if the database
# is updated / flushed
options = self.__dict__.copy()
# setup the blacklist
# use set for easy 'in' lookups
blacklist = set(getattr(self, '_base_blacklist', []))
# extend the base blacklist with the json blacklist
blacklist.update(getattr(self, '_json_blacklist', []))
for key in options:
# skip blacklisted, private and SQLAlchemy properties
if key in blacklist or key.startswith(('__', '_sa_', '_')):
continue
# format and date/datetime/time properties to isoformat
#obj = getattr(self, key)
obj = self.__dict__.get(key)
if isinstance(obj, (datetime, date, time)):
props[key] = obj.isoformat()
#props[key] = mktime(obj.timetuple()) * 1000
else:
# get the class property value
attr = obj
# let see if we need to eagerly load it
if key in json_eager_load:
# this is for SQLAlchemy foreign key fields that
# indicate with one-to-many relationships
if not hasattr(attr, 'pk') and attr:
# jsonify all child objects
if isinstance(attr, list):
attr = [x.to_dict() for x in attr]
else:
attr = attr.to_dict()
else:
# convert all non integer strings to string or if
# string conversion is not possible, convert it to
# Unicode
if attr and not isinstance(attr, (int, float)):
try:
attr = str(attr)
except UnicodeEncodeError:
attr = attr.encode('utf-8')
props[key] = attr
return props
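# Usage sketch (assumes SQLAlchemy declarative models; "Base" and "Item" are
# hypothetical — these helpers are meant to be attached to a model base):
#
# from sqlalchemy import Column, Integer, String
# from sqlalchemy.ext.declarative import declarative_base
#
# Base = declarative_base()
# Base.from_dict = from_dict
# Base.to_dict = to_dict
#
# class Item(Base):
#     __tablename__ = 'items'
#     id = Column(Integer, primary_key=True)
#     name = Column(String)
#
# item = Item()
# item.from_dict({'name': 'widget'})
# item.to_dict() # -> {'name': 'widget'}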
| aguirrel/sqlalchemy_rest | sqlalchemy_rest/json/__init__.py | Python | gpl-3.0 | 3,900 |
class TurbolinksMiddleware(object):
def process_response(self, request, response):
response['X-XHR-Current-Location'] = request.get_full_path()
response.set_cookie('request_method', request.method)
#response.set_cookie('request_method') = 'GET'
print '******************************'
print request.method
print '******************************'
return response
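# Usage note (a sketch): this old-style middleware would be registered in the
# pre-Django-1.10 MIDDLEWARE_CLASSES setting; the dotted path is assumed from
# this file's location:
#
# MIDDLEWARE_CLASSES = (
#     ...,
#     'turbolinks.middleware.TurbolinksMiddleware',
# )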
| hzlf/openbroadcast | website/tools/turbolinks/middleware.py | Python | gpl-3.0 | 423 |
#! /usr/bin/env python3
import os
import pwd
import logging
import re
from shutil import move
from gi.repository import Gtk
from dialogs import MessageDialog
from treeview import TreeViewHandler
class Logger():
def __init__(self, logPath='', defaultLogLevel='debug', addLogTime=True, rtObject=None, parent=None, maxSizeKB=None):
self.logPath = logPath
if self.logPath != '':
if self.logPath[:1] != '/':
homeDir = pwd.getpwuid(os.getuid()).pw_dir
self.logPath = os.path.join(homeDir, self.logPath)
self.defaultLevel = getattr(logging, defaultLogLevel.upper())
self.logTime = addLogTime
self.rtobject = rtObject
self.typeString = self.getTypeString(self.rtobject)
self.parent = parent
self.maxSizeKB = maxSizeKB
if self.logPath == '':
# Log only to console
logging.basicConfig(level=self.defaultLevel, format='%(levelname)-10s%(message)s')
else:
if os.path.exists(self.logPath) and self.maxSizeKB is not None:
b = os.path.getsize(self.logPath)
if b > self.maxSizeKB * 1024:
old = "%s.old" % self.logPath
if os.path.exists(old):
os.remove(old)
move(self.logPath, "%s.old" % self.logPath)
# Set basic configuration
formatStr = '%(name)-30s%(levelname)-10s%(message)s'
dateFmtStr = None
if addLogTime:
formatStr = '%(asctime)s ' + formatStr
dateFmtStr = '%d-%m-%Y %H:%M:%S'
# Log to file
logging.basicConfig(filename=self.logPath, level=self.defaultLevel, format=formatStr, datefmt=dateFmtStr)
# Define a Handler which writes INFO messages or higher to the console
# Debug messages are written to a specified log file
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)-10s%(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
# Write message
def write(self, message, loggerName='log', logLevel='debug', showErrorDialog=True):
message = str(message).strip()
if message != '':
logLevel = logLevel.lower()
myLogger = logging.getLogger(loggerName)
if logLevel == 'debug':
myLogger.debug(message)
elif logLevel == 'info':
myLogger.info(message)
self.rtobjectWrite(message)
elif logLevel == 'warning':
myLogger.warning(message)
self.rtobjectWrite(message)
elif logLevel == 'error':
myLogger.error(message)
self.rtobjectWrite(message)
if showErrorDialog:
MessageDialog('Error', message, Gtk.MessageType.ERROR, self.parent).show()
elif logLevel == 'critical':
myLogger.critical(message)
self.rtobjectWrite(message)
if showErrorDialog:
MessageDialog('Critical', message, Gtk.MessageType.ERROR, self.parent).show()
elif logLevel == 'exception':
myLogger.exception(message)
self.rtobjectWrite(message)
if showErrorDialog:
MessageDialog('Exception', message, Gtk.MessageType.ERROR, self.parent).show()
    # Return message to the given object
def rtobjectWrite(self, message):
if self.rtobject is not None and self.typeString != '':
if 'label' in self.typeString.lower():
self.rtobject.set_text(message)
elif 'treeview' in self.typeString.lower():
tvHandler = TreeViewHandler(self.rtobject)
tvHandler.fillTreeview([message], ['str'], [-1], 0, 400, False, True, True, fontSize=10000)
elif 'statusbar' in self.typeString.lower():
self.pushMessage(message)
else:
# For obvious reasons: do not log this...
print(('Return object type not implemented: %s' % self.typeString))
    # Return the type string of an object
    def getTypeString(self, obj):
        tpString = ''
        tp = str(type(obj))
        matchObj = re.search("'(.*)'", tp)
if matchObj:
tpString = matchObj.group(1)
return tpString
def pushMessage(self, message):
if message is not None:
context = self.rtobject.get_context_id('message')
self.rtobject.push(context, message)
# Test
#log = Logger('myapp.log') # Log file and console
#log = Logger() # Console only
#log.write('This is a debug test', 'myapp.gui', 'debug') # Should not end up in console when writing to log file
#log.write('This is an info test', 'myapp.gui', 'info')
#log.write('This is a warning test', 'myapp.gui', 'warning')
#log.write('This is an error test', 'myapp.gui', 'error')
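# Example with log rotation and a GTK label as return object (a sketch;
# "myLabel" is a hypothetical Gtk.Label created elsewhere):
#log = Logger('myapp.log', maxSizeKB=512, rtObject=myLabel)
#log.write('Settings saved', 'myapp.gui', 'info') # logged and shown on the label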
| ruibarreira/linuxtrail | usr/lib/trail/updatemanager/logger.py | Python | gpl-3.0 | 5,098 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from rest_framework import serializers
from .base import WsBaseModelSerializer
import rest.models
class OrderSerializer(WsBaseModelSerializer):
"""
This is a serializer class for the Order rest model.
"""
user = serializers.PrimaryKeyRelatedField(read_only=True, default=serializers.CurrentUserDefault())
networks = serializers.SlugRelatedField(many=True, read_only=True, slug_field="network_cidr")
domain_names = serializers.SlugRelatedField(many=True, read_only=True, slug_field="name")
scan_config = serializers.PrimaryKeyRelatedField(read_only=True)
def create(self, validated_data):
"""
Perform the creation of this order.
:param validated_data: The validated data sanitized by this serializer.
:return: The newly-created Order.
"""
return rest.models.Order.objects.create_from_user_and_organization(
user=validated_data["user"],
organization=validated_data["organization"],
)
class Meta:
model = rest.models.Order
fields = (
"created",
"uuid",
"started_at",
"completed_at",
"user_email",
"scoped_domains_count",
"scoped_endpoints_count",
"scoped_endpoints_size",
"user",
"networks",
"domain_names",
"scan_config",
)
read_only_fields = (
"created",
"uuid",
"started_at",
"completed_at",
"user_email",
"scoped_domains_count",
"scoped_endpoints_count",
"scoped_endpoints_size",
"user",
)
extra_kwargs = {
"organization": {"read_only": True},
}
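# Usage sketch (hypothetical view code; "org" and "request" are assumed to
# exist in the calling scope). The user is filled in automatically from the
# request context via CurrentUserDefault; organization is read-only on the
# serializer, so it has to be passed to save():
#
# serializer = OrderSerializer(data=request.data, context={"request": request})
# serializer.is_valid(raise_exception=True)
# order = serializer.save(organization=org)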
| lavalamp-/ws-backend-community | rest/serializers/orders.py | Python | gpl-3.0 | 1,853 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import time
import rospy
import std_msgs
import json
import os
from std_msgs.msg import String
from std_msgs.msg import Float32
from arom.srv import *
from arom.msg import *
import numpy as np
import pylirc
from drivers.__init__ import AromNode
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
recieved = []
def manager_callback(recive):
global recieved
recieved.append(recive.data)
#print recive, recieved
class arom_manager(AromNode):
node_name = "arom_manager"
node_type = "arom_manager"
node_pymlab = True
def __init__(self, rows = 2, cols = 2, file = None):
print os.path.abspath(__file__)
rospy.Subscriber("/arom/manager", std_msgs.msg.String, manager_callback)
AromNode.__init__(self)
self.set_feature('arom_nodes',{'publish': '/arom/manager'})
        ##
        ## End of basic initialization
        ##
rate = rospy.Rate(10)
while not rospy.is_shutdown():
try:
if len(recieved) > 0:
print recieved
except Exception, e:
print e
time.sleep(0.5)
if __name__ == '__main__':
m = arom_manager()
| Robozor-network/AROM | src/arom_manager.py | Python | gpl-3.0 | 1,298 |
# -*- coding: utf-8 -*-
"""
(C) 2015, Niels Anders, EUBrazilCC
"""
from generic_tools import saveimg, getpoints, gridcellborders
import numpy as np
import sys, time, os.path
def calcAspect(dtm):
[r,c] = dtm.shape
    print 'Calculate aspect (%d x %d)...' % (c, r),
time.sleep(0.1)
t0 = time.time()
G = np.zeros((r,c,9))
# Calculate gradient from center cell to each surrounding cell
G[1:r-1,1:c-1,0] = (dtm[1:r-1,1:c-1] - dtm[0:r-2,0:c-2])/(2**0.5)
G[1:r-1,1:c-1,1] = (dtm[1:r-1,1:c-1] - dtm[1:r-1,0:c-2])
G[1:r-1,1:c-1,2] = (dtm[1:r-1,1:c-1] - dtm[2:r ,0:c-2])/(2**0.5)
G[1:r-1,1:c-1,3] = (dtm[1:r-1,1:c-1] - dtm[0:r-2,1:c-1])
G[1:r-1,1:c-1,4] = 0
G[1:r-1,1:c-1,5] = (dtm[1:r-1,1:c-1] - dtm[2:r ,1:c-1])
G[1:r-1,1:c-1,6] = (dtm[1:r-1,1:c-1] - dtm[0:r-2,2:c ])/(2**0.5)
G[1:r-1,1:c-1,7] = (dtm[1:r-1,1:c-1] - dtm[1:r-1,2:c ])
G[1:r-1,1:c-1,8] = (dtm[1:r-1,1:c-1] - dtm[2:r ,2:c ])/(2**0.5)
Gmax = G.max(axis=2) # max gradient
slope = np.arctan(Gmax)/(np.pi/180) # convert to slope angle
aspect = np.zeros((r,c))*-1
for i in np.arange(r):
for j in np.arange(c):
if Gmax[i,j] > 0:
aspect[i,j] = np.argwhere(G[i,j,:]==Gmax[i,j])*45
# return
t1 = time.time()
print 'finished in %1d seconds' % (t1-t0)
time.sleep(0.1)
return aspect
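# Minimal illustrative sketch (added, not part of the original workflow):
# for a plane dipping along the row axis, every interior cell should fall
# into the same 45-degree aspect class (index of the steepest-descent
# neighbour in the 3x3 window, times 45).
def _demo_calcAspect():
    demo_dtm = np.tile(np.arange(5, dtype=float), (5, 1)).T
    return calcAspect(demo_dtm)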
if __name__=='__main__':
total = len(sys.argv)
cmdargs = str(sys.argv)
if total != 2:
print ("The total numbers of args passed to the script should be 1")
sys.exit()
filename = sys.argv[1]
basename = os.path.splitext(filename)[0]
extension = os.path.splitext(filename)[1]
In = filename
fn = basename+'_aspect.tif'
# read point cloud
x,y,z,c = getpoints(In)
g = c==2 # ground points
    # initiate grid system (coordinates of center point grid cells)
res = 1
xi, yi = np.arange(x.min(), x.max()+res/2, res), np.arange(y.min(), y.max()+res/2, res)
extent = [xi.min(), xi.max(), yi.min(), yi.max()] # only used to set extent to plots
grid = np.zeros((len(yi),len(xi)))*np.NaN
    # retrieve gridcell border coordinates
bx, by = gridcellborders(xi,yi,res)
# georeferencing info
geotransform = (x.min(), res, 0, y.max(), 0, -res) # used for georeference metadata in geotiff
proj = '' # mandatory setting used to store projection information in metadata geotiff (not assigned as metadata is not stored in lidar txt)
# create DTM
from dtm import createDTM
dtm = createDTM(x[g],y[g],z[g],xi,yi,res,geotransform, proj, method='idw')
    # calculate aspect
aspect = calcAspect(dtm)
# write tiff
saveimg(np.flipud(aspect), fn, len(xi), len(yi), geotransform, proj)
| niels-anders/eubrazilcc-lidar | aspect.py | Python | gpl-3.0 | 2,894 |
#! /usr/bin/env python
# coding: utf-8
# This file is part of the OMI Python package.
#
# The OMI Python package is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# The OMI Python Package is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the OMI Python Package. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import division
import logging
import os
import numpy as np
import scipy as sp
import scipy.integrate
import scipy.sparse as sparse
import scipy.sparse.linalg as s_linalg
# constants and satellite parameters (Aura specific)
EARTH_RADIUS = 6378.5
ORBIT_PERIOD = 100.0 * 60.0
GROUND_SPEED = 2.0 * np.pi * EARTH_RADIUS / ORBIT_PERIOD
PACKAGE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), 'data')
class MMatrixLUT(object):
def __init__(self, path):
"""\
        Look-up table for rows of measurement matrix M.
"""
self.path = path
self._lut = {}
self.load_lut()
def load_lut(self):
"""\
Load LUT for different indices j=0,1,...,m-2,m-1.
"""
for index in [0, 1, None, -2, -1]:
if index is None:
filename = 'measurement_equation.dat'
else:
filename = 'measurement_equation_{0:+d}.dat'.format(index)
filename = os.path.join(self.path, filename)
data = np.loadtxt(filename)
self._lut[index] = dict((int(d[0]), d[1:]) for d in data)
# load y-direction limits
filename = os.path.join(self.path, 'y_limits.dat')
data = np.loadtxt(filename)
self._limits = dict((int(d[0]), d[1]) for d in data)
def get_limit(self, distance):
distance = int(round(distance))
return self._limits[distance]
def get_values(self, distance, j, m):
distance = int(round(distance))
if j == 0:
values = self._lut[0][distance]
elif j == 1:
values = self._lut[1][distance]
elif j == m-2:
values = self._lut[-2][distance]
elif j == m-1:
values = self._lut[-1][distance]
else:
values = self._lut[None][distance]
return values[np.isfinite(values)]
def norm_box_function(x, half_width):
"""\
    A normalised box function on `x` with given `half_width`.
"""
mask = (-half_width <= x) & (x <= half_width)
box = np.zeros_like(x)
box[mask] = 1.0
box /= box.sum()
return box
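# Minimal illustrative sketch (added): the box equals 1 inside
# +/- half_width and is normalised so its samples sum to one.
def _demo_norm_box_function():
    x = np.linspace(-2.0, 2.0, 401)
    return norm_box_function(x, 1.0).sum()  # ~1.0 by construction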
def instrument_function(y, distance, exposure_time):
"""\
Compute instrument function W(y) for given `distance`
and `exposure_time`.
Parameters
''''''''''
y : array_like (or None)
coordinates with uniform spacing dy; if y is None
        appropriate range will be estimated
distance : float
distance between instrument and ground
exposure_time : float
instrument exposure time (e.g. OMI 2.0 seconds)
Returns
'''''''
y : np.ndarray
along-track coordinates
W : np.ndarray
instrument function W(y)
"""
# instantaneous FWHM and scaling coefficient of instrument function
iFWHM = 2.0 * distance * np.tan( np.deg2rad(0.5) )
c = -np.log(0.5) / (0.5 * iFWHM)**4
# estimate suitable range for y, if necessary
if y is None:
iFWHM = 2.0 * distance * np.tan( np.deg2rad(0.5) )
y_width = iFWHM + GROUND_SPEED * exposure_time
y = np.linspace(-y_width, y_width, 5001)
# convolute slit function with box function
dy = y[1] - y[0]
box = norm_box_function(y, 0.5 * GROUND_SPEED * exposure_time)
W = np.convolve( np.exp(-c*y**4), box, 'same')
W /= (W * dy).sum()
return y, W
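# Minimal illustrative sketch (added): for an Aura/OMI-like geometry (the
# 705 km distance here is an assumed example value) the returned weighting
# function integrates to one, i.e. sum(W) * dy == 1.
def _demo_instrument_function():
    y, W = instrument_function(None, 705.0, 2.0)
    dy = y[1] - y[0]
    return (W * dy).sum()  # ~1.0 by construction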
def create_knot_set(center, dy, y_min, y_max):
"""\
Compute location of knots with respect to the center
of the current pixel:
[..., -0.5 * dy[center], +0.5 * dy[center], ...]
and limited by `y_min` and `y_max`.
"""
yp = [ -0.5 * dy[center], 0.5 * dy[center] ]
indices = [center, center+1]
for i in range(center+1, dy.size):
if yp[-1] >= y_max:
break
yp.append(yp[-1] + dy[i])
indices.append(indices[-1] + 1)
for i in range(center-1, -1, -1):
if yp[0] <= y_min:
break
yp.insert(0, yp[0] - dy[i])
indices.insert(0, indices[0] - 1)
return np.asarray(yp), indices
def compute_fov_limits(y, W, area_under_curve):
"""\
Compute lower and upper boundary of instrument function based on
'area under curve'.
"""
    y_min = y[W.cumsum() >= 0.5 * (1.0 - area_under_curve)][0]
    y_max = -y_min
    return y_min, y_max
def look_up_M_matrix_row(row, dy, distance, lut):
"""\
Get row entries in measurement matrix M from LUT and compute
coverage for neighbouring tiled pixels by instrument
function.
Parameter
---------
row : integer
index of current row
dy : array_like, shape(M,)
pixel size
distance : float or int
distance between spacecraft and ground pixel
    lut : psm.MMatrixLUT
look-up table for row entries
Returns
-------
values : array_like
entries for M matrix in row
columns : array_like
column indices for `values` in row
coverage : dictionary
coverage of neighbouring pixels by
instrument function
"""
y_max = lut.get_limit(distance)
y_min = -y_max
yp, indices = create_knot_set(row, dy, y_min, y_max)
values = lut.get_values(distance, row, dy.size)
columns = np.arange(2*indices[0], 2*indices[0] + values.size)
coverage = compute_overlap(yp, y_min, y_max, indices)
return values, columns, coverage
def compute_M_matrix_row(row, dy, distance, exposure_time, area_under_curve):
"""\
Compute row entries of measurement matrix M by
numerical integrations.
Parameter
---------
row : integer
index of current row
dy : array_like, shape(M,)
pixel size
distance : number
distance between spacecraft and ground pixel
exposure_time : number
instrument exposure time (e.g. OMI 2.0 seconds)
area_under_curve : number
fraction of area under curve to be used,
e.g. 0.75 or 0.99
Returns
-------
values : array_like
entries for M matrix in row
columns : array_like
column indices for `values` in row
coverage : dictionary
coverage of neighbouring pixels by
instrument function
"""
y, W = instrument_function(None, distance, exposure_time)
y_min, y_max = compute_fov_limits(y, W, area_under_curve)
yp, indices = create_knot_set(row, dy, y_min, y_max)
values = np.zeros(2*yp.size-1)
columns = np.arange(2*indices[0], 2*indices[0] + values.size)
# compute matrix entries
for i, (y0, y1) in enumerate(zip(yp[:-1], yp[1:])):
mask = (y0 < y) & (y < y1)
t = (y[mask] - y0) / (y1 - y0)
# base functions
phi_p0 = (1 - 4 * t + 3 * t**2)
phi_d = ( 6 * t - 6 * t**2)
phi_p1 = (-2 * t + 3 * t**2)
# integration
for j, phi in enumerate([phi_p0, phi_d, phi_p1]):
values[2*i+j] += scipy.integrate.simps(W[mask] * phi, y[mask])
    values /= values.sum()
coverage = compute_overlap(yp, y_min, y_max, indices)
return values, columns, coverage
def compute_overlap(yp, y_min, y_max, indices):
"""\
Compute coverage of each tiled pixels if instrument
function lies between `y_min` and `y_max`.
Parameter
---------
yp : array_like, shape(M+1,)
location of knots on lattice
y_min, y_max : number
minimum and maximum location of instrument
function in along-track direction
indices : list of integer
along-track position of tiled pixels
Returns
-------
A dictionary which gives the coverage from
right (alpha, full coverage 0) and left (beta,
full coverage 1).
"""
# compute boundaries on each interval (full coverage: [0,1])
alpha = np.zeros(yp.size - 1)
beta = np.ones(yp.size - 1)
alpha[0] = (y_min - yp[0]) / (yp[1] - yp[0])
beta[0] = 0.0
alpha[-1] = 1.0
beta[-1] = (y_max - yp[-2]) / (yp[-1] - yp[-2])
# coverage of W(y) over knot set
coverage = {}
for i in range(yp.size-1):
coverage[indices[i]] = [alpha[i], beta[i]]
return coverage
def update_coverage(total, new):
"""\
Update dictionary which describes the coverage of each
tiled pixel by the instrument functions of valid
measurements.
"""
for key in new:
if key in total:
if new[key][0] < total[key][0]: # alpha: optimal 0
total[key][0] = new[key][0]
if new[key][1] > total[key][1]: # beta: optimal 1
total[key][1] = new[key][1]
else:
total[key] = new[key]
return total
def M_matrix(values, stddev, dy, distances, exposure_time, missing_values,
area_under_curve=None, lut=None):
"""\
Compute measurement matrix M (normalised with standard deviation) and
the coverage by non-missing values over each interval.
Parameter
---------
values : array_like, shape (M,)
measurement values
stddev : array_like, shape (M,)
        standard deviation of measurements
dy : array_like, shape(M,) or shape(,)
length of intervals in along-track direction [y_j, y_{j+1}]
distances : array_like, shape(M,) or shape(,)
distances between instrument and ground pixel
exposure_time : number
instrument exposure time (e.g. OMI 2.0 seconds)
missing_values : array_like, bool, shape(M,N) or None
        Missing measurement values. Missing measurements will not
        be included in the coverage dictionary (see returns).
If `None` no missing values.
area_under_curve : float or None
fraction of area under instrument function (e.g. 0.75 or 0.99)
lut : omi.psm.MMatrixLUT object
Look-up table for entries of matrix M (kappa)
Returns
-------
M : sparse.csr_matrix
measurement matrix normalised with standard deviation
coverage : dictionary
coverage of tiled pixels by instrument functions
of non-missing measurements
"""
coverage = {}
if dy.size == 1:
dy = dy.repeat(values.size)
if distances.size == 1:
distances = distances.repeat(values.size)
# create CSR matrix
data = []
indices = []
indptr = [0]
for row in range(values.size):
if np.isfinite(values[row]):
if lut is None:
# numerical integration (simpson)
row_entries, columns, m_coverage = compute_M_matrix_row(row, dy,
distances[row], exposure_time, area_under_curve
)
else:
limits = lut._limits.keys()
if min(limits) <= distances[row] <= max(limits):
logging.debug('Use LUT for distance %d.' % distances[row])
row_entries, columns, m_coverage = look_up_M_matrix_row(row, dy,
distances[row], lut
)
else:
logging.debug('Distance %d not LUT, compute M matrix entries.'
% distances[row])
# numerical integration (simpson)
row_entries, columns, m_coverage = compute_M_matrix_row(row, dy,
distances[row], exposure_time, area_under_curve
)
# add only valid (i.e. measured) values to coverage
if not missing_values[row]:
coverage = update_coverage(coverage, m_coverage)
data.append(row_entries / stddev[row])
indices.append(columns)
indptr.append(indptr[-1] + row_entries.size)
M = sparse.csr_matrix((
np.concatenate(data),
np.concatenate(indices),
np.asarray(indptr)
), shape=(len(indptr)-1, 2*values.size+1))
return M, coverage
def C1_matrix(h):
"""\
    Create C1 continuity matrix for x = [p0, d0, p1, ...] with
natural boundary condition. The pixel size is `h`.
"""
a = 1.0 / np.asarray(h)
shape = (a.size+1 , 2*a.size+1)
C = sparse.lil_matrix(shape)
C[0,0:3] = [-4.0, +6.0, -2.0]
for row in range(1, a.size):
col = 2 * row
C[row, col-2] = a[row-1]
C[row, col-1] = -3.0 * a[row-1]
C[row, col ] = 2.0 * (a[row-1] + a[row])
C[row, col+1] = -3.0 * a[row]
C[row, col+2] = a[row]
C[a.size, -3:] = [2.0, -6.0, 4.0]
b = np.zeros(a.size+1)
return C.tocsr(), b
def L2_matrix(m):
"""\
Create second-order difference matrix L2
(defined on mean values d_j only)
"""
L2 = sparse.lil_matrix((m-2, 2*m+1))
for row in range(m-2):
col = 2 * (row + 1) + 1
L2[row, col-2] = 1.0
L2[row, col] = -2.0
L2[row, col+2] = 1.0
return L2 / 3.0
def B_inv_matrix(stddev, rho_est):
"""\
Create inverse of diagonal matrix B.
"""
B_inv = 1.0 / (rho_est * stddev[1:-1].copy())
B_inv = sparse.dia_matrix((B_inv, 0), shape=(B_inv.size, B_inv.size))
return B_inv
def penalty_term(stddev, gamma, rho_est):
"""\
Compute penalty term: gamma * L2.T * B * L2
"""
B_inv = B_inv_matrix(stddev, rho_est)
L2 = L2_matrix(stddev.size)
return gamma * L2.T * B_inv * L2
def solve_qpp(H, C, g, b=None):
"""\
Solve quadratic programming problem.
Parameter
---------
H : sparse matrix
Hessian matrix
C : sparse matrix
C1 continuity matrix
g : vector
g = 2y.T S.I M
Returns
-------
parameter vector x of size 2m+1
"""
m,n = C.shape
K = sparse.vstack([
sparse.hstack([H, C.T]),
sparse.hstack([C, sparse.csr_matrix((m,m))])
]).tocsc()
if b is None:
b = np.zeros(m)
b = np.concatenate([-g, b])
res = s_linalg.spsolve(K, b)
return res[:n]
def across_track(d, h):
"""\
Compute parameter vector `p` of tiled parabolic C1
histospline:
p = C.I * b
Parameters
''''''''''
d : array_like, shape(N,)
mean value over each interval
h : array_like, shape(N,)
size of each interval [x_i, x_i+1]
Returns
'''''''
p : np.ndarray, shape(N,)
parameter vector of coefficients `p`, i.e. values of
spline at each knot `x_i`
"""
a = 1.0 / np.asarray(h)
C = sparse.lil_matrix((a.size+1, a.size+1))
b = np.zeros(a.size+1)
# natural boundary condition (left)
C[0,0:2] = [-4.0, -2.0]
b[0] = -6.0 * d[0]
# C1 continuity
for i in range(1, a.size):
C[i, i-1] = a[i-1]
C[i, i ] = 2.0 * (a[i-1] + a[i])
C[i, i+1] = a[i]
b[i] = 3.0 * (d[i-1] * a[i-1] + d[i] * a[i])
# natural boundary condition (right)
C[a.size, -2:] = [2.0, 4.0]
b[a.size] = +6.0 * d[a.size - 1]
# solve using sparse LU decomposition
p = s_linalg.dsolve.factorized( C.tocsc() )(b)
return p
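# Minimal illustrative sketch (added): constant mean values over equal
# intervals must reproduce constant knot values for the C1 histospline.
def _demo_across_track():
    d = np.array([2.0, 2.0, 2.0])
    h = np.array([1.0, 1.0, 1.0])
    return across_track(d, h)  # -> approximately [2, 2, 2, 2]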
def along_track(values, stddev, dy, gamma, distances, exposure_time=2.0,
auc=0.99, missing_values=None, lut=None, rho_est=None):
"""\
Compute vector of coefficients `x` of smoothing spline.
Parameter
---------
values : array_like, shape (N,)
measurement values.
stddev : array_like, shape (N,) or shape(,)
        errors of measurements.
dy : array_like, shape(N,) or shape(,)
length of intervals [y_i, y_{i+1}]
gamma : float
weighting factor
distances : array_like, shape(N,) or shape(,)
distances between instrument and ground pixel
exposure_time : float
instrument's exposure time (default 2.0 for OMI)
auc : float
area under curve (default 0.99)
    missing_values : array_like, bool, shape(N,) or None
        Missing measurement values are True. Missing measurements will not
        be included in the coverage dictionary (see returns). Default
        None, i.e. all values are used.
Returns
------
x : ndarray, shape (2M+1,)
optimal spline parameters [p0, d0, p1, ..., p_N+1]
alpha, beta : ndarrays, shape(N,)
As weighting functions are overlapping in neighbouring knot intervals,
the coefficients α and β are used to describe the coverage
of each knot interval using the unit interval.
β describes, how far the interval is covered from 0 (lower boundary)
into the interval where β=0 is no coverage and β=1 is full coverage.
A β>1 may occur at the boundary of the knot set, if the weighting
function is reaching over the largest knot.
α describes how far the interval is covered from 1 (upper boundary)
into the interval where α=1 is no coverage and α=0 is full coverage.
Similar α<0 may occur on the lower boundary of the knot set.
An interval is fully covered if β>=α (see the following example):
|1-α|
,---------,
|______.______.__|
0 α β 1
`-------------'
|β|
and misses some areas if α>β (indicated by `xxxx`):
xxxx
|_____.____._____|
0 β α 1
`-----' `-----'
|β| |1-α|
α and β can be used to draw parts of the surface spline for missing
or invalid pixels, which are partially covered neighbouring pixels.
"""
values = np.asarray(values)
stddev = np.asarray(stddev)
dy = np.asarray(dy)
distances = np.asarray(distances)
if rho_est is None:
rho_est = 1.0
if missing_values is None:
missing_values = np.zeros(values.shape, bool)
if stddev.size == 1:
stddev = stddev.repeat(values.size)
#mask = np.isfinite(values)
# measurment matrix
M, coverage = M_matrix(values, stddev, dy, distances, exposure_time,
missing_values, area_under_curve=auc, lut=lut
)
d = np.matrix( values / stddev ).T
# penalty term
P = penalty_term(stddev, gamma, rho_est)
# objective function
H = 2.0 * (M.T * M + P)
g = -2.0 * np.array(d.T * M).flatten()
k = float(d.T * d)
# C1 constraint
C, b = C1_matrix(dy)
# inversion of QPP
x = solve_qpp(H, C, g, b)
    # compute coverage coefficients alpha and beta
alpha = np.ones_like(values)
beta = np.zeros_like(values)
for i in coverage:
alpha[i] = coverage[i][0]
beta[i] = coverage[i][1]
return x, alpha, beta
def parabolic_spline_algorithm(values, stddev, dx, dy, gamma, rho_est,
distances, exposure_time=2.0, missing_values=None, area_under_curve=0.99,
lut=None):
"""\
Compute coefficients of parabolic surface spline for a function
rho(x,y) of an OMI-like orbit. The surface spline is defined on
a lattice with M rows in along-track direction and N columns in
across-track direction.
Parameter
---------
values : array_like, shape (M,N)
measurement values
stddev : array_like, shape (M,N) or shape(,)
standard deviation of measurments
dx : array_like, shape(M,N) or shape(,)
length of intervals in across-track direction [x_i, x_{i+1}]
dy : array_like, shape(N,) or shape(,)
length of intervals in along-track direction [y_j, y_{j+1}]
gamma : array_like, shape (N,) or shape (,)
smoothing parameter
rho_est : float
estimate of maximum value (for scaling)
distances : array_like, shape(M,N) or shape(,)
distances between instrument and ground pixel
exposure_time : float
instrument's exposure time (default 2.0 for OMI)
missing_values : array_like, bool, shape(M,N) or None
        Missing measurement values. Missing measurements will not
        be included in the coverage dictionary (see returns).
If `None` no missing values.
area_under_curve : float
area under curve (default 0.99)
lut : omi.psm.MMatrixLUT object
Look-up table for entries of matrix M (kappa)
Returns
-------
p : array_like, shape(M+1,N+1)
values of spline on lattice knots
d : array_like, shape(M,N)
mean values of each lattice cell
qx : array_like, shape(M+1,N)
line integrals of spline on [x_i, x_{i+1}]
qy : array_like, shape(M,N+1)
line integrals of spline on [y_j, y_{j+1}]
alpha : array_like, shape(M,N)
defines coverage of lattice cells in along-track direction
(see omi.psm.along_track for details)
beta : array_like, shape(M,N)
defines coverage of lattice cells in along-track direction
(see omi.psm.along_track for details)
"""
# scale values to avoid large condition numbers
values /= rho_est
stddev /= rho_est
m,n = values.shape
d = np.empty((m,n))
qx = np.empty((m+1,n))
qy = np.empty((m,n+1))
p = np.empty((m+1,n+1))
alpha = np.ones_like(values)
beta = np.zeros_like(values)
gamma = np.asarray(gamma)
if gamma.size == 1:
gamma = gamma.repeat(n)
distances = np.asarray(distances)
if distances.size == 1:
distances = distances * np.ones_like(values)
stddev = np.asarray(stddev)
if stddev.size == 1:
stddev = stddev * np.ones_like(values)
if missing_values is None:
missing_values = np.zeros(values.shape, bool)
# compute d and qx
for i in range(n):
x, alpha[:,i], beta[:,i] = along_track(values[:,i], stddev[:,i], dy[:,i], gamma[i],
distances[:,i], exposure_time=exposure_time, missing_values=missing_values[:,i],
auc=area_under_curve, lut=lut, rho_est=1.0
)
qx[:,i] = x[::2].copy()
d[:,i] = x[1::2].copy()
# compute qy
for j in range(m):
qy[j,:] = across_track(d[j,:], dx[j,:])
# compute p_ij
p[0,:] = across_track(qx[0,:], dx[0,:])
p[-1,:] = across_track(qx[-1,:], dx[-1,:])
for j in range(1,m):
p[j,:] = across_track(qx[j,:], 0.5 * (dx[j-1,:] + dx[j,:]) )
# scale coefficients up
p *= rho_est
d *= rho_est
qx *= rho_est
qy *= rho_est
values *= rho_est
stddev *= rho_est
return p, d, qx, qy, alpha, beta
if __name__ == '__main__':
pass
| gkuhl/omi | omi/psm.py | Python | gpl-3.0 | 23,137 |
import numpy as np
import sys
from scipy.stats import spearmanr
def read_final(path):
models = []
headings = []
with open(path,'r') as f:
header = f.readline()
tokens = header.replace("\n","").split(",")
headings = tokens[2:]
for i in range(0,len(tokens)-2):
models.append([])
for line in f.readlines():
if "----" in line:
break
tokens = line.replace("\n","").split(",")
for i in range(0,len(tokens)-2):
models[i].append(float(tokens[i+2]))
return models, headings
def perm_test(M1, M2, H, it=500):
    '''
    M1, M2 : the lists of scores for the two models to be compared,
    H : the list of human judgements
    it : number of iterations
    Returns 1-tailed and 2-tailed p-scores
    '''
l = len(M1)
orig_dif = spearmanr(H,M1)[0] - spearmanr(H,M2)[0]
perm_list = []
pdif_list = []
for i in range(it):
perm = np.random.binomial(1,0.5,l).tolist()
while perm in perm_list:
perm = np.random.binomial(1,0.5,l).tolist()
perm_list.append(perm)
M1_perm = [M2[i] if perm[i] else M1[i] for i in range(l)]
M2_perm = [M1[i] if perm[i] else M2[i] for i in range(l)]
perm_dif = spearmanr(H,M1_perm)[0] - spearmanr(H,M2_perm)[0]
pdif_list.append(perm_dif)
pd_arr = np.array(pdif_list)
p1 = np.mean(pd_arr >= orig_dif) # 1-tailed p-score
p2 = np.mean(np.abs(pd_arr) >= abs(orig_dif)) # 2-tailed p-score
return (p1,p2)
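# Minimal illustrative sketch (added): comparing a model with itself gives
# a zero observed difference, so both p-values equal 1 by construction.
def _demo_perm_test():
    H = [1, 2, 3, 4, 5, 6, 7, 8]
    M1 = [1.1, 2.2, 2.9, 4.1, 5.0, 6.2, 6.9, 8.1]
    return perm_test(M1, list(M1), H, it=50)  # -> (1.0, 1.0)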
if __name__ == "__main__":
models, headings = read_final(sys.argv[1])
sys.stdout.write("_,")
for i in range(0,len(headings)):
sys.stdout.write(headings[i] + "-p1," + headings[i] + "-p2,")
print("")
for i in range(0,len(models)):
sys.stdout.write(headings[i]+",")
for j in range(0, len(models)):
M1 = models[i]
M2 = models[j]
H = models[-1]
p1,p2 = perm_test(M1,M2,H)
sys.stdout.write(str(p1) + "," + str(p2) + ",")
print("")
print("")
| QMUL/wacky | python/final_stats.py | Python | gpl-3.0 | 1,977 |
# Copyright (C) 2011, The SAO/NASA Astrophysics Data System
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
@author: Giovanni Di Milia and Benoit Thiell
Global checks on the entire record.
'''
import logging
from invenio import bibrecord
from merger_settings import FIELD_TO_MARC, \
SYSTEM_NUMBER_SUBFIELD, PUBL_DATE_SUBFIELD, \
PUBL_DATE_TYPE_SUBFIELD, PUBL_DATE_TYPE_VAL_SUBFIELD,\
AUTHOR_NAME_SUBFIELD
from pipeline_log_functions import manage_check_error
import pipeline_settings
logger = logging.getLogger(pipeline_settings.LOGGING_WORKER_NAME)
def check_pub_year_consistency(merged_record, type_check):
"""Function that checks if the publication year is consistent
with the year at the beginning of the bibcode"""
logger.info(' running check_pub_year_consistency')
#definition of the list of dates I don't want to check with this function
dates_to_skip_from_check = ['date-preprint']
try:
system_number_fields = merged_record[FIELD_TO_MARC['system number']]
except KeyError:
manage_check_error('No System Number field!', type_check, logger)
return None
try:
pub_dates_fields = merged_record[FIELD_TO_MARC['publication date']]
except KeyError:
manage_check_error('No Publication Date field!', type_check, logger)
return None
    #the system number field should be unique, so if there are more than one field, I have a problem (and I cannot proceed)
if len(system_number_fields) > 1:
manage_check_error('There are more than one System Numbers!', type_check, logger)
return None
system_number = bibrecord.field_get_subfield_values(system_number_fields[0], SYSTEM_NUMBER_SUBFIELD)[0]
num_dates_checked = 0
for date_type_string in PUBL_DATE_TYPE_VAL_SUBFIELD:
#I don't want to check the preprint date
if date_type_string in dates_to_skip_from_check:
continue
        #then I have to extract the right date (there can be different ones in the same field)
pubdate = ''
for field in pub_dates_fields:
if bibrecord.field_get_subfield_values(field, PUBL_DATE_TYPE_SUBFIELD)[0] == date_type_string:
pubdate = bibrecord.field_get_subfield_values(field, PUBL_DATE_SUBFIELD)[0]
break
if len(pubdate) != 0:
num_dates_checked +=1
else:
continue
#final part of the check
if pubdate[0:4] != system_number[0:4]:
manage_check_error('Year of "%s" not consistent with the main bibcode "%s"!' % (date_type_string, system_number), type_check, logger)
if num_dates_checked == 0:
manage_check_error('No dates available for this record!', type_check, logger)
return None
def first_author_bibcode_consistency(merged_record, type_check):
"""Function that checks if the last letter of the main bibcode
is consistent with the first letter of the first author"""
logger.info(' running first_author_bibcode_consistency')
bibstems_to_skip_from_check = ['QB']
try:
system_number_fields = merged_record[FIELD_TO_MARC['system number']]
except KeyError:
manage_check_error('No System Number field!', type_check, logger)
return None
try:
first_author_fields = merged_record[FIELD_TO_MARC['first author']]
except KeyError:
manage_check_error('No First Author field!', type_check, logger)
return None
    #the system number field should be unique, so if there are more than one field, I have a problem (and I cannot proceed)
if len(system_number_fields) > 1:
manage_check_error('There are more than one System Numbers!', type_check, logger)
return None
    #the first author field should be unique, so if there are more than one field, I have a problem (and I cannot proceed)
if len(first_author_fields) > 1:
manage_check_error('There are more than one First Author!', type_check, logger)
return None
system_number = bibrecord.field_get_subfield_values(system_number_fields[0], SYSTEM_NUMBER_SUBFIELD)[0]
first_author = bibrecord.field_get_subfield_values(first_author_fields[0], AUTHOR_NAME_SUBFIELD)[0]
#If the bibcode has a bibstem to skip, I don't do anything
for elem in bibstems_to_skip_from_check:
if system_number[4:4+len(elem)] == elem:
return None
if first_author[0].lower() != system_number[-1].lower():
#if the last letter of the system number is a dot, then I want to give a different message
if system_number[-1] == '.':
manage_check_error('The main bibcode "%s" doesn\'t have an initial even if there is a First Author "%s"!' % (system_number, first_author), type_check, logger)
else:
manage_check_error('First Author "%s" not consistent with the main bibcode "%s"!' % (first_author, system_number), type_check, logger)
return None
def check_collections_existence(merged_record, type_check):
"""Function that checks if there is at least one collection"""
logger.info(' running check_collections_existence')
try:
collections_fields = merged_record[FIELD_TO_MARC['collection']]
except KeyError:
manage_check_error('No Collection field!', type_check, logger)
return None
if len(collections_fields) == 0:
manage_check_error('No Collection field!', type_check, logger)
return None
| adsabs/ADS_records_merger | merger/global_merging_checks.py | Python | gpl-3.0 | 6,091 |
"""This module's scope covers the operations related to metadata.
"""
__version__ = "0.1.1"
__changelog = {
"0.1.1": {"Tuna": "0.16.5", "Change": "PEP8 and PEP257 compliance."},
"0.1.0": {"Tuna": "0.14.0", "Change": "updated docstrings to new style."},
}
import logging
import re
import sys
class MetadataParser(object):
"""Responsible for translating metadata from ADHOC's ADT to Tuna's internal
representation.
Its constructor signature is:
Parameters:
* file_name : string : defaults to None
Full or relative path and file name for an ADT file.
"""
    def __init__(self, file_name=None):
super(MetadataParser, self).__init__()
self.log = logging.getLogger(__name__)
self.__file_name = file_name
self.__results = {}
if self.__file_name != None:
self.run()
def get_metadata(self):
"""Access the parsed metadata.
Returns:
* self.__results : dictionary
Contains the metadata obtained from reading the input file.
"""
return self.__results
def run(self):
"""Verify file format and attempts to parse the metadata accordingly.
"""
self.log.debug("%s %s" % (sys._getframe().f_code.co_name,
sys._getframe().f_code.co_varnames))
if self.__file_name != None:
if (self.__file_name.startswith(".ADT", -4) or
self.__file_name.startswith(".adt", -4)):
self.read_adt_metadata()
else:
self.log("File name %s does not have .ADT or .adt suffix, " \
+ "aborting." % ( self.__file_name ) )
def get_metadata(file_name = None):
"""Conveniently return the metadata, given a file name.
Parameters:
* file_name : string
Containing a valid file name (and optionally, an absolute or relative
path).
Returns:
* parser.get_metadata ( ) : dictionary
Contains the metadata obtained from reading file_name.
"""
log = logging.getLogger(__name__)
log.debug("%s %s" % (sys._getframe().f_code.co_name,
sys._getframe().f_code.co_varnames))
if file_name:
        parser = MetadataParser(file_name=file_name)
return parser.get_metadata()
| rcbrgs/tuna | tuna/io/metadata_parser.py | Python | gpl-3.0 | 2,321 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Josef Gajdusek <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
This decoder decodes demodulated data stream used by the Qi standard
for communication from the receiver to the charging station.
'''
from .pd import Decoder
| atalax/libsigrokdecode | decoders/qi/__init__.py | Python | gpl-3.0 | 994 |
#!/usr/bin/env python
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modifications by Merlin Schumacher ([email protected]) for c't magazin für computer technik
import argparse
import os.path
import json
from time import sleep
# Import the modules required for the Google Assistant
import google.oauth2.credentials
from google.assistant.library import Assistant
from google.assistant.library.event import EventType
from google.assistant.library.file_helpers import existing_file
# Import the modules required for the Google text-to-speech service. This allows custom spoken responses to be produced
from gtts import gTTS
from subprocess import call
# Import the modules for controlling the Raspberry Pi's GPIO pins
import RPi.GPIO as GPIO
# Pin numbering follows the BCM scheme.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# led_pin sets the pin of an LED connected to the Pi. It can be switched on and off by voice command.
led_pin = 4
# button_pin sets the pin of a push button connected to the Pi.
button_pin = 14
# status_led_pin sets the pin of an LED connected to the Pi. It lights up while the Assistant is listening.
status_led_pin = 15
GPIO.setup(led_pin, GPIO.OUT)
GPIO.setup(status_led_pin, GPIO.OUT)
GPIO.setup(button_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.output(led_pin, False)
GPIO.output(status_led_pin, False)
# muted holds the information whether the Assistant is listening or not.
muted = False
# Threading for waiting for a button press
import threading
# The mute function runs as a thread and mutes the Assistant when the button is pressed.
def mute(assistant, toggle=True, value=True):
global muted
if (toggle):
print("Muting")
muted = not muted
assistant.set_mic_mute(muted)
else:
print("Mute wird auf %s gesetzt" % value)
def listen(assistant):
while True:
GPIO.wait_for_edge(button_pin, GPIO.RISING)
sleep(.5)
print("Trigger button gedrückt")
mute(assistant)
assistant.start_conversation()
# speak_tts uses Google's TTS service to create an MP3 file from the
# given text. The file is then played back with mpg123.
# Optionally a language can be specified.
def speak_tts(ttstext, language="en-us"):
tts = gTTS(text=ttstext, lang=language)
tts.save("answer.mp3")
call(["mpg123", "answer.mp3"])
# turn_on_led switches the LED on.
def turn_on_led():
print("LED an")
speak_tts("Turning LED on.")
GPIO.output(led_pin, True)
# turn_off_led switches the LED off.
def turn_off_led():
print("LED aus")
speak_tts("Turning LED off.")
GPIO.output(led_pin, False)
## process_event handles the events returned by the Google Assistant instance.
def process_event(event, assistant):
global muted
    # # To see all event types, uncomment the following line.
#print(event.type)
#
    # # Once the hotword has been detected, Google starts recording and recognizing the speech
if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
call(["mpg123", "ding.mp3"])
print("Bitte sprechen Sie jetzt.")
#
    # # After the speech input ends, the Assistant processes the text.
if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
command = event.args['text']
print("Erkannter Text:"+command)
print("Antworte")
# Falls der erkannte Text einem der lokalen Befehle entspricht, wird der Dialog mit dem Assistant abgebrochen
# und die zugehörige lokale Funktion ausgeführt.
if command == 'turn LED on':
assistant.stop_conversation()
turn_on_led()
elif command == 'turn LED off':
assistant.stop_conversation()
turn_off_led()
    # After the conversation ends, Google waits for the hotword again. If the argument 'with_follow_on_turn' is true,
    # the dialog is not finished yet and Google waits for further instructions from the user.
if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and
event.args and not event.args['with_follow_on_turn']):
muted = False
mute(assistant)
    # If the Assistant raises a mute event, a notice is printed
    # and the LED is toggled accordingly.
if (event.type == EventType.ON_MUTED_CHANGED):
muted = bool(event.args['is_muted'])
print("Assistant hört zu: " + str(not muted))
GPIO.output(status_led_pin, not muted)
def main():
    # A custom credentials file can be specified via the --credentials parameter.
    # If none is given, the program falls back to the file ~/.config/google-oauthlib-tool/credentials.json.
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--credentials', type=existing_file,
metavar='OAUTH2_CREDENTIALS_FILE',
default=os.path.join(
os.path.expanduser('~/.config'),
'google-oauthlib-tool',
'credentials.json'
),
help='Path to store and read OAuth2 credentials')
args = parser.parse_args()
    # Load the credentials required for signing in to Google's API.
with open(args.credentials, 'r') as f:
credentials = google.oauth2.credentials.Credentials(token=None,
**json.load(f))
    # The Google Assistant instance in `assistant` starts emitting events, which are
    # handled by process_event.
with Assistant(credentials) as assistant:
        # Start the thread that reacts to the mute button
button_thread = threading.Thread(target=listen, args=(assistant,))
button_thread.start()
print("Warte auf Hotword")
eventlist = assistant.start()
mute(assistant)
for event in eventlist:
process_event(event, assistant)
if __name__ == '__main__':
main()
| merlinschumacher/ct-google-assistant-sdk | assistant_button.py | Python | gpl-3.0 | 6,753 |
#!/usr/bin/env python
places = {
'Shire': {'leave': 'Bree', 'stay': 'DEATH'},
'DEATH': {},
'Bree': {'with Strider': 'Rivendell', 'alone': 'DEATH'},
'Rivendell': {'over mountains': 'DEATH', 'through Moria': 'Lorien'},
'Lorien': {'down Anduin': 'Falls of Rauros'},
'Falls of Rauros': {'down Anduin': 'Minas Tirith',
'east': 'Ithilien'},
'Ithilien': {'south': 'Black Gate'},
'Black Gate': {'in': 'DEATH', 'follow Gollum': 'Minas Morgul'},
'Minas Morgul': {'road': 'DEATH', 'tunnel': 'Mordor'},
'Mordor': {'eagles': 'Minas Tirith'},
'Minas Tirith': {'return home': 'Shire (tired)'},
'Shire (tired)': {'stay': 'Shire (tired)', 'retire': 'the West'},
'the West': {}
}
place = 'Shire'
while True:
print('You are in: {0}. Possible actions:'.format(place))
# Print actions with numbers, so user only has to type a number
actions = sorted(places[place].keys()) + ['EXIT GAME']
for i, action in enumerate(actions):
print(' (%s) %s' % (i, action))
choice = input('Your action? ')
if choice.isdigit() and 0 <= int(choice) < len(actions):
action = actions[int(choice)]
if action == 'EXIT GAME':
break
place = places[place][action]
| veltzer/demos-python | src/exercises/advanced/advanced_dict_usage/solution1.py | Python | gpl-3.0 | 1,263 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2016/08/23 Version 34 - parameters24 input file needed
# 2017/10/27 Version 39 - Reformatted PEP8 Code
# 2017/11/05 Version 40 - Corrections to tdifmin, tstda calculations
# 2019/10/15 Version pympa - xcorr substituted with correlate_template from obspy
# First Version August 2014 - Last October 2017 (author: Alessandro Vuan)
# Code for the detection of microseismicity based on cross correlation
# of template events. The code exploits multiple cores to speed up time
#
# Method's references:
# The code is developed and maintained at
# Istituto Nazionale di Oceanografia e Geofisica di Trieste (OGS)
# and was inspired by collaborating with Aitaro Kato and collegues at ERI.
# Kato A, Obara K, Igarashi T, Tsuruoka H, Nakagawa S, Hirata N (2012)
# Propagation of slow slip leading up to the 2011 Mw 9.0 Tohoku-Oki
# earthquake. Science doi:10.1126/science.1215141
#
# For questions comments and suggestions please send an email to [email protected]
# The kernel function xcorr used from Austin Holland is modified in pympa
# Recommended the use of Obspy v. 1.2.0 with the substitution of xcorr function with
# correlate_template
# Software Requirements: the following dependencies are needed (check import
# and from statements below)
# Python "obspy" package installed via Anaconda with all numpy and scipy
# packages
# Python "math" libraries
# Python "bottleneck" utilities to speed up numpy array operations
#
# import useful libraries
import os
import os.path
import datetime
from math import log10
from time import perf_counter
import bottleneck as bn
import numpy as np
import pandas as pd
from obspy import read, Stream, Trace
from obspy.core import UTCDateTime
from obspy.core.event import read_events
from obspy.io.zmap.core import _is_zmap
from obspy.io.quakeml.core import _is_quakeml
from obspy.signal.trigger import coincidence_trigger
from obspy.signal.cross_correlation import correlate_template
# LIST OF USEFUL FUNCTIONS
def listdays(year, month, day, period):
# create a list of days for scanning by templates
datelist = pd.date_range(
datetime.datetime(year, month, day), periods=period
).tolist()
a = list(map(pd.Timestamp.to_pydatetime, datelist))
days = []
for i in a:
days.append(i.strftime("%y%m%d"))
return days
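# Minimal illustrative sketch (added): days are returned as YYMMDD strings,
# matching the naming convention of the continuous-data files.
def _demo_listdays():
    return listdays(2016, 8, 1, 3)  # -> ['160801', '160802', '160803']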
def read_parameters(par):
# read 'parameters24' file to setup useful variables
with open(par) as fp:
data = fp.read().splitlines()
stations = data[23].split(" ")
print(stations)
channels = data[24].split(" ")
print(channels)
networks = data[25].split(" ")
print(networks)
lowpassf = float(data[26])
highpassf = float(data[27])
sample_tol = int(data[28])
cc_threshold = float(data[29])
nch_min = int(data[30])
temp_length = float(data[31])
utc_prec = int(data[32])
cont_dir = "./" + data[33] + "/"
temp_dir = "./" + data[34] + "/"
travel_dir = "./" + data[35] + "/"
dateperiod = data[36].split(" ")
ev_catalog = str(data[37])
start_itemp = int(data[38])
print("starting template = ", start_itemp)
stop_itemp = int(data[39])
print("ending template = ", stop_itemp)
factor_thre = int(data[40])
stdup = float(data[41])
stddown = float(data[42])
chan_max = int(data[43])
nchunk = int(data[44])
return (
stations,
channels,
networks,
lowpassf,
highpassf,
sample_tol,
cc_threshold,
nch_min,
temp_length,
utc_prec,
cont_dir,
temp_dir,
travel_dir,
dateperiod,
ev_catalog,
start_itemp,
stop_itemp,
factor_thre,
stdup,
stddown,
chan_max,
nchunk,
)
def trim_fill(tc, t1, t2):
tc.trim(starttime=t1, endtime=t2, pad=True, fill_value=0)
return tc
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
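# Minimal illustrative sketch (added): a length-5 vector with window=3 is
# exposed as a (3, 3) view of overlapping windows without copying data.
def _demo_rolling_window():
    a = np.arange(5)
    return rolling_window(a, 3)  # -> [[0 1 2], [1 2 3], [2 3 4]]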
def process_input(itemp, nn, ss, ich, stream_df):
st_cft = Stream()
# itemp = template number, nn = network code, ss = station code,
# ich = channel code, stream_df = Stream() object as defined in obspy
# library
temp_file = "%s.%s.%s..%s.mseed" % (str(itemp), nn, ss, ich)
finpt = "%s%s" % (temp_dir, temp_file)
if os.path.isfile(finpt):
try:
tsize = os.path.getsize(finpt)
if tsize > 0:
st_temp = Stream()
st_temp = read(finpt, dtype="float32")
tt = st_temp[0]
# continuous data are stored in stream_df
sc = stream_df.select(station=ss, channel=ich)
if sc.__nonzero__():
tc = sc[0]
fct = correlate_template(
tc.data, tt.data, normalize="full", method="auto"
)
fct = np.nan_to_num(fct)
stats = {
"network": tc.stats.network,
"station": tc.stats.station,
"location": "",
"channel": tc.stats.channel,
"starttime": tc.stats.starttime,
"npts": len(fct),
"sampling_rate": tc.stats.sampling_rate,
"mseed": {"dataquality": "D"},
}
trnew = Trace(data=fct, header=stats)
tc = trnew.copy()
st_cft = Stream(traces=[tc])
# else:
# print("warning no stream is found")
else:
print("warning template event is empty")
except OSError:
pass
return st_cft
def quality_cft(trac):
std_trac = np.nanstd(abs(trac.data))
return std_trac
def stack(stall, df, tstart, npts, stdup, stddown, nch_min):
    """
    Function to stack traces in a stream with different trace.id and
    different starttime but the same number of datapoints.
    Returns a trace having as starttime
    the earliest starttime within the stream
    """
    std_trac = np.empty(len(stall))
    td = np.empty(len(stall))
for itr, tr in enumerate(stall):
std_trac[itr] = quality_cft(tr)
avestd = np.nanmean(std_trac[0:])
avestdup = avestd * stdup
avestddw = avestd * stddown
for jtr, tr in enumerate(stall):
if std_trac[jtr] >= avestdup or std_trac[jtr] <= avestddw:
stall.remove(tr)
print("removed Trace n Stream = ...", tr, std_trac[jtr], avestd)
td[jtr] = 99.99
else:
sta = tr.stats.station
chan = tr.stats.channel
net = tr.stats.network
s = "%s.%s.%s" % (net, sta, chan)
td[jtr] = float(d[s])
itr = len(stall)
if itr >= nch_min:
tdifmin = min(td)
tdat = np.nansum([tr.data for tr in stall], axis=0) / itr
sta = "STACK"
cha = "BH"
net = "XX"
header = {
"network": net,
"station": sta,
"channel": cha,
"starttime": tstart,
"sampling_rate": df,
"npts": npts,
}
tt = Trace(data=tdat, header=header)
else:
tdifmin = None
sta = "STACK"
cha = "BH"
net = "XX"
header = {
"network": net,
"station": sta,
"channel": cha,
"starttime": tstart,
"sampling_rate": df,
"npts": npts,
}
tt = Trace(data=np.zeros(npts), header=header)
return tt, tdifmin
def csc(
stall, stcc, trg, tstda, sample_tol, cc_threshold, nch_min, day, itemp, itrig, f1
):
"""
The function check_singlechannelcft compute the maximum CFT's
values at each trigger time and counts the number of channels
having higher cross-correlation
nch, cft_ave, crt are re-evaluated on the basis of
+/- 2 sample approximation. Statistics are written in stat files
"""
# important parameters: a sample_tolerance less than 2 results often
# in wrong magnitudes
sample_tolerance = sample_tol
single_channelcft = cc_threshold
#
trigger_time = trg["time"]
tcft = stcc[0]
t0_tcft = tcft.stats.starttime
trigger_shift = trigger_time.timestamp - t0_tcft.timestamp
trigger_sample = int(round(trigger_shift / tcft.stats.delta))
max_sct = np.empty(len(stall))
max_trg = np.empty(len(stall))
max_ind = np.empty(len(stall))
chan_sct = np.chararray(len(stall), 12)
nch = 0
for icft, tsc in enumerate(stall):
# get cft amplitude value at corresponding trigger and store it in
# check for possible 2 sample shift and eventually change
# trg['cft_peaks']
chan_sct[icft] = (
tsc.stats.network + "." + tsc.stats.station + " " + tsc.stats.channel
)
tmp0 = trigger_sample - sample_tolerance
if tmp0 < 0:
tmp0 = 0
tmp1 = trigger_sample + sample_tolerance + 1
max_sct[icft] = max(tsc.data[tmp0:tmp1])
max_ind[icft] = np.nanargmax(tsc.data[tmp0:tmp1])
max_ind[icft] = sample_tolerance - max_ind[icft]
max_trg[icft] = tsc.data[trigger_sample : trigger_sample + 1]
nch = (max_sct > single_channelcft).sum()
if nch >= nch_min:
nch09 = (max_sct > 0.9).sum()
nch07 = (max_sct > 0.7).sum()
nch05 = (max_sct > 0.5).sum()
nch03 = (max_sct > 0.3).sum()
cft_ave = np.nanmean(max_sct[:])
crt = cft_ave / tstda
cft_ave_trg = np.nanmean(max_trg[:])
crt_trg = cft_ave_trg / tstda
max_sct = max_sct.T
max_trg = max_trg.T
chan_sct = chan_sct.T
for idchan in range(0, len(max_sct)):
str22 = "%s %s %s %s \n" % (
chan_sct[idchan].decode(),
max_trg[idchan],
max_sct[idchan],
max_ind[idchan],
)
f1.write(str22)
else:
nch = 1
cft_ave = 1
crt = 1
cft_ave_trg = 1
crt_trg = 1
nch03 = 1
nch05 = 1
nch07 = 1
nch09 = 1
return nch, cft_ave, crt, cft_ave_trg, crt_trg, nch03, nch05, nch07, nch09
def mag_detect(magt, amaxt, amaxd):
"""
mag_detect(mag_temp,amax_temp,amax_detect)
Returns the magnitude of the new detection by using the template/detection
amplitude trace ratio
and the magnitude of the template event
"""
amaxr = amaxt / amaxd
magd = magt - log10(amaxr)
return magd
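# Minimal illustrative sketch (added): a detection with half the template
# amplitude is about 0.3 magnitude units smaller than the template event.
def _demo_mag_detect():
    return mag_detect(magt=3.0, amaxt=1.0, amaxd=0.5)  # -> ~2.699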
def reject_moutliers(data, m=1.0):
nonzeroind = np.nonzero(data)[0]
nzlen = len(nonzeroind)
data = data[nonzeroind]
datamed = np.nanmedian(data)
d = np.abs(data - datamed)
mdev = 2 * np.median(d)
if mdev == 0:
inds = np.arange(nzlen)
data[inds] = datamed
else:
s = d / mdev
inds = np.where(s <= m)
return data[inds]
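# Minimal illustrative sketch (added): zeros are dropped first, then values
# whose distance from the median exceeds m times twice the median deviation.
def _demo_reject_moutliers():
    data = np.array([1.0, 1.1, 0.0, 0.9, 5.0])
    return reject_moutliers(data, 1)  # -> [1.0, 1.1, 0.9]; 5.0 is rejected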
def mad(dmad):
    # calculate the daily average absolute deviation about the median (a MAD-like statistic)
ccm = dmad[dmad != 0]
med_val = np.nanmedian(ccm)
tstda = np.nansum(abs(ccm - med_val) / len(ccm))
return tstda
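# Minimal illustrative sketch (added): zeros are excluded, then the average
# absolute deviation about the median of the remaining samples is returned.
def _demo_mad():
    return mad(np.array([1.0, 1.0, 0.0, 1.0, 3.0]))  # -> 0.5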
start_time = perf_counter()
# read 'parameters24' file to setup useful variables
[
stations,
channels,
networks,
lowpassf,
highpassf,
sample_tol,
cc_threshold,
nch_min,
temp_length,
utc_prec,
cont_dir,
temp_dir,
travel_dir,
dateperiod,
ev_catalog,
start_itemp,
stop_itemp,
factor_thre,
stdup,
stddown,
chan_max,
nchunk,
] = read_parameters("parameters24")
# set time precision for UTCDATETIME
UTCDateTime.DEFAULT_PRECISION = utc_prec
# read Catalog of Templates Events
print("event catalog should be ZMAP or QUAKEML")
if _is_zmap(ev_catalog):
print("reading ZMAP catalog")
elif _is_quakeml(ev_catalog):
print("reading QUAKEML catalog")
else:
print("warning error in reading ZMAP or QUAKEML")
cat = read_events(ev_catalog)
ncat = len(cat)
# read template from standard input
# startTemplate = input("INPUT: Enter Starting template ")
# stopTemplate = input("INPUT: Enter Ending template ")
t_start = start_itemp
t_stop = stop_itemp
# loop over days
# generate list of days "
year = int(dateperiod[0])
month = int(dateperiod[1])
day = int(dateperiod[2])
period = int(dateperiod[3])
days = listdays(year, month, day, period)
"""
initialise stt as a stream of templates
and stream_df as a stream of continuous waveforms
"""
stt = Stream()
stream_df = Stream()
stream_cft = Stream()
stall = Stream()
ccmad = Trace()
for day in days:
    # settings to cut exactly a 24-hour file without including
    # the previous/next day
iday = "%s" % (day[4:6])
imonth = "%s" % (day[2:4])
iyear = "20%s" % (day[0:2])
iiyear = int(iyear)
iimonth = int(imonth)
iiday = int(iday)
iihour = 23
iimin = 59
iisec = 0
for itemp in range(t_start, t_stop):
stt.clear()
# open file containing detections
fout = "%s.%s.cat" % (str(itemp), day[0:6])
f = open(fout, "w+")
print("itemp == ...", str(itemp))
# open statistics file for each detection
fout1 = "%s.%s.stats" % (str(itemp), day[0:6])
f1 = open(fout1, "w+")
# open file including magnitude information
fout2 = "%s.%s.stats.mag" % (str(itemp), day[0:6])
f2 = open(fout2, "w+")
# open file listing exceptions
fout3 = "%s.%s.except" % (str(itemp), day[0:6])
f3 = open(fout3, "w+")
ot = cat[itemp].origins[0].time
mt = cat[itemp].magnitudes[0].mag
lon = cat[itemp].origins[0].longitude
lat = cat[itemp].origins[0].latitude
dep = cat[itemp].origins[0].depth
# read ttimes, select the num_ttimes (parameters,
# last line) channels
# and read only these templates
travel_file = "%s%s.ttimes" % (travel_dir, str(itemp))
with open(travel_file, "r") as ttim:
d = dict(x.rstrip().split(None, 1) for x in ttim)
ttim.close()
s = d.items()
v = sorted(s, key=lambda x: (float(x[1])))[0:chan_max]
vv = [x[0] for x in v]
for vvc in vv:
n_net = vvc.split(".")[0]
n_sta = vvc.split(".")[1]
n_chn = vvc.split(".")[2]
filename = "%s%s.%s.%s..%s.mseed" % (
temp_dir,
str(itemp),
str(n_net),
str(n_sta),
str(n_chn),
)
stt += read(filename, dtype="float32")
if len(stt) >= nch_min:
tc = Trace()
bandpass = [lowpassf, highpassf]
chunks = []
h24 = 86400
chunk_start = UTCDateTime(iiyear, iimonth, iiday)
end_time = chunk_start + h24
while chunk_start < end_time:
chunk_end = chunk_start + h24 / nchunk
if chunk_end > end_time:
chunk_end = end_time
chunks.append((chunk_start, chunk_end))
chunk_start += h24 / nchunk
for t1, t2 in chunks:
stream_df.clear()
for tr in stt:
finpc1 = "%s%s.%s.%s" % (
cont_dir,
str(day),
str(tr.stats.station),
str(tr.stats.channel),
)
if os.path.exists(finpc1) and os.path.getsize(finpc1) > 0:
try:
st = read(finpc1, starttime=t1, endtime=t2, dtype="float32")
if len(st) != 0:
st.merge(method=1, fill_value=0)
tc = st[0]
stat = tc.stats.station
chan = tc.stats.channel
tc.detrend("constant")
# 24h continuous trace starts 00 h 00 m 00.0s
trim_fill(tc, t1, t2)
tc.filter(
"bandpass",
freqmin=bandpass[0],
freqmax=bandpass[1],
zerophase=True,
)
# store detrended and filtered continuous data
# in a Stream
stream_df += Stream(traces=[tc])
except IOError:
pass
if len(stream_df) >= nch_min:
ntl = len(stt)
amaxat = np.empty(ntl)
# for each template event
# md=np.empty(ntl)
md = np.zeros(ntl)
damaxat = {}
for il, tr in enumerate(stt):
amaxat[il] = max(abs(tr.data))
sta_t = tr.stats.station
cha_t = tr.stats.channel
tid_t = "%s.%s" % (sta_t, cha_t)
damaxat[tid_t] = float(amaxat[il])
# define travel time file for each template
# for synchronizing CFTs are obtained
# running calcTT01.py
travel_file = "%s%s.ttimes" % (travel_dir, str(itemp))
# store ttimes info in a dictionary
with open(travel_file, "r") as ttim:
d = dict(x.rstrip().split(None, 1) for x in ttim)
ttim.close()
# clear global_variable
stream_cft.clear()
stcc = Stream()
for nn in networks:
for ss in stations:
for ich in channels:
stream_cft += process_input(
itemp, nn, ss, ich, stream_df
)
stall.clear()
stcc.clear()
stnew = Stream()
tr = Trace()
tc_cft = Trace()
tsnew = UTCDateTime()
# seconds in 24 hours
nfile = len(stream_cft)
tstart = np.empty(nfile)
tend = np.empty(nfile)
tdif = np.empty(nfile)
for idx, tc_cft in enumerate(stream_cft):
# get station name from trace
sta = tc_cft.stats.station
chan = tc_cft.stats.channel
net = tc_cft.stats.network
delta = tc_cft.stats.delta
npts = (h24 / nchunk) / delta
s = "%s.%s.%s" % (net, sta, chan)
tdif[idx] = float(d[s])
for idx, tc_cft in enumerate(stream_cft):
# get stream starttime
tstart[idx] = tc_cft.stats.starttime + tdif[idx]
# waveforms should have the same
# number of npts
# and should be synchronized to the
# S-wave travel time
secs = (h24 / nchunk) + 60
tend[idx] = tstart[idx] + secs
check_npts = (tend[idx] - tstart[idx]) / tc_cft.stats.delta
ts = UTCDateTime(tstart[idx], precision=utc_prec)
te = UTCDateTime(tend[idx], precision=utc_prec)
stall += tc_cft.trim(
starttime=ts,
endtime=te,
nearest_sample=True,
pad=True,
fill_value=0,
)
# reft and min_time_value are calculated from the pool of stations
# that have a CFT in the stack
new_stt = Stream()
st_list = []
for tr_ls in stall:
idt = (
tr_ls.stats.network
+ "."
+ tr_ls.stats.station
+ "."
+ tr_ls.stats.channel
)
st_list.append(idt)
new_stt += stt.select(
station=tr_ls.stats.station, channel=tr_ls.stats.channel
)
reft = min([tr.stats.starttime for tr in new_stt])
new_d = {st: d[st] for st in st_list}
time_values = [float(v) for v in new_d.values()]
min_time_value = min(time_values)
tstart = min([tr.stats.starttime for tr in stall])
df = stall[0].stats.sampling_rate
npts = stall[0].stats.npts
# compute mean cross correlation from the stack of
# CFTs (see stack function)
ccmad, tdifmin = stack(
stall, df, tstart, npts, stdup, stddown, nch_min
)
if tdifmin is not None:
# compute mean absolute deviation of abs(ccmad)
tstda = mad(ccmad.data)
# define threshold as 9 times std and quality index
thresholdd = factor_thre * tstda
# Trace ccmad is stored in a Stream
stcc = Stream(traces=[ccmad])
# Run coincidence trigger on a single CC trace
# resulting from the CFTs stack
# essential threshold parameters
# Cross correlation thresholds
xcor_cut = thresholdd
thr_on = thresholdd
thr_off = thresholdd - 0.15 * thresholdd
thr_coincidence_sum = 1.0
similarity_thresholds = {"BH": thr_on}
trigger_type = None
triglist = coincidence_trigger(
trigger_type,
thr_on,
thr_off,
stcc,
thr_coincidence_sum,
trace_ids=None,
similarity_thresholds=similarity_thresholds,
delete_long_trigger=False,
trigger_off_extension=3.0,
details=True,
)
ntrig = len(triglist)
tt = np.empty(ntrig)
cs = np.empty(ntrig)
nch = np.empty(ntrig)
cft_ave = np.empty(ntrig)
crt = np.empty(ntrig)
cft_ave_trg = np.empty(ntrig)
crt_trg = np.empty(ntrig)
nch3 = np.empty(ntrig)
nch5 = np.empty(ntrig)
nch7 = np.empty(ntrig)
nch9 = np.empty(ntrig)
mm = np.empty(ntrig)
timex = UTCDateTime()
tdifmin = min(tdif[0:])
for itrig, trg in enumerate(triglist):
# tdifmin is computed for contributing channels
# within the stack function
if tdifmin == min_time_value:
tt[itrig] = trg["time"] + min_time_value
elif tdifmin != min_time_value:
diff_time = min_time_value - tdifmin
tt[itrig] = trg["time"] + diff_time + min_time_value
cs[itrig] = trg["coincidence_sum"]
cft_ave[itrig] = trg["cft_peak_wmean"]
crt[itrig] = trg["cft_peaks"][0] / tstda
# traceID = trg['trace_ids']
# check single channel CFT
[
nch[itrig],
cft_ave[itrig],
crt[itrig],
cft_ave_trg[itrig],
crt_trg[itrig],
nch3[itrig],
nch5[itrig],
nch7[itrig],
nch9[itrig],
] = csc(
stall,
stcc,
trg,
tstda,
sample_tol,
cc_threshold,
nch_min,
day,
itemp,
itrig,
f1,
)
if int(nch[itrig]) >= nch_min:
nn = len(stream_df)
# nn=len(stt)
amaxac = np.zeros(nn)
md = np.zeros(nn)
                            # for each trigger, the detrended and
                            # filtered continuous data channels are
                            # trimmed, and the amplitude used to
                            # estimate the magnitude is measured.
damaxac = {}
mchan = {}
timestart = UTCDateTime()
timex = UTCDateTime(tt[itrig])
for il, tc in enumerate(stream_df):
ss = tc.stats.station
ich = tc.stats.channel
netwk = tc.stats.network
if stt.select(
station=ss, channel=ich
).__nonzero__():
ttt = stt.select(station=ss, channel=ich)[0]
s = "%s.%s.%s" % (netwk, ss, ich)
uts = UTCDateTime(ttt.stats.starttime).timestamp
utr = UTCDateTime(reft).timestamp
if tdifmin <= 0:
timestart = (
timex + abs(tdifmin) + (uts - utr)
)
elif tdifmin > 0:
timestart = (
timex - abs(tdifmin) + (uts - utr)
)
timend = timestart + temp_length
ta = tc.copy()
ta.trim(
starttime=timestart,
endtime=timend,
pad=True,
fill_value=0,
)
amaxac[il] = max(abs(ta.data))
tid_c = "%s.%s" % (ss, ich)
damaxac[tid_c] = float(amaxac[il])
dct = damaxac[tid_c]
dtt = damaxat[tid_c]
if dct != 0 and dtt != 0:
md[il] = mag_detect(
mt, damaxat[tid_c], damaxac[tid_c]
)
mchan[tid_c] = md[il]
str00 = "%s %s\n" % (tid_c, mchan[tid_c])
f2.write(str00)
mdr = reject_moutliers(md, 1)
mm[itrig] = round(np.mean(mdr), 2)
cft_ave[itrig] = round(cft_ave[itrig], 3)
crt[itrig] = round(crt[itrig], 3)
cft_ave_trg[itrig] = round(cft_ave_trg[itrig], 3)
crt_trg[itrig] = round(crt_trg[itrig], 3)
str33 = (
"%s %s %s %s %s %s %s %s %s "
"%s %s %s %s %s %s %s\n"
% (
day[0:6],
str(itemp),
str(itrig),
str(UTCDateTime(tt[itrig])),
str(mm[itrig]),
str(mt),
str(nch[itrig]),
str(tstda),
str(cft_ave[itrig]),
str(crt[itrig]),
str(cft_ave_trg[itrig]),
str(crt_trg[itrig]),
str(nch3[itrig]),
str(nch5[itrig]),
str(nch7[itrig]),
str(nch9[itrig]),
)
)
f1.write(str33)
f2.write(str33)
str1 = "%s %s %s %s %s %s %s %s\n" % (
str(itemp),
str(UTCDateTime(tt[itrig])),
str(mm[itrig]),
str(cft_ave[itrig]),
str(crt[itrig]),
str(cft_ave_trg[itrig]),
str(crt_trg[itrig]),
str(int(nch[itrig])),
)
f.write(str1)
else:
str_except2 = "%s %s %s %s %s\n" % (
day[0:6],
str(itemp),
str(t1),
str(t2),
" num. correlograms lower than nch_min",
)
                        f3.write(str_except2)
else:
str_except1 = "%s %s %s %s %s\n" % (
day[0:6],
str(itemp),
str(t1),
str(t2),
" num. 24h channels lower than nch_min",
)
                    f3.write(str_except1)
else:
str_except0 = "%s %s %s\n" % (
day[0:6],
str(itemp),
" num. templates lower than nch_min",
)
                f3.write(str_except0)
f1.close()
f2.close()
f3.close()
f.close()
print(" elapsed time ", perf_counter() - start_time, " seconds")
| avuan/PyMPA37 | main.pympa.dir/pympa_obspy1.2.1.py | Python | gpl-3.0 | 32,655 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
class ConnectionCall(object):
def __init__(self, system):
self._system = system
def __getattr__(self, action):
raise NotImplementedError()
class ConnectionCallAsync(ConnectionCall):
def __getattr__(self, action):
service_name = self._system.get_service(action)['name']
connection = self._system.get_connection(service_name)
return connection.get_action(action, async=True)
class ConnectionCallSync(ConnectionCall):
def __getattr__(self, action):
service_name = self._system.get_service(action)['name']
connection = self._system.get_connection(service_name)
return connection.get_action(action, async=False)
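# Hedged usage sketch (illustration only; the `system` object, its service
# registry and the 'scan_deps' action are assumed, not defined in this module):
#
#     call = ConnectionCallSync(system)
#     result = call.scan_deps('github.com/user/project')    # blocks for result
#
#     call = ConnectionCallAsync(system)
#     pending = call.scan_deps('github.com/user/project')   # async handle
#
# Attribute access resolves the action to the service that provides it, then
# dispatches the call through that service's connection.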
if __name__ == "__main__":
sys.exit(1)
| gofed/gofed-ng | common/system/connectionCall.py | Python | gpl-3.0 | 1,736 |
###
# Copyright (c) 2013, KG-Bot
# All rights reserved.
#
#
###
import re
import json
import urllib2
import urllib
import supybot.schedule as schedule
import datetime
import time
from operator import itemgetter
import locale
import string
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
from supybot.i18n import internationalizeDocstring
_ = PluginInternationalization('ESim')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
internationalizeDocstring = lambda x:x
def flatten_subdicts(dicts, flat=None):
"""Change dict of dicts into a dict of strings/integers. Useful for
using in string formatting."""
if flat is None:
flat = {}
if isinstance(dicts, list):
return flatten_subdicts(dict(enumerate(dicts)))
elif isinstance(dicts, dict):
for key, value in dicts.items():
if isinstance(value, dict):
value = dict(flatten_subdicts(value))
for subkey, subvalue in value.items():
flat['%s__%s' % (key, subkey)] = subvalue
else:
flat[key] = value
return flat
else:
return dicts
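# Illustration (not part of the original plugin): flatten_subdicts collapses
# nested structures into a single level keyed with '__' separators, so the
# result can feed '%(...)s'-style string formatting directly, e.g.:
#     flatten_subdicts({'rank': {'name': 'General', 'damage': 10}})
#     -> {'rank__name': 'General', 'rank__damage': 10}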
class Template(string.Template):
# Original string.Template does not accept variables starting with a
# number.
idpattern = r'[_a-z0-9]+'
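# Illustration of the relaxed idpattern above: placeholders may start with a
# digit, which the stock string.Template does not recognise, e.g.:
#     Template('$1st place: $name').safe_substitute({'1st': 'gold', 'name': 'KG'})
#     -> 'gold place: KG'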
class ESim(callbacks.Plugin):
threaded = True
def citinfo(self, irc, msg, args, server, name):
"""<server> <name>
        Provides info about a citizen."""
if name is None:
if server == 'secura':
with open('Esim/Profiles/Secura/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
base = 'http://cscpro.org/%s/citizen/%s.json'
# data = json.load(utils.web.getUrlFd(base % (server, '%20'.join(name))))
data = json.load(utils.web.getUrlFd(base % (server, id)))
name = data['name']
strength = data['strength']
rank = data['rank']['name']
rankdmg = data['rank']['damage']
q1dmg = data['hit']['q1']
q2dmg = data['hit']['q2']
q3dmg = data['hit']['q3']
q4dmg = data['hit']['q4']
q5dmg = data['hit']['q5']
level = data['level']
age = data['age']
ecoSkill = data['economy_skill']
news = data['newspaper']['name']
mu = data['military_unit']['name']
muid = data['military_unit']['id']
color = 3 if data['is_online'] else 4
party = data['party']['name']
partyid = data['party']['id']
id = data['id']
avatar = data['avatar_link']
irc.reply('\x034Name:\x03 \x030%i%s\x03, \x034Strength:\x03 %s, \x034Rank-name:\x03 %s, \x034Damage:\x03 %s, \x034Damage with:\x03 \x02\x0310q1\x03\x02-%s, \x02\x0310q2\x03\x02-%s, \x02\x0310q3\x03\x02-%s, \x02\x0310q4\x03\x02-%s, \x02\x0310q5\x03\x02-%s,'
' \x034Level:\x03 %s, \x034Age:\x03 %s, \x034Eco skill:\x03 %s, \x034News:\x03 %s, \x034MU:\x03 %s, \x034MU id:\x03 %s, \x034Party:\x03 %s, \x034Party id:\x03 %s, \x034ID:\x03 %s, \x034Avatar:\x03 %s' % (color, name, strength, rank, rankdmg,
q1dmg, q2dmg, q3dmg, q4dmg, q5dmg, level, age, ecoSkill, news, mu, muid, party, partyid, id, avatar))
elif server == 'primera':
with open('Esim/Profiles/Primera/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
base = 'http://cscpro.org/%s/citizen/%s.json'
# data = json.load(utils.web.getUrlFd(base % (server, '%20'.join(name))))
data = json.load(utils.web.getUrlFd(base % (server, id)))
name = data['name']
strength = data['strength']
rank = data['rank']['name']
rankdmg = data['rank']['damage']
q1dmg = data['hit']['q1']
q2dmg = data['hit']['q2']
q3dmg = data['hit']['q3']
q4dmg = data['hit']['q4']
q5dmg = data['hit']['q5']
level = data['level']
age = data['age']
ecoSkill = data['economy_skill']
news = data['newspaper']['name']
mu = data['military_unit']['name']
muid = data['military_unit']['id']
color = 3 if data['is_online'] else 4
party = data['party']['name']
partyid = data['party']['id']
id = data['id']
avatar = data['avatar_link']
irc.reply('\x034Name:\x03 \x030%i%s\x03, \x034Strength:\x03 %s, \x034Rank-name:\x03 %s, \x034Damage:\x03 %s, \x034Damage with:\x03 \x02\x0310q1\x03\x02-%s, \x02\x0310q2\x03\x02-%s, \x02\x0310q3\x03\x02-%s, \x02\x0310q4\x03\x02-%s, \x02\x0310q5\x03\x02-%s,'
' \x034Level:\x03 %s, \x034Age:\x03 %s, \x034Eco skill:\x03 %s, \x034News:\x03 %s, \x034MU:\x03 %s, \x034MU id:\x03 %s, \x034Party:\x03 %s, \x034Party id:\x03 %s, \x034ID:\x03 %s, \x034Avatar:\x03 %s' % (color, name, strength, rank, rankdmg,
q1dmg, q2dmg, q3dmg, q4dmg, q5dmg, level, age, ecoSkill, news, mu, muid, party, partyid, id, avatar))
elif server == 'suna':
            with open('Esim/Profiles/Suna/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
base = 'http://cscpro.org/%s/citizen/%s.json'
# data = json.load(utils.web.getUrlFd(base % (server, '%20'.join(name))))
data = json.load(utils.web.getUrlFd(base % (server, id)))
name = data['name']
strength = data['strength']
rank = data['rank']['name']
rankdmg = data['rank']['damage']
q1dmg = data['hit']['q1']
q2dmg = data['hit']['q2']
q3dmg = data['hit']['q3']
q4dmg = data['hit']['q4']
q5dmg = data['hit']['q5']
level = data['level']
age = data['age']
ecoSkill = data['economy_skill']
news = data['newspaper']['name']
mu = data['military_unit']['name']
muid = data['military_unit']['id']
color = 3 if data['is_online'] else 4
party = data['party']['name']
partyid = data['party']['id']
id = data['id']
avatar = data['avatar_link']
irc.reply('\x034Name:\x03 \x030%i%s\x03, \x034Strength:\x03 %s, \x034Rank-name:\x03 %s, \x034Damage:\x03 %s, \x034Damage with:\x03 \x02\x0310q1\x03\x02-%s, \x02\x0310q2\x03\x02-%s, \x02\x0310q3\x03\x02-%s, \x02\x0310q4\x03\x02-%s, \x02\x0310q5\x03\x02-%s,'
' \x034Level:\x03 %s, \x034Age:\x03 %s, \x034Eco skill:\x03 %s, \x034News:\x03 %s, \x034MU:\x03 %s, \x034MU id:\x03 %s, \x034Party:\x03 %s, \x034Party id:\x03 %s, \x034ID:\x03 %s, \x034Avatar:\x03 %s' % (color, name, strength, rank, rankdmg,
q1dmg, q2dmg, q3dmg, q4dmg, q5dmg, level, age, ecoSkill, news, mu, muid, party, partyid, id, avatar))
else:
irc.reply("You didn't provide any valid e-Sim server.")
else:
base = 'http://cscpro.org/%s/citizen/%s.json'
data = json.load(utils.web.getUrlFd(base % (server, '%20'.join(name))))
name = data['name']
strength = data['strength']
rank = data['rank']['name']
rankdmg = data['rank']['damage']
q1dmg = data['hit']['q1']
q2dmg = data['hit']['q2']
q3dmg = data['hit']['q3']
q4dmg = data['hit']['q4']
q5dmg = data['hit']['q5']
level = data['level']
age = data['age']
ecoSkill = data['economy_skill']
news = data['newspaper']['name']
mu = data['military_unit']['name']
muid = data['military_unit']['id']
color = 3 if data['is_online'] else 4
party = data['party']['name']
partyid = data['party']['id']
id = data['id']
avatar = data['avatar_link']
irc.reply('\x034Name:\x03 \x030%i%s\x03, \x034Strength:\x03 %s, \x034Rank-name:\x03 %s, \x034Damage:\x03 %s, \x034Damage with:\x03 \x02\x0310q1\x03\x02-%s, \x02\x0310q2\x03\x02-%s, \x02\x0310q3\x03\x02-%s, \x02\x0310q4\x03\x02-%s, \x02\x0310q5\x03\x02-%s,'
' \x034Level:\x03 %s, \x034Age:\x03 %s, \x034Eco skill:\x03 %s, \x034News:\x03 %s, \x034MU:\x03 %s, \x034MU id:\x03 %s, \x034Party:\x03 %s, \x034Party id:\x03 %s, \x034ID:\x03 %s, \x034Avatar:\x03 %s' % (color, name, strength, rank, rankdmg,
q1dmg, q2dmg, q3dmg, q4dmg, q5dmg, level, age, ecoSkill, news, mu, muid, party, partyid, id, avatar))
citinfo = wrap(citinfo, ['something', optional(many('something'))])
def battinfo(self, irc, msg, args, server, battle, round):
"""<server> <battle-id> [<round>]
        Gives info about <battle-id>; you can specify [<round>] to see info about a particular round."""
base = 'http://cscpro.org/%s/battle/%s-%s.json'
data = json.load(utils.web.getUrlFd(base % (server, battle, round)))
status = data['status']
region = data['region']['name']
attacker = data['attacker']['name']
atthero = data['attacker']['hero']
admg = data['attacker']['damage']
aproc = data['attacker']['bar']
defender = data['defender']['name']
defhero = data['defender']['hero']
ddmg = data['defender']['damage']
dproc = data['defender']['bar']
durationh = data['time']['hour']
durationm = data['time']['minute']
durations = data['time']['second']
round = data['round']
irc.reply('\x034Status:\x03 %s, \x034Region:\x03 %s, \x0310Attacker name:\x03 %s, \x0310Attacker hero:\x03 http://%s.e-sim.org/profile.html?id=%s, \x0310Damage done by attacker:\x03 %s, \x0310Attacker damage in procents:\x03 %s'
' \x037Defender name:\x03 %s, \x037Defender hero:\x03 http://%s.e-sim.org/profile.html?id=%s, \x037Damage done by defender:\x03 %s, \x037Defender damage in procents:\x03 %s'
' \x034Battle duration:\x03 %sh, %sm, %ss, \x034Battle round:\x03 %s' % (status, region, attacker, server, atthero, admg, aproc, defender, server, defhero, ddmg, dproc, durationh, durationm, durations, round))
battinfo = wrap(battinfo, ['something', 'int', optional('int')])
def doinfo(self, irc, msg, args, server, id):
"""<server> <id>
        Provides info about a MU and its daily orders"""
base = 'http://cscpro.org/%s/units/%s.json'
data = json.load(utils.web.getUrlFd(base % (server, id)))
muname = data['army']['name']
murank = data['army']['rank']
totdamage = data['army']['damage']['total']
toddamage = data['army']['damage']['today']
memcurr = data['army']['member']['current']
memmax = data['army']['member']['max']
leadid = data['army']['leader']['id']
leadname = data['army']['leader']['name']
orderbatid = data['order']['battleid']
orderreg = data['order']['region']
orderside = data['order']['side']
orderstat = data['order']['status']
irc.reply('\x034MU name:\x03 %s, \x034MU rank:\x03 %s, \x034Total damage:\x03 %s, \x034Today damage:\x03 %s, \x034Current members:\x03 %s, \x034Max members:\x03 %s, \x034Leader id:\x03 %s, \x034Leader name:\x03 %s, \x034DO id:\x03 %s, \x034DO region:\x03 %s, \x034DO side:\x03 %s, \x034DO status:\x03 %s' % (muname, murank, totdamage, toddamage, memcurr, memmax,
leadid, leadname, orderbatid, orderreg, orderside, orderstat))
doinfo = wrap(doinfo, ['something', 'int'])
def partyinfo(self, irc, msg, args, server, id, page):
"""<server> <id> <page>
        Gives basic info about a party"""
base = 'http://cscpro.org/%s/party/%s-%s.json'
page = page or 1
data = json.load(utils.web.getUrlFd(base % (server, id, page)))
partyid = data['party']['id']
partyname = data['party']['name']
partyava = data['party']['avatar']
partymem = data['party']['member']
leaderid = data['leader']['id']
leadername = data['leader']['name']
irc.reply('\x034Party name:\x03 %s, \x034Party ID:\x03 %s, \x034Number of members:\x03 %s, \x034Party avatar:\x03 %s , \x034Leader ID:\x03 %s, \x034Leader name:\x03 %s' % (partyname, partyid, partymem, partyava, leaderid, leadername))
partyinfo = wrap(partyinfo, ['something', 'int', optional('int')])
def medals(self, irc, msg, args, server, name):
"""<primera | secura> <name>
Gives info about medals"""
if name is None:
if server == 'secura':
try:
with open('Esim/Profiles/Secura/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
# base = ('http://cscpro.org/%s/citizen/%s.json' % (server, '%20'.join(name)))
base = ('http://cscpro.org/%s/citizen/%s.json' % (server, id))
bata = json.load(utils.web.getUrlFd(base))
nam = bata['name']
medals = bata['medal']
medadict = dict(i.items()[0] for i in medals)
medasum = sum(medadict.values())
congressman = medadict.values()[7]
president = medadict.values()[6]
ss = medadict.values()[4]
sb = medadict.values()[5]
mm = medadict.values()[0]
hw = medadict.values()[1]
bh = medadict.values()[3]
rh = medadict.values()[8]
tester = medadict.values()[2]
irc.reply('%s has the following medal(s): Congressman: \x02%s\x02, President: \x02%s\x02, Super Soldier: \x02%s\x02, Society Builder: \x02%s\x02, Media Mogul: \x02%s\x02, Hard Worker: \x02%s\x02, Battle Hero: \x02%s\x02, Resistance Hero: \x02%s\x02, Tester: \x02%s\x02. Total number of medals is: \x02%s\x02.' % (nam, congressman, president, ss, sb, mm, hw, bh, rh, tester, medasum))
except IOError:
irc.reply("You didn't linked any profile from Secura server with your IRC nick.")
elif server == 'primera':
try:
with open('Esim/Profiles/Primera/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
# base = ('http://cscpro.org/%s/citizen/%s.json' % (server, '%20'.join(name)))
base = ('http://cscpro.org/%s/citizen/%s.json' % (server, id))
bata = json.load(utils.web.getUrlFd(base))
nam = bata['name']
medals = bata['medal']
medadict = dict(i.items()[0] for i in medals)
medasum = sum(medadict.values())
congressman = medadict.values()[7]
president = medadict.values()[6]
ss = medadict.values()[4]
sb = medadict.values()[5]
mm = medadict.values()[0]
hw = medadict.values()[1]
bh = medadict.values()[3]
rh = medadict.values()[8]
tester = medadict.values()[2]
irc.reply('%s has the following medal(s): Congressman: \x02%s\x02, President: \x02%s\x02, Super Soldier: \x02%s\x02, Society Builder: \x02%s\x02, Media Mogul: \x02%s\x02, Hard Worker: \x02%s\x02, Battle Hero: \x02%s\x02, Resistance Hero: \x02%s\x02, Tester: \x02%s\x02. Total number of medals is: \x02%s\x02.' % (nam, congressman, president, ss, sb, mm, hw, bh, rh, tester, medasum))
except IOError:
irc.reply("You didn't linked any profile from Secura server with your IRC nick.")
elif server == 'suna':
try:
with open('Esim/Profiles/Suna/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
# base = ('http://cscpro.org/%s/citizen/%s.json' % (server, '%20'.join(name)))
base = ('http://cscpro.org/%s/citizen/%s.json' % (server, id))
bata = json.load(utils.web.getUrlFd(base))
nam = bata['name']
medals = bata['medal']
medadict = dict(i.items()[0] for i in medals)
medasum = sum(medadict.values())
congressman = medadict.values()[7]
president = medadict.values()[6]
ss = medadict.values()[4]
sb = medadict.values()[5]
mm = medadict.values()[0]
hw = medadict.values()[1]
bh = medadict.values()[3]
rh = medadict.values()[8]
tester = medadict.values()[2]
irc.reply('%s has the following medal(s): Congressman: \x02%s\x02, President: \x02%s\x02, Super Soldier: \x02%s\x02, Society Builder: \x02%s\x02, Media Mogul: \x02%s\x02, Hard Worker: \x02%s\x02, Battle Hero: \x02%s\x02, Resistance Hero: \x02%s\x02, Tester: \x02%s\x02. Total number of medals is: \x02%s\x02.' % (nam, congressman, president, ss, sb, mm, hw, bh, rh, tester, medasum))
except IOError:
irc.reply("You didn't linked any profile from Secura server with your IRC nick.")
else:
irc.reply("You didn't provide any valid e-Sim server.")
else:
base = ('http://cscpro.org/%s/citizen/%s.json' % (server, '%20'.join(name)))
bata = json.load(utils.web.getUrlFd(base))
nam = bata['name']
medals = bata['medal']
medadict = dict(i.items()[0] for i in medals)
medasum = sum(medadict.values())
congressman = medadict.values()[7]
president = medadict.values()[6]
ss = medadict.values()[4]
sb = medadict.values()[5]
mm = medadict.values()[0]
hw = medadict.values()[1]
bh = medadict.values()[3]
rh = medadict.values()[8]
tester = medadict.values()[2]
irc.reply('%s has the following medal(s): Congressman: \x02%s\x02, President: \x02%s\x02, Super Soldier: \x02%s\x02, Society Builder: \x02%s\x02, Media Mogul: \x02%s\x02, Hard Worker: \x02%s\x02, Battle Hero: \x02%s\x02, Resistance Hero: \x02%s\x02, Tester: \x02%s\x02. Total number of medals is: \x02%s\x02.' % (nam, congressman, president, ss, sb, mm, hw, bh, rh, tester, medasum))
medals = wrap(medals, ['something', optional(many('something'))])
def battles(self, irc, msg, args, server):
"""<server>
Returns all active battles on <server>"""
base = ('http://cscpro.org/%s/battles/1.json' % (server))
data = json.load(utils.web.getUrlFd(base))
battle_ids = []
        # the payload shape matches battles_list below: data['battles'] is a
        # list of battle dicts
        for battle in data['battles']:
            battle_ids.append(str(battle['id']))
        irc.reply('Active battles are: %s' % ', '.join(battle_ids))
battles = wrap(battles, ['something'])
def partymem(self, irc, msg, args, server, id, page):
"""<server> <party id> <page>
Gives list of party members"""
base = ('http://cscpro.org/%s/party/%s-%s.json' % (server, id, page))
data = json.load(utils.web.getUrlFd(base))
members = [', '.join(['%s: %s' % x for x in d.items()]) for d in data['members']]
        irc.reply('This party has the following members: %s' % ('\x0310<=====>\x03'.join(members)))
partymem = wrap(partymem, ['something', 'something', 'something'])
def monex(self, irc, msg, args, server, buy, sell):
"""<server> <buy> <sell>
Gives info about offers on monetary market, <buy> is currency that you want to buy, and <sell> is currency you want to sell, currency codes can be viewed here 'http://tinyurl.com/qgzmtmj'."""
try:
base = ('http://cscpro.org/%s/exchange/%s-%s.json' % (server, buy, sell))
data = json.load(utils.web.getUrlFd(base))
seller = data['offer'][0]['seller']['name'] # Seller name
seller_id = data['offer'][0]['seller']['id'] # Seller ID
amount = data['offer'][0]['amount']
rate = data['offer'][0]['rate']
            irc.reply('Seller name/ID: \x02%s/%s\x02, Rate: \x02%s\x02, Amount: \x02%s\x02' % (seller, seller_id, rate, amount))
except:
irc.reply('Wrong currency or no server specified.')
monex = wrap(monex, ['something', 'something', 'something'])
def elink(self, irc, msg, args, server, name):
"""<name>
Return link for <name>."""
if name is None:
if server == 'secura':
try:
with open('Esim/Profiles/Secura/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
# base = ('http://cscpro.org/%s/citizen/%s.json' % (server, '%20'.join(name)))
base = ('http://cscpro.org/%s/citizen/%s.json' % (server, id))
data = json.load(utils.web.getUrlFd(base))
# id = data['id']
na = data['name']
irc.reply("%s's link is http://secura.e-sim.org/profile.html?id=%s" % (na, id))
except IOError:
irc.reply("You didn't linked any profile from Secura server with your IRC nick.")
elif server == 'primera':
try:
with open('Esim/Profiles/Primera/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
# base = ('http://cscpro.org/%s/citizen/%s.json' % (server, '%20'.join(name)))
base = ('http://cscpro.org/%s/citizen/%s.json' % (server, id))
data = json.load(utils.web.getUrlFd(base))
# id = data['id']
na = data['name']
irc.reply("%s's link is http://secura.e-sim.org/profile.html?id=%s" % (na, id))
except IOError:
irc.reply("You didn't linked any profile from Primera server with your IRC nick.")
elif server == 'suna':
try:
with open('Esim/Profiles/Suna/%s.json' % msg.nick, 'r') as profile:
profile_dict = json.loads(profile.read())
id = profile_dict['id']
# base = ('http://cscpro.org/%s/citizen/%s.json' % (server, '%20'.join(name)))
base = ('http://cscpro.org/%s/citizen/%s.json' % (server, id))
data = json.load(utils.web.getUrlFd(base))
# id = data['id']
na = data['name']
irc.reply("%s's link is http://secura.e-sim.org/profile.html?id=%s" % (na, id))
except IOError:
irc.reply("You didn't linked any profile from Suna server with your IRC nick.")
else:
irc.reply("You didn't provide any valid e-Sim server.")
else:
base = ('http://cscpro.org/%s/citizen/%s.json' % (server, '%20'.join(name)))
data = json.load(utils.web.getUrlFd(base))
id = data['id']
na = data['name']
irc.reply("%s's link is http://secura.e-sim.org/profile.html?id=%s" % (na, id))
elink = wrap(elink, ['something', optional(many('something'))])
def simid(self, irc, msg, args, server, id):
"""<profile id>
Links your profile with IRC nick."""
nick = msg.nick
profile_dict = {}
profile_dict['id'] = id
if id:
if server == "secura":
with open('Esim/Profiles/Secura/%s.json' % nick, 'w') as profile:
profile.write(json.dumps(profile_dict))
irc.reply("You have successfully linked your Secura profile with your IRC nick.")
elif server == "primera":
with open('Esim/Profiles/Primera/%s.json' % nick, 'w') as profile:
profile.write(json.dumps(profile_dict))
irc.reply("You have successfully linked your Primera profile with your IRC nick.")
elif server == "suna":
            with open('Esim/Profiles/Suna/%s.json' % nick, 'w') as profile:
                profile.write(json.dumps(profile_dict))
            irc.reply("You have successfully linked your Suna profile with your IRC nick.")
else:
irc.reply("You didn't provide any valid server for linking.")
else:
irc.reply("You didn't provide any valid ID for linking.")
simid = wrap(simid, ['something', 'int'])
def return_battle_info(self, server, battle):
base = 'http://cscpro.org/%s/battle/%s.json'
data = json.load(utils.web.getUrlFd(base % (server, battle)))
return data
def battles_list(self, server):
base = ('http://cscpro.org/%s/battles/1.json' % (server))
data = json.load(utils.web.getUrlFd(base))
battle_ids = []
        battles = data['battles']
        for battle in battles[:-1]:
            battle_ids.append(battle['id'])
return battle_ids
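    # Illustration (response shape assumed from the cscpro API): data['battles']
    # is a list of dicts like {'id': 1234, ...}, so battles_list('suna') returns
    # e.g. [1234, 1250, ...]; track() checks candidate battles against this list.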
def transform_to_seconds(self, minutes):
seconds = minutes * 60
return seconds
    def stop_previous_task(self, task):
        try:
            schedule.removeEvent(task)
            return 'Stopped'
        except KeyError:
            # no event scheduled under this name; nothing to stop
            pass
def track(self, irc, msg, args, server, battle, minutes):
"""<server> <battle> <minutes>
        Starts watching <battle> and reports info every <minutes> minutes."""
channel = msg.args[0]
schedule_name = '%s-esim-battle-track' % channel
self.stop_previous_task(schedule_name)
if channel.startswith('#'):
opers = irc.state.channels[channel].ops
nick = msg.nick
if nick not in opers:
irc.reply("Only channel ops can use this command.")
else:
if minutes < 5 or minutes > 10:
irc.reply("You can't use minutes lower than 5 or higher than 10.")
else:
seconds = self.transform_to_seconds(minutes)
if server == "suna":
battles = self.battles_list(server)
if battle in battles:
get_battle_info = self.return_battle_info(server, battle)
if get_battle_info['defender']['name']:
def start_collecting():
data = self.return_battle_info(server, battle)
status = data['status']
if status == 'active':
region = data['region']['name']
attacker = data['attacker']['name']
atthero = data['attacker']['hero']
admg = data['attacker']['damage']
aproc = data['attacker']['bar']
defender = data['defender']['name']
defhero = data['defender']['hero']
ddmg = data['defender']['damage']
dproc = data['defender']['bar']
durationh = data['time']['hour']
durationm = data['time']['minute']
durations = data['time']['second']
round = data['round']
irc.reply('\x034Status:\x03 %s, \x034Region:\x03 %s, \x0310Attacker name:\x03 %s, \x0310Attacker hero:\x03 http://%s.e-sim.org/profile.html?id=%s, \x0310Damage done by attacker:\x03 %s, \x0310Attacker damage in procents:\x03 %s'
' \x037Defender name:\x03 %s, \x037Defender hero:\x03 http://%s.e-sim.org/profile.html?id=%s, \x037Damage done by defender:\x03 %s, \x037Defender damage in procents:\x03 %s'
' \x034Battle duration:\x03 %sh, %sm, %ss, \x034Battle round:\x03 %s' % (status, region, attacker, server, atthero, locale.format('%d', admg, True), aproc, defender, server, defhero, locale.format('%d', ddmg, True), dproc, durationh, durationm, durations, round))
                                else:
                                    self.stop_previous_task(schedule_name)
                                    irc.reply("This battle is over, tracking is stopped.")
schedule.addPeriodicEvent(start_collecting, seconds, schedule_name)
else:
irc.reply("There was some internal API problem, this isn't available now, try again later.")
else:
self.stop_previous_task(schedule_name)
irc.reply("This battle is already finished, I won't track it.")
elif server == "primera":
battles = self.battles_list(server)
if battle in battles:
get_battle_info = self.return_battle_info(server, battle)
if get_battle_info['defender']['name']:
def start_collecting():
data = self.return_battle_info(server, battle)
status = data['status']
if status == 'active':
region = data['region']['name']
attacker = data['attacker']['name']
atthero = data['attacker']['hero']
admg = data['attacker']['damage']
aproc = data['attacker']['bar']
defender = data['defender']['name']
defhero = data['defender']['hero']
ddmg = data['defender']['damage']
dproc = data['defender']['bar']
durationh = data['time']['hour']
durationm = data['time']['minute']
durations = data['time']['second']
round = data['round']
irc.reply('\x034Status:\x03 %s, \x034Region:\x03 %s, \x0310Attacker name:\x03 %s, \x0310Attacker hero:\x03 http://%s.e-sim.org/profile.html?id=%s, \x0310Damage done by attacker:\x03 %s, \x0310Attacker damage in procents:\x03 %s'
' \x037Defender name:\x03 %s, \x037Defender hero:\x03 http://%s.e-sim.org/profile.html?id=%s, \x037Damage done by defender:\x03 %s, \x037Defender damage in procents:\x03 %s'
' \x034Battle duration:\x03 %sh, %sm, %ss, \x034Battle round:\x03 %s' % (status, region, attacker, server, atthero, locale.format('%d', admg, True), aproc, defender, server, defhero, locale.format('%d', ddmg, True), dproc, durationh, durationm, durations, round))
else:
self.stop_previous_task(schedule_name)
irc.reply("This battle is over, tracking is stopped.")
schedule.addPeriodicEvent(start_collecting, seconds, schedule_name)
else:
irc.reply("There was some internal API problem, this isn't available now, try again later.")
else:
irc.reply("This battle is already finished, I won't track it.")
elif server == "ssecura":
battles = self.battles_list(server)
if battle in battles:
get_battle_info = self.return_battle_info(server, battle)
if get_battle_info['defender']['name']:
def start_collecting():
data = self.return_battle_info(server, battle)
status = data['status']
if status == 'active':
region = data['region']['name']
attacker = data['attacker']['name']
atthero = data['attacker']['hero']
admg = data['attacker']['damage']
aproc = data['attacker']['bar']
defender = data['defender']['name']
defhero = data['defender']['hero']
ddmg = data['defender']['damage']
dproc = data['defender']['bar']
durationh = data['time']['hour']
durationm = data['time']['minute']
durations = data['time']['second']
round = data['round']
irc.reply('\x034Status:\x03 %s, \x034Region:\x03 %s, \x0310Attacker name:\x03 %s, \x0310Attacker hero:\x03 http://%s.e-sim.org/profile.html?id=%s, \x0310Damage done by attacker:\x03 %s, \x0310Attacker damage in procents:\x03 %s'
' \x037Defender name:\x03 %s, \x037Defender hero:\x03 http://%s.e-sim.org/profile.html?id=%s, \x037Damage done by defender:\x03 %s, \x037Defender damage in procents:\x03 %s'
' \x034Battle duration:\x03 %sh, %sm, %ss, \x034Battle round:\x03 %s' % (status, region, attacker, server, atthero, locale.format('%d', admg, True), aproc, defender, server, defhero, locale.format('%d', ddmg, True), dproc, durationh, durationm, durations, round))
else:
self.stop_previous_task(schedule_name)
irc.reply("This battle is over, tracking is stopped.")
schedule.addPeriodicEvent(start_collecting, seconds, schedule_name)
else:
irc.reply("There was some internal API problem, this isn't available now, try again later.")
else:
irc.reply("This battle is already finished, I won't track it.")
else:
irc.reply("You've given me invalid server and I can't find anything about it.")
else:
irc.reply("This channel is available only on channel.")
track = wrap(track, ['something', 'int', 'int'])
def stoptrack(self, irc, msg, args):
"""Takes no arguments
Stops battle tracker."""
channel = msg.args[0]
if channel.startswith('#'):
opers = irc.state.channels[channel].ops
nick = msg.nick
if nick in opers:
schedule_name = '%s-esim-battle-track' % channel
stop_it = self.stop_previous_task(schedule_name)
if stop_it == 'Stopped':
irc.reply("Tracking stopped.")
else:
irc.reply("There was no tracker for this channel, so I couldn't stop it.")
else:
irc.reply("You're not OP and you can't stop battle track.")
else:
irc.reply("This command is available only on channel.")
stoptrack = wrap(stoptrack)
Class = ESim
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: | kg-bot/SupyBot | plugins/ESim/plugin.py | Python | gpl-3.0 | 40,915 |
from os.path import getctime, isfile
from datetime import datetime, timedelta
from flask import Flask, render_template, request
from raptiformica_map.graph_data import insert_graph_data
from raptiformica_map.update_graph import generate_graph, GRAPH_FILE
app = Flask(__name__)
app.config.from_pyfile('settings.cfg')
def get_ip():
return request.headers.get('x-real-ip')
@app.context_processor
def add_ip():
return {
'ip': get_ip()
}
def update_graph_if_graph_needs_to_be_updated():
"""
Update the graph if it needs to be (re)generated
:return None:
"""
time_difference = datetime.now() - timedelta(seconds=60)
if not isfile(GRAPH_FILE) or datetime.fromtimestamp(
getctime(GRAPH_FILE)
) < time_difference:
generate_graph()
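# Illustration of the freshness window above (values assumed): if GRAPH_FILE
# was created 90 s ago, fromtimestamp(getctime(GRAPH_FILE)) is older than
# now - 60 s, so generate_graph() runs; a file younger than 60 s is served as-is.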
@app.route('/')
@app.route('/network')
def page_network():
update_graph_if_graph_needs_to_be_updated()
return render_template('network.html', page='network')
@app.route('/send_graph', methods=['POST'])
def page_send_graph():
print("Receiving graph from {}".format(request.remote_addr))
version = int(request.form.get('version', '1'))
ret = insert_graph_data(
ip=get_ip(),
config=app.config,
data=request.form['data'],
version=version
)
return 'Error: {}'.format(ret) if ret else 'OK'
if __name__ == '__main__':
app.run(host='::1', port=3000)
| vdloo/raptiformica-map | raptiformica_map/web.py | Python | gpl-3.0 | 1,405 |
import os
import zmq
import logging
class Tank(object):
def __init__(self, host, port):
self.host = host
self.port = port
def run(self):
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PUSH)
logging.info('binding to \'tcp://0.0.0.0:%s\'' % (self.port, ))
self.sock.bind('tcp://0.0.0.0:%s' % (self.port, ))
def start(self):
logging.info('Starting tank')
os.system("ssh %s /bin/bash -c ~/start_tank.sh" % (self.host, ))
def check(self):
logging.info('Checking tank')
if not self.sock.poll(100, zmq.POLLOUT):
raise Exception('Tank is not ready')
#self.sock.send_json((0,), zmq.NOBLOCK)
def stop(self):
logging.info('Stopping tank')
try:
self.sock.send_json(('stop', ), zmq.NOBLOCK)
        except zmq.ZMQError:
logging.error('Tank is not responding')
def fire(self, url, drum):
logging.debug('Firing with drum %s to url %s' % (drum, url))
self.sock.send_json([0, url, drum], zmq.NOBLOCK)
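# Hedged usage sketch (illustration only; host, port, URL and drum name are
# assumed placeholders):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    tank = Tank('tank-host.example.org', 5555)
    tank.run()     # bind the PUSH socket on tcp://0.0.0.0:5555
    tank.start()   # launch the remote tank over ssh
    tank.check()   # raises if the socket cannot accept work yet
    tank.fire('http://target.example.org/', 'drum-0')
    tank.stop()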
| yandex/opensourcestand | tank.py | Python | gpl-3.0 | 1,076 |
import unittest
import redeemer.tokenwords.tokenconverter as tk
class test_tokenconverter(unittest.TestCase):
def setUp(self):
pass
def test_first_word_in_dictionary(self):
result = tk._get_word_for_number(0)
self.assertEquals(result,'A')
def test_last_word_in_dictionary(self):
result = tk._get_word_for_number(2047)
self.assertEquals(result,'YOKE')
def test_no_word_for_2048(self):
with self.assertRaises(ValueError):
word = tk._get_word_for_number(2048)
def test_first_number_in_dictionary(self):
result = tk._get_number_for_word('A')
self.assertEquals(result,0)
def test_last_number_in_dictionary(self):
result = tk._get_number_for_word('YOKE')
self.assertEquals(result,2047)
def test_no_number_for_foo(self):
with self.assertRaises(ValueError):
word = tk._get_number_for_word('FOO')
def test_case_is_ignored(self):
result = tk._get_number_for_word('yokE')
self.assertEquals(2047, result)
def test_five_numbers_to_skeystring(self):
result = tk._get_wordstring_for_numbers([0,1,2,3,2047])
self.assertEquals('A ABE ACE ACT YOKE', result)
def test_skeystring_to_five_numbers(self):
result = tk._get_numbers_for_wordstring('YEAH YEAR YELL YOGA AGO')
self.assertEquals([2043,2044,2045,2046,7], result)
def test_correct_hexstring_to_skeystring(self):
result = tk.get_skeystring_for_hexstring('D1854218EBBB0B51')
self.assertTrue(result.startswith('ROME MUG FRED SCAN LIVE LACE'))
    def test_hexstring_with_wrong_length_to_skeystring_throws_exception(self):
with self.assertRaises(ValueError):
result = tk.get_skeystring_for_hexstring('1234ABCD')
def test_correct_skeystring_to_hexstring(self):
result = tk.get_hexstring_for_skeystring('CHEW GRIM WU HANG BUCK SAID')
self.assertEquals('65d20d1949b5f7ab', result)
    def test_skeystring_with_wrong_length_to_hexstring_throws_exception(self):
        with self.assertRaises(ValueError):
            result = tk.get_hexstring_for_skeystring('CHEW GRIM WU HANG BUCK')
def test_skeystring_to_hexstring_parity_detection(self):
result = tk.get_hexstring_for_skeystring('FOWL KID MASH DEAD DUAL OAF')
self.assertEquals('85c43ee03857765b', result)
with self.assertRaises(ValueError):
            result = tk.get_hexstring_for_skeystring('FOWL KID MASH DEAD DUAL NUT')
        with self.assertRaises(ValueError):
            result = tk.get_hexstring_for_skeystring('FOWL KID MASH DEAD DUAL O')
        with self.assertRaises(ValueError):
            result = tk.get_hexstring_for_skeystring('FOWL KID MASH DEAD DUAL OAK')
def test_correct_tokenhex_to_tokenstring(self):
result = tk.get_tokenstring_for_tokenhex('0060080080') # 3 2 1 0 in 11bit chunks
self.assertEquals('ACT ACE ABE A', result)
    def test_tokenhex_with_wrong_length_to_tokenstring_throws_exception(self):
with self.assertRaises(ValueError):
result = tk.get_tokenstring_for_tokenhex('1234567890A')
with self.assertRaises(ValueError):
result = tk.get_tokenstring_for_tokenhex('123456789')
def test_correct_tokenstring_to_tokenhex(self):
result = tk.get_tokenhex_for_tokenstring('YOKE ACE ABE A') # 2047 2 1 0 in 11bit chunks
self.assertEquals('ffe0080080', result)
def test_nonzero_last_nibble_tokenstring_to_tokenhex_causes_exception(self):
with self.assertRaises(ValueError):
result = tk.get_tokenhex_for_tokenstring('YOKE ACE ABE ABE') # 2047 2 1 1 in 11bit chunks
def test_wrong_length_tokenstring_to_tokenhex_causes_exception(self):
with self.assertRaises(ValueError):
result = tk.get_tokenhex_for_tokenstring('YOKE')
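# Worked illustration of the 11-bit packing these token tests exercise (derived
# from the '0060080080' case above): the 40-bit value splits into leading 11-bit
# chunks 3, 2, 1 -> 'ACT', 'ACE', 'ABE', with the remaining zero bits decoding
# to 'A' (0); a non-zero trailing nibble is rejected with ValueError.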
if __name__ == '__main__':
unittest.main()
| montaggroup/montag-token-redeemer | redeemer/testing/unittests/test_tokenconverter.py | Python | gpl-3.0 | 3,918 |
# coding=utf-8
"""
Test unit, using simple graph made in Signavio editor for import/export operation
"""
import os
import unittest
import bpmn_python.bpmn_diagram_rep as diagram
class BPMNEditorTests(unittest.TestCase):
"""
This class contains test for bpmn-python package functionality using an example BPMN diagram, which contains
multiple pool and lane elements.
"""
output_directory = "./output/test-lane/"
example_path = "../examples/xml_import_export/lanes.bpmn"
output_file_with_di = "lanes-example-output.xml"
output_file_no_di = "lanes-example-output-no-di.xml"
def test_load_lanes_example(self):
"""
Test for importing a simple BPMNEditor diagram example (as BPMN 2.0 XML) into inner representation
and later exporting it to XML file
"""
bpmn_graph = diagram.BpmnDiagramGraph()
bpmn_graph.load_diagram_from_xml_file(os.path.abspath(self.example_path))
bpmn_graph.export_xml_file(self.output_directory, self.output_file_with_di)
bpmn_graph.export_xml_file_no_di(self.output_directory, self.output_file_no_di)
if __name__ == '__main__':
unittest.main()
| KrzyHonk/bpmn-python | tests/xml_import_export/lanes_test.py | Python | gpl-3.0 | 1,170 |
# -*- coding:Utf8 -*-
# display the first 20 terms of the times table of 7,
# flagging the multiples of 3:
i = 1 # counter: will take the values 1 through 20 in turn
while i < 21:
    # compute the term to display:
t = i * 7
    # print without a line break (using 'end ='):
print(t, end=" ")
    # is this term a multiple of 3? (using the modulo operator):
if t % 3 == 0:
print("*", end=' ') # affichage d'une astérisque dans ce cas
i = i + 1 # incrémentation du compteur dans tous les cas | widowild/messcripts | exercice/python3/solutions_exercices/exercice_4_07.py | Python | gpl-3.0 | 601 |
'''
Created on Mar 11, 2013
@author: stefano
'''
from HomeoUniselector import *
from Helpers.General_Helper_Functions import *
import unittest, string, random
class HomeoUniselectorTest(unittest.TestCase):
def setUp(self):
self.uniselector = HomeoUniselector()
def tearDown(self):
pass
def testHomeoUniselectorClassType(self):
"""
Test that HomeoUniselector knows its types include only itself and its subclasses
"""
currentClasses = []
bogusClasses = []
currentClasses.extend([class_.__name__ for class_ in withAllSubclasses(HomeoUniselector)])
        for _ in xrange(5):
            newRandomName = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
            bogusClasses.append(newRandomName)
for currentClass in currentClasses:
self.assertTrue(HomeoUniselector.includesType(currentClass))
for bogusClass in bogusClasses:
self.assertFalse(HomeoUniselector.includesType(bogusClass))
def testIntervalBounds(self):
"""
Test that HomeoUniselector has a positive interval
"""
self.assertTrue((self.uniselector.upperBound - self.uniselector.lowerBound) >= 0)
def testHomeoUniselectorTriggered(self):
"""
Cannot test that HomeoUniselector is activated when its input exceeds the normal bounds:
it is a HomeoUnit's responsibility to do so. HomeoUniselector only returns values when called
"""
self.assertTrue(True)
def testHomeoUniselectorIsAbstractClass(self):
'''
        The core method of HomeoUniselector is advance, which is only defined in its subclasses
'''
self.assertRaises(SubclassResponsibility, self.uniselector.advance)
def testBasicAshbyRandom(self):
'''
Test the basic Ashbian mechanism of returning a random value equal to 1/25
of the interval between upperBound and lowerBound
'''
interval = self.uniselector.upperBound - self.uniselector.lowerBound
for i in xrange(1000):
value = self.uniselector.ashbyRandom()
self.assertAlmostEqual(abs(value),interval/25,delta = 0.0001)
if __name__ == "__main__":
unittest.main() | cleinias/Homeo | src/Core/HomeoUniselectorTest.py | Python | gpl-3.0 | 2,342 |
# This file is part of eddylicious
# (c) Timofey Mukha
# The code is released under the GNU GPL Version 3 licence.
# See LICENCE.txt and the Legal section in the User Guide for more information
"""Module containing functions for writing out the inflow fields in various
file formats.
"""
from .hdf5_writers import *
from .ofnative_writers import *
__all__ = ["hdf5_writers", "ofnative_writers"]
__all__.extend(hdf5_writers.__all__)
__all__.extend(ofnative_writers.__all__) | timofeymukha/eddylicious | eddylicious/writers/__init__.py | Python | gpl-3.0 | 476 |
# coding: utf-8
"""
weasyprint.text
---------------
Interface with Pango to decide where to do line breaks and to draw text.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
# XXX No unicode_literals, cffi likes native strings
import pyphen
import cffi
import cairocffi as cairo
from .compat import basestring
ffi = cffi.FFI()
ffi.cdef('''
typedef enum {
PANGO_STYLE_NORMAL,
PANGO_STYLE_OBLIQUE,
PANGO_STYLE_ITALIC
} PangoStyle;
typedef enum {
PANGO_WEIGHT_THIN = 100,
PANGO_WEIGHT_ULTRALIGHT = 200,
PANGO_WEIGHT_LIGHT = 300,
PANGO_WEIGHT_BOOK = 380,
PANGO_WEIGHT_NORMAL = 400,
PANGO_WEIGHT_MEDIUM = 500,
PANGO_WEIGHT_SEMIBOLD = 600,
PANGO_WEIGHT_BOLD = 700,
PANGO_WEIGHT_ULTRABOLD = 800,
PANGO_WEIGHT_HEAVY = 900,
PANGO_WEIGHT_ULTRAHEAVY = 1000
} PangoWeight;
typedef enum {
PANGO_VARIANT_NORMAL,
PANGO_VARIANT_SMALL_CAPS
} PangoVariant;
typedef enum {
PANGO_STRETCH_ULTRA_CONDENSED,
PANGO_STRETCH_EXTRA_CONDENSED,
PANGO_STRETCH_CONDENSED,
PANGO_STRETCH_SEMI_CONDENSED,
PANGO_STRETCH_NORMAL,
PANGO_STRETCH_SEMI_EXPANDED,
PANGO_STRETCH_EXPANDED,
PANGO_STRETCH_EXTRA_EXPANDED,
PANGO_STRETCH_ULTRA_EXPANDED
} PangoStretch;
typedef enum {
PANGO_WRAP_WORD,
PANGO_WRAP_CHAR,
PANGO_WRAP_WORD_CHAR
} PangoWrapMode;
typedef unsigned int guint;
typedef int gint;
typedef gint gboolean;
typedef void* gpointer;
typedef ... cairo_t;
typedef ... PangoLayout;
typedef ... PangoContext;
typedef ... PangoFontMetrics;
typedef ... PangoLanguage;
typedef ... PangoFontDescription;
typedef ... PangoLayoutIter;
typedef ... PangoAttrList;
typedef ... PangoAttrClass;
typedef struct {
const PangoAttrClass *klass;
guint start_index;
guint end_index;
} PangoAttribute;
typedef struct {
PangoLayout *layout;
gint start_index;
gint length;
/* ... */
} PangoLayoutLine;
double pango_units_to_double (int i);
int pango_units_from_double (double d);
void g_object_unref (gpointer object);
void g_type_init (void);
PangoLayout * pango_cairo_create_layout (cairo_t *cr);
void pango_layout_set_width (PangoLayout *layout, int width);
void pango_layout_set_attributes(
PangoLayout *layout, PangoAttrList *attrs);
void pango_layout_set_text (
PangoLayout *layout, const char *text, int length);
void pango_layout_set_font_description (
PangoLayout *layout, const PangoFontDescription *desc);
void pango_layout_set_wrap (
PangoLayout *layout, PangoWrapMode wrap);
PangoFontDescription * pango_font_description_new (void);
void pango_font_description_free (PangoFontDescription *desc);
void pango_font_description_set_family (
PangoFontDescription *desc, const char *family);
void pango_font_description_set_variant (
PangoFontDescription *desc, PangoVariant variant);
void pango_font_description_set_style (
PangoFontDescription *desc, PangoStyle style);
void pango_font_description_set_stretch (
PangoFontDescription *desc, PangoStretch stretch);
void pango_font_description_set_weight (
PangoFontDescription *desc, PangoWeight weight);
void pango_font_description_set_absolute_size (
PangoFontDescription *desc, double size);
PangoAttrList * pango_attr_list_new (void);
void pango_attr_list_unref (PangoAttrList *list);
void pango_attr_list_insert (
PangoAttrList *list, PangoAttribute *attr);
PangoAttribute * pango_attr_letter_spacing_new (int letter_spacing);
void pango_attribute_destroy (PangoAttribute *attr);
PangoLayoutIter * pango_layout_get_iter (PangoLayout *layout);
void pango_layout_iter_free (PangoLayoutIter *iter);
gboolean pango_layout_iter_next_line (PangoLayoutIter *iter);
PangoLayoutLine * pango_layout_iter_get_line_readonly (
PangoLayoutIter *iter);
int pango_layout_iter_get_baseline (PangoLayoutIter *iter);
typedef struct {
int x;
int y;
int width;
int height;
} PangoRectangle;
void pango_layout_line_get_extents (
PangoLayoutLine *line,
PangoRectangle *ink_rect, PangoRectangle *logical_rect);
PangoContext * pango_layout_get_context (PangoLayout *layout);
PangoFontMetrics * pango_context_get_metrics (
PangoContext *context, const PangoFontDescription *desc,
PangoLanguage *language);
void pango_font_metrics_unref (PangoFontMetrics *metrics);
int pango_font_metrics_get_ascent (PangoFontMetrics *metrics);
int pango_font_metrics_get_descent (PangoFontMetrics *metrics);
int pango_font_metrics_get_approximate_char_width
(PangoFontMetrics *metrics);
int pango_font_metrics_get_approximate_digit_width
(PangoFontMetrics *metrics);
int pango_font_metrics_get_underline_thickness
(PangoFontMetrics *metrics);
int pango_font_metrics_get_underline_position
(PangoFontMetrics *metrics);
int pango_font_metrics_get_strikethrough_thickness
(PangoFontMetrics *metrics);
int pango_font_metrics_get_strikethrough_position
(PangoFontMetrics *metrics);
void pango_cairo_update_layout (cairo_t *cr, PangoLayout *layout);
void pango_cairo_show_layout_line (cairo_t *cr, PangoLayoutLine *line);
''')
def dlopen(ffi, *names):
"""Try various names for the same library, for different platforms."""
for name in names:
try:
return ffi.dlopen(name)
except OSError:
pass
# Re-raise the exception.
return ffi.dlopen(names[0]) # pragma: no cover
gobject = dlopen(ffi, 'gobject-2.0', 'libgobject-2.0-0', 'libgobject-2.0.so',
'libgobject-2.0.dylib')
pango = dlopen(ffi, 'pango-1.0', 'libpango-1.0-0', 'libpango-1.0.so',
'libpango-1.0.dylib')
pangocairo = dlopen(ffi, 'pangocairo-1.0', 'libpangocairo-1.0-0',
'libpangocairo-1.0.so', 'libpangocairo-1.0.dylib')
gobject.g_type_init()
units_to_double = pango.pango_units_to_double
units_from_double = pango.pango_units_from_double
PYPHEN_DICTIONARY_CACHE = {}
PANGO_STYLE = {
'normal': pango.PANGO_STYLE_NORMAL,
'oblique': pango.PANGO_STYLE_OBLIQUE,
'italic': pango.PANGO_STYLE_ITALIC,
}
PANGO_VARIANT = {
'normal': pango.PANGO_VARIANT_NORMAL,
'small-caps': pango.PANGO_VARIANT_SMALL_CAPS,
}
PANGO_STRETCH = {
'ultra-condensed': pango.PANGO_STRETCH_ULTRA_CONDENSED,
'extra-condensed': pango.PANGO_STRETCH_EXTRA_CONDENSED,
'condensed': pango.PANGO_STRETCH_CONDENSED,
'semi-condensed': pango.PANGO_STRETCH_SEMI_CONDENSED,
'normal': pango.PANGO_STRETCH_NORMAL,
'semi-expanded': pango.PANGO_STRETCH_SEMI_EXPANDED,
'expanded': pango.PANGO_STRETCH_EXPANDED,
'extra-expanded': pango.PANGO_STRETCH_EXTRA_EXPANDED,
'ultra-expanded': pango.PANGO_STRETCH_ULTRA_EXPANDED,
}
PANGO_WRAP_MODE = {
'WRAP_WORD': pango.PANGO_WRAP_WORD,
'WRAP_CHAR': pango.PANGO_WRAP_CHAR,
'WRAP_WORD_CHAR': pango.PANGO_WRAP_WORD_CHAR
}
def utf8_slice(string, slice_):
return string.encode('utf-8')[slice_].decode('utf-8')
def unicode_to_char_p(string):
"""Return ``(pointer, bytestring)``.
The byte string must live at least as long as the pointer is used.
"""
bytestring = string.encode('utf8').replace(b'\x00', b'')
return ffi.new('char[]', bytestring), bytestring
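# Illustration: unicode_to_char_p(u'h\xe9llo') returns a cffi 'char[]' holding
# the UTF-8 bytes b'h\xc3\xa9llo' plus the byte string itself; callers keep the
# byte string alive for as long as Pango may dereference the pointer.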
def get_size(line):
logical_extents = ffi.new('PangoRectangle *')
pango.pango_layout_line_get_extents(line, ffi.NULL, logical_extents)
return (units_to_double(logical_extents.width),
units_to_double(logical_extents.height))
def get_ink_position(line):
ink_extents = ffi.new('PangoRectangle *')
pango.pango_layout_line_get_extents(line, ink_extents, ffi.NULL)
return (units_to_double(ink_extents.x), units_to_double(ink_extents.y))
def first_line_metrics(first_line, text, layout, resume_at, hyphenated=False):
length = first_line.length
if not hyphenated and resume_at:
# Create layout with final text, remove trailing spaces if needed
first_line_text = utf8_slice(text, slice(length)).rstrip(' ')
layout.set_text(first_line_text)
first_line = next(layout.iter_lines(), None)
length = first_line.length if first_line is not None else 0
width, height = get_size(first_line)
baseline = units_to_double(pango.pango_layout_iter_get_baseline(ffi.gc(
pango.pango_layout_get_iter(layout.layout),
pango.pango_layout_iter_free)))
return layout, length, resume_at, width, height, baseline
class Layout(object):
"""Object holding PangoLayout-related cdata pointers."""
def __init__(self, hinting, font_size, style):
self.dummy_context = (
cairo.Context(cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
if hinting else
cairo.Context(cairo.PDFSurface(None, 1, 1)))
self.layout = ffi.gc(
pangocairo.pango_cairo_create_layout(ffi.cast(
'cairo_t *', self.dummy_context._pointer)),
gobject.g_object_unref)
self.font = font = ffi.gc(
pango.pango_font_description_new(),
pango.pango_font_description_free)
assert not isinstance(style.font_family, basestring), (
'font_family should be a list')
family_p, family = unicode_to_char_p(','.join(style.font_family))
pango.pango_font_description_set_family(font, family_p)
pango.pango_font_description_set_variant(
font, PANGO_VARIANT[style.font_variant])
pango.pango_font_description_set_style(
font, PANGO_STYLE[style.font_style])
pango.pango_font_description_set_stretch(
font, PANGO_STRETCH[style.font_stretch])
pango.pango_font_description_set_weight(font, style.font_weight)
pango.pango_font_description_set_absolute_size(
font, units_from_double(font_size))
pango.pango_layout_set_font_description(self.layout, font)
def iter_lines(self):
layout_iter = ffi.gc(
pango.pango_layout_get_iter(self.layout),
pango.pango_layout_iter_free)
while 1:
yield pango.pango_layout_iter_get_line_readonly(layout_iter)
if not pango.pango_layout_iter_next_line(layout_iter):
return
def set_text(self, text):
text, bytestring = unicode_to_char_p(text)
self.text = text
self.text_bytes = bytestring
pango.pango_layout_set_text(self.layout, text, -1)
def get_font_metrics(self):
context = pango.pango_layout_get_context(self.layout)
return FontMetrics(context, self.font)
def set_wrap(self, wrap_mode):
pango.pango_layout_set_wrap(self.layout, wrap_mode)
class FontMetrics(object):
def __init__(self, context, font):
self.metrics = ffi.gc(
pango.pango_context_get_metrics(context, font, ffi.NULL),
pango.pango_font_metrics_unref)
def __dir__(self):
return ['ascent', 'descent',
'approximate_char_width', 'approximate_digit_width',
'underline_thickness', 'underline_position',
'strikethrough_thickness', 'strikethrough_position']
def __getattr__(self, key):
if key in dir(self):
return units_to_double(
getattr(pango, 'pango_font_metrics_get_' + key)(self.metrics))
def create_layout(text, style, hinting, max_width):
"""Return an opaque Pango layout with default Pango line-breaks.
:param text: Unicode
:param style: a :class:`StyleDict` of computed values
:param hinting: whether to enable text hinting or not
:param max_width:
The maximum available width in the same unit as ``style.font_size``,
or ``None`` for unlimited width.
"""
layout = Layout(hinting, style.font_size, style)
layout.set_text(text)
# Make sure that max_width * Pango.SCALE == max_width * 1024 fits in a
# signed integer. Treat bigger values same as None: unconstrained width.
if max_width is not None and max_width < 2 ** 21:
pango.pango_layout_set_width(
layout.layout, units_from_double(max_width))
word_spacing = style.word_spacing
letter_spacing = style.letter_spacing
if letter_spacing == 'normal':
letter_spacing = 0
if text and (word_spacing != 0 or letter_spacing != 0):
letter_spacing = units_from_double(letter_spacing)
space_spacing = units_from_double(word_spacing) + letter_spacing
attr_list = pango.pango_attr_list_new()
def add_attr(start, end, spacing):
attr = pango.pango_attr_letter_spacing_new(spacing)
attr.start_index = start
attr.end_index = end
pango.pango_attr_list_insert(attr_list, attr)
text_bytes = layout.text_bytes
add_attr(0, len(text_bytes) + 1, letter_spacing)
position = text_bytes.find(b' ')
while position != -1:
add_attr(position, position + 1, space_spacing)
position = text_bytes.find(b' ', position + 1)
pango.pango_layout_set_attributes(layout.layout, attr_list)
pango.pango_attr_list_unref(attr_list)
return layout
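# Hedged usage sketch (illustration only; `style` stands for a computed
# StyleDict carrying font_size, font_family, spacing, etc., which is built
# elsewhere in WeasyPrint):
#
#     layout = create_layout(u'Hello, world', style, hinting=False, max_width=200)
#     for line in layout.iter_lines():
#         width, height = get_size(line)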
def split_first_line(text, style, hinting, max_width, line_width):
"""Fit as much as possible in the available width for one line of text.
Return ``(layout, length, resume_at, width, height, baseline)``.
``layout``: a pango Layout with the first line
``length``: length in UTF-8 bytes of the first line
``resume_at``: The number of UTF-8 bytes to skip for the next line.
May be ``None`` if the whole text fits in one line.
This may be greater than ``length`` in case of preserved
newline characters.
``width``: width in pixels of the first line
``height``: height in pixels of the first line
``baseline``: baseline in pixels of the first line
"""
# Step #1: Get a draft layout with the first line
layout = None
if max_width:
expected_length = int(max_width / style.font_size * 2.5)
if expected_length < len(text):
# Try to use a small amount of text instead of the whole text
layout = create_layout(
text[:expected_length], style, hinting, max_width)
lines = layout.iter_lines()
first_line = next(lines, None)
second_line = next(lines, None)
if second_line is None:
# The small amount of text fits in one line, give up and use
# the whole text
layout = None
if layout is None:
layout = create_layout(text, style, hinting, max_width)
lines = layout.iter_lines()
first_line = next(lines, None)
second_line = next(lines, None)
resume_at = None if second_line is None else second_line.start_index
    # Step #2: Don't hyphenate when it's not needed
if max_width is None:
# The first line can take all the place needed
return first_line_metrics(first_line, text, layout, resume_at)
first_line_width, _height = get_size(first_line)
if second_line is None and first_line_width <= max_width:
# The first line fits in the available width
return first_line_metrics(first_line, text, layout, resume_at)
# Step #3: Try to put the first word of the second line on the first line
if first_line_width <= max_width:
# The first line may have been cut too early by Pango
second_line_index = second_line.start_index
first_line_text = utf8_slice(text, slice(second_line_index))
second_line_text = utf8_slice(text, slice(second_line_index, None))
else:
        # The first word is longer than the line; try to hyphenate it
first_line_text = ''
second_line_text = text
next_word = second_line_text.split(' ', 1)[0]
if next_word:
# next_word might fit without a space afterwards
new_first_line_text = first_line_text + next_word
layout.set_text(new_first_line_text)
lines = layout.iter_lines()
first_line = next(lines, None)
second_line = next(lines, None)
first_line_width, _height = get_size(first_line)
if second_line is None and first_line_width <= max_width:
# The next word fits in the first line, keep the layout
resume_at = len(new_first_line_text.encode('utf-8')) + 1
if resume_at == len(text.encode('utf-8')):
resume_at = None
return first_line_metrics(first_line, text, layout, resume_at)
elif first_line_text:
# We found something on the first line but we did not find a word on
# the next line, no need to hyphenate, we can keep the current layout
return first_line_metrics(first_line, text, layout, resume_at)
    # Step #4: Try to hyphenate
hyphens = style.hyphens
lang = style.lang and pyphen.language_fallback(style.lang)
total, left, right = style.hyphenate_limit_chars
hyphenated = False
# Automatic hyphenation possible and next word is long enough
if hyphens not in ('none', 'manual') and lang and len(next_word) >= total:
first_line_width, _height = get_size(first_line)
space = max_width - first_line_width
if style.hyphenate_limit_zone.unit == '%':
limit_zone = max_width * style.hyphenate_limit_zone.value / 100.
else:
limit_zone = style.hyphenate_limit_zone.value
if space > limit_zone or space < 0:
# The next word does not fit, try hyphenation
dictionary_key = (lang, left, right, total)
dictionary = PYPHEN_DICTIONARY_CACHE.get(dictionary_key)
if dictionary is None:
dictionary = pyphen.Pyphen(lang=lang, left=left, right=right)
PYPHEN_DICTIONARY_CACHE[dictionary_key] = dictionary
for first_word_part, _ in dictionary.iterate(next_word):
hyphenated_first_line_text = (
first_line_text + first_word_part +
style.hyphenate_character)
temp_layout = create_layout(
hyphenated_first_line_text, style, hinting, max_width)
temp_lines = temp_layout.iter_lines()
temp_first_line = next(temp_lines, None)
temp_second_line = next(temp_lines, None)
if (temp_second_line is None and space >= 0) or space < 0:
hyphenated = True
resume_at = len(
(first_line_text + first_word_part).encode('utf8'))
layout = temp_layout
first_line = temp_first_line
second_line = temp_second_line
temp_first_line_width, _height = get_size(temp_first_line)
if temp_first_line_width <= max_width:
break
    # Step #5: Try to break the word if it's too long for the line
overflow_wrap = style.overflow_wrap
first_line_width, _height = get_size(first_line)
space = max_width - first_line_width
# If we can break words and the first line is too long
if overflow_wrap == 'break-word' and space < 0:
        # Is it really OK to remove hyphenation for word-break?
hyphenated = False
        # TODO: Modify code to preserve W3C condition:
        # "Shaping characters are still shaped as if the word were not broken"
        # The way new lines are processed in this function (one by one, with no
        # memory of the last) prevents shaping characters (Arabic, for
        # instance) from keeping their shape when wrapped onto the next line by
        # the Pango layout. Maybe insert Unicode shaping characters in the text?
temp_layout = create_layout(text, style, hinting, max_width)
temp_layout.set_wrap(PANGO_WRAP_MODE['WRAP_WORD_CHAR'])
temp_lines = temp_layout.iter_lines()
temp_first_line = next(temp_lines, None)
temp_second_line = next(temp_lines, None)
temp_second_line_index = (
len(text.encode('utf-8')) if temp_second_line is None
else temp_second_line.start_index)
resume_at = temp_second_line_index
first_line_text = utf8_slice(text, slice(temp_second_line_index))
layout = create_layout(first_line_text, style, hinting, max_width)
lines = layout.iter_lines()
first_line = next(lines, None)
return first_line_metrics(first_line, text, layout, resume_at, hyphenated)
def line_widths(text, style, enable_hinting, width):
"""Return the width for each line."""
layout = create_layout(text, style, enable_hinting, width)
for line in layout.iter_lines():
width, _height = get_size(line)
yield width
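# Illustrative use of line_widths (sketch only, variable names assumed): as a
# generator it can be consumed lazily, e.g.
# max(line_widths(text, style, hinting, 200)) gives the widest rendered line
# for a 200px available width.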
def show_first_line(context, pango_layout, hinting):
"""Draw the given ``line`` to the Cairo ``context``."""
context = ffi.cast('cairo_t *', context._pointer)
if hinting:
pangocairo.pango_cairo_update_layout(context, pango_layout.layout)
    # Set an infinite width: we don't want to break lines when drawing, since
    # the lines have already been split, and the size may differ (for example
    # because of hinting).
pango.pango_layout_set_width(pango_layout.layout, -1)
pangocairo.pango_cairo_show_layout_line(
context, next(pango_layout.iter_lines()))
| aESeguridad/GERE | venv/lib/python2.7/site-packages/weasyprint/text.py | Python | gpl-3.0 | 22,476 |
# -*- coding: utf-8 -*-
# IDD3 - Propositional Idea Density from Dependency Trees
# Copyright (C) 2014-2015 Andre Luiz Verucci da Cunha
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from idd3.base import *
config = Config()
all_transformations = []
all_rulesets = []
def use_language(module):
"""Configure idd3's global variables (config, all_transformations,
and all_rulesets) using those from module.
"""
global config, all_transformations, all_rulesets
for key, value in module.config.items():
config[key] = value
while len(all_transformations):
del all_transformations[0]
all_transformations.extend(module.all_transformations)
while len(all_rulesets):
del all_rulesets[0]
all_rulesets.extend(module.all_rulesets)
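# Hypothetical usage sketch (module name assumed, not asserted by this file):
# any module exposing its own ``config``, ``all_transformations`` and
# ``all_rulesets`` attributes can be activated like this:
#
#     import idd3
#     from idd3.rules import en  # assumed language module
#     idd3.use_language(en)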
from idd3 import rules
from idd3 import transform
| andrecunha/idd3 | idd3/__init__.py | Python | gpl-3.0 | 1,432 |
# Copyright (C) 2011-2014 2ndQuadrant Italia (Devise.IT S.r.L.)
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
import pytest
from barman import xlog
# noinspection PyMethodMayBeStatic
class Test(object):
def test_encode_segment_name(self):
assert xlog.encode_segment_name(
0, 0, 0) == '000000000000000000000000'
assert xlog.encode_segment_name(
1, 1, 1) == '000000010000000100000001'
assert xlog.encode_segment_name(
10, 10, 10) == '0000000A0000000A0000000A'
assert xlog.encode_segment_name(
17, 17, 17) == '000000110000001100000011'
assert xlog.encode_segment_name(
0, 2, 1) == '000000000000000200000001'
assert xlog.encode_segment_name(
1, 0, 2) == '000000010000000000000002'
assert xlog.encode_segment_name(
2, 1, 0) == '000000020000000100000000'
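    # The encoding exercised above follows PostgreSQL's WAL segment naming:
    # the timeline, log and segment numbers are each rendered as 8 uppercase
    # hex digits, i.e. roughly '%08X%08X%08X' % (tli, log, seg).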
def test_decode_segment_name(self):
assert xlog.decode_segment_name(
'000000000000000000000000') == [0, 0, 0]
assert xlog.decode_segment_name(
'000000010000000100000001') == [1, 1, 1]
assert xlog.decode_segment_name(
'0000000A0000000A0000000A') == [10, 10, 10]
assert xlog.decode_segment_name(
'000000110000001100000011') == [17, 17, 17]
assert xlog.decode_segment_name(
'000000000000000200000001') == [0, 2, 1]
assert xlog.decode_segment_name(
'000000010000000000000002') == [1, 0, 2]
assert xlog.decode_segment_name(
'000000020000000100000000') == [2, 1, 0]
assert xlog.decode_segment_name(
'00000001000000000000000A.00000020.backup') == [1, 0, 10]
assert xlog.decode_segment_name(
'00000001.history') == [1, None, None]
with pytest.raises(xlog.BadXlogSegmentName):
xlog.decode_segment_name('00000000000000000000000')
with pytest.raises(xlog.BadXlogSegmentName):
xlog.decode_segment_name('0000000000000000000000000')
with pytest.raises(xlog.BadXlogSegmentName):
xlog.decode_segment_name('000000000000X00000000000')
def test_enumerate_segments(self):
assert tuple(
xlog.enumerate_segments(
'0000000100000001000000FD',
'000000010000000200000002',
90200
)) == (
'0000000100000001000000FD',
'0000000100000001000000FE',
'000000010000000200000000',
'000000010000000200000001',
'000000010000000200000002')
assert tuple(
xlog.enumerate_segments(
'0000000100000001000000FD',
'0000000100000001000000FF',
90200
)) == (
'0000000100000001000000FD',
'0000000100000001000000FE')
assert tuple(
xlog.enumerate_segments(
'0000000100000001000000FD',
'000000010000000200000002',
90300
)) == (
'0000000100000001000000FD',
'0000000100000001000000FE',
'0000000100000001000000FF',
'000000010000000200000000',
'000000010000000200000001',
'000000010000000200000002')
assert tuple(
xlog.enumerate_segments(
'0000000100000001000000FD',
'0000000100000001000000FF',
90300
)) == (
'0000000100000001000000FD',
'0000000100000001000000FE',
'0000000100000001000000FF',)
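        # Note encoded by the fixtures above: before PostgreSQL 9.3 the
        # segment number 0xFF was skipped, while from 9.3 onwards it is a
        # valid segment name.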
def test_hash_dir(self):
assert xlog.hash_dir(
'000000000000000200000001') == '0000000000000002'
assert xlog.hash_dir(
'000000010000000000000002') == '0000000100000000'
assert xlog.hash_dir(
'000000020000000100000000') == '0000000200000001'
assert xlog.hash_dir(
'00000001.history') == ''
assert xlog.hash_dir(
'00000002.history') == ''
assert xlog.hash_dir(
'00000001000000000000000A.00000020.backup') == '0000000100000000'
assert xlog.hash_dir(
'00000002000000050000000A.00000020.backup') == '0000000200000005'
with pytest.raises(xlog.BadXlogSegmentName):
xlog.hash_dir('00000000000000000000000')
with pytest.raises(xlog.BadXlogSegmentName):
xlog.hash_dir('0000000000000000000000000')
with pytest.raises(xlog.BadXlogSegmentName):
            xlog.hash_dir('000000000000X00000000000')
| huddler/pgbarman | tests/test_xlog.py | Python | gpl-3.0 | 5,245 |
# Generated by Django 2.2.2 on 2021-02-11 18:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scoping', '0337_auto_20210126_2126'),
]
operations = [
migrations.AddField(
model_name='category',
name='selection_tiers',
field=models.IntegerField(default=1),
),
migrations.AddField(
model_name='docusercat',
name='selection_tier',
field=models.IntegerField(default=1),
),
]
| mcallaghan/tmv | BasicBrowser/scoping/migrations/0338_auto_20210211_1810.py | Python | gpl-3.0 | 556 |
"""empty message
Revision ID: cfc5f3fd5efe
Revises: 2040f0c804b0
Create Date: 2018-06-07 18:52:31.059396
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cfc5f3fd5efe'
down_revision = '2040f0c804b0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('notes', sa.Column('creator_id', sa.Integer(), nullable=True))
op.drop_constraint(u'notes_user_id_fkey', 'notes', type_='foreignkey')
op.create_foreign_key(None, 'notes', 'users', ['creator_id'], ['id'])
op.drop_column('notes', 'user_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('notes', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'notes', type_='foreignkey')
op.create_foreign_key(u'notes_user_id_fkey', 'notes', 'users', ['user_id'], ['id'])
op.drop_column('notes', 'creator_id')
# ### end Alembic commands ###
| lucyparsons/OpenOversight | OpenOversight/migrations/versions/cfc5f3fd5efe_.py | Python | gpl-3.0 | 1,091 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# Anna Chabuda <[email protected]>
#
import pygame
pygame.mixer.init()
import os.path, time
from pygame.locals import *
from arrow.draw_arrow import DrawArrow
from arrow.draw_wii_arrow import DrawWiiArrow
from screen_text.screen_text import *
from screen_text.render_textrect import render_textrect
from constants.constants_arrow import ARROW_PROPORTION, ARROW_SIZE, ARROW_COLORS_LEVELS, ARROW_LEVELS_LINES
from constants.constants_game import SIZE_OBJECT, SCREEN_SIZE, RECT_TEXT_SIZE
from obci.configs import settings
from obci.acquisition import acquisition_helper
GAME_DATA_PATH = os.path.join(settings.MAIN_DIR, 'exps/ventures/maze_game/game_data')
class MazeScreen(object):
def __init__(self, time_board_display, number_of_levels, session_number, session_type, session_condition):
super(MazeScreen, self).__init__()
pygame.init()
self.session_type = session_type
self.session_condition = session_condition
self.size_object = SIZE_OBJECT
self.arrow_proportion = ARROW_PROPORTION
self.arrow_size = ARROW_SIZE
self.arrow_colors_levels = ARROW_COLORS_LEVELS
self.arrow_levels_lines = ARROW_LEVELS_LINES
self.screen_size = SCREEN_SIZE
self.time_board_display = time_board_display
self.session_number = session_number
self.number_of_levels = number_of_levels
self.screen = pygame.display.set_mode(self.screen_size, FULLSCREEN)
pygame.display.init()
self._load_font()
self._load_sound()
self._load_image()
self._init_arrows()
self.animation_offset_x = 0
self.animation_offset_y = 0
self.text_rect = pygame.Rect(RECT_TEXT_SIZE)
def _load_font(self):
pygame.font.init()
self.font_game = pygame.font.Font(os.path.join(GAME_DATA_PATH,'impact.ttf'), 18)
self.font_text = pygame.font.Font(os.path.join(GAME_DATA_PATH,'impact.ttf'), 24)
def _load_sound(self):
self.hit_wall_sound = pygame.mixer.Sound(os.path.join(GAME_DATA_PATH,'Boom.wav'))
self.fall_sound = pygame.mixer.Sound(os.path.join(GAME_DATA_PATH,'Fall.wav'))
self.enter_sound = pygame.mixer.Sound(os.path.join(GAME_DATA_PATH, 'Enter.wav'))
def _load_image(self):
self.ball = pygame.image.load(os.path.join(GAME_DATA_PATH,'ball.png'))
self.block = pygame.image.load(os.path.join(GAME_DATA_PATH,'block.gif'))
self.floor_block = pygame.image.load(os.path.join(GAME_DATA_PATH,'floor.gif'))
self.hole_block = pygame.image.load(os.path.join(GAME_DATA_PATH,'hole.gif'))
if self.session_condition in ['motor', 'key_motor']:
self.start_block = pygame.image.load(os.path.join(GAME_DATA_PATH,'start_path.gif'))
else:
self.start_block = pygame.image.load(os.path.join(GAME_DATA_PATH,'start.gif'))
self.finish_block = pygame.image.load(os.path.join(GAME_DATA_PATH,'finish.gif'))
self.black_screen = pygame.image.load(os.path.join(GAME_DATA_PATH,'blank.gif'))
self.floor_path_block = pygame.image.load(os.path.join(GAME_DATA_PATH,'floor_path.gif'))
self.floor_active_path_block = pygame.image.load(os.path.join(GAME_DATA_PATH,'floor_path_2.gif'))
def _init_arrows(self):
if self.session_condition in ['cognitive', 'key_motor']:
self.arrow_right = DrawArrow(self.screen, 'right', self.arrow_colors_levels,
self.arrow_proportion, self.arrow_size, self.arrow_levels_lines)
self.arrow_left = DrawArrow(self.screen, 'left', self.arrow_colors_levels,
self.arrow_proportion, self.arrow_size, self.arrow_levels_lines)
self.arrow_up = DrawArrow(self.screen, 'up', self.arrow_colors_levels,
self.arrow_proportion, self.arrow_size, self.arrow_levels_lines)
self.arrow_down = DrawArrow(self.screen, 'down', self.arrow_colors_levels,
self.arrow_proportion, self.arrow_size, self.arrow_levels_lines)
elif self.session_condition in ['motor', 'cognitive_motor']:
self.arrow_right = DrawWiiArrow(self.screen, 'right', self.arrow_colors_levels,
self.arrow_proportion, self.arrow_size, self.arrow_levels_lines)
self.arrow_left = DrawWiiArrow(self.screen, 'left', self.arrow_colors_levels,
self.arrow_proportion, self.arrow_size, self.arrow_levels_lines)
self.arrow_up = DrawWiiArrow(self.screen, 'up', self.arrow_colors_levels,
self.arrow_proportion, self.arrow_size, self.arrow_levels_lines)
self.arrow_down = DrawWiiArrow(self.screen, 'down', self.arrow_colors_levels,
self.arrow_proportion, self.arrow_size, self.arrow_levels_lines)
def get_arrow(self, type_):
if type_ == 'right':
return self.arrow_right
elif type_ == 'left':
return self.arrow_left
elif type_ == 'up':
return self.arrow_up
elif type_ == 'down':
return self.arrow_down
def load_wii_level_arrow_proportion(self, direction, proportion):
self.get_arrow(direction).set_arrow_proportion(proportion)
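    # The two helpers below map maze-grid coordinates to screen pixels: the
    # level is centred on screen via x_offset/y_offset, and each grid cell is
    # size_object pixels square.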
def _calc_grid_offsets(self, level):
self.x_offset = (self.screen_size[0] -(((len(level))*self.size_object)))/2
self.y_offset = (self.screen_size[1] - (((len(level[0]))*self.size_object)))/2
def _get_position(self, xm, ym):
x_position = (xm*self.size_object) + self.x_offset
y_position = (ym*self.size_object) + self.y_offset
return (x_position, y_position)
def draw_game(self, level_array, ball_position_x, ball_position_y, current_level,
level_time, session_time, path, active_path):
self._draw_level(level_array, path, active_path)
self._draw_ball(ball_position_x, ball_position_y)
self._draw_level_info(current_level, level_time, session_time)
self._display()
def draw_game_with_arrow(self, arrow_type, level_array, ball_position_x, ball_position_y,
current_level, level_time, session_time, path, active_path):
self._draw_level(level_array, path, active_path)
self._draw_ball(ball_position_x, ball_position_y)
self._draw_level_info(current_level, level_time, session_time)
self._draw_arrow(arrow_type, ball_position_x, ball_position_y)
self._display()
def draw_game_with_arrow_update(self, arrow_type, arrow_level, level_array, ball_position_x,
ball_position_y, current_level, level_time, session_time, path, active_path):
self._draw_level(level_array, path, active_path)
self._draw_ball(ball_position_x, ball_position_y)
self._draw_level_info(current_level, level_time, session_time)
self.get_arrow(arrow_type).draw_level(arrow_level)
self._display()
def draw_game_with_wii_arrow(self, arrow_type, arrow_level, arrow_area_param, level_array, ball_position_x,
ball_position_y, current_level, level_time, session_time, path, active_path):
self._draw_level(level_array, path, active_path)
self._draw_ball(ball_position_x, ball_position_y)
self._draw_level_info(current_level, level_time, session_time)
self._draw_arrow(arrow_type, ball_position_x, ball_position_y)
self.get_arrow(arrow_type).draw_level(arrow_level, arrow_area_param)
self._display()
def _draw_level(self, level_array, path, active_path):
self.screen.blit(self.black_screen, (0,0))
self._calc_grid_offsets(level_array)
for ym in range(len(level_array)):
for xm in range(len(level_array[0])):
if level_array[ym][xm] == 0:
self.screen.blit(self.floor_block, self._get_position(xm, ym))
elif level_array[ym][xm] == 1:
self.screen.blit(self.block, self._get_position(xm, ym))
elif level_array[ym][xm] == 2:
self.screen.blit(self.hole_block, self._get_position(xm, ym))
elif level_array[ym][xm] == 3:
self.screen.blit(self.start_block, self._get_position(xm, ym))
elif level_array[ym][xm] == 4:
self.screen.blit(self.finish_block, self._get_position(xm, ym))
if self.session_condition in ['motor', 'key_motor']:
for ym, xm in path:
if not (level_array[ym][xm] in [3, 4]):
if (ym, xm) in active_path:
self.screen.blit(self.floor_active_path_block, self._get_position(xm, ym))
else:
self.screen.blit(self.floor_path_block, self._get_position(xm, ym))
else:
pass
def _draw_ball(self, ball_x, ball_y):
x_position, y_position = self._get_position(ball_x, ball_y)
self.screen.blit(self.ball, (x_position+self._get_animation_offset_x(),
y_position+self._get_animation_offset_y()))
def _draw_level_info(self, current_level, level_time, session_time):
level_text = self.font_game.render('{}: {}/{}'.format('POZIOM', current_level, self.number_of_levels),
1,
(250, 250, 250))
self.screen.blit(level_text, (0, 20))
#level_text = self.font_game.render('{}: {}'.format('POZIOM', level_time),
# 1,
# (250, 250, 250))
#self.screen.blit(level_text, (0, 40))
if self.session_type == 'experiment':
level_text = self.font_game.render('{}: {}'.format('CZAS', session_time),
1,
(250, 250, 250))
self.screen.blit(level_text, (0, 40))
def _draw_arrow(self, type_, ball_x, ball_y):
if type_ == 'right':
self._draw_arrow_right(ball_x, ball_y)
elif type_ == 'left':
self._draw_arrow_left(ball_x, ball_y)
elif type_ == 'up':
self._draw_arrow_up(ball_x, ball_y)
elif type_ == 'down':
self._draw_arrow_down(ball_x, ball_y)
def _draw_arrow_right(self, ball_x, ball_y):
x_position, y_position = self._get_position(ball_x, ball_y)
self.arrow_right.init_position((x_position+self.size_object, y_position+int(0.5*self.size_object)))
self.arrow_right.init_draw_arrow()
def _draw_arrow_left(self, ball_x, ball_y):
x_position, y_position = self._get_position(ball_x, ball_y)
self.arrow_left.init_position((x_position, y_position+int(0.5*self.size_object)))
self.arrow_left.init_draw_arrow()
def _draw_arrow_up(self, ball_x, ball_y):
x_position, y_position = self._get_position(ball_x, ball_y)
self.arrow_up.init_position((x_position+int(self.size_object*0.5), y_position))
self.arrow_up.init_draw_arrow()
def _draw_arrow_down(self, ball_x, ball_y):
x_position, y_position = self._get_position(ball_x, ball_y)
self.arrow_down.init_position((x_position+int(self.size_object*0.5), y_position+self.size_object))
self.arrow_down.init_draw_arrow()
def play_sound(self, action):
if action == 'win':
self.enter_sound.play()
elif action == 'level_down':
self.fall_sound.play()
elif action == 'hit_wall':
self.hit_wall_sound.play()
elif action == 'fall':
self.fall_sound.play()
def _display_screen_helper(self, image, text='', color=(250, 250, 250)):
self.screen.blit(self.black_screen, (0, 0))
rendered_text = render_textrect(text, self.font_text, self.text_rect, color, (0, 0, 0), 1)
text = self.font_text.render(text ,1, color)
self.screen.blit(rendered_text, self.text_rect.topleft)
def display_screen(self, action):
if action == 'win':
self._display_screen_helper(text=get_win_level_text(self.session_type, self.session_condition), image=self.black_screen)
self._display()
time.sleep(self.time_board_display+1)
elif action == 'start':
self._display_screen_helper(text=get_start_session_text(self.session_number, self.session_type, self.session_condition), image=self.black_screen)
self._display()
elif action == 'repeat_level_1':
self._display_screen_helper(text=get_repeat_level_text(1, self.session_type, self.session_condition), image=self.black_screen)
self._display()
time.sleep(self.time_board_display)
elif action == 'repeat_level_2':
self._display_screen_helper(text=get_repeat_level_text(2, self.session_type, self.session_condition), image=self.black_screen)
self._display()
time.sleep(self.time_board_display)
elif action == 'level_down':
self._display_screen_helper(text=get_repeat_level_text(3, self.session_type, self.session_condition), image=self.black_screen)
self._display()
time.sleep(self.time_board_display+2)
elif action == 'level_timeout':
self._display_screen_helper(text=get_timeout_level(self.session_type, self.session_condition), image=self.black_screen)
self._display()
time.sleep(self.time_board_display)
elif action == 'pause':
self._display_screen_helper(text=get_pause_text(self.session_type, self.session_condition), image=self.black_screen)
self._display()
elif action == 'finish':
self._display_screen_helper(text=get_finish_session_text(self.session_number, self.session_type, self.session_condition), image=self.black_screen)
self._display()
time.sleep(self.time_board_display)
elif action == 'instruction1':
self._display_screen_helper(text=get_instruction_1(self.session_type, self.session_condition), image=self.black_screen)
self._display()
elif action == 'instruction2':
self._display_screen_helper(text=get_instruction_2(self.session_type, self.session_condition), image=self.black_screen)
self._display()
elif action == 'exit':
self._display_screen_helper(text=get_exit_text(self.session_type, self.session_condition), image=self.black_screen)
self._display()
def _display(self):
pygame.display.flip()
def _get_animation_offset_x(self):
return self.animation_offset_x
def set_animation_offset_x(self, value):
self.animation_offset_x = value
def update_animation_offset_x(self, value):
self.animation_offset_x += value
def _get_animation_offset_y(self):
return self.animation_offset_y
def set_animation_offset_y(self, value):
self.animation_offset_y = value
def update_animation_offset_y(self, value):
self.animation_offset_y += value
| BrainTech/openbci | obci/exps/ventures/maze_game/maze_screen.py | Python | gpl-3.0 | 16,119 |
#############################################################################
# This file holds some global variables for some of the input options.
# These global parameters should be read only -- they are not modified anywhere
# else in the code except when reading the input options.
#
# This dictionary will also be used to track output.
#############################################################################
import sys
import timeit
import lib.core as CORE
#############################################################################
class StrictDict(dict):
# This prevents additional keys from being added to the global params dict in
# any other part of the code, just to help me limit its scope
# https://stackoverflow.com/questions/32258706/how-to-prevent-key-creation-through-dkey-val
def __setitem__(self, key, value):
if key not in self:
raise KeyError("{} is not a legal key of this StricDict".format(repr(key)));
dict.__setitem__(self, key, value);
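    # Intended behaviour, for illustration:
    #   d = StrictDict({'a': 1}); d['a'] = 2   # fine, the key already exists
    #   d['b'] = 3                             # raises KeyError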
#############################################################################
def init():
script_name = "default-script-name";
globs_init = {
'version' : 'Beta 1.0',
'releasedate' : "October 2021",
'authors' : "",
'doi' : '',
'http' : '',
'github' : '',
'starttime' : timeit.default_timer(),
'startdatetime' : CORE.getOutTime(),
# Meta info
'pyver' : ".".join(map(str, sys.version_info[:3])),
# System info
'call' : "",
# Script call info
'input-file' : False,
'input-dir' : False,
'other-file' : False,
'outdir' : False,
'output-file' : False,
# Input options
'results' : {},
# The main dictionary to track results
'run-name' : script_name,
'logfilename' : script_name + '.errlog',
'logdir' : '',
'overwrite' : False,
# I/O options
'num-procs' : 1,
# Number of processes to use
'info' : False,
'dryrun' : False,
'quiet' : False,
# Other user options
'pad' : 82,
'endprog' : False,
'exit-code' : 0,
'log-v' : 1,
'stats' : True,
'progstarttime' : 0,
'stepstarttime' : 0,
'pids' : "",
'psutil' : False,
'qstats' : False,
'norun' : False,
'debug' : False,
'nolog' : False,
# Internal stuff
}
globs_init['logfilename'] = script_name + "-" + globs_init['startdatetime'] + ".errlog";
# Add the runtime to the error log file name.
globs = StrictDict(globs_init);
# Restrict the dict from having keys added to it after this
return globs;
#############################################################################
| gwct/core | python/python-template/lib/params.py | Python | gpl-3.0 | 2,831 |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
__author__ = 'apaeffgen'
# This file is part of Panconvert.
#
# Panconvert is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Panconvert is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Panconvert. If not, see <http://www.gnu.org/licenses/>.
from PyQt5 import QtWidgets
from PyQt5.QtCore import QSettings
from PyQt5 import QtCore
from PyQt5.QtCore import QPoint, QSize
from source.gui.panconvert_diag_prefpane import Ui_DialogPreferences
from distutils.util import strtobool as str2bool
import platform, os
global path_pandoc, path_dialog
def strtobool(input):
"""
    Safe strtobool: if the input is already a boolean,
    it returns the input unchanged; otherwise it falls
    back to distutils.util.strtobool.
"""
if isinstance(input,bool):
return input
return str2bool(input)
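# Illustration: strtobool(True) -> True, while strtobool('true') -> 1 and
# strtobool('false') -> 0 (the distutils semantics for strings).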
# dictionary for all the languages that have translations
lang = {}
lang['en'] = 'English'
lang['de'] = 'Deutsch'
lang['es'] = 'Español'
lang['fr'] = 'Français'
class PreferenceDialog(QtWidgets.QDialog):
global path_pandoc, path_dialog
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.ui = Ui_DialogPreferences()
self.ui.setupUi(self)
self.ui.ButtonSave.clicked.connect(self.settings)
self.ui.ButtonSave_2.clicked.connect(self.settings)
self.ui.ButtonCancel.clicked.connect(self.cancel_dialog)
self.ui.ButtonCancel_2.clicked.connect(self.cancel_dialog)
self.ui.ButtonPandocPath.clicked.connect(self.DirectoryPandoc)
self.ui.ButtonMarkdownPath.clicked.connect(self.DirectoryMarkdown)
self.ui.ButtonOpenSavePath.clicked.connect(self.DirectoryOpenSave)
#Initialize Settings
settings = QSettings('Pandoc', 'PanConvert')
#Language Settings
for longLang in lang.values():
self.ui.comboBoxLanguageSelector.addItem(longLang)
default_language = settings.value('default_language')
self.ui.comboBoxLanguageSelector.setCurrentText(default_language)
self.ui.comboBoxLanguageSelector.currentIndexChanged.connect(self.SetLanguage)
#Checkbox Size of Main Window and DockWindow
Window_Size = settings.value('Window_Size', True)
Dock_Size = settings.value('Dock_Size', True)
Dialog_Size = settings.value('Dialog_Size', True)
Hide_Batch = settings.value('Hide_Batch', True)
#Checkbox Gui Old / New / BatchMode
Button_OldGui = settings.value('Button_OldGui', False)
Button_NewGui = settings.value('Button_NewGui', True)
#Standard Tab of the New Gui
Tab_StandardConverter = settings.value('Tab_StandardConverter', True)
Tab_ManualConverter = settings.value('Tab_ManualConverter', False)
Tab_BatchConverter = settings.value('Tab_BatchConverter', False)
#Size of Dialog Windows
self.resize(settings.value("Preference_size", QSize(270, 225)))
self.move(settings.value("Preference_pos", QPoint(50, 50)))
#Paths and Parameters
path_pandoc = settings.value('path_pandoc')
self.ui.Pandoc_Path.insert(path_pandoc)
path_multimarkdown = settings.value('path_multimarkdown')
self.ui.Markdown_Path.insert(path_multimarkdown)
path_dialog = settings.value('path_dialog')
self.ui.Dialog_Path.insert(path_dialog)
fromParameter = settings.value('fromParameter')
self.ui.FromParameter.insert(fromParameter)
toParameter = settings.value('toParameter')
self.ui.ToParameter.insert(toParameter)
xtraParameter = settings.value('xtraParameter')
self.ui.XtraParameter.insert(xtraParameter)
#Buffer Save Parameters
BufferSaveSuffix = settings.value('BufferSaveSuffix')
self.ui.BufferSaveSuffix.insert(BufferSaveSuffix)
BufferSaveName = settings.value('BufferSaveName')
self.ui.BufferSaveName.insert(BufferSaveName)
#Checkboxes
Standard_Conversion = settings.value('Standard_Conversion', False)
Batch_Conversion = settings.value('Batch_Conversion', False)
From_Markdown = settings.value('From_Markdown', False)
From_Html = settings.value('From_Html', False)
From_Latex = settings.value('From_Latex', False)
From_Opml = settings.value('From_Opml', False)
To_Markdown = settings.value('To_Markdown', False)
To_Html = settings.value('To_Html', False)
To_Latex = settings.value('To_Latex', False)
To_Opml = settings.value('To_Opml', False)
To_Lyx = settings.value('To_Lyx', False)
if settings.value('From_Markdown') is not None:
if platform.system() == 'Darwin':
self.ui.ButtonFromMarkdown.setChecked(From_Markdown)
self.ui.ButtonFromHtml.setChecked(From_Html)
self.ui.ButtonFromLatex.setChecked(From_Latex)
self.ui.ButtonFromOpml.setChecked(From_Opml)
self.ui.ButtonToMarkdown.setChecked(To_Markdown)
self.ui.ButtonToHtml.setChecked(To_Html)
self.ui.ButtonToLatex.setChecked(To_Latex)
self.ui.ButtonToOpml.setChecked(To_Opml)
self.ui.ButtonToLyx.setChecked(To_Lyx)
self.ui.StandardConversion.setChecked(Standard_Conversion)
self.ui.BatchConversion.setChecked(Batch_Conversion)
self.ui.Window_Size.setChecked(Window_Size)
self.ui.Dock_Size.setChecked(Dock_Size)
self.ui.Dialog_Size.setChecked(Dialog_Size)
self.ui.Button_OldGui.setChecked(Button_OldGui)
self.ui.Button_NewGui.setChecked(Button_NewGui)
self.ui.Tab_StandardConverter.setChecked(Tab_StandardConverter)
self.ui.Tab_ManualConverter.setChecked(Tab_ManualConverter)
self.ui.Hide_Batch.setChecked(Hide_Batch)
else:
self.ui.ButtonFromMarkdown.setChecked(strtobool(From_Markdown))
self.ui.ButtonFromHtml.setChecked(strtobool(From_Html))
self.ui.ButtonFromLatex.setChecked(strtobool(From_Latex))
self.ui.ButtonFromOpml.setChecked(strtobool(From_Opml))
self.ui.ButtonToMarkdown.setChecked(strtobool(To_Markdown))
self.ui.ButtonToHtml.setChecked(strtobool(To_Html))
self.ui.ButtonToLatex.setChecked(strtobool(To_Latex))
self.ui.ButtonToOpml.setChecked(strtobool(To_Opml))
self.ui.ButtonToLyx.setChecked(strtobool(To_Lyx))
self.ui.StandardConversion.setChecked(strtobool(Standard_Conversion))
self.ui.BatchConversion.setChecked(strtobool(Batch_Conversion))
self.ui.Window_Size.setChecked(strtobool(Window_Size))
self.ui.Dock_Size.setChecked(strtobool(Dock_Size))
self.ui.Dialog_Size.setChecked(strtobool(Dialog_Size))
self.ui.Button_OldGui.setChecked(strtobool(Button_OldGui))
self.ui.Button_NewGui.setChecked(strtobool(Button_NewGui))
self.ui.Tab_StandardConverter.setChecked(strtobool(Tab_StandardConverter))
self.ui.Tab_ManualConverter.setChecked(strtobool(Tab_ManualConverter))
self.ui.Hide_Batch.setChecked(strtobool(Hide_Batch))
def cancel_dialog(self):
PreferenceDialog.close(self)
def settings(self):
settings = QSettings('Pandoc', 'PanConvert')
settings.setValue('Window_Size', self.ui.Window_Size.isChecked())
settings.setValue('Dock_Size', self.ui.Dock_Size.isChecked())
settings.setValue('Dialog_Size', self.ui.Dialog_Size.isChecked())
settings.setValue('Hide_Batch', self.ui.Hide_Batch.isChecked())
settings.setValue('Button_OldGui', self.ui.Button_OldGui.isChecked())
settings.setValue('Button_NewGui', self.ui.Button_NewGui.isChecked())
settings.setValue('Tab_StandardConverter', self.ui.Tab_StandardConverter.isChecked())
settings.setValue('Tab_ManualConverter', self.ui.Tab_ManualConverter.isChecked())
settings.setValue('path_pandoc', self.ui.Pandoc_Path.text())
settings.setValue('path_multimarkdown', self.ui.Markdown_Path.text())
settings.setValue('path_dialog', self.ui.Dialog_Path.text())
settings.setValue('BufferSaveSuffix', self.ui.BufferSaveSuffix.text())
settings.setValue('BufferSaveName', self.ui.BufferSaveName.text())
settings.setValue('fromParameter', self.ui.FromParameter.text())
settings.setValue('toParameter', self.ui.ToParameter.text())
settings.setValue('xtraParameter', self.ui.XtraParameter.text())
settings.setValue('Standard_Conversion', self.ui.StandardConversion.isChecked())
settings.setValue('Batch_Conversion', self.ui.BatchConversion.isChecked())
settings.setValue('From_Markdown', self.ui.ButtonFromMarkdown.isChecked())
settings.setValue('From_Html', self.ui.ButtonFromHtml.isChecked())
settings.setValue('From_Latex', self.ui.ButtonFromLatex.isChecked())
settings.setValue('From_Opml', self.ui.ButtonFromOpml.isChecked())
settings.setValue('To_Markdown', self.ui.ButtonToMarkdown.isChecked())
settings.setValue('To_Html', self.ui.ButtonToHtml.isChecked())
settings.setValue('To_Latex', self.ui.ButtonToLatex.isChecked())
settings.setValue('To_Opml', self.ui.ButtonToOpml.isChecked())
settings.setValue('To_Lyx', self.ui.ButtonToLyx.isChecked())
Dialog_Size = settings.value('Dialog_Size')
if Dialog_Size is True or Dialog_Size == 'true':
settings.setValue("Preference_size", self.size())
settings.setValue("Preference_pos", self.pos())
settings.sync()
settings.status()
PreferenceDialog.close(self)
def DirectoryPandoc(self):
self.ui.Pandoc_Path.clear()
fd = QtWidgets.QFileDialog(self)
fd.setDirectory(QtCore.QDir.homePath())
PandocDirectory = fd.getExistingDirectory()
self.ui.Pandoc_Path.insert(PandocDirectory)
def DirectoryMarkdown(self):
self.ui.Markdown_Path.clear()
fd = QtWidgets.QFileDialog(self)
fd.setDirectory(QtCore.QDir.homePath())
MarkdownDirectory = fd.getExistingDirectory()
self.ui.Markdown_Path.insert(MarkdownDirectory)
def DirectoryOpenSave(self):
self.ui.Dialog_Path.clear()
fd = QtWidgets.QFileDialog(self)
fd.setDirectory(QtCore.QDir.homePath())
OpenSaveDirectory = fd.getExistingDirectory()
self.ui.Dialog_Path.insert(OpenSaveDirectory)
def SetLanguage(self):
settings = QSettings('Pandoc', 'PanConvert')
Longname = self.ui.comboBoxLanguageSelector.currentText()
# asserting one code per language
codeLang = [key for key, value in lang.items() if value == Longname][0]
settings.setValue('default_language', codeLang)
settings.sync()
settings.status()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
myapp = PreferenceDialog()
myapp.show()
myapp.exec_()
| apaeffgen/PanConvert | source/dialogs/dialog_preferences.py | Python | gpl-3.0 | 11,698 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from setuptools import setup, find_packages
import mylittlebudget
with open('README.md') as f:
long_description = f.read()
with open('requirements.txt') as f:
install_requires = f.read()
setup(name='mylittlebudget',
version='0.1',
author='Florian Briand',
license='GPLv3',
long_description=long_description,
#package_dir={'': 'src'},
packages=find_packages(),
entry_points = {
'console_scripts': [
'mylittlebudget = mylittlebudget.core:main'
]
},
install_requires=install_requires,
#scripts=['scripts/mylittlebudget.py']
      )
| Nabellaleen/MyLittleBudget | setup.py | Python | gpl-3.0 | 674 |
"""
Platform-dependent OS calls - Linux edition
"""
import subprocess
import logging
LOGGER=logging.getLogger(__name__)
# Check if xdotool is available
xdotool=None
p = subprocess.run(['which', 'xdotool'], stdout=subprocess.PIPE)
xdotool = p.stdout.decode().rstrip()
if p.returncode != 0 or xdotool == '':
	LOGGER.warning('"xdotool" is not available. Changing the active window might not work. To resolve this please install "xdotool"')
xdotool = None
def get_foreground_window():
""" Returns the name (or pointer, or whatever is
required as set_foreground_window argument)
of the currently active window
"""
if not xdotool:
return None
cmd = ['xdotool', 'getwindowfocus', 'getwindowname']
p = subprocess.run(cmd, stdout=subprocess.PIPE)
	return p.stdout.decode().rstrip()
def set_foreground_window(name):
""" Changes the currently active window """
if not xdotool:
return None
cmd = ['xdotool', 'search', '--name', name, 'windowactivate']
subprocess.run(cmd)
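# Hypothetical usage (window title assumed for illustration):
#   previous = get_foreground_window()
#   set_foreground_window('Kodi')
#   # ... later, restore the previously focused window:
#   if previous:
#       set_foreground_window(previous)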
def find_icon_by_name(iconName):
""" Returns the content and content type of an Icon by name or path. """
return (None, None)
# vim: set fenc=utf-8 ts=4 sw=4 noet :
| Cybso/gameplay | gameplay/platform/linux.py | Python | gpl-3.0 | 1,137 |
def filfun():
return "returning from file1"
| nageswau/python_class | pack/file1.py | Python | gpl-3.0 | 48 |
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import spearmint_lite
import sys
os.chdir('../bci_framework')
sys.path.append('./BCI_Framework')
import Main
import Configuration_BCI
import Single_Job_runner as SJR
import numpy as np
import itertools
from time import sleep
import sklearn
import BO_BCI
if __name__ == '__main__':
mypath = '../Candidates'
chooser_modules = ["GPEIOptChooser", "GPEIOptChooser1", "GPEIOptChooser2", "GPEIOptChooser3", "GPEIOptChooser4", "RandomForestEIChooser", "RandomForestEIChooser1", "RandomForestEIChooser2", "RandomForestEIChooser3", "RandomForestEIChooser4", "RandomChooser","RandomChooser1", "RandomChooser2", "RandomChooser3", "RandomChooser4"]
datasets = ['BCICIII3b', 'BCICIV2b', 'BCICIV2a']
optimization_types_dict = {('BCICIII3b','BP'):[2], ('BCICIII3b','morlet'):[1], ('BCICIV2b','BP') : [2], ('BCICIV2b','morlet') : [1], ('BCICIV2a','BP') : [4], ('BCICIV2a','morlet') : [3]}
features = ['BP']
for dataset in datasets:
config = Configuration_BCI.Configuration_BCI('BCI_Framework', dataset)
subjects = config.configuration['subject_names_str']
for feature in features:
for chooser_module in chooser_modules:
for subject in subjects:
file_name = "results_" + str(optimization_types_dict[(dataset,feature)][0]) + '_' + str(chooser_module) + '_LogisticRegression_' + feature + '.dat_' + subject
# print file_name
with open(os.path.join(mypath, file_name)) as cand_file:
all_candidates = cand_file.readlines()
if len(all_candidates) < 40:
print len(all_candidates), file_name
| lol/BCI-BO-old | check_if_all_jobs_finished.py | Python | gpl-3.0 | 1,803 |
from django.shortcuts import render
from django.views.generic.base import TemplateView
from django.views.generic import FormView
from lsmentries.forms import EntryAddForm
class AddView(FormView):
template_name="lsmentries/add.html"
form_class = EntryAddForm
success_url = "/manage"
| acuriel/lsmdico | lsmentries/views.py | Python | gpl-3.0 | 297 |
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from rootpy.testdata import get_file
from nose.tools import assert_equal, with_setup
from nose.plugins.skip import SkipTest
from tempfile import mkdtemp
import os
import shutil
TEMPDIR = None
def setup_func():
global TEMPDIR
TEMPDIR = mkdtemp()
def teardown_func():
shutil.rmtree(TEMPDIR)
@with_setup(setup_func, teardown_func)
def test_root2hdf5():
try:
import tables
except ImportError:
raise SkipTest
from rootpy.root2hdf5 import root2hdf5, tables_open
rfile = get_file('test_tree.root')
hfilename = os.path.join(TEMPDIR, 'out.h5')
root2hdf5(rfile, hfilename)
hfile = tables_open(hfilename)
assert_equal(len(hfile.root.test), 1000)
hfile.close()
@with_setup(setup_func, teardown_func)
def test_root2hdf5_chunked():
try:
import tables
except ImportError:
raise SkipTest
from rootpy.root2hdf5 import root2hdf5, tables_open
rfile = get_file('test_tree.root')
hfilename = os.path.join(TEMPDIR, 'out.h5')
root2hdf5(rfile, hfilename, entries=10)
hfile = tables_open(hfilename)
assert_equal(len(hfile.root.test), 1000)
hfile.close()
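    # Note: entries=10 makes the conversion read the tree in chunks of 10
    # entries at a time; the resulting HDF5 table still holds all 1000 rows.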
@with_setup(setup_func, teardown_func)
def test_root2hdf5_chunked_selected():
try:
import tables
except ImportError:
raise SkipTest
from rootpy.root2hdf5 import root2hdf5, tables_open
rfile = get_file('test_tree.root')
hfilename = os.path.join(TEMPDIR, 'out.h5')
root2hdf5(rfile, hfilename, entries=90, selection='i % 2 == 0')
hfile = tables_open(hfilename)
assert_equal(len(hfile.root.test), 500)
hfile.close()
if __name__ == "__main__":
import nose
nose.runmodule()
| henryiii/rootpy | rootpy/tests/test_root2hdf5.py | Python | gpl-3.0 | 1,801 |
"""util.Split is a special block for splitting documents."""
import math
from udapi.core.basereader import BaseReader
# pylint: disable=abstract-method
# read_tree() does not need to be installed here
class Split(BaseReader):
"""Split Udapi document (with sentence-aligned trees in bundles) into several parts."""
def __init__(self, parts=None, bundles_per_doc=None, **kwargs):
"""Args:
        parts: the number of parts to split the document into
        bundles_per_doc: the number of bundles in each newly created part
"""
super().__init__(**kwargs)
if parts is None and bundles_per_doc is None:
raise ValueError('parts or bundles_per_doc must be specified')
if parts is not None and bundles_per_doc is not None:
raise ValueError('Cannot specify both parts and bundles_per_doc')
self.parts = parts
self.bundles_per_doc = bundles_per_doc
self.buffer = None
@staticmethod
def is_multizone_reader():
return False
def process_document(self, document):
if not self.buffer:
self.buffer = document.bundles
document.bundles = []
if self.bundles_per_doc is None:
self.bundles_per_doc = math.ceil(len(self.buffer) / self.parts)
self.buffer.extend(document.bundles)
document.bundles = self.buffer[:self.bundles_per_doc]
self.buffer = self.buffer[self.bundles_per_doc:]
self.finished = not self.buffer
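# Hypothetical udapy scenario (CLI invocation assumed, not part of this file):
#   udapy read.Conllu files=in.conllu util.Split parts=10 write.Conllu
# would stream the input document back out in ten roughly equal parts.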
| udapi/udapi-python | udapi/block/util/split.py | Python | gpl-3.0 | 1,506 |
# -*- coding: utf-8 -*-
import urlparse,urllib2,urllib,re
import os
import sys
from core import config
from core import logger
from core.item import Item
DEBUG = True
CHANNELNAME = "channelselector"
def getmainlist():
logger.info("channelselector.getmainlist")
itemlist = []
    # Get the language setting and its display label
idioma = config.get_setting("languagefilter")
logger.info("channelselector.getmainlist idioma=%s" % idioma)
langlistv = [config.get_localized_string(30025),config.get_localized_string(30026),config.get_localized_string(30027),config.get_localized_string(30028),config.get_localized_string(30029)]
try:
idiomav = langlistv[int(idioma)]
except:
idiomav = langlistv[0]
    # Add the channels that make up the main menu
itemlist.append( Item(title=config.get_localized_string(30118)+" ("+idiomav+")" , channel="channelselector" , action="channeltypes", thumbnail = urlparse.urljoin(get_thumbnail_path(),"channelselector.png") ) )
itemlist.append( Item(title=config.get_localized_string(30103) , channel="buscador" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(),"buscador.png")) )
itemlist.append( Item(title=config.get_localized_string(30128) , channel="trailertools" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(),"trailertools.png")) )
itemlist.append( Item(title=config.get_localized_string(30102) , channel="favoritos" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(),"favoritos.png")) )
if config.get_platform() in ("wiimc","rss") :itemlist.append( Item(title="Wiideoteca (Beta)" , channel="wiideoteca" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"wiideoteca.png")) )
if config.get_platform()=="rss":itemlist.append( Item(title="pyLOAD (Beta)" , channel="pyload" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(),"pyload.png")) )
itemlist.append( Item(title=config.get_localized_string(30101) , channel="descargas" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"descargas.png")) )
if "xbmceden" in config.get_platform():
itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"configuracion.png"), folder=False) )
else:
itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"configuracion.png")) )
#if config.get_setting("fileniumpremium")=="true":
# itemlist.append( Item(title="Torrents (Filenium)" , channel="descargasfilenium" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"torrents.png")) )
#if config.get_library_support():
if config.get_platform()!="rss": itemlist.append( Item(title=config.get_localized_string(30104) , channel="ayuda" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"ayuda.png")) )
return itemlist
# TODO: (3.1) Move the XBMC-specific code to the launcher
def mainlist(params,url,category):
logger.info("channelselector.mainlist")
    # Check for updates only at the top level
if config.get_platform()!="boxee":
try:
from core import updater
except ImportError:
logger.info("channelselector.mainlist No disponible modulo actualizaciones")
else:
if config.get_setting("updatecheck2") == "true":
logger.info("channelselector.mainlist Verificar actualizaciones activado")
try:
updater.checkforupdates()
except:
import xbmcgui
dialog = xbmcgui.Dialog()
dialog.ok("No se puede conectar","No ha sido posible comprobar","si hay actualizaciones")
logger.info("channelselector.mainlist Fallo al verificar la actualización")
pass
else:
logger.info("channelselector.mainlist Verificar actualizaciones desactivado")
itemlist = getmainlist()
for elemento in itemlist:
logger.info("channelselector.mainlist item="+elemento.title)
addfolder(elemento.title , elemento.channel , elemento.action , thumbnail=elemento.thumbnail, folder=elemento.folder)
# Label (top-right)...
import xbmcplugin
xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category="" )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
if config.get_setting("forceview")=="true":
# Confluence - Thumbnail
import xbmc
xbmc.executebuiltin("Container.SetViewMode(500)")
def getchanneltypes():
logger.info("channelselector getchanneltypes")
itemlist = []
itemlist.append( Item( title=config.get_localized_string(30121) , channel="channelselector" , action="listchannels" , category="*" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"channelselector")))
itemlist.append( Item( title=config.get_localized_string(30122) , channel="channelselector" , action="listchannels" , category="F" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"peliculas")))
itemlist.append( Item( title=config.get_localized_string(30123) , channel="channelselector" , action="listchannels" , category="S" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"series")))
itemlist.append( Item( title=config.get_localized_string(30124) , channel="channelselector" , action="listchannels" , category="A" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"anime")))
itemlist.append( Item( title=config.get_localized_string(30125) , channel="channelselector" , action="listchannels" , category="D" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"documentales")))
itemlist.append( Item( title=config.get_localized_string(30136) , channel="channelselector" , action="listchannels" , category="VOS" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"versionoriginal")))
itemlist.append( Item( title=config.get_localized_string(30126) , channel="channelselector" , action="listchannels" , category="M" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"musica")))
itemlist.append( Item( title=config.get_localized_string(30127) , channel="channelselector" , action="listchannels" , category="G" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"servidores")))
#itemlist.append( Item( title=config.get_localized_string(30134) , channel="channelselector" , action="listchannels" , category="NEW" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"novedades")))
return itemlist
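# Category codes used by getchanneltypes() and filterchannels(): F=movies,
# S=series, A=anime, D=documentaries, VOS=original version with subtitles,
# M=music, G=servers, and "*"=all categories.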
def channeltypes(params,url,category):
logger.info("channelselector.mainlist channeltypes")
lista = getchanneltypes()
for item in lista:
addfolder(item.title,item.channel,item.action,item.category,item.thumbnail,item.thumbnail)
# Label (top-right)...
import xbmcplugin
xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category="" )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
if config.get_setting("forceview")=="true":
# Confluence - Thumbnail
import xbmc
xbmc.executebuiltin("Container.SetViewMode(500)")
def listchannels(params,url,category):
logger.info("channelselector.listchannels")
lista = filterchannels(category)
for channel in lista:
if channel.type=="xbmc" or channel.type=="generic":
if channel.channel=="personal":
thumbnail=config.get_setting("personalchannellogo")
elif channel.channel=="personal2":
thumbnail=config.get_setting("personalchannellogo2")
elif channel.channel=="personal3":
thumbnail=config.get_setting("personalchannellogo3")
elif channel.channel=="personal4":
thumbnail=config.get_setting("personalchannellogo4")
elif channel.channel=="personal5":
thumbnail=config.get_setting("personalchannellogo5")
else:
thumbnail=urlparse.urljoin(get_thumbnail_path(),channel.channel+".png")
addfolder(channel.title , channel.channel , "mainlist" , channel.channel, thumbnail = thumbnail)
# Label (top-right)...
import xbmcplugin
xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
if config.get_setting("forceview")=="true":
# Confluence - Thumbnail
import xbmc
xbmc.executebuiltin("Container.SetViewMode(500)")
def filterchannels(category):
returnlist = []
if category=="NEW":
channelslist = channels_history_list()
for channel in channelslist:
channel.thumbnail = urlparse.urljoin(get_thumbnail_path(),channel.channel+".png")
channel.plot = channel.category.replace("VOS","Versión original subtitulada").replace("F","Películas").replace("S","Series").replace("D","Documentales").replace("A","Anime").replace(",",", ")
returnlist.append(channel)
else:
try:
idioma = config.get_setting("languagefilter")
logger.info("channelselector.filterchannels idioma=%s" % idioma)
langlistv = ["","ES","EN","IT","PT"]
idiomav = langlistv[int(idioma)]
logger.info("channelselector.filterchannels idiomav=%s" % idiomav)
except:
idiomav=""
channelslist = channels_list()
for channel in channelslist:
            # Skip if the user did not choose "all" and the channel is not in the chosen category
if category<>"*" and category not in channel.category:
#logger.info(channel[0]+" no entra por tipo #"+channel[4]+"#, el usuario ha elegido #"+category+"#")
continue
            # Skip if the user did not choose "all" and the channel is not in the chosen language
if channel.language<>"" and idiomav<>"" and idiomav not in channel.language:
#logger.info(channel[0]+" no entra por idioma #"+channel[3]+"#, el usuario ha elegido #"+idiomav+"#")
continue
channel.thumbnail = urlparse.urljoin(get_thumbnail_path(),channel.channel+".png")
channel.plot = channel.category.replace("VOS","Versión original subtitulada").replace("F","Películas").replace("S","Series").replace("D","Documentales").replace("A","Anime").replace(",",", ")
returnlist.append(channel)
return returnlist
def channels_history_list():
itemlist = []
itemlist.append( Item( title="Newpct (08/03/2013)" , channel="newpct" , language="ES" , category="F,S,D,A" , type="generic" )) # jesus 08/03/2013
itemlist.append( Item( title="Malvin.tv (12/02/2013)" , channel="malvin" , language="ES" , category="F,D" , type="generic" ))
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="PelisX (01/02/2013)" , channel="pelisx" , language="ES" , category="F" , type="generic" )) # ZeDinis 01/02/2013
itemlist.append( Item( title="Nukety (25/12/2012)" , channel="nukety" , language="ES" , category="F,S" , type="generic" ))
itemlist.append( Item( title="Film per tutti (IT) (27/11/2012)" , channel="filmpertutti" , language="IT" , category="F,S,A" , type="generic" ))
itemlist.append( Item( title="Watch Cartoon Online (23/11/2012)" , channel="watchcartoononline" , language="EN" , category="F,S", type="generic" )) # jesus 23/11/2012
itemlist.append( Item( title="Series Online TV (12/11/2012)" , channel="seriesonlinetv", language="ES" , category="S" , type="generic" )) # jesus 12/11/2012
itemlist.append( Item( title="Novelas de TV (12/11/2012)" , channel="novelasdetv", language="ES" , category="S" , type="generic" )) # jesus 12/11/2012
itemlist.append( Item( title="Quiero Dibujos Animados (12/11/2012)", channel="quierodibujosanimados", language="ES" , category="S" , type="generic" )) # jesus 12/11/2012
#itemlist.append( Item( title="Cinemastreaming (IT) (5/11/2012)" , channel="cinemastreaming" , language="IT" , category="F,S,A" , type="generic" )) # jesus 5/11/2012
itemlist.append( Item( title="Peliculamos (IT) (5/11/2012)" , channel="peliculamos" , language="IT" , category="F,S,A" , type="generic" )) # jesus 5/11/2012
itemlist.append( Item( title="JKanime (15/10/2012)" , channel="jkanime" , language="ES" , category="A" , type="generic" )) # jesus 15/10/2012
itemlist.append( Item( title="Ver Telenovelas (15/10/2012)" , channel="vertelenovelas" , language="ES" , category="S" , type="generic" )) # jesus 15/10/2012
itemlist.append( Item( title="Yaske.net (09/10/2012)" , channel="yaske" , language="ES" , category="F" , type="generic" )) # jesus 09/10/2012
itemlist.append( Item( title="Divxatope (27/08/2012)" , channel="divxatope" , language="ES" , category="F,S" , type="generic" )) # jesus 27/08/2012
itemlist.append( Item( title="Mejortorrent (20/08/2012)" , channel="mejortorrent" , language="ES" , category="F,S" , type="generic" )) # jesus 20/08/2012
itemlist.append( Item( title="Newdivxonline (07/08/2012)" , channel="newdivxonline" , language="ES" , category="F" , type="generic" )) # morser 07/08/2012
itemlist.append( Item( title="cine-online.eu (16/07/2012)" , channel="cineonlineeu" , language="ES" , category="F" , type="generic" )) # jesus 16/7/2012
itemlist.append( Item( title="Pirate Streaming (16/07/2012)" , channel="piratestreaming" , language="IT" , category="F" , type="generic" )) # jesus 16/7/2012
itemlist.append( Item( title="Tucinecom (16/07/2012)" , channel="tucinecom" , language="ES" , category="F" , type="generic" )) # jesus 16/7/2012
itemlist.append( Item( title="Cinetux (16/07/2012)" , channel="cinetux" , language="ES" , category="F" , type="generic" )) # jesus 16/7/2012
itemlist.append( Item( title="Tus Novelas (03/07/2012)" , channel="tusnovelas" , language="ES" , category="S" , type="generic" )) # jesus 3/7/2012
itemlist.append( Item( title="Unsoloclic (03/07/2012)" , channel="unsoloclic" , language="ES" , category="F,S" , type="generic" )) # jesus 3/7/2012
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Cinetemagay (15/04/2012)" , channel="cinetemagay" , language="ES" , category="F" , type="generic" )) # sdfasd 15/4/2012
itemlist.append( Item( title="Sipeliculas (02/03/2012)" , channel="sipeliculas" , language="ES" , category="F" , type="generic" )) # miguel 2/3/2012
itemlist.append( Item( title="Gnula (15/12/2011)" , channel="gnula" , language="ES" , category="F" , type="generic" )) # vcalvo 15/12/2011
itemlist.append( Item( title="Series ID (15/12/2011)" , channel="seriesid" , language="ES" , category="S,VOS" , type="generic" )) # vcalvo 15/12/2011
itemlist.append( Item( title="Bajui (14/12/2011)" , channel="bajui" , language="ES" , category="F,S,D,VOS", type="generic")) # vcalvo 14/12/2011
itemlist.append( Item( title="Shurweb (14/12/2011)" , channel="shurweb" , language="ES" , category="F,S,D,A", type="generic")) # vcalvo 14/12/2011
itemlist.append( Item( title="Justin.tv (12/12/2011)" , channel="justintv" , language="" , category="G" , type="generic" )) # bandavi 12/12/2011
itemlist.append( Item( title="Series.ly (19/11/2011)" , channel="seriesly" , language="ES" , category="S,A,VOS" , type="generic" )) # jesus/mrfloffy 19/11/2011
itemlist.append( Item( title="Teledocumentales (19/10/2011)" , channel="teledocumentales" , language="ES" , category="D" , type="generic" )) # mrfloffy 19/10/2011
itemlist.append( Item( title="Peliculasaudiolatino (14/10/2011)" , channel="peliculasaudiolatino" , language="ES" , category="F" , type="generic" )) # Dalim 14/10/2011
itemlist.append( Item( title="Animeflv (14/10/2011)" , channel="animeflv" , language="ES" , category="A,VOS" , type="generic" )) # MarioXD 14/10/2011
itemlist.append( Item( title="Moviezet (01/10/2011)" , channel="moviezet" , language="ES" , category="F,S,VOS" , type="generic" )) # mrfluffy 01/10/2011
#itemlist.append( Item( title="NewHD (05/05/2011)" , channel="newhd" , language="ES" , category="F" , type="generic" )) # xextil 05/05/2011
return itemlist
def channels_list():
itemlist = []
    # In doubt
#itemlist.append( Item( title="Descarga Cine Clásico" , channel="descargacineclasico" , language="ES" , category="F,S" , type="generic" ))
#itemlist.append( Item( title="Asia-Team" , channel="asiateam" , language="ES" , category="F,S" , type="generic" ))
#itemlist.append( Item( title="Buena Isla" , channel="buenaisla" , language="ES" , category="A,VOS" , type="generic" ))
itemlist.append( Item( viewmode="movie", title="Tengo una URL" , channel="tengourl" , language="" , category="F,S,D,A" , type="generic" ))
if config.get_setting("personalchannel")=="true":
itemlist.append( Item( title=config.get_setting("personalchannelname") , channel="personal" , language="" , category="F,S,D,A" , type="generic" ))
if config.get_setting("personalchannel2")=="true":
itemlist.append( Item( title=config.get_setting("personalchannelname2") , channel="personal2" , language="" , category="F,S,D,A" , type="generic" ))
if config.get_setting("personalchannel3")=="true":
itemlist.append( Item( title=config.get_setting("personalchannelname3") , channel="personal3" , language="" , category="F,S,D,A" , type="generic" ))
if config.get_setting("personalchannel4")=="true":
itemlist.append( Item( title=config.get_setting("personalchannelname4") , channel="personal4" , language="" , category="F,S,D,A" , type="generic" ))
if config.get_setting("personalchannel5")=="true":
itemlist.append( Item( title=config.get_setting("personalchannelname5") , channel="personal5" , language="" , category="F,S,D,A" , type="generic" ))
itemlist.append( Item( title="Animeflv" , channel="animeflv" , language="ES" , category="A" , type="generic" ))
itemlist.append( Item( title="Animeid" , channel="animeid" , language="ES" , category="A" , type="generic" ))
itemlist.append( Item( title="Bajui" , channel="bajui" , language="ES" , category="F,S,D,VOS" , type="generic" ))
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Beeg" , channel="beeg" , language="ES" , category="F" , type="generic" ))
#itemlist.append( Item( title="Biblioteca XBMC" , channel="libreria" , language="" , category="F,S,D,A" , type="wiimc" ))
itemlist.append( Item( title="Cine-online.eu" , channel="cineonlineeu" , language="ES" , category="F" , type="generic" )) # jesus 16/7/2012
itemlist.append( Item( title="Cineblog01 (IT)" , channel="cineblog01" , language="IT" , category="F,S,A,VOS" , type="generic" ))
itemlist.append( Item( title="Cinehanwer" , channel="cinehanwer" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Cinemaxx (RO)" , channel="cinemax_rs" , language="RU" , category="F,S,A,VOS" , type="generic" ))
#itemlist.append( Item( title="Cinegratis" , channel="cinegratis" , language="ES" , category="F" , type="generic" ))
#itemlist.append( Item( title="Cinetube" , channel="cinetube" , language="ES" , category="F,S,A,D,VOS" , type="generic" ))
#itemlist.append( Item( title="Cinemastreaming (IT)" , channel="cinemastreaming" , language="IT" , category="F,S,A" , type="generic" )) # jesus 5/11/2012
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Cinetemagay" , channel="cinetemagay" , language="ES" , category="F" , type="generic" )) # sdfasd 15/4/2012
itemlist.append( Item( title="Cinetux" , channel="cinetux" , language="ES" , category="F" , type="generic" ))# jesus 16/7/2012
if config.get_platform()=="boxee" or "xbmc" in config.get_platform(): itemlist.append( Item( title="Cuevana" , channel="cuevana" , language="ES" , category="F,S,VOS" , type="generic" ))
#itemlist.append( Item( title="CineVOS" , channel="cinevos" , language="ES" , category="F,A,D,VOS" , type="generic" ))
#itemlist.append( Item( title="dibujosanimadosgratis" , channel="dibujosanimadosgratis", language="ES" , category="A" , type="generic" ))
#itemlist.append( Item( title="Descargaya" , channel="descargaya" , language="ES" , category="F,S" , type="generic" ))
itemlist.append( Item( title="Discoverymx" , channel="discoverymx" , language="ES" , category="D" , type="generic" ))
#itemlist.append( Item( title="Divx Online" , channel="divxonline" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Divxatope (Torrent)" , channel="divxatope" , language="ES" , category="F,S" , type="generic" ))
#itemlist.append( Item( title="DL-More (FR)" , channel="dlmore" , language="FR" , category="S" , type="generic" ))
itemlist.append( Item( title="DocumaniaTV" , channel="documaniatv" , language="ES" , category="D" , type="generic" ))
itemlist.append( Item( title="El señor del anillo" , channel="elsenordelanillo" , language="ES" , category="F" , type="xbmc" ))
itemlist.append( Item( title="Elite Torrent" , channel="elitetorrent" , language="ES" , category="F,S,D" , type="xbmc" ))
itemlist.append( Item( title="Enlacia" , channel="enlacia" , language="ES" , category="F,S,D" , type="generic" ))
# DESACTIVADO - SIN CONTENIDOS itemlist.append( Item( title="Filmixt" , channel="filmixt" , language="ES" , category="F" , type="generic" ))
#itemlist.append( Item( title="FilmesOnlineBr" , channel="filmesonlinebr" , language="PT" , category="F" , type="generic" ))
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Filesmonster Catalogue" , channel="filesmonster_catalogue" , language="es" , category="F" , type="generic" ))
itemlist.append( Item( title="Film per tutti (IT)" , channel="filmpertutti" , language="IT" , category="F,S,A" , type="generic" ))
itemlist.append( Item( title="Film Senza Limiti (IT)" , channel="filmsenzalimiti" , language="IT" , category="F" , type="generic" ))
itemlist.append( Item( title="Filmenoi (RO)" , channel="filmenoi" , language="RU" , category="F" , type="generic" ))
#if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Gaypornshare" , channel="gaypornshare" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Goear" , channel="goear" , language="ES" , category="M" , type="generic" )) # vcalvo 15/12/2011
itemlist.append( Item( title="Gnula" , channel="gnula" , language="ES" , category="F" , type="generic" )) # vcalvo 15/12/2011
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Hentai FLV" , channel="hentaiflv" , language="es" , category="A" , type="generic" ))
#itemlist.append( Item( title="Instreaming (IT)" , channel="instreaming" , language="IT" , category="F,S" , type="generic" ))
#itemlist.append( Item( title="Internapoli City (IT)" , channel="internapoli" , language="IT" , category="F" , type="generic" ))
itemlist.append( Item( title="ItaliaFilms.tv (IT)" , channel="italiafilm" , language="IT" , category="F,S,A" , type="generic" ))
#if "xbmc" in config.get_platform() or "boxee" in config.get_platform():
# itemlist.append( Item( title="Justin.tv" , channel="justintv" , language="" , category="G" , type="generic" ))
itemlist.append( Item( title="JKanime" , channel="jkanime" , language="ES" , category="A" , type="generic" )) # jesus 15/10/2012
itemlist.append( Item( title="La Guarida de bizzente", channel="documentalesatonline2", language="ES" , category="D" , type="generic" ))
itemlist.append( Item( title="LetMeWatchThis" , channel="letmewatchthis" , language="EN" , category="F,S,VOS" , type="generic" ))
itemlist.append( Item( title="lossimpsonsonline.com.ar", channel="los_simpsons_online" , language="ES" , category="S" , type="generic" ))
itemlist.append( Item( title="Malvin.tv" , channel="malvin" , language="ES" , category="F,D" , type="generic" ))
itemlist.append( Item( title="Mega HD" , channel="megahd" , language="ES" , category="F,S,D,A" , type="generic" ))
#itemlist.append( Item( title="Megapass" , channel="megapass" , language="ES" , category="F,S,D" , type="generic" ))
itemlist.append( Item( title="Megaforo" , channel="megaforo" , language="ES" , category="F,S,D" , type="generic" ))
itemlist.append( Item( title="Megaspain" , channel="megaspain" , language="ES" , category="F,S,D" , type="generic" ))
itemlist.append( Item( title="Mejor Torrent" , channel="mejortorrent" , language="ES" , category="F,S,D" , type="xbmc" ))
itemlist.append( Item( title="MCAnime" , channel="mcanime" , language="ES" , category="A" , type="generic" ))
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="MocosoftX" , channel="mocosoftx" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Moviepremium" , channel="moviepremium" , language="ES" , category="F" , type="generic" )) # yorel 04/08/2013
#itemlist.append( Item( title="Moviezet" , channel="moviezet" , language="ES" , category="F,S,VOS" , type="generic" )) # mrfluffy 01/10/2011
#if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="myhentaitube" , channel="myhentaitube" , language="ES" , category="F" , type="generic" ))
#itemlist.append( Item( title="NewDivx" , channel="newdivx" , language="ES" , category="F,D" , type="generic" ))
#itemlist.append( Item( title="Newdivxonline" , channel="newdivxonline" , language="ES" , category="F" , type="generic" ))
#itemlist.append( Item( title="NewHD" , channel="newhd" , language="ES" , category="F,VOS" , type="generic" )) # xextil 05/05/2011
itemlist.append( Item( title="Newpct" , channel="newpct" , language="ES" , category="F,S,D,A" , type="generic" )) # jesus 08/03/2013
itemlist.append( Item( title="Novelas de TV" , channel="novelasdetv", language="ES" , category="S" , type="generic" )) # jesus 12/11/2012
#itemlist.append( Item( title="Nukety" , channel="nukety" , language="ES" , category="F,S" , type="generic" ))
itemlist.append( Item( title="Oranline" , channel="oranline" , language="ES" , category="F" , type="generic" ))# jesus 16/7/2012
#itemlist.append( Item( title="NKI" , channel="nki" , language="ES" , category="S" , type="generic" ))
#itemlist.append( Item( title="No Megavideo" , channel="nomegavideo" , language="ES" , category="F" , type="xbmc" ))
# DESACTIVADO - SIN CONTENIDOS itemlist.append( Item( title="NoloMires" , channel="nolomires" , language="ES" , category="F" , type="xbmc" ))
#itemlist.append( Item( title="Peliculas Online FLV" , channel="peliculasonlineflv" , language="ES" , category="F,D" , type="generic" ))
# DESACTIVADO - SIN CONTENIDOS itemlist.append( Item( title="Peliculas21" , channel="peliculas21" , language="ES" , category="F" , type="xbmc" ))
#itemlist.append( Item( title="Peliculamos (IT)" , channel="peliculamos" , language="IT" , category="F,S,A" , type="generic" )) # jesus 5/11/2012
itemlist.append( Item( title="Peliculasaudiolatino" , channel="peliculasaudiolatino" , language="ES" , category="F" , type="generic" ))
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="PeliculasEroticas" , channel="peliculaseroticas" , language="ES" , category="F" , type="xbmc" ))
#itemlist.append( Item( title="peliculashd.pro" , channel="peliculashdpro" , language="ES" , category="F" , type="generic" )) # jesus 12/11/2012
#itemlist.append( Item( title="Peliculasfull" , channel="peliculasfull" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Peliculasid" , channel="peliculasid" , language="ES" , category="F,VOS" , type="xbmc" ))
itemlist.append( Item( title="PeliculasMX" , channel="peliculasmx" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Peliculaspepito" , channel="peliculaspepito" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Peliculasyonkis" , channel="peliculasyonkis_generico" , language="ES" , category="F,VOS" , type="generic" ))
itemlist.append( Item( title="Pelis24" , channel="pelis24" , language="ES" , category="F,S,VOS" , type="generic" ))
itemlist.append( Item( title="PelisPekes" , channel="pelispekes" , language="ES" , category="F" , type="generic" ))
#if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="PelisX" , channel="pelisx" , language="ES" , category="F" , type="generic" )) # ZeDinis 01/02/2013
itemlist.append( Item( title="Pirate Streaming (IT)" , channel="piratestreaming" , language="IT" , category="F" , type="generic" )) # jesus 16/7/2012
itemlist.append( Item( title="Pordede" , channel="pordede" , language="ES" , category="F,S" , type="generic" )) # jesus 16/6/2014
#itemlist.append( Item( title="PelisFlv" , channel="pelisflv" , language="ES" , category="F" , type="xbmc" ))
itemlist.append( Item( title="Quebajamos" , channel="quebajamos", language="ES" , category="F,S,D" , type="generic" )) # jesus 16/06/2014
itemlist.append( Item( title="Quiero Dibujos Animados", channel="quierodibujosanimados", language="ES" , category="S" , type="generic" )) # jesus 12/11/2012
# YA NO EXISTE itemlist.append( Item( title="Redes.tv" , channel="redestv" , language="ES" , category="D" , type="xbmc" ))
itemlist.append( Item( title="Robinfilm (IT)" , channel="robinfilm" , language="IT" , category="F" , type="generic" )) # jesus 16/05/2011
#itemlist.append( Item( title="Seriematic" , channel="seriematic" , language="ES" , category="S,D,A" , type="generic" ))
itemlist.append( Item( title="Seriesflv" , channel="seriesflv" , language="ES" , category="S" , type="generic" ))
itemlist.append( Item( title="Serieonline" , channel="serieonline" , language="ES" , category="F,S,D" , type="generic" ))
#itemlist.append( Item( title="Series ID" , channel="seriesid" , language="ES" , category="S,VOS" , type="generic" )) # vcalvo 15/12/2011
itemlist.append( Item( title="Series.ly" , channel="seriesly" , language="ES" , category="F,S,A,VOS" , type="generic" ))
#if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Series Hentai" , channel="serieshentai" , language="ES" , category="F" , type="generic" )) # kira 10/04/2011
# DESACTIVADO - SIN CONTENIDO itemlist.append( Item( title="Series21" , channel="series21" , language="ES" , category="S" , type="xbmc" ))
itemlist.append( Item( title="Seriesdanko" , channel="seriesdanko" , language="ES" , category="S,VOS" , type="generic" ))
#itemlist.append( Item( title="Series Online TV" , channel="seriesonlinetv", language="ES" , category="S" , type="generic" )) # jesus 12/11/2012
itemlist.append( Item( title="Seriespepito" , channel="seriespepito" , language="ES" , category="S,VOS" , type="generic" ))
itemlist.append( Item( title="Seriesyonkis" , channel="seriesyonkis" , language="ES" , category="S,A,VOS" , type="generic" , extra="Series" ))
#itemlist.append( Item( title="ShurHD" , channel="shurhd" , language="ES" , category="F,S" , type="generic" ))
itemlist.append( Item( title="Shurweb" , channel="shurweb" , language="ES" , category="F,S,D,A" , type="generic" ))
itemlist.append( Item( title="Sintonizzate" , channel="sintonizzate" , language="ES" , category="F,S,D,A" , type="generic" ))
itemlist.append( Item( title="Sipeliculas" , channel="sipeliculas" , language="ES" , category="F" , type="generic" )) # miguel 2/3/2012
#itemlist.append( Item( title="Sofacine" , channel="sofacine" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Somosmovies" , channel="somosmovies" , language="ES" , category="F,S,D,A,VOS" , type="generic" ))
itemlist.append( Item( title="Sonolatino" , channel="sonolatino" , language="" , category="M" , type="xbmc" ))
itemlist.append( Item( title="Stagevu" , channel="stagevusite" , language="" , category="G" , type="xbmc" ))
itemlist.append( Item( title="Stormtv" , channel="stormtv" , language="ES" , category="S,A,VOS" , type="generic" , extra="Series" ))
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Submit Your Flicks" , channel="submityouflicks" , language="ES" , category="F" , type="generic" ))
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Submit Your Tapes" , channel="submityourtapes" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Teledocumentales" , channel="teledocumentales" , language="ES" , category="D" , type="generic" )) # mrfloffy 19/10/2011
itemlist.append( Item( title="Tibimate" , channel="tibimate" , language="ES" , category="F" , type="generic" )) # mrfloffy 19/10/2011
#itemlist.append( Item( title="Terror y Gore" , channel="terrorygore" , language="ES,EN" , category="F" , type="xbmc" ))
itemlist.append( Item( title="Trailers ecartelera" , channel="ecarteleratrailers" , language="ES,EN" , category="F" , type="generic" ))
#if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="Tube8" , channel="tube8" , language="EN" , category="G" , type="generic" ))
itemlist.append( Item( title="tu.tv" , channel="tutvsite" , language="ES" , category="G" , type="generic" ))
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="tubehentai" , channel="tubehentai" , language="ES" , category="F" , type="xbmc" ))
#itemlist.append( Item( title="Tu butaka de cine" , channel="tubutakadecine" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Tu Mejor TV" , channel="tumejortv" , language="ES" , category="F,S" , type="generic" ))
itemlist.append( Item( title="Tus Novelas" , channel="tusnovelas" , language="ES" , category="S" , type="generic" ))# jesus 3/7/2012
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="tuporno.tv" , channel="tupornotv" , language="ES" , category="F" , type="generic" ))
#itemlist.append( Item( title="TVShack" , channel="tvshack" , language="EN" , category="F,S,A,D,M" , type="xbmc" ))
#itemlist.append( Item( title="Vagos" , channel="vagos" , language="ES" , category="F,S" , type="xbmc" ))
#itemlist.append( Item( title="Veocine" , channel="veocine" , language="ES" , category="F,A,D" , type="xbmc" ))
itemlist.append( Item( title="Unsoloclic" , channel="unsoloclic" , language="ES" , category="F,S" , type="generic" ))# jesus 3/7/2012
itemlist.append( Item( title="VePelis" , channel="vepelis" , language="ES" , category="F" , type="generic" ))# jjchao 28/05/2013
#itemlist.append( Item( title="Ver-anime" , channel="veranime" , language="ES" , category="A" , type="generic" ))
#itemlist.append( Item( title="Ver-series" , channel="verseries" , language="ES" , category="S" , type="generic" )) # 15/12/2011 jesus
itemlist.append( Item( title="Ver Telenovelas", channel="vertelenovelas" , language="ES" , category="S" , type="generic" ))
itemlist.append( Item( title="Vox filme online (RO)", channel="voxfilme" , language="RU" , category="S" , type="generic" ))
#itemlist.append( Item( title="Watchanimeon" , channel="watchanimeon" , language="EN" , category="A" , type="xbmc" ))
itemlist.append( Item( title="Watch Cartoon Online" , channel="watchcartoononline" , language="EN" , category="F,S", type="generic" )) # jesus 23/11/2012
itemlist.append( Item( title="XO (RO)" , channel="xo" , language="RU" , category="F,S", type="generic" )) # jesus 23/11/2012
if config.get_setting("enableadultmode") == "true": itemlist.append( Item( title="xhamster" , channel="xhamster" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="Yaske.net" , channel="yaske" , language="ES" , category="F" , type="generic" ))
itemlist.append( Item( title="YouAnime HD" , channel="youanimehd" , language="ES" , category="A" , type="generic" ))
#itemlist.append( Item( title="Yotix" , channel="yotix" , language="ES" , category="A" , type="generic" ))
itemlist.append( Item( title="Zampaseries" , channel="zampaseries" , language="ES" , category="F,S" , type="generic" ))
itemlist.append( Item( title="Zate.tv" , channel="zatetv" , language="ES" , category="F,S" , type="generic" ))
itemlist.append( Item( title="Zpeliculas" , channel="zpeliculas" , language="ES" , category="F" , type="generic" ))
#itemlist.append( Item( title="Dospuntocerovision" , channel="dospuntocerovision" , language="ES" , category="F,S" , type="xbmc" ))
#itemlist.append( Item( title="Pintadibujos" , channel="pintadibujos" , language="ES" , category="F,A" , type="xbmc" ))
#itemlist.append( Item( title="Film Streaming" , "filmstreaming" , language="IT" , "F,A" , type="xbmc" ))
#itemlist.append( Item( title="Pelis-Sevillista56" , "sevillista" , language="ES" , "F" , type="xbmc"))
#itemlist.append( Item( title="SoloSeries" , "soloseries" , language="ES" , "S" , type="xbmc" ))
#itemlist.append( Item( title="seriesonline.us" , "seriesonline" , language="ES" , "S" , type="xbmc" ))
#itemlist.append( Item( title="Animetakus" , channel="animetakus" , language="ES" , category="A" , type="generic" ))
#itemlist.append( Item( title="Documentalesatonline" , channel="documentalesatonline" , language="ES" , category="D" , type="xbmc" ))
#itemlist.append( Item( title="Programas TV Online" , channel="programastv" , language="ES" , category="D" , type="xbmc" ))
#itemlist.append( Item( title="Futbol Virtual" , "futbolvirtual" , language="ES" , "D" , type="xbmc" ))
#channelslist.append([ "Eduman Movies" , "edumanmovies" , "" , "ES" , "F" ])
#channelslist.append([ "SesionVIP" , "sesionvip" , "" , "ES" , "F" ])
#channelslist.append([ "Newcineonline" , "newcineonline" , "" , "ES" , "S" ])
#channelslist.append([ "PeliculasHD" , "peliculashd" , "" , "ES" , "F" ])
#channelslist.append([ "Wuapi" , "wuapisite" , "" , "ES" , "F" ])
#channelslist.append([ "Frozen Layer" , "frozenlayer" , "" , "ES" , "A" ])
#channelslist.append([ "Ovasid" , "ovasid" , "" , "ES" , "A" , "xbmc" ])
return itemlist
def addfolder(nombre,channelname,accion,category="",thumbnailname="",thumbnail="",folder=True):
if category == "":
try:
category = unicode( nombre, "utf-8" ).encode("iso-8859-1")
except:
pass
import xbmc
import xbmcgui
import xbmcplugin
listitem = xbmcgui.ListItem( nombre , iconImage="DefaultFolder.png", thumbnailImage=thumbnail)
itemurl = '%s?channel=%s&action=%s&category=%s' % ( sys.argv[ 0 ] , channelname , accion , category )
xbmcplugin.addDirectoryItem( handle = int(sys.argv[ 1 ]), url = itemurl , listitem=listitem, isFolder=folder)
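# Illustrative use of addfolder (editor's sketch; "Cinetux"/"mainlist" are just
# sample values, not part of this module):
# addfolder("Cinetux", "cinetux", "mainlist",
#           thumbnail=get_thumbnail_path() + "cinetux.png")
# builds an item URL of the form
# "<plugin-url>?channel=cinetux&action=mainlist&category=Cinetux"
# and registers it with xbmcplugin.addDirectoryItem as a folder entry.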
def get_thumbnail_path():
WEB_PATH = ""
thumbnail_type = config.get_setting("thumbnail_type")
if thumbnail_type=="":
thumbnail_type="2"
if thumbnail_type=="0":
WEB_PATH = "http://pelisalacarta.mimediacenter.info/posters/"
elif thumbnail_type=="1":
WEB_PATH = "http://pelisalacarta.mimediacenter.info/banners/"
elif thumbnail_type=="2":
WEB_PATH = "http://pelisalacarta.mimediacenter.info/squares/"
return WEB_PATH | golaizola/pelisalacarta-xbmc | channelselector.py | Python | gpl-3.0 | 44,793 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Conditions-file preview and mini-editor for the Builder
"""
import os
import sys
import pickle
import wx
from wx.lib.expando import ExpandoTextCtrl, EVT_ETC_LAYOUT_NEEDED
from pkg_resources import parse_version
from psychopy import gui
from psychopy.experiment.utils import valid_var_re
from psychopy.data.utils import _nonalphanumeric_re
from psychopy.localization import _translate
darkblue = wx.Colour(30, 30, 150, 255)
darkgrey = wx.Colour(65, 65, 65, 255)
white = wx.Colour(255, 255, 255, 255)
class DlgConditions(wx.Dialog):
"""Given a file or conditions, present values in a grid; view, edit, save.
Accepts file name, list of lists, or list-of-dict
Designed around a conditionsFile, but potentially more general.
Example usage: from builder.DlgLoopProperties.viewConditions()
edit new empty .pkl file:
gridGUI = builder.DlgConditions(parent=self) # create and present Dlg
edit existing .pkl file, loading from file (also for .csv or .xlsx):
gridGUI = builder.DlgConditions(fileName=self.conditionsFile,
parent=self, title=fileName)
preview existing .csv or .xlsx file that has been loaded -> conditions:
gridGUI = builder.DlgConditions(conditions, parent=self,
title=fileName, fixed=True)
To add columns, an instance of this class will instantiate a new instance
having one more column. Doing so makes the return value from the first
instance's showModal() meaningless. In order to update things like
fileName and conditions, values are set in the parent, and should not be
set based on showModal retVal.
Author: Jeremy Gray, 2011
adjusted for wx 3.x: Dec 2015
"""
def __init__(self, grid=None, fileName=False, parent=None, title='',
trim=True, fixed=False, hasHeader=True, gui=True,
extraRows=0, extraCols=0,
clean=True, pos=wx.DefaultPosition, preview=True,
_restore=None, size=wx.DefaultSize,
style=wx.DEFAULT_DIALOG_STYLE | wx.DIALOG_NO_PARENT):
self.parent = parent # gets the conditionsFile info
if parent:
self.helpUrl = self.parent.app.urls['builder.loops']
# read data from file, if any:
self.defaultFileName = 'conditions.pkl'
self.newFile = True
if _restore:
self.newFile = _restore[0]
self.fileName = _restore[1]
if fileName:
grid = self.load(fileName)
if grid:
self.fileName = fileName
self.newFile = False
if not title:
f = os.path.abspath(fileName)
f = f.rsplit(os.path.sep, 2)[1:]
f = os.path.join(*f) # eg, BART/trialTypes.xlsx
title = f
elif not grid:
title = _translate('New (no file)')
elif _restore:
if not title:
f = os.path.abspath(_restore[1])
f = f.rsplit(os.path.sep, 2)[1:]
f = os.path.join(*f) # eg, BART/trialTypes.xlsx
title = f
elif not title:
title = _translate('Conditions data (no file)')
# if got here via addColumn:
# convert from conditions dict format:
if grid and type(grid) == list and type(grid[0]) == dict:
conditions = grid[:]
numCond, numParam = len(conditions), len(conditions[0])
grid = [list(conditions[0].keys())]
for i in range(numCond):
row = list(conditions[i].values())
grid.append(row)
hasHeader = True # keys of a dict are the header
# ensure a sensible grid, or provide a basic default:
if not grid or not len(grid) or not len(grid[0]):
grid = [[self.colName(0)], [u'']]
hasHeader = True
extraRows += 5
extraCols += 3
self.grid = grid # grid is list of lists
self.fixed = bool(fixed)
if self.fixed:
extraRows = extraCols = 0
trim = clean = confirm = False
else:
style = style | wx.RESIZE_BORDER
self.pos = pos
self.title = title
try:
self.madeApp = False
wx.Dialog.__init__(self, None, -1, title, pos, size, style)
except wx._core.PyNoAppError: # only needed during development?
self.madeApp = True
global app
if parse_version(wx.__version__) < parse_version('2.9'):
app = wx.PySimpleApp()
else:
app = wx.App(False)
wx.Dialog.__init__(self, None, -1, title, pos, size, style)
self.trim = trim
self.warning = '' # updated to warn about eg, trailing whitespace
if hasHeader and not len(grid) > 1 and not self.fixed:
self.grid.append([])
self.clean = bool(clean)
self.typeChoices = ['None', 'str', 'utf-8', 'int', 'long', 'float',
'bool', 'list', 'tuple', 'array']
# make all rows have same # cols, extending as needed or requested:
longest = max([len(r) for r in self.grid]) + extraCols
for row in self.grid:
for i in range(len(row), longest):
row.append(u'') # None
# self.header <== row of input param name fields
self.hasHeader = bool(hasHeader)
self.rows = min(len(self.grid), 30) # max 30 rows displayed
self.cols = len(self.grid[0])
if wx.version()[0] == '2':
# extra row for explicit type drop-down
extraRow = int(not self.fixed)
self.sizer = wx.FlexGridSizer(self.rows + extraRow,
self.cols + 1, # +1 for labels
vgap=0, hgap=0)
else:
self.sizer = wx.FlexGridSizer(cols=self.cols + 1, vgap=0, hgap=0)
# set length of input box as the longest in the column (bounded):
self.colSizes = []
for x in range(self.cols):
_size = [len(str(self.grid[y][x])) for y in range(self.rows)]
self.colSizes.append(max([4] + _size))
self.colSizes = [min(20, max(10, x + 1)) * 8 + 30 for x in self.colSizes]
self.inputTypes = [] # explicit, as selected by user
self.inputFields = [] # values in fields
self.data = []
# make header label, if any:
if self.hasHeader:
rowLabel = wx.StaticText(self, -1, label=_translate('Params:'),
size=(6 * 9, 20))
rowLabel.SetForegroundColour(darkblue)
self.addRow(0, rowLabel=rowLabel)
# make type-selector drop-down:
if not self.fixed:
if sys.platform == 'darwin':
self.SetWindowVariant(variant=wx.WINDOW_VARIANT_SMALL)
labelBox = wx.BoxSizer(wx.VERTICAL)
tx = wx.StaticText(self, -1, label=_translate('type:'),
size=(5 * 9, 20))
tx.SetForegroundColour(darkgrey)
labelBox.Add(tx, 1, flag=wx.ALIGN_RIGHT)
labelBox.AddSpacer(5) # vertical
self.sizer.Add(labelBox, 1, flag=wx.ALIGN_RIGHT)
row = int(self.hasHeader) # row to use for type inference
for col in range(self.cols):
# make each selector:
typeOpt = wx.Choice(self, choices=self.typeChoices)
# set it to best guess about the column's type:
firstType = str(type(self.grid[row][col])).split("'", 2)[1]
if firstType == 'numpy.ndarray':
firstType = 'array'
if firstType == 'unicode':
firstType = 'utf-8'
typeOpt.SetStringSelection(str(firstType))
self.inputTypes.append(typeOpt)
self.sizer.Add(typeOpt, 1)
if sys.platform == 'darwin':
self.SetWindowVariant(variant=wx.WINDOW_VARIANT_NORMAL)
# stash implicit types for setType:
self.types = [] # implicit types
row = int(self.hasHeader) # which row to use for type inference
for col in range(self.cols):
firstType = str(type(self.grid[row][col])).split("'")[1]
self.types.append(firstType)
# add normal row:
for row in range(int(self.hasHeader), self.rows):
self.addRow(row)
for r in range(extraRows):
self.grid.append([u'' for i in range(self.cols)])
self.rows = len(self.grid)
self.addRow(self.rows - 1)
# show the GUI:
if gui:
self.show()
self.Destroy()
if self.madeApp:
del(self, app)
def colName(self, c, prefix='param_'):
# generates 702 excel-style column names, A ... ZZ, with prefix
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # for A, ..., Z
aabb = [''] + [ch for ch in abc] # for Ax, ..., Zx
return prefix + aabb[c // 26] + abc[c % 26]
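        # e.g. colName(0) -> 'param_A', colName(25) -> 'param_Z',
        # colName(26) -> 'param_AA', colName(701) -> 'param_ZZ' (last of 702)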
def addRow(self, row, rowLabel=None):
"""Add one row of info, either header (col names) or normal data
Adds items sequentially; FlexGridSizer moves to next row automatically
"""
labelBox = wx.BoxSizer(wx.HORIZONTAL)
if not rowLabel:
if sys.platform == 'darwin':
self.SetWindowVariant(variant=wx.WINDOW_VARIANT_SMALL)
label = _translate('cond %s:') % str(
row + 1 - int(self.hasHeader)).zfill(2)
rowLabel = wx.StaticText(self, -1, label=label)
rowLabel.SetForegroundColour(darkgrey)
if sys.platform == 'darwin':
self.SetWindowVariant(variant=wx.WINDOW_VARIANT_NORMAL)
labelBox.Add(rowLabel, 1, flag=wx.ALIGN_BOTTOM)
self.sizer.Add(labelBox, 1, flag=wx.ALIGN_CENTER)
lastRow = []
for col in range(self.cols):
# get the item, as unicode for display purposes:
if len(str(self.grid[row][col])): # want 0, for example
item = str(self.grid[row][col])
else:
item = u''
# make a textbox:
field = ExpandoTextCtrl(
self, -1, item, size=(self.colSizes[col], 20))
field.Bind(EVT_ETC_LAYOUT_NEEDED, self.onNeedsResize)
field.SetMaxHeight(100) # ~ 5 lines
if self.hasHeader and row == 0:
# add a default column name (header) if none provided
header = self.grid[0]
if item.strip() == '':
c = col
while self.colName(c) in header:
c += 1
field.SetValue(self.colName(c))
field.SetForegroundColour(darkblue) # dark blue
# or (self.parent and
if not valid_var_re.match(field.GetValue()):
# self.parent.exp.namespace.exists(field.GetValue()) ):
# was always red when preview .xlsx file -- in
# namespace already is fine
if self.fixed:
field.SetForegroundColour("Red")
field.SetToolTip(wx.ToolTip(_translate(
'Should be legal as a variable name (alphanumeric)')))
field.Bind(wx.EVT_TEXT, self.checkName)
elif self.fixed:
field.SetForegroundColour(darkgrey)
field.SetBackgroundColour(white)
# warn about whitespace unless will be auto-removed. invisible,
# probably spurious:
if (self.fixed or not self.clean) and item != item.strip():
field.SetForegroundColour('Red')
# also used in show():
self.warning = _translate('extra white-space')
field.SetToolTip(wx.ToolTip(self.warning))
if self.fixed:
field.Disable()
lastRow.append(field)
self.sizer.Add(field, 1)
self.inputFields.append(lastRow)
if self.hasHeader and row == 0:
self.header = lastRow
def checkName(self, event=None, name=None):
"""check param name (missing, namespace conflict, legal var name)
disable save, save-as if bad name
"""
if self.parent:
if event:
msg, enable = self.parent._checkName(event=event)
else:
msg, enable = self.parent._checkName(name=name)
else:
if (name and not valid_var_re.match(name)
or not valid_var_re.match(event.GetString())):
msg, enable = _translate(
"Name must be alphanumeric or _, no spaces"), False
else:
msg, enable = "", True
self.tmpMsg.SetLabel(msg)
if enable:
self.OKbtn.Enable()
self.SAVEAS.Enable()
else:
self.OKbtn.Disable()
self.SAVEAS.Disable()
def userAddRow(self, event=None):
"""handle user request to add another row: add to the FlexGridSizer
"""
self.grid.append([u''] * self.cols)
self.rows = len(self.grid)
self.addRow(self.rows - 1)
self.tmpMsg.SetLabel('')
self.onNeedsResize()
def userAddCol(self, event=None):
"""adds a column by recreating the Dlg with size +1 one column wider.
relaunching loses the retVal from OK, so use parent.fileName instead
"""
self.relaunch(extraCols=1, title=self.title)
def relaunch(self, **kwargs):
        self.trim = False  # don't remove blank rows / cols that the user added
self.getData(True)
currentData = self.data[:]
# launch new Dlg, but only after bail out of current one:
if hasattr(self, 'fileName'):
fname = self.fileName
else:
fname = None
wx.CallAfter(DlgConditions, currentData,
_restore=(self.newFile, fname),
parent=self.parent, **kwargs)
# bail from current Dlg:
# retVal here, first one goes to Builder, ignore
self.EndModal(wx.ID_OK)
# self.Destroy() # -> PyDeadObjectError, so already handled hopefully
def getData(self, typeSelected=False):
"""gets data from inputFields (unicode), converts to desired type
"""
if self.fixed:
self.data = self.grid
return
elif typeSelected: # get user-selected explicit types of the columns
self.types = []
for col in range(self.cols):
selected = self.inputTypes[col].GetCurrentSelection()
self.types.append(self.typeChoices[selected])
# mark empty columns for later removal:
if self.trim:
start = int(self.hasHeader) # name is not empty, so ignore
for col in range(self.cols):
if not ''.join([self.inputFields[row][col].GetValue()
for row in range(start, self.rows)]):
self.types[col] = 'None' # col will be removed below
# get the data:
self.data = []
for row in range(self.rows):
lastRow = []
# remove empty rows
fieldVals = [self.inputFields[row][col].GetValue()
for col in range(self.cols)]
if self.trim and not ''.join(fieldVals):
continue
for col in range(self.cols):
thisType = self.types[col]
# trim 'None' columns, including header name:
if self.trim and thisType in ['None']:
continue
thisVal = self.inputFields[row][col].GetValue()
if self.clean:
thisVal = thisVal.lstrip().strip()
if thisVal: # and thisType in ['list', 'tuple', 'array']:
while len(thisVal) and thisVal[-1] in "]), ":
thisVal = thisVal[:-1]
while len(thisVal) and thisVal[0] in "[(, ":
thisVal = thisVal[1:]
if thisType not in ['str', 'utf-8']:
thisVal = thisVal.replace('\n', '')
else:
thisVal = repr(thisVal) # handles quoting ', ", ''' etc
# convert to requested type:
try:
# todo: replace exec() with eval()
if self.hasHeader and row == 0:
# header always str
val = self.inputFields[row][col].GetValue()
lastRow.append(str(val))
elif thisType in ['float', 'int', 'long']:
exec("lastRow.append(" + thisType +
'(' + thisVal + "))")
elif thisType in ['list']:
thisVal = thisVal.lstrip('[').strip(']')
exec("lastRow.append(" + thisType +
'([' + thisVal + "]))")
elif thisType in ['tuple']:
thisVal = thisVal.lstrip('(').strip(')')
if thisVal:
exec("lastRow.append((" +
thisVal.strip(',') + ",))")
else:
lastRow.append(tuple(()))
elif thisType in ['array']:
thisVal = thisVal.lstrip('[').strip(']')
exec("lastRow.append(numpy.array" +
'("[' + thisVal + ']"))')
elif thisType in ['utf-8', 'bool']:
if thisType == 'utf-8':
thisType = 'unicode'
exec("lastRow.append(" + thisType +
'(' + thisVal + '))')
elif thisType in ['str']:
exec("lastRow.append(str(" + thisVal + "))")
elif thisType in ['file']:
exec("lastRow.append(repr(" + thisVal + "))")
else:
exec("lastRow.append(" + str(thisVal) + ')')
except ValueError as msg:
print('ValueError:', msg, '; using unicode')
exec("lastRow.append(" + str(thisVal) + ')')
except NameError as msg:
print('NameError:', msg, '; using unicode')
exec("lastRow.append(" + repr(thisVal) + ')')
self.data.append(lastRow)
if self.trim:
# the corresponding data have already been removed
while 'None' in self.types:
self.types.remove('None')
return self.data[:]
def preview(self, event=None):
self.getData(typeSelected=True)
# in theory, self.data is also ok, because fixed
previewData = self.data[:]
# is supposed to never change anything, but bugs would be very subtle
DlgConditions(previewData, parent=self.parent,
title=_translate('PREVIEW'), fixed=True)
def onNeedsResize(self, event=None):
self.SetSizerAndFit(self.border) # do outer-most sizer
if self.pos is None:
self.Center()
def show(self):
"""called internally; to display, pass gui=True to init
"""
# put things inside a border:
if wx.version()[0] == '2':
# data matrix on top, buttons below
self.border = wx.FlexGridSizer(2, 1)
elif wx.version()[0] == '3':
self.border = wx.FlexGridSizer(4)
else:
self.border = wx.FlexGridSizer(4, 1, wx.Size(0,0))
self.border.Add(self.sizer, proportion=1,
flag=wx.ALL | wx.EXPAND, border=8)
# add a message area, buttons:
buttons = wx.BoxSizer(wx.HORIZONTAL)
if sys.platform == 'darwin':
self.SetWindowVariant(variant=wx.WINDOW_VARIANT_SMALL)
if not self.fixed:
# placeholder for possible messages / warnings:
self.tmpMsg = wx.StaticText(
self, -1, label='', size=(350, 15), style=wx.ALIGN_RIGHT)
self.tmpMsg.SetForegroundColour('Red')
if self.warning:
self.tmpMsg.SetLabel(self.warning)
buttons.Add(self.tmpMsg, flag=wx.ALIGN_CENTER)
buttons.AddSpacer(8)
self.border.Add(buttons, 1, flag=wx.BOTTOM |
wx.ALIGN_CENTER, border=8)
buttons = wx.BoxSizer(wx.HORIZONTAL)
ADDROW = wx.Button(self, -1, _translate("+cond."))
tip = _translate('Add a condition (row); to delete a condition,'
' delete all of its values.')
ADDROW.SetToolTip(wx.ToolTip(tip))
ADDROW.Bind(wx.EVT_BUTTON, self.userAddRow)
buttons.Add(ADDROW)
buttons.AddSpacer(4)
ADDCOL = wx.Button(self, -1, _translate("+param"))
tip = _translate('Add a parameter (column); to delete a param, '
'set its type to None, or delete all of its values.')
ADDCOL.SetToolTip(wx.ToolTip(tip))
ADDCOL.Bind(wx.EVT_BUTTON, self.userAddCol)
buttons.Add(ADDCOL)
buttons.AddSpacer(4)
PREVIEW = wx.Button(self, -1, _translate("Preview"))
tip = _translate("Show all values as they would appear after "
"saving to a file, without actually saving anything.")
PREVIEW.SetToolTip(wx.ToolTip(tip))
PREVIEW.Bind(wx.EVT_BUTTON, self.preview)
buttons.Add(PREVIEW)
buttons.AddSpacer(4)
self.SAVEAS = wx.Button(self, wx.FD_SAVE, _translate("Save as"))
self.SAVEAS.Bind(wx.EVT_BUTTON, self.saveAs)
buttons.Add(self.SAVEAS)
buttons.AddSpacer(8)
self.border.Add(buttons, 1, flag=wx.BOTTOM |
wx.ALIGN_RIGHT, border=8)
if sys.platform == 'darwin':
self.SetWindowVariant(variant=wx.WINDOW_VARIANT_NORMAL)
buttons = wx.StdDialogButtonSizer()
# help button if we know the url
if self.helpUrl and not self.fixed:
helpBtn = wx.Button(self, wx.ID_HELP, _translate(" Help "))
helpBtn.SetToolTip(wx.ToolTip(_translate("Go to online help")))
helpBtn.Bind(wx.EVT_BUTTON, self.onHelp)
            buttons.Add(helpBtn, 0, flag=wx.ALIGN_CENTER | wx.ALL)
buttons.AddSpacer(12)
# Add Okay and Cancel buttons
self.OKbtn = wx.Button(self, wx.ID_OK, _translate(" OK "))
if not self.fixed:
self.OKbtn.SetToolTip(wx.ToolTip(_translate('Save and exit')))
self.OKbtn.Bind(wx.EVT_BUTTON, self.onOK)
self.OKbtn.SetDefault()
if not self.fixed:
buttons.AddSpacer(4)
CANCEL = wx.Button(self, wx.ID_CANCEL, _translate(" Cancel "))
CANCEL.SetToolTip(wx.ToolTip(
_translate('Exit, discard any edits')))
buttons.Add(CANCEL)
else:
CANCEL = None
if sys.platform == "win32":
btns = [self.OKbtn, CANCEL]
else:
btns = [CANCEL, self.OKbtn]
        if self.fixed:
            # no Cancel button was created in fixed mode; drop the None placeholder
            btns.remove(CANCEL)
buttons.AddMany(btns)
buttons.AddSpacer(8)
buttons.Realize()
self.border.Add(buttons, 1, flag=wx.BOTTOM | wx.ALIGN_RIGHT, border=8)
# finally, its show time:
self.SetSizerAndFit(self.border)
if self.pos is None:
self.Center()
if self.ShowModal() == wx.ID_OK:
# set self.data and self.types, from fields
self.getData(typeSelected=True)
self.OK = True
else:
self.data = self.types = None
self.OK = False
self.Destroy()
def onOK(self, event=None):
if not self.fixed:
if not self.save():
return # disallow OK if bad param names
event.Skip() # handle the OK button event
def saveAs(self, event=None):
"""save, but allow user to give a new name
"""
self.newFile = True # trigger query for fileName
self.save()
self.relaunch() # to update fileName in title
def save(self, event=None):
"""save header + row x col data to a pickle file
"""
self.getData(True) # update self.data
adjustedNames = False
for i, paramName in enumerate(self.data[0]):
newName = paramName
# ensure its legal as a var name, including namespace check:
if self.parent:
msg, enable = self.parent._checkName(name=paramName)
if msg: # msg not empty means a namespace issue
newName = self.parent.exp.namespace.makeValid(
paramName, prefix='param')
adjustedNames = True
elif not valid_var_re.match(paramName):
msg, enable = _translate(
"Name must be alphanumeric or _, no spaces"), False
newName = _nonalphanumeric_re.sub('_', newName)
adjustedNames = True
else:
msg, enable = "", True
# try to ensure its unique:
while newName in self.data[0][:i]:
adjustedNames = True
newName += 'x' # might create a namespace conflict?
self.data[0][i] = newName
self.header[i].SetValue(newName) # displayed value
if adjustedNames:
self.tmpMsg.SetLabel(_translate(
'Param name(s) adjusted to be legal. Look ok?'))
return False
if hasattr(self, 'fileName') and self.fileName:
fname = self.fileName
else:
self.newFile = True
fname = self.defaultFileName
if self.newFile or not os.path.isfile(fname):
fullPath = gui.fileSaveDlg(initFilePath=os.path.split(fname)[0],
initFileName=os.path.basename(fname),
allowed="Pickle files (*.pkl)|*.pkl")
else:
fullPath = fname
if fullPath: # None if user canceled
if not fullPath.endswith('.pkl'):
fullPath += '.pkl'
            f = open(fullPath, 'wb')  # pickle requires binary mode under Python 3
pickle.dump(self.data, f)
f.close()
self.fileName = fullPath
self.newFile = False
# ack, sometimes might want relative path
if self.parent:
self.parent.conditionsFile = fullPath
return True
def load(self, fileName=''):
"""read and return header + row x col data from a pickle file
"""
if not fileName:
fileName = self.defaultFileName
if not os.path.isfile(fileName):
_base = os.path.basename(fileName)
fullPathList = gui.fileOpenDlg(tryFileName=_base,
allowed="All files (*.*)|*.*")
if fullPathList:
fileName = fullPathList[0] # wx.MULTIPLE -> list
if os.path.isfile(fileName) and fileName.endswith('.pkl'):
f = open(fileName, 'rb')
# Converting newline characters.
# 'b' is necessary in Python3 because byte object is
# returned when file is opened in binary mode.
buffer = f.read().replace(b'\r\n',b'\n').replace(b'\r',b'\n')
contents = pickle.loads(buffer)
f.close()
if self.parent:
self.parent.conditionsFile = fileName
return contents
elif not os.path.isfile(fileName):
print('file %s not found' % fileName)
else:
print('only .pkl supported at the moment')
def asConditions(self):
"""converts self.data into self.conditions for TrialHandler.
returns conditions
"""
if not self.data or not self.hasHeader:
if hasattr(self, 'conditions') and self.conditions:
return self.conditions
return
self.conditions = []
keyList = self.data[0] # header = keys of dict
for row in self.data[1:]:
condition = {}
for col, key in enumerate(keyList):
condition[key] = row[col]
self.conditions.append(condition)
return self.conditions
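        # Editor's illustration: given self.data ==
        #     [['text', 'corrAns'], ['left', 'q'], ['right', 'p']]
        # this method returns
        #     [{'text': 'left', 'corrAns': 'q'}, {'text': 'right', 'corrAns': 'p'}]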
def onHelp(self, event=None):
"""similar to self.app.followLink() to self.helpUrl, but only use url
"""
wx.LaunchDefaultBrowser(self.helpUrl)
| psychopy/psychopy | psychopy/app/builder/dialogs/dlgsConditions.py | Python | gpl-3.0 | 29,203 |
# Copyright 2015 Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the Pascal VOC Dataset (images + annotations).
"""
import os
import tensorflow as tf
from . import dataset_utils
slim = tf.contrib.slim
VOC_LABELS = {
'none': (0, 'Background'),
'aeroplane': (1, 'Vehicle'),
'bicycle': (2, 'Vehicle'),
'bird': (3, 'Animal'),
'boat': (4, 'Vehicle'),
'bottle': (5, 'Indoor'),
'bus': (6, 'Vehicle'),
'car': (7, 'Vehicle'),
'cat': (8, 'Animal'),
'chair': (9, 'Indoor'),
'cow': (10, 'Animal'),
'diningtable': (11, 'Indoor'),
'dog': (12, 'Animal'),
'horse': (13, 'Animal'),
'motorbike': (14, 'Vehicle'),
'person': (15, 'Person'),
'pottedplant': (16, 'Indoor'),
'sheep': (17, 'Animal'),
'sofa': (18, 'Indoor'),
'train': (19, 'Vehicle'),
'tvmonitor': (20, 'Indoor'),
}
def get_split(split_name, dataset_dir, file_pattern, reader,
split_to_sizes, items_to_descriptions, num_classes):
"""Gets a dataset tuple with instructions for reading Pascal VOC dataset.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
      reader: The TensorFlow reader type.
      split_to_sizes: A dict mapping each split name to its number of samples.
      items_to_descriptions: A dict describing each item the dataset provides.
      num_classes: The number of classes used by the dataset.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in split_to_sizes:
raise ValueError('split name %s was not recognized.' % split_name)
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
# Features in Pascal VOC TFRecords.
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature([1], tf.int64),
'image/width': tf.FixedLenFeature([1], tf.int64),
'image/channels': tf.FixedLenFeature([1], tf.int64),
'image/shape': tf.FixedLenFeature([3], tf.int64),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'shape': slim.tfexample_decoder.Tensor('image/shape'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['xmin', 'ymin', 'xmax', 'ymax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
# else:
# labels_to_names = create_readable_names_for_imagenet_labels()
# dataset_utils.write_label_file(labels_to_names, dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=split_to_sizes[split_name],
items_to_descriptions=items_to_descriptions,
num_classes=num_classes,
labels_to_names=labels_to_names)
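# Illustrative usage (editor's sketch; the file pattern, sizes and descriptions
# below are hypothetical -- in practice they come from a per-dataset module):
#
# dataset = get_split(
#     'train', '/data/voc', 'voc_2007_%s_*.tfrecord', None,
#     split_to_sizes={'train': 5011, 'test': 4952},
#     items_to_descriptions={'image': 'A color image.'},
#     num_classes=21)
# provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
# image, shape, bboxes, labels = provider.get(
#     ['image', 'shape', 'object/bbox', 'object/label'])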
| maxkferg/smart-city-model | modules/vision/datasets/pascalvoc_common.py | Python | gpl-3.0 | 4,420 |
mcinif='mcini_g63'
runname='col_test0123'
mcpick='col_test1.pickle'
pathdir='/beegfs/work/ka_oj4748/echoRD'
wdir='/beegfs/work/ka_oj4748/gen_tests'
colref='True'
prec2D='False'
update_part=False
import sys
sys.path.append(pathdir)
import run_echoRD as rE
rE.echoRD_job(mcinif=mcinif,mcpick=mcpick,runname=runname,wdir=wdir,pathdir=pathdir,colref=colref,prec2D=prec2D,update_part=update_part,hdf5pick=False)
| cojacoo/testcases_echoRD | col_test0123.py | Python | gpl-3.0 | 409 |
'''
Created on Sep 16, 2016
State Diagram:
Off ==[motion]==> On ==[No motion]==> TentativeOn
/\ /\ // ||
|| || // ||
|| =======[motion]=== // ||
|| ||
\\ //
=====[no motion and time passed]=========
Transitioning to off also causes the output of the last live segment
@author: larry.dawson
'''
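# Example timeline (editor's sketch, latent_time=15, times in seconds):
#   t=0  motion    -> Off => On          (preceding quiet gap reported inactive)
#   t=3  no motion -> On => TentativeOn
#   t=5  motion    -> TentativeOn => On  (the 2 s gap is debounced away)
#   t=6  no motion -> On => TentativeOn
#   t>20 no motion and now - last_on > 15 -> Off; segment (0, 5) reported active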
import datetime
import time
import sys
MAX_ON=120
class TimeSeriesDebouncer(object):
def __init__(self, latent_time = 15, keep_in_case_seconds = 5):
self.origin = time.time()
self.latent_time = latent_time
self.state = self.off
self.on_time = None
self.last_on = self.origin
self.keep_in_case_seconds = keep_in_case_seconds
def print(self, *args):
print(*args)
sys.stdout.flush()
def reportactive(self, segment):
self.print("%d, %d, active" % (segment[0], segment[1]))
def reportinactive(self, segment):
self.print("%d, %d, inactive" % (segment[0], segment[1]))
def off(self, motion):
#I'm in the off state -
ts = time.time()
if motion:
self.on_time = ts
last_on = self.last_on
if last_on is None:
last_on = ts
self.reportinactive((last_on, ts))
self.last_on = self.on_time
self.state = self.on
#And maybe do something like this - delete files now: delete_files_between(last_active + 5, now - self.keep_in_case_seconds)
#Except this might take some time! Do the deletes in a different process!
def on(self, motion):
if not motion:
self.state = self.tentative_on
else:
self.last_on = time.time()
def tentative_on(self, motion):
now = time.time()
if motion:
self.state = self.on
self.last_on = now
elif (now - self.last_on) > self.latent_time:
segment = (self.on_time, self.last_on)
self.on_time = None
self.last_on = None
self.state = self.off
self.reportactive(segment)
else:
            if (now - self.on_time) > MAX_ON:
                self.reportactive((self.on_time, self.last_on))
self.on_time = self.last_on
#stay in tentative on
def close(self):
segment = (self.on_time, self.last_on)
self.on_time = None
self.last_on = None
self.state = self.off
        # the pending segment spans detected motion, i.e. the last live segment
        self.reportactive(segment)
if __name__ == '__main__':
counter = TimeSeriesDebouncer()
counter.state(True)
time.sleep(3)
counter.state(False)
time.sleep(4)
counter.state(True)
time.sleep(4)
counter.state(False)
time.sleep(1)
counter.state(False)
counter.state(False)
time.sleep(1)
counter.state(True)
time.sleep(4)
counter.state(True)
time.sleep(5)
counter.state(False)
counter.close()
| dawsonlp/picam | src/activetime.py | Python | gpl-3.0 | 3,167 |
# Reach, the remote acccess tool
# Copyright (C) 2010 Simon Poirier <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# API CONSTANTS
VERSION_MAJOR = 2 # it won't be reach2 if this number changes ;)
VERSION_MINOR = 0
VERSION_REVISION = 2
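# A display string can be assembled from the constants above, e.g. this
# editor's sketch (not part of the original module):
# VERSION = "%d.%d.%d" % (VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION)  # "2.0.2"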
| simpoir/reach | reach/__init__.py | Python | gpl-3.0 | 847 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import functools
from django.conf import settings
from django.core.urlresolvers import resolve, reverse
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from import_export.views import handle_upload_form
from pootle.core.browser import (
get_parent, get_table_headings, make_directory_item, make_store_item)
from pootle.core.decorators import get_path_obj, permission_required
from pootle.core.helpers import get_sidebar_announcements_context
from pootle.core.views import (
PootleBrowseView, PootleTranslateView, PootleExportView)
from pootle_app.models import Directory
from pootle_app.models.permissions import (
check_permission, get_matching_permissions)
from pootle_app.views.admin.permissions import admin_permissions as admin_perms
from pootle_language.models import Language
from pootle_store.models import Store
from .models import TranslationProject
@get_path_obj
@permission_required('administrate')
def admin_permissions(request, translation_project):
ctx = {
'page': 'admin-permissions',
'browse_url': reverse('pootle-tp-browse', kwargs={
'language_code': translation_project.language.code,
'project_code': translation_project.project.code,
}),
'translate_url': reverse('pootle-tp-translate', kwargs={
'language_code': translation_project.language.code,
'project_code': translation_project.project.code,
}),
'translation_project': translation_project,
'project': translation_project.project,
'language': translation_project.language,
'directory': translation_project.directory,
}
return admin_perms(request, translation_project.directory,
'translation_projects/admin/permissions.html', ctx)
def redirect_to_tp_on_404(f):
@functools.wraps(f)
def method_wrapper(self, request, *args, **kwargs):
try:
request.permissions = get_matching_permissions(
request.user,
self.permission_context) or []
except Http404 as e:
# Test if lang code is not canonical but valid
lang = Language.get_canonical(kwargs['language_code'])
if lang is not None and lang.code != kwargs['language_code']:
kwargs["language_code"] = lang.code
return redirect(
resolve(request.path).view_name,
permanent=True,
**kwargs)
elif kwargs["dir_path"] or kwargs.get("filename", None):
try:
TranslationProject.objects.get(
project__code=kwargs["project_code"],
language__code=kwargs["language_code"])
# the TP exists so redirect to it
return redirect(
reverse(
'pootle-tp-browse',
kwargs={
k: v
for k, v
in kwargs.items()
if k in [
"language_code",
"project_code"]}))
except TranslationProject.DoesNotExist:
pass
# if we get here - the TP does not exist
user_choice = self.request.COOKIES.get(
'user-choice', None)
if user_choice:
url = None
if user_choice == 'language':
url = reverse(
'pootle-language-browse',
args=[kwargs["language_code"]])
elif user_choice == "project":
url = reverse(
'pootle-project-browse',
args=[kwargs["project_code"], '', ''])
if url:
response = redirect(url)
response.delete_cookie('user-choice')
return response
raise e
return f(self, request, *args, **kwargs)
return method_wrapper
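# Editor's sketch: the decorator above is consumed by TPMixin.dispatch below;
# any other class-based view exposing a ``permission_context`` could apply it
# the same way (``SomeBaseView`` is hypothetical):
#
# class MyTPView(SomeBaseView):
#     @redirect_to_tp_on_404
#     def dispatch(self, request, *args, **kwargs):
#         return super(MyTPView, self).dispatch(request, *args, **kwargs)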
class TPMixin(object):
"""This Mixin is used by all TP views.
The context object may be a resource with the TP, ie a Directory or Store.
"""
@redirect_to_tp_on_404
def dispatch(self, request, *args, **kwargs):
return super(TPMixin, self).dispatch(request, *args, **kwargs)
@property
def ctx_path(self):
return self.tp.pootle_path
@property
def resource_path(self):
return self.object.pootle_path.replace(self.ctx_path, "")
@property
def dir_path(self):
return self.resource_path
@cached_property
def tp(self):
return self.object.translation_project
@cached_property
def project(self):
if self.tp.project.disabled and not self.request.user.is_superuser:
raise Http404
return self.tp.project
@cached_property
def language(self):
return self.tp.language
@cached_property
def sidebar_announcements(self):
return get_sidebar_announcements_context(
self.request,
(self.project, self.language, self.tp))
class TPDirectoryMixin(TPMixin):
model = Directory
browse_url_path = "pootle-tp-browse"
export_url_path = "pootle-tp-export"
translate_url_path = "pootle-tp-translate"
@property
def object_related(self):
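        # Each "/" in dir_path means one extra directory level, so chain that
        # many "parent__" hops to select_related the TP from the right ancestor.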
tp_prefix = (
"parent__" * self.kwargs.get("dir_path", "").count("/"))
return [
"%stranslationproject" % tp_prefix,
"%stranslationproject__language" % tp_prefix,
"%stranslationproject__project" % tp_prefix]
@lru_cache()
def get_object(self):
return get_object_or_404(
Directory.objects.select_related(*self.object_related),
pootle_path=self.path)
@property
def url_kwargs(self):
return {
"language_code": self.language.code,
"project_code": self.project.code,
"dir_path": self.dir_path}
class TPStoreMixin(TPMixin):
model = Store
browse_url_path = "pootle-tp-store-browse"
export_url_path = "pootle-tp-store-export"
translate_url_path = "pootle-tp-store-translate"
is_store = True
@property
def permission_context(self):
return self.get_object().parent
@property
def dir_path(self):
return self.resource_path.replace(self.object.name, "")
@property
def url_kwargs(self):
return {
"language_code": self.language.code,
"project_code": self.project.code,
"dir_path": self.dir_path,
"filename": self.object.name}
@lru_cache()
def get_object(self):
path = (
"/%(language_code)s/%(project_code)s/%(dir_path)s%(filename)s"
% self.kwargs)
return get_object_or_404(
Store.objects.select_related(
"parent",
"translation_project__language",
"translation_project__project"),
pootle_path=path)
class TPBrowseBaseView(PootleBrowseView):
template_extends = 'translation_projects/base.html'
def get_context_data(self, *args, **kwargs):
ctx = super(TPBrowseBaseView, self).get_context_data(*args, **kwargs)
ctx.update(self.get_upload_widget())
ctx.update(
{'parent': get_parent(self.object)})
return ctx
def get_upload_widget(self):
ctx = {}
has_upload = (
"import_export" in settings.INSTALLED_APPS
and self.request.user.is_authenticated()
and check_permission('translate', self.request))
if has_upload:
if "po" in self.project.filetype_tool.valid_extensions:
ctx.update(handle_upload_form(
self.request,
self.project,
self.language))
ctx.update(
{'display_download': True,
'has_sidebar': True})
return ctx
def post(self, *args, **kwargs):
return self.get(*args, **kwargs)
class TPBrowseStoreView(TPStoreMixin, TPBrowseBaseView):
pass
class TPBrowseView(TPDirectoryMixin, TPBrowseBaseView):
table_id = "tp"
table_fields = [
'name', 'progress', 'total', 'need-translation',
'suggestions', 'critical', 'last-updated', 'activity']
@cached_property
def items(self):
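        # Directories containing virtual folders get priority sorting; other
        # children keep the default ordering.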
if 'virtualfolder' in settings.INSTALLED_APPS:
from virtualfolder.helpers import vftis_for_child_dirs
dirs_with_vfolders = set(
vftis_for_child_dirs(self.object).values_list(
"directory__pk", flat=True))
else:
dirs_with_vfolders = []
directories = [
make_directory_item(
child,
**(dict(sort="priority")
if child.pk in dirs_with_vfolders
else {}))
for child in self.object.children
if isinstance(child, Directory)]
stores = [
make_store_item(child)
for child in self.object.children
if isinstance(child, Store)]
return directories + stores
@cached_property
def has_vfolders(self):
return self.object.has_vfolders
@cached_property
def vfolders(self):
from virtualfolder.helpers import make_vfolder_treeitem_dict
vftis = self.object.vf_treeitems
if not self.has_admin_access:
vftis = vftis.filter(vfolder__is_public=True)
return [
make_vfolder_treeitem_dict(vfolder_treeitem)
for vfolder_treeitem
in vftis.order_by('-vfolder__priority').select_related("vfolder")
if (self.has_admin_access
or vfolder_treeitem.is_visible)]
@cached_property
def vfolder_data(self):
ctx = {}
if 'virtualfolder' not in settings.INSTALLED_APPS:
return {}
if len(self.vfolders) > 0:
table_fields = [
'name', 'priority', 'progress', 'total',
'need-translation', 'suggestions', 'critical',
'last-updated', 'activity']
ctx.update({
'vfolders': {
'id': 'vfolders',
'fields': table_fields,
'headings': get_table_headings(table_fields),
'items': self.vfolders}})
return ctx
@cached_property
def vfolder_stats(self):
if 'virtualfolder' not in settings.INSTALLED_APPS:
return {}
stats = {"vfolders": {}}
for vfolder_treeitem in self.vfolders or []:
stats['vfolders'][
vfolder_treeitem['code']] = vfolder_treeitem["stats"]
del vfolder_treeitem["stats"]
return stats
@cached_property
def stats(self):
stats = self.vfolder_stats
if stats and stats["vfolders"]:
stats.update(self.object.get_stats())
else:
stats = self.object.get_stats()
return stats
def get_context_data(self, *args, **kwargs):
ctx = super(TPBrowseView, self).get_context_data(*args, **kwargs)
ctx.update(self.vfolder_data)
return ctx
class TPTranslateBaseView(PootleTranslateView):
translate_url_path = "pootle-tp-translate"
browse_url_path = "pootle-tp-browse"
export_url_path = "pootle-tp-export"
template_extends = 'translation_projects/base.html'
@property
def pootle_path(self):
return "%s%s" % (self.ctx_path, self.resource_path)
class TPTranslateView(TPDirectoryMixin, TPTranslateBaseView):
@property
def request_path(self):
return "/%(language_code)s/%(project_code)s/%(dir_path)s" % self.kwargs
@cached_property
def extracted_path(self):
if 'virtualfolder' not in settings.INSTALLED_APPS:
return None, self.request_path
from virtualfolder.helpers import extract_vfolder_from_path
from virtualfolder.models import VirtualFolderTreeItem
return extract_vfolder_from_path(
self.request_path,
vfti=VirtualFolderTreeItem.objects.select_related(
"directory", "vfolder"))
@property
def display_vfolder_priority(self):
if 'virtualfolder' not in settings.INSTALLED_APPS:
return False
vfolder = self.extracted_path[0]
if vfolder:
return False
return self.object.has_vfolders
@property
def resource_path(self):
vfolder = self.extracted_path[0]
path = ""
if vfolder:
path = "%s/" % vfolder.name
return (
"%s%s"
% (path,
self.object.pootle_path.replace(self.ctx_path, "")))
@property
def path(self):
return self.extracted_path[1]
@property
def vfolder_pk(self):
vfolder = self.extracted_path[0]
if vfolder:
return vfolder.pk
return ""
class TPTranslateStoreView(TPStoreMixin, TPTranslateBaseView):
pass
class TPExportBaseView(PootleExportView):
@property
def source_language(self):
return self.project.source_language
class TPExportView(TPDirectoryMixin, TPExportBaseView):
pass
class TPExportStoreView(TPStoreMixin, TPExportBaseView):
pass
| Finntack/pootle | pootle/apps/pootle_translationproject/views.py | Python | gpl-3.0 | 13,913 |
'''
Created on 13.04.2013
@author: capone
'''
X86_DEBUGGERS_ROOT = r'C:\Program Files (x86)\Windows Kits\8.0\Debuggers\x86'
X64_DEBUGGERS_ROOT = r'C:\Program Files (x86)\Windows Kits\8.0\Debuggers\x64'
#!/usr/bin/env python
from setuptools import setup, find_packages
from setuptools import Command
# Build and update pyqt resources
import sequence.resource.pyqt
del sequence
class UploadGhPages(Command):
    '''Build the Sphinx docs and upload them to the GitHub Pages branch.'''
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# Import fabric
try:
from fabric.api import local
# Import subprocess
except ImportError:
from subprocess import call
from functools import partial
local = partial(call, shell=True)
# Create gh-pages branch
local('git checkout --orphan gh-pages ')
# Unstage all
local('rm .git/index')
# Build doc
local('python setup.py build_sphinx')
# No jekyll file
local('touch .nojekyll')
local('git add .nojekyll')
# Add Readme
local('git add README.md')
# Add html content
local('git add docs/build/html/* -f ')
# Move html content
local('git mv docs/build/html/* ./ ')
# Git commit
local('git commit -m "build sphinx" ')
# Git push
local('git push --set-upstream origin gh-pages -f ')
# Back to master
local('git checkout master -f ')
# Delete branch
local('git branch -D gh-pages ')
setup(
name='python-sequence',
version='0.1.1',
description='Package for sequence edition and execution.',
packages=find_packages(),
include_package_data=True,
package_data={'': ['*.png', '*.ui', '*.qrc']},
cmdclass={'upload_gh_pages': UploadGhPages},
entry_points={
'console_scripts': [
'sequence-console = sequence.console:main'],
'gui_scripts': [
'sequence-runner = sequence.runner:main',
'sequence-editor = sequence.editor:main']},
)
| vxgmichel/python-sequence | setup.py | Python | gpl-3.0 | 1,974 |
import logging
from penfold.WorkflowActionIO import WorkflowActionIO
from penfold.error import InputError
log = logging.getLogger(__name__)
class InputOutputFactory(object):
"""Handle the processing of Workflow inputs and outputs"""
def __init__(self, action_data=None):
self._action_data = action_data
def create(self, name, type=None, source=None, value=None, mapping=None, generator=None, filter_value=None):
return WorkflowActionIO(name=name, type=type, source=source, value=value, mapping=mapping, generator=generator, filter_value=filter_value)
def createFrom(self, io, value):
return WorkflowActionIO(name=io.name, type=io.type, source=io.source, mapping=io.mapping, value=value)
def createFromList(self, template, values):
res = []
for v in values:
w = WorkflowActionIO(
template.name, template.type, template.source, v, template.mapping, template.generator)
res.append(w)
return res
def createInput(self, input_data):
return self._createInputFromData(input_data)
def createActionInputInstances(self, action_data=None):
"""Create a list of WorkflowActionIO instances for the given action"""
if action_data is not None:
self._action_data = action_data
if self._action_data is None:
raise InputError("No data")
inputs = []
for i in self._action_data.input:
io = self._createInputFromData(i)
log.debug("creating input {}".format(io))
inputs.append(io)
return inputs
def _createInputFromData(self, data):
io = WorkflowActionIO(name=data.name, type=data.type, source=data.source,
value=data.value_, mapping=data.mapping, generator=data.generator)
return io
def _createOutputFromData(self, data):
io = WorkflowActionIO(name=data.name, type=data.type, source=data.source,
mapping=data.mapping, filter_value=data.filter_value)
return io
"""Create a list of WorkflowActionIO instances for the plugin of the given action"""
def createPluginInputInstances(self, action_data=None):
if action_data is not None:
self._action_data = action_data
if self._action_data is None:
raise InputError("No data")
inputs = []
        # there are multiple entries
for i in self._action_data.plugin.input:
io = self._createInputFromData(i)
inputs.append(io)
return inputs
def createActionOutputInstances(self, action_data=None):
if action_data is not None:
self._action_data = action_data
if self._action_data is None:
raise InputError("No data")
outputs = []
        # there are multiple entries
for o in self._action_data.output:
io = self._createOutputFromData(o)
outputs.append(io)
return outputs
def createPluginOutputInstances(self, action_data=None):
if action_data is not None:
self._action_data = action_data
if self._action_data is None:
raise InputError("No data")
outputs = []
        # there are multiple entries
for i in self._action_data.plugin.output:
io = self._createOutputFromData(i)
outputs.append(io)
return outputs
| neil-davis/penfold | src/package/penfold/InputOutputFactory.py | Python | gpl-3.0 | 3,461 |
import json
from libnow.src.business.exceptions.MapperException import MapperException
class JsonMapper(object):
@staticmethod
def from_json_to_py(json_string):
"""
Convierte un Json (string) a un mapa Python (diccionario).
En caso de error lanza una MapperException.
:param json_string:
:return:
"""
try:
if not json_string:
raise MapperException("Json no valido: El json esta vacio.")
return json.loads(json_string)
except Exception, e:
raise MapperException(e.message, e)
@staticmethod
def from_py_to_json(py_map):
"""
Convierte un mapa Python en un Json (string).
En caso de error, lanza una MapperException.
:param py_map:
:return:
"""
try:
return json.dumps(py_map)
except Exception, e:
raise MapperException(e.message, e)
| keikenuro/kaiju-libnow | kaiju_libnow/libnow/src/business/mapper/web/JsonMapper.py | Python | gpl-3.0 | 954 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'HighlightTranslationVCS'
db.create_table(u'appulet_highlighttranslationvcs', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('highlight', self.gf('django.db.models.fields.related.ForeignKey')(related_name='highlight_translation_vcs_entries', to=orm['appulet.Highlight'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 10, 24, 0, 0), auto_now_add=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 10, 24, 0, 0), auto_now=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('name_oc', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('name_es', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('name_ca', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('name_fr', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('name_en', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('long_text_oc', self.gf('django.db.models.fields.CharField')(max_length=2000, blank=True)),
('long_text_es', self.gf('django.db.models.fields.CharField')(max_length=2000, blank=True)),
('long_text_ca', self.gf('django.db.models.fields.CharField')(max_length=2000, blank=True)),
('long_text_fr', self.gf('django.db.models.fields.CharField')(max_length=2000, blank=True)),
('long_text_en', self.gf('django.db.models.fields.CharField')(max_length=2000, blank=True)),
))
db.send_create_signal(u'appulet', ['HighlightTranslationVCS'])
def backwards(self, orm):
# Deleting model 'HighlightTranslationVCS'
db.delete_table(u'appulet_highlighttranslationvcs')
models = {
u'appulet.box': {
'Meta': {'object_name': 'Box'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interactive_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'boxes'", 'to': u"orm['appulet.InteractiveImage']"}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'max_x': ('django.db.models.fields.IntegerField', [], {}),
'max_y': ('django.db.models.fields.IntegerField', [], {}),
'message_ca': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_es': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_fr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_oc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'min_x': ('django.db.models.fields.IntegerField', [], {}),
'min_y': ('django.db.models.fields.IntegerField', [], {})
},
u'appulet.highlight': {
'Meta': {'object_name': 'Highlight'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'highlights'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'long_text_ca': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_en': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_es': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_fr': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_oc': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'media': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'step': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'highlights'", 'null': 'True', 'to': u"orm['appulet.Step']"}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
u'appulet.highlighttranslationvcs': {
'Meta': {'object_name': 'HighlightTranslationVCS'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'highlight_translation_vcs_entries'", 'to': u"orm['appulet.Highlight']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'long_text_ca': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_en': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_es': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_fr': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_oc': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'appulet.interactiveimage': {
'Meta': {'object_name': 'InteractiveImage'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'interactive_images'", 'null': 'True', 'to': u"orm['appulet.Highlight']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'})
},
u'appulet.map': {
'Meta': {'object_name': 'Map'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'map'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'map_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'route': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['appulet.Route']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
u'appulet.rating': {
'Meta': {'object_name': 'Rating'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ratings'", 'null': 'True', 'to': u"orm['appulet.Highlight']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ratings'", 'null': 'True', 'to': u"orm['appulet.Route']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': u"orm['auth.User']"})
},
u'appulet.reference': {
'Meta': {'object_name': 'Reference'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'general': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'references'", 'null': 'True', 'to': u"orm['appulet.Highlight']"}),
'html_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'appulet.route': {
'Meta': {'object_name': 'Route'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'routes'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description_ca': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_es': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_oc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'gpx_pois': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'gpx_track': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'gpx_waypoints': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_route_based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['appulet.Route']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'official': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reference': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['appulet.Reference']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'short_description_ca': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_oc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'track': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['appulet.Track']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'upload_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'appulet.step': {
'Meta': {'object_name': 'Step'},
'absolute_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'altitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'precision': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'steps'", 'null': 'True', 'to': u"orm['appulet.Track']"})
},
u'appulet.track': {
'Meta': {'object_name': 'Track'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 24, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['appulet'] | MoveLab/erulet-server | appulet/migrations/0008_auto__add_highlighttranslationvcs.py | Python | gpl-3.0 | 21,086 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
class SummableRunFile(object):
def __init__(self, path, run_number, is_event_mode):
assert(type(path) == str)
assert(type(run_number) == str)
self._path = path
self._run_number = run_number
self._is_event_mode = is_event_mode
def is_event_data(self):
return self._is_event_mode
def file_path(self):
return self._path
def display_name(self):
return str(self._run_number)
| mganeva/mantid | scripts/SANS/sans/gui_logic/models/run_file.py | Python | gpl-3.0 | 731 |
import libvirt
import sys
def fmtstate(x):
return {
libvirt.VIR_DOMAIN_SHUTOFF: "Shutoff",
libvirt.VIR_DOMAIN_SHUTDOWN: "Shutdown",
libvirt.VIR_DOMAIN_NOSTATE: "No state",
libvirt.VIR_DOMAIN_BLOCKED: "Blocked",
libvirt.VIR_DOMAIN_PAUSED: "Paused",
libvirt.VIR_DOMAIN_CRASHED: "Crashed",
libvirt.VIR_DOMAIN_PMSUSPENDED: "Suspended",
libvirt.VIR_DOMAIN_RUNNING: "Running",
}.get(x, "Error")
def fmtmem(x):
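    # libvirt reports memory in KiB; show MB below 1 GiB and GB above.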
if x < 1048576:
return str(x / 1024) + " MB"
return str(x / 1048576) + " GB"
def fmtdict(d):
    d['mem'] = fmtmem(d['mem'])
    d['memmax'] = fmtmem(d['memmax'])
    d['state'] = fmtstate(d['state'])
    return d
class VWebConnection:
def __init__(self):
self.conn = libvirt.openReadOnly("qemu:///system")
def close(self):
if self.conn is not None:
self.conn.close()
    def getDomains(self):
        if self.conn is None:
            return []
        ks = ['state', 'memmax', 'mem', 'cpus', 'cput', 'name']
        return [fmtdict(dict(zip(ks, dom.info() + [dom.name()])))
                for dom in self.conn.listAllDomains(0)]
def getDomainDetails(self, name):
if self.conn is None:
return []
dom = self.conn.lookupByName(name)
ks = ['state', 'memmax', 'mem', 'cpus', 'cput', 'name', 'OStype',
'persistent', 'updated', 'hostname']
s = dom.info() + \
[dom.name(), dom.OSType(), dom.isPersistent(), dom.isUpdated()]
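        # hostname() typically needs a guest agent and raises without one,
        # hence the fallback to an empty string below.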
try:
s.append(dom.hostname())
except:
s.append("")
info = fmtdict(dict(zip(ks, s)))
mem = {}
try:
mem = dom.memoryStats()
except:
mem = {}
return info, mem
| ignuki/vweb | mylib.py | Python | gpl-3.0 | 2,000 |
# -*- coding: utf-8 -*-
import pytest
import os.path as op
from django.core import mail
from django.test import TestCase
from django.test.utils import override_settings
from core.email.email import Email
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": ["%s/templates" % op.abspath(op.dirname(__file__))],
"OPTIONS": {
"context_processors": [
"django.core.context_processors.request",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.static",
],
},
},
]
)
@pytest.mark.django_db
class TestEmail(TestCase):
def test_can_send_a_simple_email_by_generating_the_text_content(self):
# Setup
email = Email("[email protected]", html_template="mail_dummy.html", subject="Subject")
# Run
email.send()
# Check
assert len(mail.outbox) == 1
assert mail.outbox[0].to[0] == "[email protected]"
assert mail.outbox[0].subject == "Subject"
assert mail.outbox[0].body.strip() == "Hello"
assert len(mail.outbox[0].alternatives) == 1
assert mail.outbox[0].alternatives[0][0].strip() == "<p>Hello</p>"
assert mail.outbox[0].alternatives[0][1] == "text/html"
def test_can_send_a_simple_email_with_a_text_template(self):
# Setup
email = Email(
"[email protected]",
html_template="mail_dummy.html",
text_template="mail_dummy.txt",
subject="Subject",
)
# Run
email.send()
# Check
assert len(mail.outbox) == 1
assert mail.outbox[0].to[0] == "[email protected]"
assert mail.outbox[0].subject == "Subject"
assert mail.outbox[0].body.strip() == "Hello txt"
assert len(mail.outbox[0].alternatives) == 1
assert mail.outbox[0].alternatives[0][0].strip() == "<p>Hello</p>"
assert mail.outbox[0].alternatives[0][1] == "text/html"
def test_can_send_a_simple_email_with_a_subject_template(self):
# Setup
email = Email(
"[email protected]", html_template="mail_dummy.html", subject_template="mail_subject.html"
)
# Run
email.send()
# Check
assert len(mail.outbox) == 1
assert mail.outbox[0].to[0] == "[email protected]"
assert mail.outbox[0].subject == "Hello subject"
assert mail.outbox[0].body.strip() == "Hello"
assert len(mail.outbox[0].alternatives) == 1
assert mail.outbox[0].alternatives[0][0].strip() == "<p>Hello</p>"
assert mail.outbox[0].alternatives[0][1] == "text/html"
def test_can_use_an_extra_context(self):
# Setup
email = Email(
"[email protected]",
html_template="mail_extra_context.html",
subject="Subject",
extra_context={"foo": "bar"},
)
# Run
email.send()
# Check
assert len(mail.outbox) == 1
assert mail.outbox[0].body.strip() == "bar"
def test_can_be_sent_in_a_specific_language(self):
# Setup
email = Email(
"[email protected]",
html_template="mail_language.html",
subject="Subject",
extra_context={"foo": "bar"},
language="en",
)
# Run
email.send()
# Check
assert len(mail.outbox) == 1
assert mail.outbox[0].body.strip() == "en"
def test_email_tag_will_be_converted_to_mailgun_header(self):
email = Email(
"[email protected]", html_template="mail_dummy.html", subject="Subject", tag="test"
)
# Run
email.send()
# Check
test_email = mail.outbox[0]
assert "X-Mailgun-Tag" in test_email.extra_headers
assert test_email.extra_headers["X-Mailgun-Tag"] == "test"
| erudit/eruditorg | tests/unit/core/email/test_email.py | Python | gpl-3.0 | 4,054 |
import socket
import sched
import time
connected_players = {}
def process_packet(sock, data, address):
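    # Minimal TCP-style handshake over UDP: answer a SYN with SYN-ACK and
    # mark the peer connected once its ACK arrives.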
print "Received a packet from", address, "containing", data
if data == "SYN":
connected_players[address] = "SYN-ACK sent"
sock.sendto("SYN-ACK", address)
elif data == "ACK":
connected_players[address] = "ACK-ed"
print "connected players:", connected_players
if __name__ == "__main__":
PORT = 4485
MAX = 2000
scheduler = sched.scheduler(time.time, time.sleep)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", PORT))
while True:
data, address = sock.recvfrom(MAX)
process_packet(sock, data, address)
| atsheehan/pypogs | pypogs/tmp-server.py | Python | gpl-3.0 | 713 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import re
from past.builtins import cmp
import functools
import frappe, erpnext
from erpnext.accounts.report.utils import get_currency, convert_to_presentation_currency
from erpnext.accounts.utils import get_fiscal_year
from frappe import _
from frappe.utils import (flt, getdate, get_first_day, add_months, add_days, formatdate)
from six import itervalues
def get_period_list(from_fiscal_year, to_fiscal_year, periodicity, accumulated_values=False,
company=None, reset_period_on_fy_change=True):
"""Get a list of dict {"from_date": from_date, "to_date": to_date, "key": key, "label": label}
	Periodicity can be (Yearly, Half-Yearly, Quarterly, Monthly)"""
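	# Illustration (assuming an April-March fiscal year): Quarterly periods
	# get keys like jun_2017, sep_2017, dec_2017 and mar_2018.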
fiscal_year = get_fiscal_year_data(from_fiscal_year, to_fiscal_year)
validate_fiscal_year(fiscal_year, from_fiscal_year, to_fiscal_year)
	# start with first day, so as to avoid year to_dates like 2-April if ever they occur
year_start_date = getdate(fiscal_year.year_start_date)
year_end_date = getdate(fiscal_year.year_end_date)
months_to_add = {
"Yearly": 12,
"Half-Yearly": 6,
"Quarterly": 3,
"Monthly": 1
}[periodicity]
period_list = []
start_date = year_start_date
months = get_months(year_start_date, year_end_date)
for i in range(months // months_to_add):
period = frappe._dict({
"from_date": start_date
})
to_date = add_months(start_date, months_to_add)
start_date = to_date
if to_date == get_first_day(to_date):
# if to_date is the first day, get the last day of previous month
to_date = add_days(to_date, -1)
if to_date <= year_end_date:
# the normal case
period.to_date = to_date
else:
# if a fiscal year ends before a 12 month period
period.to_date = year_end_date
period.to_date_fiscal_year = get_fiscal_year(period.to_date, company=company)[0]
period.from_date_fiscal_year_start_date = get_fiscal_year(period.from_date, company=company)[1]
period_list.append(period)
if period.to_date == year_end_date:
break
# common processing
for opts in period_list:
key = opts["to_date"].strftime("%b_%Y").lower()
if periodicity == "Monthly" and not accumulated_values:
label = formatdate(opts["to_date"], "MMM YYYY")
else:
if not accumulated_values:
label = get_label(periodicity, opts["from_date"], opts["to_date"])
else:
if reset_period_on_fy_change:
label = get_label(periodicity, opts.from_date_fiscal_year_start_date, opts["to_date"])
else:
label = get_label(periodicity, period_list[0].from_date, opts["to_date"])
opts.update({
"key": key.replace(" ", "_").replace("-", "_"),
"label": label,
"year_start_date": year_start_date,
"year_end_date": year_end_date
})
return period_list
def get_fiscal_year_data(from_fiscal_year, to_fiscal_year):
fiscal_year = frappe.db.sql("""select min(year_start_date) as year_start_date,
max(year_end_date) as year_end_date from `tabFiscal Year` where
name between %(from_fiscal_year)s and %(to_fiscal_year)s""",
{'from_fiscal_year': from_fiscal_year, 'to_fiscal_year': to_fiscal_year}, as_dict=1)
return fiscal_year[0] if fiscal_year else {}
def validate_fiscal_year(fiscal_year, from_fiscal_year, to_fiscal_year):
if not fiscal_year.get('year_start_date') and not fiscal_year.get('year_end_date'):
frappe.throw(_("End Year cannot be before Start Year"))
def get_months(start_date, end_date):
diff = (12 * end_date.year + end_date.month) - (12 * start_date.year + start_date.month)
return diff + 1
def get_label(periodicity, from_date, to_date):
if periodicity == "Yearly":
if formatdate(from_date, "YYYY") == formatdate(to_date, "YYYY"):
label = formatdate(from_date, "YYYY")
else:
label = formatdate(from_date, "YYYY") + "-" + formatdate(to_date, "YYYY")
else:
label = formatdate(from_date, "MMM YY") + "-" + formatdate(to_date, "MMM YY")
return label
def get_data(
company, root_type, balance_must_be, period_list, filters=None,
accumulated_values=1, only_current_fiscal_year=True, ignore_closing_entries=False,
	ignore_accumulated_values_for_fy=False, total=True):
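	# Build the report rows for one root type: fetch the account tree, pull GL
	# entries under each root account, roll balances up the tree, then format
	# rows and drop empty ones.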
accounts = get_accounts(company, root_type)
if not accounts:
return None
accounts, accounts_by_name, parent_children_map = filter_accounts(accounts)
company_currency = get_appropriate_currency(company, filters)
gl_entries_by_account = {}
for root in frappe.db.sql("""select lft, rgt from tabAccount
where root_type=%s and ifnull(parent_account, '') = ''""", root_type, as_dict=1):
set_gl_entries_by_account(
company,
period_list[0]["year_start_date"] if only_current_fiscal_year else None,
period_list[-1]["to_date"],
root.lft, root.rgt, filters,
gl_entries_by_account, ignore_closing_entries=ignore_closing_entries
)
calculate_values(
accounts_by_name, gl_entries_by_account, period_list, accumulated_values, ignore_accumulated_values_for_fy)
accumulate_values_into_parents(accounts, accounts_by_name, period_list, accumulated_values)
out = prepare_data(accounts, balance_must_be, period_list, company_currency)
out = filter_out_zero_value_rows(out, parent_children_map)
if out and total:
add_total_row(out, root_type, balance_must_be, period_list, company_currency)
return out
def get_appropriate_currency(company, filters=None):
if filters and filters.get("presentation_currency"):
return filters["presentation_currency"]
else:
return frappe.get_cached_value('Company', company, "default_currency")
def calculate_values(
accounts_by_name, gl_entries_by_account, period_list, accumulated_values, ignore_accumulated_values_for_fy):
for entries in itervalues(gl_entries_by_account):
for entry in entries:
d = accounts_by_name.get(entry.account)
if not d:
				frappe.msgprint(
					_("Could not retrieve information for {0}.").format(entry.account), title="Error",
					raise_exception=1
				)
for period in period_list:
# check if posting date is within the period
if entry.posting_date <= period.to_date:
if (accumulated_values or entry.posting_date >= period.from_date) and \
(not ignore_accumulated_values_for_fy or
entry.fiscal_year == period.to_date_fiscal_year):
d[period.key] = d.get(period.key, 0.0) + flt(entry.debit) - flt(entry.credit)
if entry.posting_date < period_list[0].year_start_date:
d["opening_balance"] = d.get("opening_balance", 0.0) + flt(entry.debit) - flt(entry.credit)
def accumulate_values_into_parents(accounts, accounts_by_name, period_list, accumulated_values):
"""accumulate children's values in parent accounts"""
for d in reversed(accounts):
if d.parent_account:
for period in period_list:
accounts_by_name[d.parent_account][period.key] = \
accounts_by_name[d.parent_account].get(period.key, 0.0) + d.get(period.key, 0.0)
accounts_by_name[d.parent_account]["opening_balance"] = \
accounts_by_name[d.parent_account].get("opening_balance", 0.0) + d.get("opening_balance", 0.0)
def prepare_data(accounts, balance_must_be, period_list, company_currency):
data = []
year_start_date = period_list[0]["year_start_date"].strftime("%Y-%m-%d")
year_end_date = period_list[-1]["year_end_date"].strftime("%Y-%m-%d")
for d in accounts:
# add to output
has_value = False
total = 0
row = frappe._dict({
"account": _(d.name),
"parent_account": _(d.parent_account) if d.parent_account else '',
"indent": flt(d.indent),
"year_start_date": year_start_date,
"year_end_date": year_end_date,
"currency": company_currency,
"include_in_gross": d.include_in_gross,
"account_type": d.account_type,
"is_group": d.is_group,
"opening_balance": d.get("opening_balance", 0.0) * (1 if balance_must_be=="Debit" else -1),
"account_name": ('%s - %s' %(_(d.account_number), _(d.account_name))
if d.account_number else _(d.account_name))
})
for period in period_list:
if d.get(period.key) and balance_must_be == "Credit":
# change sign based on Debit or Credit, since calculation is done using (debit - credit)
d[period.key] *= -1
row[period.key] = flt(d.get(period.key, 0.0), 3)
if abs(row[period.key]) >= 0.005:
# ignore zero values
has_value = True
total += flt(row[period.key])
row["has_value"] = has_value
row["total"] = total
data.append(row)
return data
def filter_out_zero_value_rows(data, parent_children_map, show_zero_values=False):
data_with_value = []
for d in data:
if show_zero_values or d.get("has_value"):
data_with_value.append(d)
else:
# show group with zero balance, if there are balances against child
children = [child.name for child in parent_children_map.get(d.get("account")) or []]
if children:
for row in data:
if row.get("account") in children and row.get("has_value"):
data_with_value.append(d)
break
return data_with_value
def add_total_row(out, root_type, balance_must_be, period_list, company_currency):
total_row = {
"account_name": "'" + _("Total {0} ({1})").format(_(root_type), _(balance_must_be)) + "'",
"account": "'" + _("Total {0} ({1})").format(_(root_type), _(balance_must_be)) + "'",
"currency": company_currency
}
for row in out:
if not row.get("parent_account"):
for period in period_list:
total_row.setdefault(period.key, 0.0)
total_row[period.key] += row.get(period.key, 0.0)
row[period.key] = row.get(period.key, 0.0)
total_row.setdefault("total", 0.0)
total_row["total"] += flt(row["total"])
row["total"] = ""
if "total" in total_row:
out.append(total_row)
# blank row after Total
out.append({})
def get_accounts(company, root_type):
return frappe.db.sql("""
select name, account_number, parent_account, lft, rgt, root_type, report_type, account_name, include_in_gross, account_type, is_group, lft, rgt
from `tabAccount`
where company=%s and root_type=%s order by lft""", (company, root_type), as_dict=True)
def filter_accounts(accounts, depth=10):
parent_children_map = {}
accounts_by_name = {}
for d in accounts:
accounts_by_name[d.name] = d
parent_children_map.setdefault(d.parent_account or None, []).append(d)
filtered_accounts = []
def add_to_list(parent, level):
if level < depth:
children = parent_children_map.get(parent) or []
sort_accounts(children, is_root=True if parent==None else False)
for child in children:
child.indent = level
filtered_accounts.append(child)
add_to_list(child.name, level + 1)
add_to_list(None, 0)
return filtered_accounts, accounts_by_name, parent_children_map
def sort_accounts(accounts, is_root=False, key="name"):
"""Sort root types as Asset, Liability, Equity, Income, Expense"""
def compare_accounts(a, b):
		if re.split(r'\W+', a[key])[0].isdigit():
# if chart of accounts is numbered, then sort by number
return cmp(a[key], b[key])
elif is_root:
if a.report_type != b.report_type and a.report_type == "Balance Sheet":
return -1
if a.root_type != b.root_type and a.root_type == "Asset":
return -1
if a.root_type == "Liability" and b.root_type == "Equity":
return -1
if a.root_type == "Income" and b.root_type == "Expense":
return -1
return 1
accounts.sort(key = functools.cmp_to_key(compare_accounts))
def set_gl_entries_by_account(
company, from_date, to_date, root_lft, root_rgt, filters, gl_entries_by_account, ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = get_additional_conditions(from_date, ignore_closing_entries, filters)
accounts = frappe.db.sql_list("""select name from `tabAccount`
where lft >= %s and rgt <= %s""", (root_lft, root_rgt))
additional_conditions += " and account in ('{}')"\
.format("', '".join([frappe.db.escape(d) for d in accounts]))
gl_entries = frappe.db.sql("""select posting_date, account, debit, credit, is_opening, fiscal_year, debit_in_account_currency, credit_in_account_currency, account_currency from `tabGL Entry`
where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
order by account, posting_date""".format(additional_conditions=additional_conditions),
{
"company": company,
"from_date": from_date,
"to_date": to_date,
"cost_center": filters.cost_center,
"project": filters.project,
"finance_book": filters.get("finance_book"),
"company_fb": frappe.db.get_value("Company", company, 'default_finance_book')
},
as_dict=True)
if filters and filters.get('presentation_currency'):
convert_to_presentation_currency(gl_entries, get_currency(filters))
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.account, []).append(entry)
return gl_entries_by_account
def get_additional_conditions(from_date, ignore_closing_entries, filters):
additional_conditions = []
if ignore_closing_entries:
additional_conditions.append("ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("posting_date >= %(from_date)s")
if filters:
if filters.get("project"):
if not isinstance(filters.get("project"), list):
projects = frappe.safe_encode(filters.get("project"))
filters.project = [d.strip() for d in projects.strip().split(',') if d]
additional_conditions.append("project in %(project)s")
if filters.get("cost_center"):
filters.cost_center = get_cost_centers_with_children(filters.cost_center)
additional_conditions.append("cost_center in %(cost_center)s")
if filters.get("finance_book"):
if filters.get("include_default_book_entries"):
additional_conditions.append("finance_book in (%(finance_book)s, %(company_fb)s)")
else:
additional_conditions.append("finance_book in (%(finance_book)s)")
return " and {}".format(" and ".join(additional_conditions)) if additional_conditions else ""
def get_cost_centers_with_children(cost_centers):
if not isinstance(cost_centers, list):
cost_centers = [d.strip() for d in cost_centers.strip().split(',') if d]
all_cost_centers = []
for d in cost_centers:
lft, rgt = frappe.db.get_value("Cost Center", d, ["lft", "rgt"])
children = frappe.get_all("Cost Center", filters={"lft": [">=", lft], "rgt": ["<=", rgt]})
all_cost_centers += [c.name for c in children]
return list(set(all_cost_centers))
def get_columns(periodicity, period_list, accumulated_values=1, company=None):
columns = [{
"fieldname": "account",
"label": _("Account"),
"fieldtype": "Link",
"options": "Account",
"width": 300
}]
if company:
columns.append({
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1
})
for period in period_list:
columns.append({
"fieldname": period.key,
"label": period.label,
"fieldtype": "Currency",
"options": "currency",
"width": 150
})
if periodicity!="Yearly":
if not accumulated_values:
columns.append({
"fieldname": "total",
"label": _("Total"),
"fieldtype": "Currency",
"width": 150
})
return columns
| shubhamgupta123/erpnext | erpnext/accounts/report/financial_statements.py | Python | gpl-3.0 | 15,121 |
# /usr/bin/env python
import numpy
import csv
from math import *
from euclid import *
from omega import *
from cyclops import *
csv_filename = "terraindata/cyprus/data.csv"
ref_point = {}
ref_point['lat'] = 36.0001388889
ref_point['lon'] = 31.9998611111
data_xz_scale = [1, 1]
data_height_scale = 1.0
# model
scene = getSceneManager()
all = SceneNode.create("everything")
site_centers = {}
# Create a directional light
light1 = Light.create()
light1.setLightType(LightType.Directional)
light1.setLightDirection(Vector3(-1.0, -1.0, -1.0))
light1.setColor(Color(1.0, 1.0, 1.0, 1.0))
light1.setAmbient(Color(0.2, 0.2, 0.2, 1.0))
light1.setEnabled(True)
light2 = Light.create()
light2.setColor(Color("#606040"))
light2.setPosition(Vector3(0, 0, 0))
light2.setEnabled(True)
#light2.setAttenuation(0, 0.1, 0.1)
# Camera
cam = getDefaultCamera()
cam.setPosition(Vector3(146047.75, 17604.45, 178648.63))
cam.setOrientation(Quaternion(0.99, -0.14, -0.1, 0.01))
cam.getController().setSpeed(4000)
setNearFarZ(2, 400000)
cam.addChild(light2)
colormap = {}
colormap[0] = '#ffffff'
colormap[1] = '#00008F'
colormap[2] = '#0080FF'
colormap[3] = '#80FF80'
colormap[4] = '#FF8000'
colormap[5] = '#800000'
# ui
uim = UiModule.createAndInitialize()
imgplot = loadImage('terraindata/cyprus/colormap.jpg')
hplot = Container.create(ContainerLayout.LayoutVertical, uim.getUi())
hplot.setVisible(True)
hplot.setStyle('fill: #aaaaaa80')
hplot.setSize(Vector2(imgplot.getWidth(), imgplot.getHeight()))
#hplot.setAutosize(True)
#hplot.setPosition(Vector3(10000, 200, 0))
hplot.setPosition(Vector3(10, 10, 0))
plotview = Image.create(hplot)
plotview.setData(imgplot)
#plotview.setPosition(Vector2(2, 2))
#plotview.setSize(Vector2(imgplot.getWidth(), 64))
def drawSite(site, options):
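    # Map one CSV row to world coordinates: cylinders encode vessel counts,
    # spheres encode bead counts, with colours binned via colormap.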
print site["Tier"], site["Short"], site["Latitude"], site["Longitude"], site["Height"], site["Vessels"], site["Beads"]
lat = float(site["Latitude"])
lon = float(site["Longitude"])
height = float(site["Height"]) + 200
vessel = int(site["Vessels"])
vessel_c = int(min(vessel / 5, 5))
bead = int(site["Beads"])
bead_c = int(min(bead / 5, 5))
display = '(' + site["Tier"] + ') ' + site["Short"]
# calculate position
oneDegreeLength = 30.86666667 * 3647
x = (lon - ref_point['lon']) * oneDegreeLength * data_xz_scale[0];
z = -1 * (lat - ref_point['lat']) * oneDegreeLength * data_xz_scale[1];
pos = Vector3(x, height, z)
print 'pos: ', pos, vessel_c, bead_c
#text
text = Text3D.create('fonts/arial.ttf', 0.6, display)
text.setPosition(Vector3(0.5, 0.4, 0.5))
#text.setPosition(pos)
text.setFontResolution(120)
text.setColor(Color('green'))
center = SphereShape.create(0.5, 2)
center.setScale(Vector3(1000, 1000, 1000))
center.setPosition(pos)
center.setVisible(False)
center.addChild(text)
size = 1000
if vessel == 0 and bead == 0:
print 'no data'
sphere = SphereShape.create(0.5, 2)
sphere.setScale(Vector3(500, 50, 500))
sphere.setEffect('colored -d ' + colormap[0])
sphere.setVisible(True)
sphere.setPosition(pos)
elif vessel > 0 and bead == 0:
print 'vessel only'
cylinder = CylinderShape.create(1, 0.5, 0.5, 1, 16)
cylinder.pitch(radians(-90))
cylinder.setScale(Vector3(size, size, size))
cylinder.setEffect('colored -d ' + colormap[vessel_c])
cylinder.setVisible(True)
cylinder.setPosition(pos[0], pos[1], pos[2] + size/2)
elif vessel == 0 and bead > 0:
print 'bead only'
sphere = SphereShape.create(0.5, 2)
sphere.setScale(Vector3(size, size, size))
sphere.setEffect('colored -d ' + colormap[bead_c])
sphere.setVisible(True)
sphere.setPosition(pos[0], pos[1] + size/2, pos[2])
else:
print 'both'
cylinder = CylinderShape.create(1, 0.5, 0.5, 1, 16)
cylinder.pitch(radians(-90))
cylinder.setScale(Vector3(size, size, size))
cylinder.setEffect('colored -d ' + colormap[vessel_c])
cylinder.setVisible(True)
cylinder.setPosition(Vector3(pos[0] - size/2-100, pos[1], pos[2]))
sphere = SphereShape.create(0.5, 2)
sphere.setScale(Vector3(size, size, size))
sphere.setEffect('colored -d ' + colormap[bead_c])
sphere.setVisible(True)
sphere.setPosition(Vector3(pos[0] + size/2+100, pos[1] + size/2, pos[2]))
# sites
with open(csv_filename, 'rb') as csvfile:
sites = csv.DictReader(csvfile)
options = {}
csvfile.seek(0)
sites = csv.DictReader(csvfile)
for site in sites:
drawSite(site, options)
# menu
mm = MenuManager.createAndInitialize()
menu = mm.getMainMenu()
mm.setMainMenu(menu)
# cameras
menu.addButton("Go to camera 1",
'cam.setPosition(Vector3(46930.8, 7805.12, 65433.8)), cam.setOrientation(Quaternion(-0.99, 0.07, 0.07, 0.01))')
queueCommand(":freefly")
| mivp/tessterrain | examples/cyprus/run_local.py | Python | gpl-3.0 | 4,984 |
import os
import hashlib
import base64
from Crypto.Cipher import AES
from Crypto import Random
AES_KEY_BITS = 256
AES_KEY_BYTES = (AES_KEY_BITS/8)
AES_KEY_FILE_DEFAULT = '/var/palette/.aes'
# pylint: disable=invalid-name
aes_key_file = AES_KEY_FILE_DEFAULT
# pylint: enable=invalid-name
# This is the Tableau password storage algorithm.
# NOTE: str() is called to convert from unicode.
def tableau_hash(password, salt):
return hashlib.sha1(str(password) + "fnord" + str(salt)).hexdigest()
def set_aes_key_file(path):
# pylint: disable=global-statement
# pylint: disable=invalid-name
global aes_key_file
aes_key_file = path
if not os.path.isfile(aes_key_file):
return genaeskey()
def genaeskey():
key = Random.new().read(AES_KEY_BYTES)
tmp = os.path.abspath(aes_key_file + '.tmp')
    with open(tmp, 'wb') as f:
f.write(key)
os.rename(tmp, aes_key_file)
os.chmod(aes_key_file, 0600)
return key
def aeskey():
if not os.path.isfile(aes_key_file):
return genaeskey()
    with open(aes_key_file, 'rb') as f:
key = f.read(AES_KEY_BYTES)
return key
def aes_encrypt(cleartext):
key = aeskey()
ivec = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CFB, ivec)
return base64.b16encode(ivec + cipher.encrypt(cleartext))
def aes_decrypt(ciphertext):
key = aeskey()
msg = base64.b16decode(ciphertext)
ivec = msg[0:AES.block_size]
cipher = AES.new(key, AES.MODE_CFB, ivec)
return cipher.decrypt(msg[AES.block_size:])
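# Round-trip sketch (hypothetical key path; assumes it is writable):
#   set_aes_key_file('/tmp/.palette_aes')   # generates the key if missing
#   token = aes_encrypt('s3cret')           # base16 string: IV + ciphertext
#   assert aes_decrypt(token) == 's3cret'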
| palette-software/palette | controller/controller/passwd.py | Python | gpl-3.0 | 1,549 |
import sys,time,threading
from Core.Color import LinuxColor, LinuxStatus
class PB():
Stop = False
def __init__(self,MSG): self.Mess = MSG
def Run(self):
print self.Mess + "... ",
sys.stdout.flush()
i = 0
while self.Stop != True:
if (i%4) == 0: sys.stdout.write('\b/')
elif (i%4) == 1: sys.stdout.write('\b-')
elif (i%4) == 2: sys.stdout.write('\b\\')
elif (i%4) == 3: sys.stdout.write('\b|')
sys.stdout.flush()
time.sleep(0.2)
i+=1
print ""
self.Stop = False
def ProcBar_Main(self):
self.Stop = False
Process = threading.Thread(target=self.Run)
Process.start()
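# Usage sketch (assumed): run the spinner in its daemon thread, then stop it.
#   bar = PB("Scanning")
#   bar.ProcBar_Main()
#   time.sleep(2)          # ... do the real work ...
#   bar.Stop = True        # spinner prints a newline and resets itself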
| micle2018/OCCULT | Core/Process.py | Python | gpl-3.0 | 769 |
import urllib,urllib2,re,sys,httplib
#import xbmcplugin,xbmcgui,xbmcaddon,urlresolver
import cookielib,os,string,StringIO
import time,base64,logging
from datetime import datetime
from utils import *
try:
import json
except ImportError:
import simplejson as json
#testing in shell
#TEST 1
# python -c "execfile('default.py'); Episode_Listing_Pages('http://www.animeflavor.com/index.php?q=node/4871')"
#TEST2
# python -c "execfile('default.py'); Episode_Media_Link('http://www.animeflavor.com/index.php?q=node/19518')"
#animestream
# modded from --> <addon id="plugin.video.animecrazy" name="Anime Crazy" version="1.0.9" provider-name="AJ">
BASE_URL = 'http://www.animeflavor.com'
base_url_name = BASE_URL.split('www.')[1]
base_txt = base_url_name + ': '
# aniUrls = ['http://www.animeflavor.com/node/anime_list','http://www.animeflavor.com/anime_movies','http://www.animeflavor.com/cartoons']
aniUrls = []
aniUrls.append(['http://www.animeflavor.com/node/anime_list','anime'])
aniUrls.append(['http://www.animeflavor.com/anime_movies','anime movie'])
aniUrls.append(['http://www.animeflavor.com/cartoons','cartoon'])
def Episode_Listing_Pages(url):
# Identifies the number of pages attached to the original content page
print base_txt + url
episodeListPage = url
epList = Episode_Listing(episodeListPage)
return epList
def Episode_Listing(url):
# Extracts the URL and Page name of the various content pages
link = grabUrlSource(url)
epBlockMatch=re.compile('<div class="relativity_child">(.+?)class="page-next"').findall(link)
epBlockMatch1=re.compile('Watch episodes(.+?)Login</a> to post').findall(link)
epBlockMatch2=re.compile('<div class="relativity_child">(.+?)<div class="block block-block"').findall(link)
epList = []
if len(epBlockMatch) >= 1:
epBlock=epBlockMatch[0]
elif len(epBlockMatch1) >= 1:
epBlock=epBlockMatch1[0]
elif len(epBlockMatch2) >= 1:
epBlock=epBlockMatch2[0]
else:
print base_txt + 'Nothing was parsed from Episode_Listing (failed epBlock): ' + url
return epList
epNum = 0
episodeMediaThumb = ''
match=re.compile('<a href="(.+?)">(.+?)</a>').findall(epBlock)
if(len(match) >= 1):
for episodePageLink, episodePageName in match:
season = '1'
episodePageLink = BASE_URL + episodePageLink
subLoc = episodePageName.find(' Version')
if subLoc>-1:
episodePageName = episodePageName[:subLoc].strip()
epNumPart = episodePageName.strip().split()
for epNum in reversed(epNumPart):
if epNum.isdigit():
epNum = int(epNum)
break
else:
epNum = 0
if 'season' in episodePageLink:
season=re.compile('season-(.+?)-').findall(episodePageLink)[0]
elif 'Season' in episodePageName.title():
season=re.compile('Season (.+?) ').findall(episodePageName.title())[0]
if season.isdigit():
season = int(season)
else:
season = 1
episodePageName = episodePageName.title().replace(' - ',' ').replace(':',' ').replace('-',' ').strip()
episodePageName = episodePageName.title().replace('Season',' ').replace(' ',' ').replace(' ',' ').strip()
epList.append([episodePageLink, episodePageName, episodeMediaThumb.replace("'",""), epNum, season])
else:
print base_txt + 'Nothing was parsed from Episode_Listing: ' + url
return epList
def Episode_Page(url):
# Identifies the number of mirrors for the content
print base_txt + url
link = grabUrlSource(url)
altBlock=re.compile('<div class="relativity_child">(.+?)</div>').findall(link)
if(len(altBlock) >= 1):
altBlock = altBlock[0]
mirror = 1
else:
altBlock = ' '
print base_txt + 'No Alternate videos found Episode_Page: ' + url
mirror = 0
epMedia = []
episodeMediaMirrors = url
# first video
epMedia = epMedia + Episode_Media_Link(episodeMediaMirrors,mirror)
#alternate video(s)
match=re.compile('<a href="(.+?)">').findall(altBlock)
if(len(match) >= 1):
for episodeMediaMirrors in match:
mirror = mirror + 1
episodeMediaMirrors = BASE_URL + episodeMediaMirrors
epMedia = epMedia + Episode_Media_Link(episodeMediaMirrors, mirror)
return epMedia
def Episode_Media_Link(url, AltMirror=1, part=1):
    # Extracts the URL for the content media file
    link = grabUrlSource(url).lower().replace(' ','')
    epMedia = []
    mirror = 0
    # Every embed pattern gets identical handling, so loop over the patterns
    # instead of repeating the matching block four times.
    patterns = ['<(iframe|embed)(.+?)src="(.+?)"',
                '<(iframe|embed)src="(.+?)"',
                '<(iframe|embed)src=\'(.+?)\'',
                'config=flavor1\|file=(.+?)\|image']
    for pattern in patterns:
        for found in re.compile(pattern).findall(link):
            # findall yields tuples for multi-group patterns; the media link
            # is always the last group
            episodeMediaLink = found[-1] if isinstance(found, tuple) else found
            if (not any(skip_ads in episodeMediaLink for skip_ads in remove_ads)):
                if (base_url_name in episodeMediaLink):
                    episodeMediaLink = Media_Link_Finder(episodeMediaLink)
                if AltMirror==0:
                    mirror = mirror + 1
                else:
                    mirror = AltMirror
                epMedia.append([base_url_name,episodeMediaLink, mirror, part])
    if(len(epMedia) < 1):
        print base_txt + 'Nothing was parsed from Episode_Media_Link: ' + url
    return epMedia
def Video_List_And_Pagination(url):
link = grabUrlSource(url)
mostPop = []
videoImg = ''
seriesBlock = re.compile('<table class="views-view-grid">(.+?)<div class="block block-block" id="block-block-17">').findall(link)[0]
match=re.compile('<a href="(.+?)">(.+?)<').findall(seriesBlock)
for videoImg, videoName in match:
videoName = urllib.unquote(videoName)
mostPop.append([BASE_URL + videoImg, videoName, ''])
return mostPop
def Media_Link_Finder(url):
# Extracts the URL for the content media file
link = grabUrlSource(url).replace(' ','')
match = re.compile('(iframe|embed)src="(.+?)"').findall(link)
match1 = re.compile('(iframe|embed)src=\'(.+?)\'').findall(link)
epMediaFound = []
if(len(match) >= 1):
epMediaFound = match[0][1]
if(len(match1) >= 1):
epMediaFound = match1[0][1]
if (len(epMediaFound) < 1):
epMediaFound = url
print base_txt + 'Nothing was parsed from Media_Link_Finder: ' + url
return epMediaFound
def Video_List_Searched(searchText,link):
# Generate list of shows/movies based on the provide keyword(s)
# urls = ['http://www.animeflavor.com/index.php?q=node/anime_list','http://www.animeflavor.com/index.php?q=anime_movies','http://www.animeflavor.com/index.php?q=cartoons']
searchRes = []
videoName = searchText
match=re.compile('<a(.+?)>'+searchText+'(.+?)<').findall(link)
if(len(match) >= 1):
for linkFound, videoName in match:
if (not 'title="Go' in linkFound):
videoInfo = re.compile('href="(.+?)"').findall(linkFound)
videoLink = videoInfo[-1]
videoNameSplit = videoLink.split('/')
videoName = searchText + videoName.replace('</a>','').strip()
if (not any(skip_ads in videoLink for skip_ads in remove_ads)):
searchRes.append([BASE_URL+videoLink, videoName])
# else:
# print base_txt + 'Nothing was parsed from Video_List_Searched'
return searchRes
def Total_Video_List(link):
# Generate list of shows/movies
searchRes = []
match1=re.compile('<div class="view-content">(.+?)<div id="sidearea">').findall(link)
match=re.compile('<a(.+?)>(.+?)</a>').findall(match1[0])
if(len(match) >= 1):
for linkFound, videoName in match:
if (not 'title="Go' in linkFound):
videoInfo = re.compile('href="(.+?)"').findall(linkFound)
if(len(videoInfo) >= 1):
videoLink = videoInfo[-1]
videoName = urllib.unquote(videoName)
if (not any(skip_ads in videoLink for skip_ads in remove_ads)):
searchRes.append([BASE_URL+videoLink, videoName.title()])
else:
print base_txt + 'Nothing was parsed from Total_Video_List'
# searchRes = U2A_List(searchRes)
return searchRes | torenado/plugin.video.animestream | resources/lib/streamSites/animeflavor.py | Python | gpl-3.0 | 8,906 |
import sys
import time
import threading
class ResettableTimer(threading.Thread):
def __init__(self, maxtime, callback):
self.maxtime = maxtime
self.counter = 0
self.inc = maxtime / 10.0
self.callback = callback
threading.Thread.__init__(self)
self.setDaemon(True)
self.start()
def reset(self):
self.counter = 0
def run(self):
self.counter = 0
while self.counter < self.maxtime:
self.counter += self.inc
time.sleep(self.inc)
self.callback()
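# Usage sketch (hypothetical 5-second timeout):
#   t = ResettableTimer(5.0, lambda: sys.stdout.write("timed out\n"))
#   time.sleep(3)
#   t.reset()              # postpones the callback another ~5 seconds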
| jimktrains/gus | resettabletimer.py | Python | gpl-3.0 | 569 |
# -*- coding: utf-8 -*-
import cv
import os
import cv2
def quick_show(image):
"""Display an image on the screen.
Quick 'n' dirty method to throw up a window with an image in it and
wait for the user to dismiss it.
"""
cv.NamedWindow("foo")
cv.ShowImage("foo", image)
cv.WaitKey(0)
cv.DestroyWindow("foo")
path_root = '/home/ivan/dev/pydev/lab/labtrans/plotter/data/cam/'
path_root = '/media/ivan/bf7f8bb4-842c-4abb-b280-8195370749c0/ivan/dev/labtrans/datos/mswim/placas veiculos/'
# 20130626_115022_imagemPlaca.jpg
for f in os.listdir(path_root):
#print(type(open(path_root + f).read()))
#exit()
#print(f)
# load
original = cv.LoadImageM(path_root + f, cv.CV_LOAD_IMAGE_GRAYSCALE)
# Crop from x, y, w, h -> 100, 200, 100, 200
cropped = original[0:original.rows-45, 0:original.cols]
# resize
thumbnail = cv.CreateMat(
cropped.rows / 10, cropped.cols / 10, original.type
)
cv.Resize(cropped, thumbnail)
# quick_show(cropped)
# gray mode
# CvtColor(original,gray,CV_RGB2GRAY)
#gray = cv.CreateImage((original.width, original.height), cv.IPL_DEPTH_8U, 1)
#cv.CvtColor(original,gray,cv.CV_RGB2GRAY)
# localize
# cvThreshold(image, binary_image,128,255, CV_THRESH_OTSU)
threshes = {'otsu': cv.CV_THRESH_OTSU,
'binary': cv.CV_THRESH_BINARY,
'binary_inv': cv.CV_THRESH_BINARY_INV,
'trunk': cv.CV_THRESH_TRUNC,
'tozero': cv.CV_THRESH_TOZERO,
'tozero_inv': cv.CV_THRESH_TOZERO_INV}
threshes_available = ['otsu', 'binary']
for name, thresh in threshes.items():
if name not in threshes_available:
continue
print(name)
binary_image = cv.CreateMat(
cropped.rows, cropped.cols, original.type
)
color = 255
threshold = 128
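        # with CV_THRESH_OTSU the threshold argument is ignored; the value
        # is computed automatically from the image histogram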
cv.Threshold(cropped, binary_image, threshold, color, thresh)
quick_show(binary_image)
# Connected Component Analysis
#myblobs = cv2.CBlobResult(binary_image, mask, 0, True)
#myblobs.filter_blobs(325,2000)
#blob_count = myblobs.GetNumBlobs()
quick_show(thumbnail)
| xmnlab/minilab | ia/ocr/xmnlpr/lpr.py | Python | gpl-3.0 | 2,210 |
#!/usr/bin/env python
"""Split a fasta file into chuncks of at least n nucleotides
Usage:
python fasta_split_n_nucleotides.py input_file n output_stub
input_file = fasta file to split (string)
n = minimal number of nucleotides in each split files (integer > 0)
    output_stub = name stub for output files (string, defaults to input_file)
"""
# Importing modules
import sys
# Defining classes
class Fasta(object):
"""Fasta object with name and sequence
"""
def __init__(self, name, sequence):
self.name = name
self.sequence = sequence
def write_to_file(self, handle):
handle.write(">" + self.name + "\n")
handle.write(self.sequence + "\n")
# Defining functions
def fasta_iterator(input_file):
"""Takes a fasta file input_file and returns a fasta iterator
"""
with open(input_file) as f:
sequence = ""
name = ""
begun = False
for line in f:
line = line.strip()
if line.startswith(">"):
if begun:
yield Fasta(name, sequence)
name = line.replace(">", "")
sequence = ""
begun = True
else:
sequence += line
if name != "":
yield Fasta(name, sequence)
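# Usage sketch (hypothetical file):
#   for fasta in fasta_iterator("seqs.fasta"):
#       print fasta.name, len(fasta.sequence)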
# Main
if __name__ == '__main__':
try:
input_file = sys.argv[1]
n = int(sys.argv[2])
except:
print __doc__
sys.exit(1)
try:
output_stub = sys.argv[3]
except:
output_stub = input_file
sequences = fasta_iterator(input_file)
output_number = 1
output_file = output_stub + "_{:06n}".format(output_number)
outf = open(output_file, "w")
nucleotide_count = 0
for s in sequences:
if nucleotide_count > n:
outf.close()
nucleotide_count = 0
output_number += 1
output_file = output_stub + "_{:06n}".format(output_number)
outf = open(output_file, "w")
nucleotide_count += len(s.sequence)
outf.write(">" + s.name + "\n" + s.sequence + "\n")
| enormandeau/Scripts | fasta_split_n_nucleotides.py | Python | gpl-3.0 | 2,105 |
#!/usr/bin/env python
# SuperTester v1.4 by Albert Huang
# =================================
# Before using this really fun (and colorful) debugging tool, you MUST put
# this line at the very beginning of main():
#
# setvbuf(stdout, NULL, _IONBF, 0);
#
# This forces your C program to NOT buffer output (stdout)!
# Finally, make sure to remove any extraneous debug statements before running
# this program! The valid output will not match any extra debug statements!
#
# This little script assumes that your program is ./backgammon.
# If it isn't, you can do either of the following:
# 1) Compile it to ./backgammon:
# gcc coolcode.c cool2.c cool3.c -o backgammon
# ~ OR ~
# 2) Modify this script to use a different file name. Really easy - just
# open this script in your favorite editor (vi, emacs, nano),
# scroll to the bottom, and follow the instructions there.
#
# You will also need the test files from the class folder.
# http://www.ece.umd.edu/class/enee150/assignments/pr1/
#
# You can run something like this to get all the files (5 testcases):
# wget http://www.ece.umd.edu/class/enee150/assignments/pr1/test1.in
# wget http://www.ece.umd.edu/class/enee150/assignments/pr1/test1.out
# ...etc.
#
# Once you are sure you have done all of the above (AND recompiled your
# program), hit ENTER to begin. Good luck and have fun!
#
import os
import sys
import signal
import time
import datetime
import subprocess
import select
import difflib
import traceback
def headermsg():
sys.stdout.write("\033c")
sys.stdout.flush()
fh = open(__file__)
for line in fh.readlines():
line = line.strip()
        if line.startswith('#'):
if line != '#!/usr/bin/env python':
print line[2:]
else:
break
sys.stdout.write("Hit ENTER > ")
sys.stdout.flush()
raw_input()
def tprint(text):
now = datetime.datetime.now()
timeStr = now.strftime("%Y-%m-%d %H:%M:%S")
print "[%s] %s" % (timeStr, text)
def eprint(text):
tprint("\033[31mError: \033[0m %s" % text)
def iprint(text):
tprint("\033[34mInfo: \033[0m %s" % text)
def wprint(text):
tprint("\033[33mWarning:\033[0m %s" % text)
def sprint(text):
tprint("\033[32mSuccess:\033[0m %s" % text)
def countLines(f):
return sum(1 for line in open(f))
def diffStr(str1, str2):
if str1 != str2:
output = ""
for line in difflib.unified_diff(str1.splitlines(), str2.splitlines(), lineterm = ''):
output += line + "\n"
return output
else:
return None
# input: infile outfile
def tester(prgm, input, validOutput):
# Start the process!
iprint("Launching process %s with input %s..." % (prgm, input))
p = subprocess.Popen([prgm], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
# Dirty hooks
poll_obj = select.poll()
poll_obj.register(p.stdout, select.POLLIN)
try:
# Some variables...
lineCounter = 0
totalLines = countLines(input)
stdoutLines = ""
unifiedLines = ""
for line in open(input).readlines():
line = line.strip()
            if p.poll() is not None:
                if p.returncode == 0:
                    sprint("Program exited successfully. Yay! (Make sure that it was supposed to exit, though!)")
                    break
                else:
                    wprint("Program did NOT exit successfully. (Error code: %i)" % (p.returncode))
                    break
try:
p.stdin.write(line + "\n")
except:
wprint("Program seems to be already closed, but no status code was returned. Eeek! Exiting anyway.")
break
p.stdin.flush()
lineCounter += 1
time.sleep(0.1)
p.stdout.flush()
unifiedLines += line + "\n"
# Read in lines!
tmpBuf = ""
if select.select([p.stdout], [], [], 0)[0]:
iprint("Reading output...")
while poll_obj.poll(0):
tmpBuf = p.stdout.read(1)
if tmpBuf:
stdoutLines += tmpBuf
unifiedLines += tmpBuf
else:
break
checkstdoutLines = '\n'.join(stdoutLines.splitlines()[:-1])
curLinesInLog = len(checkstdoutLines.splitlines())
iprint("Sent line '%s' (line %i/%i). (Current lines in log: %i)" % (line, lineCounter, totalLines, curLinesInLog))
# Do a diff
validLogPart = open(validOutput).read(len(checkstdoutLines))
diffc = diffStr(validLogPart, checkstdoutLines)
if diffc != None:
eprint("Diff found on input line %i! (Actual input line: '%s')" % (lineCounter, line))
#tprint("Program output follows:")
#print checkstdoutLines
#tprint("Valid output follows:")
#print validLogPart
tprint("Diff follows: (valid vs invalid)")
print diffc
fh = open("progout.txt", "w")
fh.write(stdoutLines)
fh.close()
iprint("Program output has been written to progout.txt.")
# Write a bigger diff
fh = open("progdiff.txt", "w")
for line in difflib.ndiff(validLogPart.splitlines(), checkstdoutLines.splitlines()):
fh.write(line + "\n")
fh.close()
iprint("Advanced diff output has been written to progout.txt.")
# Write unified output
fh = open("progunified.txt", "w")
for line in unifiedLines.splitlines():
fh.write(line+"\n")
fh.close()
iprint("Unified output has been written to progunified.txt.")
iprint("These files will help you figure out what went wrong.")
iprint("Happy debugging, and good luck!")
# Cleanup
os.kill(p.pid, signal.SIGKILL)
sys.exit(1)
else:
curLinesInLog = len(stdoutLines.splitlines())
iprint("Sent line '%s' (line %i/%i). (Current lines in log: %i)" % (line, lineCounter, totalLines, curLinesInLog))
except SystemExit:
sys.exit(1)
except Exception, err:
eprint("Oops, something went wrong!")
print traceback.format_exc()
os.kill(p.pid, signal.SIGKILL)
sys.exit(1)
# Display header message!
headermsg()
####################################
# Tester commands
####################################
# --------------------
# Program Name Change
# --------------------
# To change the program name, simply replace "./backgammon" with your
# program name. Make sure to include the './' part, since this prevents
# some weird stuff from happening!
#
# For example, if your program is called a.out, the new lines would look
# like:
# tester("./a.out", "test1.in", "test1.out")
# tester("./a.out", "test2.in", "test2.out")
# ...and so on.
#
# --------------------
# Additional Tests
# --------------------
# You can also add additional lines as necessary!
# Just add more "tester" lines! The syntax is as follows:
# tester(program,
# input_file_with_lines_to_feed_to_the_program,
# valid_output_file_with_valid_output_to_compare_to)
#
tester("./backgammon", "test1.in", "test1.out")
tester("./backgammon", "test2.in", "test2.out")
tester("./backgammon", "test3.in", "test3.out")
tester("./backgammon", "test4.in", "test4.out")
tester("./backgammon", "test5.in", "test5.out")
| alberthdev/SuperTester | SuperTesterOrig.py | Python | gpl-3.0 | 6,774 |
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from backtrader import date2num
import backtrader.feed as feed
class PandasData(feed.DataBase):
'''
The ``dataname`` parameter inherited from ``feed.DataBase`` is the pandas
DataFrame
'''
params = (
# Possible values for datetime (must always be present)
# None : datetime is the "index" in the Pandas Dataframe
# -1 : autodetect position or case-wise equal name
        # >= 0 : numeric index to the column in the pandas dataframe
# string : column name (as index) in the pandas dataframe
('datetime', None),
# Possible values below:
# None : column not present
# -1 : autodetect position or case-wise equal name
        # >= 0 : numeric index to the column in the pandas dataframe
# string : column name (as index) in the pandas dataframe
('open', -1),
('high', -1),
('low', -1),
('close', -1),
('volume', -1),
('openinterest', -1),
)
datafields = [
'datetime', 'open', 'high', 'low', 'close', 'volume', 'openinterest'
]
def __init__(self):
super(PandasData, self).__init__()
# these "colnames" can be strings or numeric types
colnames = list(self.p.dataname.columns.values)
if self.p.datetime is None:
# datetime is expected as index col and hence not returned
# add fake entry for the autodetection algorithm
colnames.insert(0, 0)
# try to autodetect if all columns are numeric
        cstrings = [x for x in colnames if isinstance(x, six.string_types)]
colsnumeric = not len(cstrings)
# Where each datafield find its value
self._colmapping = dict()
# Build the column mappings to internal fields in advance
for i, datafield in enumerate(self.datafields):
defmapping = getattr(self.params, datafield)
if isinstance(defmapping, six.integer_types) and defmapping < 0:
# autodetection requested
if colsnumeric:
# matching names doesn't help, all indices are numeric
# use current colname index
self._colmapping[datafield] = colnames[i]
else:
# name matching may be possible
for colname in colnames:
if isinstance(colname, six.string_types):
if datafield.lower() == colname.lower():
self._colmapping[datafield] = colname
break
if datafield not in self._colmapping:
# not yet there ... use current index
self._colmapping[datafield] = colnames[i]
else:
# all other cases -- used given index
self._colmapping[datafield] = defmapping
def start(self):
# reset the length with each start
self._idx = -1
def _load(self):
self._idx += 1
if self._idx >= len(self.p.dataname):
# exhausted all rows
return False
# Set the standard datafields
for datafield in self.datafields[1:]:
colindex = self._colmapping[datafield]
if colindex is None:
# datafield signaled as missing in the stream: skip it
continue
# get the line to be set
line = getattr(self.lines, datafield)
            # indexing for pandas: 1st is column, then row
line[0] = self.p.dataname[colindex][self._idx]
# datetime conversion
coldtime = self._colmapping[self.datafields[0]]
if coldtime is None:
# standard index in the datetime
tstamp = self.p.dataname.index[self._idx]
else:
# it's in a different column ... use standard column index
tstamp = self.p.dataname.index[coldtime][self._idx]
# convert to float via datetime and store it
dt = tstamp.to_datetime()
dtnum = date2num(dt)
self.lines.datetime[0] = dtnum
# Done ... return
return True
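# Minimal usage sketch (hypothetical CSV; assumes a standard backtrader setup):
#   import backtrader as bt
#   import pandas as pd
#   df = pd.read_csv('prices.csv', parse_dates=True, index_col=0)
#   cerebro = bt.Cerebro()
#   cerebro.adddata(PandasData(dataname=df))  # columns autodetected by name
#   cerebro.run()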
| gnagel/backtrader | docs/pfeed.py | Python | gpl-3.0 | 5,235 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import wx
import wx.html2
from psychopy.localization import _translate
from psychopy.projects import pavlovia
from psychopy import logging
class BaseFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.Center()
# set up menu bar
self.menuBar = wx.MenuBar()
self.fileMenu = self.makeFileMenu()
self.menuBar.Append(self.fileMenu, _translate('&File'))
self.SetMenuBar(self.menuBar)
def makeFileMenu(self):
fileMenu = wx.Menu()
app = wx.GetApp()
keyCodes = app.keys
# add items to file menu
fileMenu.Append(wx.ID_CLOSE,
_translate("&Close View\t%s") % keyCodes['close'],
_translate("Close current window"))
self.Bind(wx.EVT_MENU, self.closeFrame, id=wx.ID_CLOSE)
# -------------quit
fileMenu.AppendSeparator()
fileMenu.Append(wx.ID_EXIT,
_translate("&Quit\t%s") % keyCodes['quit'],
_translate("Terminate the program"))
self.Bind(wx.EVT_MENU, app.quit, id=wx.ID_EXIT)
return fileMenu
def closeFrame(self, event=None, checkSave=True):
self.Destroy()
def checkSave(self):
"""If the app asks whether everything is safely saved
"""
return True # for OK
class PavloviaMiniBrowser(wx.Dialog):
"""This class is used by to open an internal browser for the user stuff
"""
style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
def __init__(self, parent, user=None, loginOnly=False, logoutOnly=False,
style=style, *args, **kwargs):
# create the dialog
wx.Dialog.__init__(self, parent, style=style, *args, **kwargs)
# create browser window for authentication
self.browser = wx.html2.WebView.New(self)
self.loginOnly = loginOnly
self.logoutOnly = logoutOnly
self.tokenInfo = {}
# do layout
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.browser, 1, wx.EXPAND, 10)
self.SetSizer(sizer)
if loginOnly:
self.SetSize((600, 600))
else:
self.SetSize((700, 600))
self.CenterOnParent()
        # check there is a user (or log them in)
        if not user:
            user = pavlovia.getCurrentSession().user
        self.user = user
        if not user:
            self.login()
def logout(self):
self.browser.Bind(wx.html2.EVT_WEBVIEW_LOADED, self.checkForLogoutURL)
self.browser.LoadURL('https://gitlab.pavlovia.org/users/sign_out')
def login(self):
self._loggingIn = True
authURL, state = pavlovia.getAuthURL()
self.browser.Bind(wx.html2.EVT_WEBVIEW_ERROR, self.onConnectionErr)
self.browser.Bind(wx.html2.EVT_WEBVIEW_LOADED, self.checkForLoginURL)
self.browser.LoadURL(authURL)
def setURL(self, url):
self.browser.LoadURL(url)
def gotoUserPage(self):
if self.user:
url = self.user.attributes['web_url']
self.browser.LoadURL(url)
def gotoProjects(self):
self.browser.LoadURL("https://pavlovia.org/projects.html")
def onConnectionErr(self, event):
if 'INET_E_DOWNLOAD_FAILURE' in event.GetString():
self.EndModal(wx.ID_EXIT)
raise Exception("{}: No internet connection available.".format(event.GetString()))
def checkForLoginURL(self, event):
url = event.GetURL()
if 'access_token=' in url:
self.tokenInfo['token'] = self.getParamFromURL(
'access_token', url)
self.tokenInfo['tokenType'] = self.getParamFromURL(
'token_type', url)
self.tokenInfo['state'] = self.getParamFromURL(
'state', url)
self._loggingIn = False # we got a log in
self.browser.Unbind(wx.html2.EVT_WEBVIEW_LOADED)
pavlovia.login(self.tokenInfo['token'])
if self.loginOnly:
self.EndModal(wx.ID_OK)
elif url == 'https://gitlab.pavlovia.org/dashboard/projects':
# this is what happens if the user registered instead of logging in
# try now to do the log in (in the same session)
self.login()
else:
logging.info("OAuthBrowser.onNewURL: {}".format(url))
def checkForLogoutURL(self, event):
url = event.GetURL()
if url == 'https://gitlab.pavlovia.org/users/sign_in':
if self.logoutOnly:
self.EndModal(wx.ID_OK)
def getParamFromURL(self, paramName, url=None):
"""Takes a url and returns the named param"""
if url is None:
url = self.browser.GetCurrentURL()
return url.split(paramName + '=')[1].split('&')[0]
class PavloviaCommitDialog(wx.Dialog):
"""This class will be used to brings up a commit dialog
(if there is anything to commit)"""
def __init__(self, *args, **kwargs):
# pop kwargs for Py2 compatibility
changeInfo = kwargs.pop('changeInfo', '')
initMsg = kwargs.pop('initMsg', '')
super(PavloviaCommitDialog, self).__init__(*args, **kwargs)
# Set Text widgets
wx.Dialog(None, id=wx.ID_ANY, title=_translate("Committing changes"))
self.updatesInfo = wx.StaticText(self, label=changeInfo)
self.commitTitleLbl = wx.StaticText(self, label=_translate('Summary of changes'))
self.commitTitleCtrl = wx.TextCtrl(self, size=(500, -1), value=initMsg)
self.commitDescrLbl = wx.StaticText(self, label=_translate('Details of changes\n (optional)'))
self.commitDescrCtrl = wx.TextCtrl(self, size=(500, 200), style=wx.TE_MULTILINE | wx.TE_AUTO_URL)
# Set buttons
self.btnOK = wx.Button(self, wx.ID_OK)
self.btnCancel = wx.Button(self, wx.ID_CANCEL)
# Format elements
self.setToolTips()
self.setDlgSizers()
def setToolTips(self):
"""Set the tooltips for the dialog widgets"""
self.commitTitleCtrl.SetToolTip(
wx.ToolTip(
_translate("Title summarizing the changes you're making in these files")))
self.commitDescrCtrl.SetToolTip(
wx.ToolTip(
_translate("Optional details about the changes you're making in these files")))
def setDlgSizers(self):
"""
Set the commit dialog sizers and layout.
"""
commitSizer = wx.FlexGridSizer(cols=2, rows=2, vgap=5, hgap=5)
commitSizer.AddMany([(self.commitTitleLbl, 0, wx.ALIGN_RIGHT),
self.commitTitleCtrl,
(self.commitDescrLbl, 0, wx.ALIGN_RIGHT),
self.commitDescrCtrl])
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
buttonSizer.AddMany([self.btnCancel,
self.btnOK])
# main sizer and layout
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(self.updatesInfo, 0, wx.ALL | wx.EXPAND, border=5)
mainSizer.Add(commitSizer, 1, wx.ALL | wx.EXPAND, border=5)
mainSizer.Add(buttonSizer, 0, wx.ALL | wx.ALIGN_RIGHT, border=5)
self.SetSizerAndFit(mainSizer)
self.Layout()
def ShowCommitDlg(self):
"""Show the commit application-modal dialog
Returns
-------
wx event
"""
return self.ShowModal()
def getCommitMsg(self):
"""
Gets the commit message for the git commit.
Returns
-------
string:
The commit message and description.
If somehow the commit message is blank, a default is given.
"""
if self.commitTitleCtrl.IsEmpty():
commitMsg = "_"
else:
commitMsg = self.commitTitleCtrl.GetValue()
if not self.commitDescrCtrl.IsEmpty():
commitMsg += "\n\n" + self.commitDescrCtrl.GetValue()
return commitMsg
| psychopy/versions | psychopy/app/pavlovia_ui/_base.py | Python | gpl-3.0 | 8,295 |
#!venv/bin/python
from webserver import app
if __name__ == '__main__':
app.run(host=app.config['HOST'],port=app.config['PORT'],debug=app.config['DEBUG'])
| samarth/baalti-server | run.py | Python | gpl-3.0 | 160 |
"""
Keep track of (optionally weighted) descriptive statistics of incoming data,
handling nans.
Copyright 2013 Deepak Subburam
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"""
import sys
import numpy as np
from . import datab as db
from . import arrays, logging, strings
class Sparse(object):
"""
Keep track of summary statistics, but do not store the values.
SYNOPSIS
s = Sparse(weighted=True, logger='debug')
s.update(data, weights=w)
stats = s.compute()
OR
s = Sparse(data, weights=w)
stats = s.compute()
OPTIONS
weighted:
Whether weights accompany datapoints. Defaults to False.
logger (default: 'info'):
If this is a logger object, use that for debug/error messages. If this
is a string, initialize a logger object with the corresponding log level.
store_last:
Store values processed by last call to update().
ATTRIBUTES
statistics:
statistics computed so far. Needs call to obj.compute() to refresh.
last_update:
values processed by last call to update().
NOTES
For weighted objects, statistics{d_count} contains sum(w)^2 / sum(w^2),
and is a measure of the diversity of data points.
"""
def __init__(self, *data, **opts):
if 'logger' in opts:
self.logger = opts['logger']
else: self.logger = 'info'
if type(self.logger) == str:
self.logger = logging.Logger(self.__class__.__name__, self.logger)
self.count = 0
self.size = 0
self.sum_xw = 0.0
self.sum_xxw = 0.0
self.sum_w = 0.0
self.sum_ww = 0.0
self.weighted = False
if 'weighted' in opts: self.weighted = opts['weighted']
if 'weights' in opts and opts['weights'] is not None:
self.weighted = True
if 'negative_weights' in opts and opts['negative_weights'] is not None:
self.weighted = True
if 'weights' not in opts: opts['weights'] = None
if 'negative_weights' not in opts: opts['negative_weights'] = None
if 'store_last' in opts and opts['store_last']: self.last_update = True
else: self.last_update = None
self.statistics = {'mean': None,
'mean_square': None,
'sum_square': None,
'variance': None,
'std_dev': None,
't-stat': None,
'std_err': None,
'count': 0,
'size': 0,
'sum': None,
'min': None,
'max': None,
}
if self.weighted:
self.statistics['mean_wt'] = None
self.statistics['d_count'] = 0.0
if len(data) > 1:
raise TypeError('Too many arguments to constructor.')
elif len(data):
if 'IDs' not in opts: opts['IDs'] = None
if self.weighted is None:
self.weighted = ('weights' in opts) and (opts['weights'] is not None)
self.update(data[0], weights=opts['weights'],
negative_weights=opts['negative_weights'], IDs=opts['IDs'])
def update(self, values, weights=None, IDs=None, negative_weights=None):
"""
values, [negative_]weights and IDs may either be all arrays or all scalars.
If negative_weights are specified, values get multiplied by the sign of
the corresponding negative_weight, and weights get set to
abs(negative_weights).
datapoints with either the value or the weight being nan are ignored,
as are datapoints with weight <= 0. Though these datapoints affect
the 'size' statistic (but not the 'count' statistic).
values, weights and IDs are returned (useful to get back defaults/masked
versions of the inputs).
"""
values = arrays.nice_array(values, logger=self.logger)
mask = values.mask.copy()
if self.weighted:
if negative_weights is not None:
if weights is not None: raise AssertionError('Can not specify both weights and negative weights')
weights = abs(negative_weights)
values = values.copy()*np.sign(negative_weights)
elif weights is None:
raise AssertionError('Weighted statistics object received no weights in update.')
weights = arrays.nice_array(weights, shape=values.shape, logger=self.logger)
mask |= weights.mask
# Following contortion to avoid bogus
# "RuntimeWarning: Invalid value encountered in less_equal"
if mask.size < 2 and (not mask): mask = weights <= 0
mask[~mask] = (weights[~mask] <= 0)
fweights = weights.flatten()
else:
if weights is not None:
raise AssertionError('Unweighted statistics object received weights in update.')
fweights = np.ma.ones(values.size, dtype=float)
fweights.mask = mask.flatten()
fvalues = values.flatten()
fvalues.mask = fweights.mask
if IDs is None: IDs = np.array(list(range(fvalues.size)), dtype=int) + self.size
elif not isinstance(IDs, np.ndarray): IDs = np.array(IDs)
self.size += fvalues.size
count = fvalues.count()
if count == 0:
if self.last_update is not None: self.last_update = ([], [], [])
return
min_index = np.ma.argmin(fvalues)
max_index = np.ma.argmax(fvalues)
if self.count == 0:
self.statistics['min'] = (fvalues[min_index], IDs.flat[min_index])
self.statistics['max'] = (fvalues[max_index], IDs.flat[max_index])
else:
if fvalues[min_index] < self.statistics['min'][0]:
self.statistics['min'] = (fvalues[min_index], IDs.flat[min_index])
if fvalues[max_index] > self.statistics['max'][0]:
self.statistics['max'] = (fvalues[max_index], IDs.flat[max_index])
self.count += count
self.sum_xw += np.ma.sum(fvalues * fweights)
self.sum_xxw += np.ma.sum(fvalues * fvalues * fweights)
self.sum_w += np.ma.sum(fweights)
self.sum_ww += np.ma.sum(fweights * fweights)
if self.last_update is not None:
self.last_update = (fvalues, fweights, IDs.flat)
def compute(self):
statistics = self.statistics
statistics['count'] = self.count
statistics['size'] = self.size
if self.count == 0: return self.statistics
statistics['mean'] = self.sum_xw / self.sum_w
statistics['sum'] = statistics['mean'] * self.count
statistics['mean_square'] = self.sum_xxw / self.sum_w
statistics['sum_square'] = statistics['mean_square'] * self.count
d_count = self.count
if self.weighted:
self.statistics['mean_wt'] = self.sum_w / self.count
d_count = self.sum_w ** 2 / self.sum_ww
statistics['d_count'] = d_count
if self.count == 1: return self.statistics
statistics['variance'] = \
statistics['mean_square'] - statistics['mean'] ** 2
if statistics['variance'] < 0.0: return self.statistics
statistics['std_dev'] = np.sqrt(statistics['variance'])
statistics['std_err'] = statistics['std_dev'] / np.sqrt(d_count)
if statistics['std_err'] <= 0.0: return self.statistics
statistics['t-stat'] = statistics['mean'] / statistics['std_err']
return self.statistics
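    # Example sketch (hypothetical values; nans are masked out):
    #   s = Sparse([1.0, 2.0, np.nan, 3.0])
    #   s.compute()['mean']   # -> 2.0  (count == 3, size == 4)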
@classmethod
def stats(cls, data, weights=None, axis=None, step=1, sliced=None, select=None,
overlay=None, split=None, buckets=None, group=None,
labels=None, label_index=None, label_all=None, label_other='Other',
negative_weights=None, IDs=None,
datab=None, name=None, formats=None, **opts):
"""
Calls Class(data).compute(), handling complexities in the form of data.
data can be two dimensional, and axis can be 0 or 1. In this case,
a list of statistics-records is returned, in Datab form (unless datab=False).
overlay:
run stats only for records selected by this mask.
split:
run stats for all records, records selected by this mask, and for
the others, returning a 3-tuple of results. Does not work with axis option.
buckets:
run stats for all records, and for records selected by each of the masks
in this list of masks. Does not work with axis option.
group:
bucket stats by values in this field.
sliced:
run stats for records selected by this slice.
select:
run stats for records selected by this boolean mask.
step:
      When axis option is specified, clump this many rows together for each row
      stat to be computed. This can optionally be a list of steps, in which case
      each clump can have a variable number of rows.
label_all:
Relevant only when axis or split/buckets option present. If not None,
compute stats over entire dataset, in addition to for each index of the
axis or split/buckets, and place results in an entry of output with this
label.
label_other:
Relevant only when buckets option present. If not None, compute stats
over part of dataset not in any bucket, in addition to for each bucket,
and place results in an entry of output with this label.
labels:
list to use to add labels to each entry of output. Relevant only when
there are multiple lines of output.
label_index:
like labels, except use label_index[::step].
name:
in the header, label the key column with this string.
formats:
Default formatting e.g. %7.2f for numeric fields in Datab spec.
"""
if group is not None:
if buckets is not None and split is not None:
raise AssertionError('group, buckets and split options not supported together.')
label_other = None
labels, buckets = [], []
if type(group) == list or type(group) == tuple:
group = np.array(group)
for group_name in np.unique(group):
labels.append(group_name.decode() if hasattr(group_name, 'decode')
else group_name)
buckets.append(group == group_name)
if name is None: name = 'group'
if split is not None:
if buckets is not None:
raise AssertionError('group, buckets and split options not supported together.')
buckets = [split]
if labels is None:
labels = ['True']
label_other = 'False'
else:
label_other = labels[1]
labels = [labels[0]]
if name is None: name = 'condn'
elif buckets is not None:
if labels is None:
labels = [str(d + 1) for d in range(len(buckets))]
if name is None: name = 'bucket'
data = arrays.nice_array(data)
if weights is not None: weights = arrays.nice_array(weights)
if negative_weights is not None:
if weights is not None: raise AssertionError('Can not specify both weights and negative weights')
weights = abs(negative_weights)
data = data.copy()*np.sign(negative_weights)
if axis is None and np.isscalar(step) and step == 1:
data, weights, IDs = \
arrays.select([data, weights, IDs],
sliced=sliced, overlay=overlay, select=select)
if buckets is None:
results = cls(data, weights=weights, IDs=IDs, **opts).compute()
if datab is True:
return Datab([results], name=name or 'key', labels=labels, formats=formats)
else:
return results
if label_all:
all_labels = [label_all]
results = [cls.stats(data, weights=weights, IDs=IDs, formats=formats,
**opts)]
else: all_labels, results = [], []
if label_other: other = np.ones(np.shape(data), dtype=bool)
buckets = arrays.select(buckets,
sliced=sliced, overlay=overlay, select=select)
all_labels.extend(labels)
for b in buckets:
results.append(cls.stats(data, weights=weights, IDs=IDs, overlay=b,
formats=formats, **opts))
if label_other: other[b] = False
if label_other:
all_labels.append(label_other)
results.append(cls.stats(data, weights=weights, IDs=IDs,
overlay=other, formats=formats, **opts))
if datab is False: return results
else: return Datab(results, labels=all_labels, name=name, formats=formats)
if buckets is not None:
raise AssertionError('split/buckets option not supported with axis/step option.')
data, weights, IDs = arrays.select([data, weights, IDs],
sliced=sliced, overlay=overlay, select=select)
if cls != Multivariate:
if axis is not None and (axis > 1 or axis < 0 or data.ndim != 2):
raise ValueError('Got unsupported axis option value that is ' +
'not 0 or 1; or data is not two-dimensional.')
if axis == 0:
data = data.transpose()
if overlay is not None: overlay = overlay.transpose()
if IDs is not None: IDs = IDs.transpose()
if weights is not None: weights = weights.transpose()
elif axis is not None and axis != 0:
raise ValueError('Axis option value 0 is the only one supported for Multivariate stats.')
if weights is not None and weights.ndim == 1 and data.ndim == 2:
if len(weights) != np.shape(data)[1]:
raise ValueError('shape mismatch: 1D weights cannot be broadcast to shape of values')
sys.stderr.write('stats.stats: Broadcasting 1D weights for 2D values.\n')
weights = arrays.extend(weights, np.shape(data)[0]).T
if label_all is not None:
results = [cls(data, weights=weights, IDs=IDs, **opts).compute()]
all_labels = [label_all]
else:
results = []
all_labels = []
start_idx = 0
count = 0
while start_idx < len(data):
if np.isscalar(step): end_idx = start_idx + step
else: end_idx = start_idx + step[min(count, len(step)-1)]
row_data, row_weights, row_IDs = \
arrays.select([data, weights, IDs], sliced=(start_idx, end_idx, 1))
results.append(cls.stats(row_data, weights=row_weights, IDs=row_IDs))
if labels is not None and len(labels): all_labels.append(labels[count])
elif label_index is not None: all_labels.append(label_index[start_idx] + '-')
else: all_labels.append(str(start_idx) + '-')
start_idx = end_idx
count += 1
if datab is False:
return results
else:
return Datab(results, labels=all_labels, name=name or 'key', formats=formats)
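    # Usage sketch (hypothetical arrays):
    #   Sparse.stats(values, weights=w, group=sectors, label_all='All')
    #   # -> Datab table with one row of statistics per sector plus an 'All' row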
@classmethod
def summary(cls, data, weights=None, include=None, exclude=None, fields=None,
all=False, filename=None, line_space=0, stringify=False, **opts):
"""
Convenience wrapper that, roughly speaking, calls cls.stats(data)
and then prints results in nice tabulated form, using stats.Datab.
See cls.stats() for documentation on supported options.
"""
stats_data = cls.stats(data, weights=weights, datab=True, **opts)
if len(stats_data) == 1 and 'name' not in opts:
#Get rid of redundant key column.
if exclude is None: exclude = ['key']
else:
if np.isscalar(exclude): exclude = [exclude]
exclude.append('key')
return stats_data.output(include=include, exclude=exclude, fields=fields, all=all,
filename=filename, line_space=line_space, stringify=stringify)
@classmethod
def loop_summary(cls, data, weights=None, include=None, exclude=None, fields=None,
labels=None, name='var', formats=None, all=False,
stringify=False, sort=None, **opts):
"""
Calls summary() in a loop for multiple responses/predictors.
"""
if type(data) != tuple:
raise ValueError('Data needs to be tuple so looping can happen.')
datab = opts.pop('datab', None)
output = []
all_labels = []
for count, layer in enumerate(data):
w = weights[count] if type(weights) == tuple else weights
output.append(cls.stats(layer, weights=w, **opts))
if labels is not None and len(labels): all_labels.append(labels[count])
else: all_labels.append(count)
if datab is False: return output
output = Datab(output, labels=all_labels, name=name, formats=formats)
if datab is True: return output
return output.output(include=include, exclude=exclude, fields=fields, all=all,
stringify=stringify, sort=sort)
@classmethod
def col_summary(cls, records, spec, weights=None, bucket=None, splits=[],
group=None, statistic='mean', all=False, exclude=None,
datab=None, formats='%7.4f', stringify=False, sort=None, **opts):
"""
Prints a summary statistic for each column in records as specified in <spec>.
Example:
col_summary(results, [('growth', 'median'), 'gross_domestic/GDP%5.1f'],
weights='population', group='continent', all=True)
spec, statistic, formats:
a list with each element representing a field whose statistic is to be printed.
THe element is generally a 2-tuple (<names_fmt_str>, <stat_str>) but can also be
a string, in which case <stat_str> defaults to <statistic>.
<names_fmt_str> can simply be the name of the field in records, for whom the
default statistic <statistic> is to be printed.
If it contains '/' then the name of the column printed is changed to that
which follows the '/'.
If it contains '%' then the values are fomatted as per the format specified;
otherwise the default <formats> is used.
weights:
print weighted statistics. can be a scalar <names_fmt_str> like in spec, or a
(<weights_array>, <name_fmt_str>) or a <weights_array> in which case 'weight'
is used as the name.
datab, stringify:
If True, a datab object of the results is returned. Otherwise the output is
printed or, if <stringify>, returned as a string.
"""
results = []
for entry in spec:
field_name_fmt, stat = (entry, statistic) if np.isscalar(entry) else entry
field_name, fmt = field_name_fmt.split('%') if '%' in field_name_fmt else \
(field_name_fmt, formats[1:])
field, name = field_name.split('/') if '/' in field_name else \
(field_name, field_name)
results.append([records[field], (name, float, '%' + fmt), stat])
kwargs = {}
if bucket:
field, name = bucket.split('/') if '/' in bucket else \
(bucket, bucket)
kwargs = bucketer([records[field], name, splits], label_other=None)
groups = None
if group:
field, kwargs['name'] = group.split('/') if '/' in group else \
(group, group)
groups = records[group]
if type(weights) == str:
field_name, wfmt = weights.split('%') if '%' in weights else \
(weights, formats[1:])
field, wname = field_name.split('/') if '/' in field_name else \
(field_name, field_name)
weights = records[field]
elif type(weights) == tuple:
weights, name_fmt = weights
wname, wfmt = name_fmt.split('%') if '%' in name_fmt else \
(name_fmt, formats[1:])
elif weights is not None:
wname, wfmt = 'weight', formats[1:]
# Finally, compute the stats.
for entry in results:
entry.append(cls.stats(entry[0], weights, group=groups, all=all, **kwargs))
if datab is False:
return results
# Set up datab object.
spec = []
first_stats = results[0][-1]
if first_spec := getattr(first_stats, 'spec', None):
spec.append(first_spec[0])
spec.append(('count', int, '%5d'))
if weights is not None:
spec.extend([('d_count', float, '%7.1f'),
(wname, float, f'%{wfmt}')])
spec.extend([entry[1] for entry in results])
output = db.Datab(shape=getattr(first_stats, 'shape', 1), spec=spec)
# Fill datab object.
if first_spec:
output[first_spec[0][0]] = first_stats[first_spec[0][0]]
output['count'] = first_stats['count']
if weights is not None:
output['d_count'] = first_stats['d_count']
output[wname] = first_stats['mean_wt']
for entry in results:
stat = entry[3][entry[2]]
output[entry[1][0]] = stat[0] if type(stat) == tuple else stat
if datab is True:
return output
return output.output(exclude=exclude, stringify=stringify, sort=sort)
class Full(Sparse):
"""
Store the values, so as to produce median and percentile values.
"""
default_percentiles = [[0.99, '99th%le'],
[0.95, '95th%le'],
[0.90, '90th%le'],
[0.75, '75th%le'],
[0.50, 'median'],
[0.25, '25th%le'],
[0.10, '10th%le'],
[0.05, '5th%le'],
[0.01, '1st%le']]
def __init__(self, *args, **opts):
self.data = None
Sparse.__init__(self, store_last=True, *args, **opts)
for percentile in Full.default_percentiles:
self.statistics[percentile[1]] = None
self.statistics['mad'] = None
def update(self, values, weights=None, IDs=None, negative_weights=None):
Sparse.update(self, values, weights=weights, IDs=IDs,
negative_weights=negative_weights)
if np.isscalar(self.last_update): return
values, weights, IDs = self.last_update
if not len(values): return
mask = values.mask | weights.mask
# Following contortion to avoid bogus
# "RuntimeWarning: Invalid value encountered in less_equal"
mask[~mask] = (weights[~mask] <= 0)
valid_values = values[~mask]
if not len(valid_values): return
indices = np.ma.argsort(valid_values)
update_data = \
np.array(list(zip(valid_values[indices], weights[~mask][indices],
IDs[~mask][indices])),
dtype=np.dtype([('value', values[0].dtype),
('weight', weights[0].dtype),
('ID', IDs[0].dtype)]))
if self.data is None:
self.data = update_data
else:
insert_indices = self.data['value'].searchsorted(update_data['value'])
self.data = np.insert(self.data, insert_indices, update_data)
def compute_percentiles(self, percentiles=None):
if self.data is None: return
if percentiles is None: percentiles = Full.default_percentiles
data = self.data
cumulative_weight = np.cumsum(data['weight'])
sum_weight = cumulative_weight[-1]
mean_weight = sum_weight / len(data)
if sum_weight <= 0.0: return
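        # Weighted percentile: locate the target cumulative weight and
        # interpolate linearly between the two straddling data values.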
for entry in percentiles:
wanted_center = sum_weight * entry[0] + mean_weight / 2
right_index = np.searchsorted(cumulative_weight, wanted_center)
if right_index == 0:
self.statistics[entry[1]] = (data[0][0], data[0][2])
elif right_index == len(data):
self.statistics[entry[1]] = (data[-1][0], data[-1][2])
else:
left_index = right_index - 1
left_distance = wanted_center - cumulative_weight[left_index]
right_distance = cumulative_weight[right_index] - wanted_center
value = (right_distance * data[left_index][0] +
left_distance * data[right_index][0]) / \
(left_distance + right_distance)
if right_distance < left_distance: ID_index = right_index
else: ID_index = left_index
self.statistics[entry[1]] = (value, data[ID_index][2])
def compute(self):
if self.data is None or self.sum_w <= 0.0: return self.statistics
statistics = Sparse.compute(self)
data = self.data
if len(data) == 1:
self.statistics['median'] = [data['value'], data['ID']]
return self.statistics
self.compute_percentiles()
deviations = Full(weighted=True)
deviations.update(np.abs(data['value'] - self.statistics['median'][0]),
data['weight'])
deviations.compute_percentiles(percentiles=[[0.5, 'mad']])
self.statistics['mad'] = deviations.statistics['mad']
return self.statistics
class Multivariate(Sparse):
"""
Each value is an constant-sized array of variables; covariances between the
variables are computed.
SYNOPSIS
s = Multivariate(nvars=2, weighted=True, logger='debug', names=['pred', 'resp'])
s.update(data, weights=w)
stats = s.compute()
OR
s = Multivariate(data, weights=w)
stats = s.compute()
OPTIONS
nvars: Number of variables in each datapoint.
weighted: Whether weights accompany datapoints. Defaults to False
store_last: Store values processed by last call to update().
store_all: Store values processed by all calls to update().
ATTRIBUTES
statistics:
statistics computed so far. Needs call to obj.compute() to refresh.
last_update, all_update:
values processed by last/all call(s) to update(), if store_last/all specified
during object construction.
Sparse[i]:
Sparse statistics object for each of the variables.
NOTES
    statistics{multiple_ij} contains mean(x_i * x_j) / mean(x_j * x_j).
"""
def __init__(self, *data, **opts):
if 'logger' in opts:
self.logger = opts['logger']
else: self.logger = 'info'
if type(self.logger) == str:
self.logger = logging.Logger(self.__class__.__name__, self.logger)
if len(data) > 1:
            raise TypeError('Too many arguments to constructor.')
elif len(data):
data = data[0]
if not isinstance(data, np.ndarray): data = np.array(data)
if data.ndim == 1: data = data[:, np.newaxis]
if 'nvars' in opts and opts['nvars'] != np.shape(data)[-1]:
raise ValueError('Number of columns in data incompatible with nvars option.')
self.nvars = np.shape(data)[-1]
elif 'nvars' in opts:
if opts['nvars'] < 1: raise ValueError('Number of nvars must be at least 1.')
else: self.nvars = opts['nvars']
else:
raise ValueError('Need to specify nvars option.')
if 'names' in opts:
self.names = opts['names']
if len(self.names) != self.nvars:
raise AssertionError('Number of names supplied should match nvars.')
del opts['names']
else: self.names = ['var'+str(i) for i in range(self.nvars)]
self.size = 0
self.count = 0
self.sum_w = 0.0
self.sum_ww = 0.0
self.count_ij = np.zeros((self.nvars, self.nvars), dtype=int)
self.sum_ijw = np.zeros((self.nvars, self.nvars))
self.sum_wij = np.zeros((self.nvars, self.nvars))
self.sum_wwij = np.zeros((self.nvars, self.nvars))
self.weighted = False
if 'weighted' in opts:
self.weighted = opts['weighted']
if 'weights' in opts and opts['weights'] is not None:
self.weighted = True
if 'negative_weights' in opts and opts['negative_weights'] is not None:
self.weighted = True
if 'weights' not in opts: opts['weights'] = None
if 'negative_weights' not in opts: opts['negative_weights'] = None
if 'store_last' in opts and opts['store_last']:
self.last_update = True
else: self.last_update = None
if 'store_all' in opts and opts['store_all']:
self.all_update = []
else: self.all_update = None
self.statistics = {'mean_ij': np.zeros((self.nvars, self.nvars)) + np.nan,
'variance_ij': np.zeros((self.nvars, self.nvars)) + np.nan,
'std_dev_ij': np.zeros((self.nvars, self.nvars)) + np.nan,
'count': 0,
'size': 0,
'sum_ij': np.zeros((self.nvars, self.nvars)) + np.nan,
'correlation_ij': np.zeros((self.nvars, self.nvars)) + np.nan,
'multiple_ij': np.zeros((self.nvars, self.nvars)) + np.nan,
'count_ij': np.zeros((self.nvars, self.nvars), dtype=int),
'nvars': self.nvars,
}
self.Sparse = [Sparse(weighted=self.weighted) for i in range(self.nvars)]
if self.weighted:
self.statistics['mean_wt'] = None
self.statistics['d_count'] = 0.0
self.statistics['mean_wt_ij'] = np.zeros((self.nvars, self.nvars)) + np.nan
self.statistics['d_count_ij'] = np.zeros((self.nvars, self.nvars))
if len(data): self.update(data, weights=opts['weights'],
negative_weights=opts['negative_weights'])
def update(self, values, weights=None, negative_weights=None):
"""
Can update one datapoint at a time (in which case values is an array
and weights must be a scalar), or a set (in which case values are
rows of a 2D array, and weights is a 1D array).
If negative_weights are specified, values get multiplied by the sign of
the corresponding negative_weight, and weights get set to
abs(negative_weights).
"""
values = arrays.nice_array(values, logger=self.logger,
                                   shape=(np.size(values) // self.nvars, self.nvars))
if self.weighted:
if negative_weights is not None:
if weights is not None: raise AssertionError('Can not specify both weights and negative weights')
negative_weights = arrays.nice_array(negative_weights, shape=len(values),
logger=self.logger)
weights = abs(negative_weights)
values = values.copy()*np.sign(negative_weights)[:, np.newaxis]
elif weights is None:
raise AssertionError('Weighted statistics object received no weights in update.')
else:
weights = arrays.nice_array(weights, shape=len(values), logger=self.logger)
else:
if weights is not None:
raise AssertionError('Unweighted statistics object received weights in update.')
weights = np.ma.ones(len(values))
for i in range(self.nvars):
if self.weighted: self.Sparse[i].update(values[:, i], weights)
else: self.Sparse[i].update(values[:, i])
for j in range(i):
valid = ~(values.mask[:, i] | values.mask[:, j] | weights.mask)
self.count_ij[i, j] += np.sum(valid)
self.sum_ijw[i, j] += np.sum(values[:, i] * values[:, j] * weights)
self.sum_wij[i, j] += np.sum(weights[valid])
self.sum_wwij[i, j] += np.sum(weights[valid] ** 2)
self.size += len(weights)
valid = np.any(~values.mask, axis=1) & (~weights.mask)
self.count += np.sum(valid)
self.sum_w += np.sum(weights[valid])
self.sum_ww += np.sum(weights[valid] ** 2)
if self.last_update is not None: self.last_update = (values, weights, valid)
if self.all_update is not None: self.all_update.append((values, weights, valid))
def compute(self):
statistics = self.statistics
statistics['count'] = self.count
statistics['size'] = self.size
if self.count == 0: return statistics
sparse_stats = [s.compute() for s in self.Sparse]
if self.weighted:
statistics['mean_wt'] = self.sum_w / self.count
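# Effective (design) sample size implied by the weights, a la Kish:
# (sum of weights)^2 / (sum of squared weights).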
statistics['d_count'] = self.sum_w ** 2 / self.sum_ww
if self.count == 1: return statistics
stat_fields = ['count', 'variance', 'std_dev']
if self.weighted: stat_fields.extend(['mean_wt', 'd_count'])
for i in range(self.nvars):
for stat in stat_fields:
statistics[stat + '_ij'][i, i] = sparse_stats[i][stat]
statistics['correlation_ij'][i, i] = 1.0
statistics['multiple_ij'][i, i] = 1.0
statistics['mean_ij'][i, i] = sparse_stats[i]['mean_square']
statistics['sum_ij'][i, i] = sparse_stats[i]['sum_square']
for j in range(i):
statistics['count_ij'][i, j] = self.count_ij[i, j]
if self.count_ij[i, j] == 0: continue
statistics['mean_ij'][i, j] = self.sum_ijw[i, j] / self.sum_wij[i, j]
statistics['sum_ij'][i, j] = \
statistics['mean_ij'][i, j] * statistics['count_ij'][i, j]
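# Covariance via the identity E[xy] - E[x]E[y]; std_dev_ij below
# keeps the covariance's sign (signed square root).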
statistics['variance_ij'][i, j] = statistics['mean_ij'][i, j] - \
sparse_stats[i]['mean'] * sparse_stats[j]['mean']
statistics['std_dev_ij'][i, j] = \
np.sqrt(abs(statistics['variance_ij'][i, j])) * \
np.sign(statistics['variance_ij'][i, j])
statistics['correlation_ij'][i, j] = \
statistics['variance_ij'][i, j] / \
np.sqrt(statistics['variance_ij'][i, i] *
statistics['variance_ij'][j, j])
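# multiple_ij[i, j] = E[xy] / E[yy], i.e. the slope of a
# through-origin regression of variable i on variable j.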
if statistics['mean_ij'][j, j] > 0:
statistics['multiple_ij'][i, j] = \
statistics['mean_ij'][i, j] / statistics['mean_ij'][j, j]
if statistics['mean_ij'][i, i] > 0:
statistics['multiple_ij'][j, i] = \
statistics['mean_ij'][i, j] / statistics['mean_ij'][i, i]
if self.weighted:
statistics['mean_wt_ij'][i, j] = \
self.sum_wij[i, j] / self.count_ij[i, j]
statistics['d_count_ij'][i, j] = \
self.sum_wij[i, j] ** 2 / self.sum_wwij[i, j]
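# Mirror the lower-triangle statistics into the upper triangle.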
for stat in stat_fields + ['correlation', 'mean', 'sum']:
statistics[stat + '_ij'][j, i] = statistics[stat + '_ij'][i, j]
return self.statistics
def print_ij(self, statistic, names=None, format=None):
"""Print table of given cross-statistic."""
values = self.statistics[statistic]
if names is None: names = self.names
if format is None:
stat = statistic.replace('_ij', '')
if stat in Datab.spec_index: format = Datab.spec[Datab.spec_index[stat]][2]
else: format = '%9.4f'
print(' '.join([strings.fmt(string, format) for string in [' '] + names]))
for i in range(self.nvars):
print(strings.fmt(names[i], format), end=' ')
for j in range(self.nvars):
print(format % values[i, j], end=' ')
print('')
def datab_ij(self, statistic, names=None, format=None):
"""Return datab object containing table of given cross-statistic."""
if names is None: names = self.names
if format is None:
stat = statistic.replace('_ij', '')
if stat in Datab.spec_index: format = Datab.spec[Datab.spec_index[stat]][2]
else: format = '%9.4f'
spec = [('factor', 'S6', '%-6s')] + [(n, float, format) for n in names]
values = self.statistics[statistic]
entries = [tuple([names[i]] + list(row)) for i, row in enumerate(values)]
return db.Datab(entries, spec)
class Datab(db.Datab):
"""
Store and print computed statistics-records in nice tabular form.
Datab.spec:
preferred order of statistics, and typical formatting.
Datab.spec_index:
dict going from statistic to index in Datab.spec.
Datab.default_output_fields:
shortlisted statistics to print by default.
See datab.Datab for other class/object fields and definitions.
"""
# entries in spec should be listed in the order their columns are to appear.
spec = (('size', int, '%7d'), ('count', int, '%7d'),
('d_count', float, '%8.1f'), ('mean_wt', float, '%7.3f'),
('sum', float, '%8.1f'), ('mean', float, '%8.4f'),
('std_dev', float, '%8.4f'), ('t-stat', float, '%7.3f'),
('median', float, '%8.4f'), ('mad', float, '%8.4f'),
('25th%le', float, '%8.4f'), ('75th%le', float, '%8.4f'),
('10th%le', float, '%8.4f'), ('90th%le', float, '%8.4f'),
('correlation_ij', float, '%7.4f'), ('multiple_ij', float, '%10.6f'),
('std_err', float, '%8.6f'), ('variance', float, '%8.6f'),
('min', float, '%8.3f'), ('max', float, '%8.3f'),
)
spec_index = dict([(f[0], i) for i, f in enumerate(spec)])
matrix_stat = {'correlation_ij': {'name': 'corr',
'symmetric': True},
'multiple_ij': {'name': 'coeff',
'symmetric': False}}
# the below should be in the order the fields appear in spec.
default_output_fields = ('count', 'd_count', 'mean_wt',
'mean', 'std_dev', 'correlation_ij', 't-stat',
'median', 'mad', 'min', 'max')
def __new__(subtype, results, labels=[], name='key', formats=None, **datab_args):
"""
results argument is a list of statistics-records. If a corresponding
list of strings, labels, is given, a column called <name> is created
and stored in the Datab object, which will also be constructed
with the index=<name> option.
"""
first_result = None
for r in results:
if r is not None:
first_result = r
break
if not first_result: return None
if labels is None or not len(labels):
labels = [str(d) for d in range(len(results))]
else: labels = [str(l) for l in labels]
indices = []
for stat in list(first_result.keys()):
if stat not in Datab.spec_index: continue
indices.append(Datab.spec_index[stat])
statistics = np.array(Datab.spec)[np.sort(indices)]
stats_data, none_indices = [], []
key_len = len(name)
for count, result in enumerate(results):
if result is None:
stats_data.append(None)
none_indices.append(count)
continue
row_stats = [labels[count]]
key_len = max(key_len, len(row_stats[-1]))
for stat in [s[0] for s in statistics]:
if stat not in Datab.matrix_stat:
if result[stat] is None or np.ndim(result[stat]) == 0:
row_stats.append(result[stat])
# for (min|max, arg) stats, just store the min/max value
else: row_stats.append(result[stat][0])
else:
for i in range(first_result['nvars']):
for j in range(i):
row_stats.append(result[stat][i, j])
if not Datab.matrix_stat[stat]['symmetric']:
row_stats.append(result[stat][j, i])
stats_data.append(tuple(row_stats))
my_spec = [(name, 'S'+str(key_len), '%-'+str(key_len)+'s')]
default_fields = []
for spec in statistics:
if spec[0] not in Datab.matrix_stat:
my_spec.append(spec)
if spec[0] in Datab.default_output_fields:
default_fields.append(spec[0])
else:
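# Matrix statistics are flattened into one column per (i, j) pair,
# e.g. corr_1_0; asymmetric ones also get the mirrored (j, i) column.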
nm = Datab.matrix_stat[spec[0]]['name']
for i in range(first_result['nvars']):
for j in range(i):
my_spec.append(('_'.join([nm, str(i), str(j)]),
spec[1], spec[2]))
if spec[0] in Datab.default_output_fields:
default_fields.append('_'.join([nm, str(i), str(j)]))
if not Datab.matrix_stat[spec[0]]['symmetric']:
my_spec.append(('_'.join([nm, str(j), str(i)]),
spec[1], spec[2]))
if spec[0] in Datab.default_output_fields:
default_fields.append('_'.join([nm, str(j), str(i)]))
if formats is not None:
    # Spec entries may be tuples (immutable), so rebuild them with the
    # requested format rather than assigning to s[2] in place.
    my_spec = [(s[0], s[1], formats) if s[1] == float and s[0] != 'd_count'
               else s for s in my_spec]
obj = super(Datab, subtype).__new__(subtype, stats_data, my_spec, index=name,
**datab_args)
obj.default_output_fields = default_fields
for index in none_indices:
obj[name][index] = labels[index]
return obj
def output(self, include=None, exclude=None, all=False, **kwargs):
"""
Print statistics-records nicely. If fields=[field1, field2, ...] is not
specified, use Datab.default_output_fields.
all:
print all statistics fields.
include:
    include the given statistic field(s) in addition to those printed by default.
exclude:
    exclude the given statistic field(s) from the output.
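Illustrative sketch (hypothetical object and field names):
    stats_datab.output(include='variance', exclude=['median', 'mad'])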
"""
if np.isscalar(exclude): exclude = [exclude]
if np.isscalar(include): include = [include]
if 'fields' in kwargs and kwargs['fields'] is not None:
super(Datab, self).output(**kwargs)
return
if self.identifier is not None and \
(exclude is None or self.identifier not in exclude):
kwargs['fields'] = [self.identifier]
else: kwargs['fields'] = []
if all: fields = [s[0] for s in self.spec]
else:
fields = self.default_output_fields
if include is not None:
spec_index = dict([(f[0], i) for i, f in enumerate(self.spec)])
field_indices = [spec_index[f] for f in fields]
insert_indices = np.searchsorted(field_indices,
[spec_index[f] for f in include])
fields = np.array(fields, dtype='S'+str(max([len(s[0])
for s in self.spec])))
fields = [f.decode() for f in
np.insert(fields, insert_indices, include)]
for field in fields:
if exclude is None or field not in exclude:
kwargs['fields'].append(field)
return super(Datab, self).output(**kwargs)
def summary(*args, **kwargs):
return Full.summary(*args, **kwargs)
def loop_summary(*args, **kwargs):
return Full.loop_summary(*args, **kwargs)
def col_summary(*args, **kwargs):
return Full.col_summary(*args, **kwargs)
def stats(*args, **kwargs):
return Full.stats(*args, **kwargs)
def bucketer(*data_field_splits, **kwargs):
"""
Helper function for constructing bucket options. Pass output of this function
to stats.summary(), etc., like this --
stats.summary(data['1_252'], data['weight'],
**stats.bucketer([pred1, 'var1', (-.1, 0, .1)], [pred2, 'var2', 0]))
*data_field_splits:
List of triples, (data, field_name, [split_val1, ...]).
If the third element here is a scalar, it gets cast as a list with one element.
If the third element here is None, bucketing is done evaluating data as True/False.
If the first element here is a tuple, it is deemed to be a set of True/False arrays,
and the third element is assumed to be a list of labels (if None, label with integers).
**kwargs:
Can handle the following options: label_all, label_other, formats.
"""
def _recurse_buckets(overlays_labels, overlays, labels):
if not overlays_labels: return overlays, labels
new_overs, new_labels = [], []
for layer_over, layer_label in zip(*overlays_labels.pop(-1)):
for existing_over, existing_label in zip(overlays, labels):
new_overs.append(existing_over & layer_over)
new_labels.append(layer_label + '|' + existing_label)
return _recurse_buckets(overlays_labels, new_overs, new_labels)
label_all = kwargs.get('label_all', 'All')
label_other = kwargs.get('label_other', 'Other')
formats = kwargs['formats'] if 'formats' in kwargs else '%6.1f'
fmt = formats + '_%-' + formats[1:]
str_fmt = '%' + str(strings.fmt_length(formats)) + 's ' + \
strings.fmt(' ', formats)
name_fmt = '%-' + str(strings.fmt_length(formats) * 2 + 1) + 's'
overlays_labels = []
name = ''
for d_f_s in data_field_splits:
data, field, splits = d_f_s
if np.isscalar(splits): splits = [splits]
if name: name += '|'
name += name_fmt % field
overlays = [np.ones(np.shape(data[0] if type(data) == tuple else data),
dtype=bool)] if label_all else []
labels = [str_fmt % label_all] if label_all else []
if type(data) == tuple:
# multiple boolean bucketing
for i in range(len(data)):
overlays.append(data[i])
labels.append(name_fmt % (i if splits is None else splits[i]))
overlays_labels.append((overlays, labels))
continue
if splits is None:
# boolean bucketing
overlays.append(data)
labels.append(str_fmt % 'True')
overlays.append(~data)
labels.append(str_fmt % 'False')
overlays_labels.append((overlays, labels))
continue
other_overlay = np.ones(np.shape(data), dtype=bool) if label_other else None
for count, value in enumerate(splits):
if count == 0:
overlays.append(data < value)
if label_other: other_overlay &= ~overlays[-1]
labels.append(fmt % (-np.inf, value))
if count > 0 and len(splits) > 1:
overlays.append((data < value) & (data >= splits[count - 1]))
if label_other: other_overlay &= ~overlays[-1]
labels.append(fmt % (splits[count - 1], value))
if count == len(splits) - 1:
overlays.append(data >= value)
if label_other: other_overlay &= ~overlays[-1]
labels.append(fmt % (value, np.inf))
if label_other:
overlays += [other_overlay]
labels += [str_fmt % label_other]
overlays_labels.append((overlays, labels))
overlays, labels = _recurse_buckets(overlays_labels, *overlays_labels.pop(-1))
labels = [l.replace('-inf_', ' _') for l in labels]
labels = [l.replace('_inf', '_ ') for l in labels]
return {'buckets': overlays, 'labels': labels,
'name': name, 'label_all': None, 'label_other': None}
| Fenugreek/tamarind | stats.py | Python | gpl-3.0 | 49648 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-15 09:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('location', '0002_auto_20170312_1343'),
]
operations = [
migrations.AlterModelOptions(
name='userlocation',
options={'get_latest_by': 'created', 'ordering': ('-created',)},
),
]
| vest-thermostat/server | vest/location/migrations/0003_auto_20170315_0950.py | Python | gpl-3.0 | 450 |
# @String(visibility="MESSAGE",value="<html><div WIDTH=600>This script tags particles according to skeleton features: It detects maxima on a masked image and clusters detected maxima using features of the mask-derived skeleton. A maximum is considered to be associated with a skeleton feature (junction, tip, etc.) if the distance between its centroid and the feature is less than or equal to a cutoff (\"snap to\") distance.") MSG
# @ImagePlus(label="Particles image") impPart
# @ImagePlus(label="Skeletonizable mask", description="Must be a binary image (background = 0). Used to confine maxima detection and generate skeleton") impSkel
# @String(label="AutoThreshold for particle detection", choices={"Default", "Huang", "Intermodes", "IsoData", "IJ_IsoData", "Li", "MaxEntropy", "Mean", "MinError", "Minimum", "Moments", "Otsu", "Percentile", "RenyiEntropy", "Shanbhag", "Triangle", "Yen"}) thres_method
# @Double(label="Max. \"snap to\" distance", description="In calibrated units", min=1, max=100, style="scroll bar", value=3) cutoff_dist
# @String(label="Output", choices={"ROIs only", "ROIs and Measurements (IJ1 table)", "ROIs and Measurements (IJ2 table)"}) output
# @UIService uiService
# @ImageJ ij
"""
Classify_Particles_Using_Skeleton.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tags particles according to skeleton features: Detects maxima on a masked
image and clusters detected maxima using features of the skeletonized mask.
A maximum is considered to be associated with a skeleton feature (junction,
tip, etc.) if the distance between its centroid and the feature is less than
or equal to a cutoff ("snap to") distance.
:version: 20190111
:copyright: 2017-2019 TF
:url: https://github.com/tferr/hIPNAT
:license: GPL3, see LICENSE for more details
"""
from ij import IJ
from ij.gui import Overlay, PointRoi
from ij.measure import ResultsTable
from ij.plugin.filter import MaximumFinder
from ipnat.processing import Binary
from sc.fiji.skeletonize3D import Skeletonize3D_
from sc.fiji.analyzeSkeleton import AnalyzeSkeleton_
from org.scijava.table import DefaultGenericTable, GenericColumn
from java.awt import Color
import math, sys
def addToTable(table, column_header, value):
if isinstance(table, ResultsTable):
addToIJ1Table(table, column_header, value)
else:
addToIJ2Table(table, column_header, value)
def addToIJ1Table(table, column_header, value):
if table.getCounter() == 0:
table.incrementCounter()
table.addValue(column_header, value)
def addToIJ2Table(table, column_header, value):
""" Adds the specified value to the specifed column of an IJ table """
col_idx = table.getColumnIndex(column_header)
if col_idx == -1:
column = GenericColumn(column_header)
column.add(value)
table.add(column)
else:
column = table.get(col_idx)
column.add(value)
table.remove(col_idx)
table.add(col_idx, column)
def showTable(table, title):
if isinstance(table, ResultsTable):
table.show(title)
else:
ij.ui().show(title, table)
def cleanse_overlay(overlay):
""" Removes all point ROIs from the specified overlay """
if not overlay:
return Overlay()
for i in reversed(range(overlay.size())):
roi = overlay.get(i)
if isinstance(roi, PointRoi):
overlay.remove(i)
return overlay
def distance(x1, y1, x2, y2):
""" Calculates the distance between 2D points """
return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
def error(msg):
""" Displays an error message """
uiService.showDialog(msg, "Error")
def get_centroids(imp, tolerance):
""" Returns maxima using IJ1 """
mf = MaximumFinder()
maxima = mf.getMaxima(imp.getProcessor(), tolerance, True)
return maxima.xpoints, maxima.ypoints, maxima.npoints
def get_threshold(imp, method):
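""" Returns the lower threshold computed on the image by the given
AutoThreshold method, leaving pixel data untouched (the temporary
threshold is reset before returning) """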
# from ij.process import AutoThresholder
# from ij.process import ImageStatistics
# thresholder = AutoThresholder()
# stats = imp.getProcessor().getStatistics()
# value = thresholder.getThreshold(method, stats.histogram)
arg = "%s dark" % method
IJ.setAutoThreshold(imp, arg)
value = imp.getProcessor().getMinThreshold()
IJ.resetThreshold(imp)
return value
def pixel_size(imp):
""" Returns the smallest pixel length of the specified image """
cal = imp.getCalibration()
return min(cal.pixelWidth, cal.pixelHeight)
def skeleton_properties(imp):
""" Retrieves lists of endpoints, junction points, junction
voxels and total length from a skeletonized image
"""
skel_analyzer = AnalyzeSkeleton_()
skel_analyzer.setup("", imp)
skel_result = skel_analyzer.run()
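# AnalyzeSkeleton returns per-skeleton (per-tree) arrays; the total length
# is the sum over trees of (branch count) * (average branch length).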
avg_lengths = skel_result.getAverageBranchLength()
n_branches = skel_result.getBranches()
lengths = [n*avg for n, avg in zip(n_branches, avg_lengths)]
total_length = sum(lengths)
return (skel_result.getListOfEndPoints(), skel_result.getJunctions(),
skel_result.getListOfJunctionVoxels(), total_length)
def skeletonize(imp):
""" Skeletonizes the specified image in situ """
thin = Skeletonize3D_()
thin.setup("", imp)
thin.run(None)
Binary.removeIsolatedPixels(imp)
def run():
mask_ip = impSkel.getProcessor()
part_ip = impPart.getProcessor()
if not mask_ip.isBinary():
error(impSkel.getTitle() + " is not a binary mask.")
return
# Mask grayscale image and skeletonize mask
try:
mask_pixels = mask_ip.getPixels()
part_pixels = part_ip.getPixels()
for i in xrange(len(part_pixels)):
if mask_pixels[i] == 0:
part_pixels[i] = 0
part_ip.setPixels(part_pixels)
except IndexError:
error("Chosen images are not the same size.")
skeletonize(impSkel)
# Get skeleton features
end_points, junctions, junction_voxels, total_len = skeleton_properties(impSkel)
if not end_points and not junction_voxels:
error(impSkel.getTitle() + " does not seem to be a valid skeleton.")
return
# Retrieve centroids from IJ1
threshold_lower = get_threshold(impPart, thres_method)
cx, cy, n_particles = get_centroids(impPart, threshold_lower)
if None in (cx, cy):
error("Verify parameters: No particles detected.")
return
# Loop through each centroid and categorize its position
# according to its distance to skeleton features
n_bp = n_tip = n_none = n_both = 0
overlay = cleanse_overlay(impPart.getOverlay())
for i in range(n_particles):
j_dist = ep_dist = sys.maxint
# Retrieve the distance between this particle and the closest junction voxel
for jvoxel in junction_voxels:
dist = distance(cx[i], cy[i], jvoxel.x, jvoxel.y)
if (dist <= cutoff_dist and dist < j_dist):
j_dist = dist
# Retrieve the distance between this particle and the closest end-point
for end_point in end_points:
dist = distance(cx[i], cy[i], end_point.x, end_point.y)
if (dist <= cutoff_dist and dist < ep_dist):
ep_dist = dist
roi_id = str(i).zfill(len(str(n_particles)))
roi_name = "Unknown:" + roi_id
roi_color = Color.ORANGE
roi_type = 2 # dot
# Is particle associated with neither junctions nor end-points?
if j_dist > cutoff_dist and ep_dist > cutoff_dist:
roi_name = "Unc:" + roi_id
n_none += 1  # must be counted: n_none is reported in the output table
# Is particle associated with both?
elif abs(j_dist - ep_dist) <= pixel_size(impPart) / 2:
roi_name = "J+T:" + roi_id
roi_color = Color.CYAN
#roi_type = 1 # crosshair
n_both += 1
# Is particle associated with an end-point?
elif ep_dist < j_dist:
roi_name = "Tip:" + roi_id
roi_color = Color.GREEN
#roi_type = 0 # hybrid
n_tip += 1
# Is particle associated with a junction?
elif ep_dist > j_dist:
roi_name = "Junction:" + roi_id
roi_color = Color.MAGENTA
#roi_type = 3 # circle
n_bp += 1
roi = PointRoi(cx[i], cy[i])
roi.setName(roi_name)
roi.setStrokeColor(roi_color)
roi.setPointType(roi_type)
roi.setSize(2) # medium
overlay.add(roi)
# Display result
impSkel.setOverlay(overlay)
impPart.setOverlay(overlay)
# Output some measurements
if "table" in output:
t = ResultsTable.getResultsTable() if "IJ1" in output else DefaultGenericTable()
addToTable(t, "Part. image", "%s (%s)" % (impPart.getTitle(), impPart.getCalibration().getUnits()))
addToTable(t, "Skel. image", "%s (%s)" % (impSkel.getTitle(), impSkel.getCalibration().getUnits()))
addToTable(t, "Junction particles", n_bp)
addToTable(t, "Tip particles", n_tip)
addToTable(t, "J+T particles", n_both)
addToTable(t, "Unc. particles", n_none)
addToTable(t, "Junctions w/ particles", n_bp + n_both)
addToTable(t, "Tips w/ particles", n_tip + n_both)
addToTable(t, "Total skel. lenght", total_len)
addToTable(t, "Total end points", len(end_points))
addToTable(t, "Total junctions", sum(junctions))
addToTable(t, "Unc. particles / Total skel. lenght)", n_none/total_len)
addToTable(t, "Snap-to dist.", str(cutoff_dist) + impPart.getCalibration().getUnits())
addToTable(t, "Threshold", "%d (%s)" % (threshold_lower, thres_method))
showTable(t, "Results")
run()
| tferr/hIPNAT | src/main/resources/scripts/Analyze/Skeleton/Classify_Particles_Using_Skeleton.py | Python | gpl-3.0 | 9651 |