code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M
#/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit ([email protected]) if this license
# is a problem for you.
#############################################################################*/
#!/usr/bin/env python
"""
This module implements several mathematical functions:
    - peak search
    - center of mass search
    - fwhm search
WARNING: arrays are numpy.ndarray objects.
"""
import numpy


def search_peak(xdata, ydata):
    """
    Search a peak and its position in arrays xdata and ydata.
    Return three values:
        - peak position
        - peak value
        - index of the peak position in array xdata
    This result may accelerate the fwhm search.
    """
    ymax = max(ydata)
    idx = __give_index(ymax, ydata)
    return xdata[idx], ymax, idx


def search_com(xdata, ydata):
    """
    Return the center of mass in arrays xdata and ydata
    """
    num = numpy.sum(xdata * ydata)
    denom = numpy.sum(ydata).astype(float)
    try:
        result = num / denom
    except ZeroDivisionError:
        result = 0
    return result


def search_fwhm(xdata, ydata, peak=None, index=None):
    """
    Search a fwhm and its center in arrays xdata and ydata.
    If no fwhm is found, (0, 0) is returned.
    peak and index, which come from a search_peak result, may
    accelerate the calculation.
    """
    if peak is None or index is None:
        x, mypeak, index_peak = search_peak(xdata, ydata)
    else:
        mypeak = peak
        index_peak = index

    hm = mypeak / 2
    idx = index_peak

    try:
        while ydata[idx] >= hm:
            idx = idx - 1
        x0 = xdata[idx]
        x1 = xdata[idx + 1]
        y0 = ydata[idx]
        y1 = ydata[idx + 1]
        lhmx = (hm * (x1 - x0) - (y0 * x1) + (y1 * x0)) / (y1 - y0)
    except ZeroDivisionError:
        lhmx = 0
    except IndexError:
        lhmx = xdata[0]

    idx = index_peak
    try:
        while ydata[idx] >= hm:
            idx = idx + 1
        x0 = xdata[idx]
        x1 = xdata[idx + 1]
        y0 = ydata[idx]
        y1 = ydata[idx + 1]
        uhmx = (hm * (x1 - x0) - (y0 * x1) + (y1 * x0)) / (y1 - y0)
    except ZeroDivisionError:
        uhmx = 0
    except IndexError:
        uhmx = xdata[-1]

    FWHM = uhmx - lhmx
    CFWHM = (uhmx + lhmx) / 2
    return FWHM, CFWHM


def __give_index(elem, array):
    """
    Return the index of elem in array
    """
    mylist = array.tolist()
    return mylist.index(elem)


def test():
    pass


if __name__ == '__main__':
    test()
| tonnrueter/pymca_devel | PyMca/SpecArithmetic.py | Python | gpl-2.0 | 3,550 |
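
A usage sketch for the module above (not part of the original file). `search_fwhm` locates each half-maximum crossing by linear interpolation between the bracketing samples, i.e. it solves `y0 + (x - x0)*(y1 - y0)/(x1 - x0) = hm` for `x`, which rearranges to the `lhmx`/`uhmx` expressions. The synthetic Gaussian and the import name are assumptions for illustration:

# Usage sketch for SpecArithmetic (illustrative; assumes the module is
# importable as SpecArithmetic).
import numpy
import SpecArithmetic

xdata = numpy.linspace(-5.0, 5.0, 201)
sigma = 1.0
ydata = numpy.exp(-xdata ** 2 / (2 * sigma ** 2))

position, value, index = SpecArithmetic.search_peak(xdata, ydata)
com = SpecArithmetic.search_com(xdata, ydata)
fwhm, cfwhm = SpecArithmetic.search_fwhm(xdata, ydata, peak=value, index=index)

# For a Gaussian, FWHM = 2*sqrt(2*ln 2)*sigma ~ 2.3548*sigma, so fwhm should
# come out close to 2.35, and both cfwhm and com close to 0 by symmetry.
print(position, value, fwhm, cfwhm, com)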
from PyQt5 import QtCore, QtGui, QtWidgets
from ui_test3 import Ui_Dialog3


class testDialog3(QtWidgets.QDialog):
    def __init__(self, parent):
        QtWidgets.QDialog.__init__(self, parent)
        self.ui = Ui_Dialog3()
        self.ui.setupUi(self)
| sergelhomme/Network_Analysis | Network_Analysis_2_1/network_analysis/tools/testdialog3.py | Python | gpl-2.0 | 245 |
#!/usr/bin/python
import numpy as np
from time import time
from ..libtest import gen_array, average
from contextlib import contextmanager
import pytest


def test_wrong_param_count():
    with pytest.raises(TypeError):
        average()


def test_small_average():
    array = gen_array(5)
    for i in range(5):
        array[i] = i
    assert average(array, 5) == 2


N = 1000000


@contextmanager
def time_op(name):
    start = time()
    yield
    stop = time()
    print "%-15s: %7.4fs" % (name, stop - start)


with time_op("Gen array"):
    ptr = gen_array(N)

with time_op("Py table"):
    py_table = [ptr[i] for i in xrange(N)]

with time_op("C average"):
    cavg = average(ptr, N)
print cavg

with time_op("Py average"):
    total = sum(py_table)
    total /= N
print total

arr = np.array(py_table)
with time_op("Numpy average"):
    avg = int(np.mean(arr))
print avg
| jepio/JKalFilter | libtest/test/test_libtest.py | Python | gpl-2.0 | 889 |
import os
import zipfile

from translate.misc import wStringIO

from pootle.tests import PootleTestCase, formset_dict
from pootle_project.models import Project
from pootle_language.models import Language
from pootle_store.models import Store


def unit_dict(pootle_path):
    """prepare a translation submission dictionary with values from
    the first unit in the store corresponding to pootle_path"""
    store = Store.objects.get(pootle_path=pootle_path)
    unit = store.units[0]
    result = {'id': unit.id,
              'index': unit.index,
              'pootle_path': pootle_path,
              }
    for i, source in enumerate(unit.source.strings):
        result["source_f_%d" % i] = source
    return result


class AnonTests(PootleTestCase):
    def test_admin_not_logged(self):
        """checks that admin pages are not accessible without login"""
        response = self.client.get("/admin/")
        self.assertContains(response, '', status_code=403)


class AdminTests(PootleTestCase):
    def setUp(self):
        super(AdminTests, self).setUp()
        self.client.login(username='admin', password='admin')

    def test_admin_rights(self):
        """checks that admin user can access admin pages"""
        response = self.client.get('/')
        self.assertContains(response, "<a href='/admin/'>Admin</a>")
        response = self.client.get('/admin/')
        self.assertContains(response, 'General Settings')

    def test_add_project(self):
        """Checks that we can add a project successfully."""
        response = self.client.get("/admin/projects.html")
        self.assertContains(response, "<a href='/projects/pootle/admin.html'>pootle</a>")
        self.assertContains(response, "<a href='/projects/terminology/admin.html'>terminology</a>")

        add_dict = {
            "code": "testproject",
            "localfiletype": "xlf",
            "fullname": "Test Project",
            "checkstyle": "standard",
            "treestyle": "gnu",
        }
        response = self.client.post("/admin/projects.html", formset_dict([add_dict]))
        self.assertContains(response, "<a href='/projects/testproject/admin.html'>testproject</a>")

        # check for the actual model
        testproject = Project.objects.get(code="testproject")
        self.assertTrue(testproject)
        self.assertEqual(testproject.fullname, add_dict['fullname'])
        self.assertEqual(testproject.checkstyle, add_dict['checkstyle'])
        self.assertEqual(testproject.localfiletype, add_dict['localfiletype'])
        self.assertEqual(testproject.treestyle, add_dict['treestyle'])

    def test_add_project_language(self):
        """Tests that we can add a language to a project, then access
        its page when there are no files."""
        fish = Language(code="fish", fullname="fish")
        fish.save()

        response = self.client.get("/projects/pootle/admin.html")
        self.assertContains(response, "fish")

        project = Project.objects.get(code='pootle')
        add_dict = {
            "language": fish.id,
            "project": project.id,
        }
        response = self.client.post("/projects/pootle/admin.html", formset_dict([add_dict]))
        self.assertContains(response, '/fish/pootle/')

        response = self.client.get("/fish/")
        self.assertContains(response, '<a href="/fish/">fish</a>')
        self.assertContains(response, '<a href="/fish/pootle/">Pootle</a>')
        self.assertContains(response, "1 project, 0% translated")

    def test_upload_new_file(self):
        """Tests that we can upload a new file into a project."""
        pocontent = wStringIO.StringIO('#: test.c\nmsgid "test"\nmsgstr "rest"\n')
        pocontent.name = "test_new_upload.po"

        post_dict = {
            'file': pocontent,
            'overwrite': 'merge',
            'do_upload': 'upload',
        }
        response = self.client.post("/ar/pootle/", post_dict)

        self.assertContains(response, 'href="/ar/pootle/test_new_upload.po')
        store = Store.objects.get(pootle_path="/ar/pootle/test_new_upload.po")
        self.assertTrue(os.path.isfile(store.file.path))
        self.assertEqual(store.file.read(), pocontent.getvalue())

    def test_upload_overwrite(self):
        """Tests that we can overwrite a file in a project."""
        pocontent = wStringIO.StringIO('#: test.c\nmsgid "fish"\nmsgstr ""\n#: test.c\nmsgid "test"\nmsgstr "barf"\n\n')
        pocontent.name = "pootle.po"

        post_dict = {
            'file': pocontent,
            'overwrite': 'overwrite',
            'do_upload': 'upload',
        }
        response = self.client.post("/af/pootle/", post_dict)

        # Now we only test with 'in' since the header is added
        store = Store.objects.get(pootle_path="/af/pootle/pootle.po")
        self.assertEqual(store.file.read(), pocontent.getvalue())

    def test_upload_new_archive(self):
        """Tests that we can upload a new archive of files into a project."""
        po_content_1 = '#: test.c\nmsgid "test"\nmsgstr "rest"\n'
        po_content_2 = '#: frog.c\nmsgid "tadpole"\nmsgstr "fish"\n'

        archivefile = wStringIO.StringIO()
        archivefile.name = "fish.zip"
        archive = zipfile.ZipFile(archivefile, "w", zipfile.ZIP_DEFLATED)
        archive.writestr("test_archive_1.po", po_content_1)
        archive.writestr("test_archive_2.po", po_content_2)
        archive.close()

        archivefile.seek(0)
        post_dict = {
            'file': archivefile,
            'overwrite': 'merge',
            'do_upload': 'upload',
        }
        response = self.client.post("/ar/pootle/", post_dict)

        self.assertContains(response, 'href="/ar/pootle/test_archive_1.po')
        self.assertContains(response, 'href="/ar/pootle/test_archive_2.po')

        store = Store.objects.get(pootle_path="/ar/pootle/test_archive_1.po")
        self.assertTrue(os.path.isfile(store.file.path))
        self.assertEqual(store.file.read(), po_content_1)

        store = Store.objects.get(pootle_path="/ar/pootle/test_archive_2.po")
        self.assertTrue(os.path.isfile(store.file.path))
        self.assertEqual(store.file.read(), po_content_2)

    def test_upload_over_file(self):
        """Tests that we can upload a new version of a file into a project."""
        pocontent = wStringIO.StringIO('''#: fish.c
msgid "fish"
msgstr ""

#: test.c
msgid "test"
msgstr "resto"
''')
        pocontent.name = "pootle.po"
        post_dict = {
            'file': pocontent,
            'overwrite': 'overwrite',
            'do_upload': 'upload',
        }
        response = self.client.post("/af/pootle/", post_dict)

        pocontent = wStringIO.StringIO('#: test.c\nmsgid "test"\nmsgstr "blo3"\n\n#: fish.c\nmsgid "fish"\nmsgstr "stink"\n')
        pocontent.name = "pootle.po"
        post_dict = {
            'file': pocontent,
            'overwrite': 'merge',
            'do_upload': 'upload',
        }
        response = self.client.post("/af/pootle/", post_dict)

        # NOTE: this is what we do currently: any altered strings become suggestions.
        # It may be a good idea to change this
        mergedcontent = '#: fish.c\nmsgid "fish"\nmsgstr "stink"\n'
        suggestedcontent = '#: test.c\nmsgid ""\n"_: suggested by admin [1963585124]\\n"\n"test"\nmsgstr "blo3"\n'
        store = Store.objects.get(pootle_path="/af/pootle/pootle.po")
        self.assertTrue(store.file.read().find(mergedcontent) >= 0)
        suggestions = [str(sug) for sug in store.findunit('test').get_suggestions()]
        self.assertTrue("blo3" in suggestions)

    def test_upload_new_xliff_file(self):
        """Tests that we can upload a new XLIFF file into a project."""
        xliffcontent = wStringIO.StringIO('''<?xml version='1.0' encoding='utf-8'?>
<xliff xmlns="urn:oasis:names:tc:xliff:document:1.1" version="1.1">
  <file original="" source-language="en-US" datatype="po">
    <body>
      <trans-unit id="1" xml:space="preserve">
        <source>test</source>
        <target state="needs-review-translation">rest</target>
        <context-group name="po-reference" purpose="location">
          <context context-type="sourcefile">test.c</context>
        </context-group>
      </trans-unit>
    </body>
  </file>
</xliff>
''')
        xliffcontent.name = 'test_new_xliff_upload.xlf'

        post_dict = {
            'file': xliffcontent,
            'overwrite': 'overwrite',
            'do_upload': 'upload',
        }
        response = self.client.post("/ar/pootle/", post_dict)

        self.assertContains(response, ' href="/ar/pootle/test_new_xliff_upload.po')
        #FIXME: test conversion?

    def test_upload_xliff_over_file(self):
        """Tests that we can upload a new version of a XLIFF file into a project."""
        pocontent = wStringIO.StringIO('#: test.c\nmsgid "test"\nmsgstr "rest"\n\n#: frog.c\nmsgid "tadpole"\nmsgstr "fish"\n')
        pocontent.name = "test_upload_xliff.po"
        post_dict = {
            'file': pocontent,
            'overwrite': 'overwrite',
            'do_upload': 'upload',
        }
        response = self.client.post("/ar/pootle/", post_dict)

        xlfcontent = wStringIO.StringIO('''<?xml version="1.0" encoding="utf-8"?>
<xliff version="1.1" xmlns="urn:oasis:names:tc:xliff:document:1.1">
  <file datatype="po" original="test_upload_xliff.po" source-language="en-US">
    <body>
      <trans-unit id="test" xml:space="preserve" approved="yes">
        <source>test</source>
        <target state="translated">rested</target>
        <context-group name="po-reference" purpose="location">
          <context context-type="sourcefile">test.c</context>
        </context-group>
      </trans-unit>
      <trans-unit id="slink" xml:space="preserve" approved="yes">
        <source>slink</source>
        <target state="translated">stink</target>
        <context-group name="po-reference" purpose="location">
          <context context-type="sourcefile">toad.c</context>
        </context-group>
      </trans-unit>
    </body>
  </file>
</xliff>''')
        xlfcontent.name = "test_upload_xliff.xlf"

        post_dict = {
            'file': xlfcontent,
            'overwrite': 'merge',
            'do_upload': 'upload',
        }
        response = self.client.post("/ar/pootle/", post_dict)

        # NOTE: this is what we do currently: any altered strings become suggestions.
        # It may be a good idea to change this
        mergedcontent = '#: test.c\nmsgid "test"\nmsgstr "rest"\n\n#: frog.c\nmsgid "tadpole"\nmsgstr "fish"\n'
        suggestedcontent = '#: test.c\nmsgid ""\n"_: suggested by admin [595179475]\\n"\n"test"\nmsgstr "rested"\n'
        store = Store.objects.get(pootle_path="/ar/pootle/test_upload_xliff.po")
        self.assertTrue(os.path.isfile(store.file.path))
        self.assertTrue(store.file.read().find(mergedcontent) >= 0)
        suggestions = [str(sug) for sug in store.findunit('test').get_suggestions()]
        self.assertTrue('rested' in suggestions)

    def test_submit_translation(self):
        """Tests that we can translate units."""
        pootle_path = "/af/pootle/pootle.po"
        submit_dict = {
            'target_f_0': 'submitted translation',
            'submit': 'Submit',
        }
        submit_dict.update(unit_dict(pootle_path))
        response = self.client.post(pootle_path + "/translate", submit_dict)

        self.assertContains(response, 'submitted translation')

        response = self.client.get(pootle_path + "/download")
        store = Store.objects.get(pootle_path=pootle_path)
        self.assertTrue(store.file.read().find('submitted translation') >= 0)

    def test_submit_plural_translation(self):
        """Tests that we can submit a translation with plurals."""
        pocontent = wStringIO.StringIO('msgid "singular"\nmsgid_plural "plural"\nmsgstr[0] ""\nmsgstr[1] ""\n\nmsgid "no fish"\nmsgstr ""\n')
        pocontent.name = 'test_plural_submit.po'

        post_dict = {
            'file': pocontent,
            'overwrite': 'overwrite',
            'do_upload': 'upload',
        }
        response = self.client.post("/ar/pootle/", post_dict)

        pootle_path = '/ar/pootle/test_plural_submit.po'
        submit_dict = {
            'target_f_0': 'a fish',
            'target_f_1': 'some fish',
            'target_f_2': 'lots of fish',
            'submit': 'Submit',
        }
        submit_dict.update(unit_dict(pootle_path))
        response = self.client.post(pootle_path + "/translate", submit_dict)

        self.assertContains(response, 'a fish')
        self.assertContains(response, 'some fish')
        self.assertContains(response, 'lots of fish')

    def test_submit_plural_to_singular_lang(self):
        """Tests that we can submit a translation with plurals to a language without plurals."""
        pocontent = wStringIO.StringIO('msgid "singular"\nmsgid_plural "plural"\nmsgstr[0] ""\nmsgstr[1] ""\n\nmsgid "no fish"\nmsgstr ""\n')
        pocontent.name = 'test_plural_submit.po'

        post_dict = {
            'file': pocontent,
            'overwrite': 'overwrite',
            'do_upload': 'upload',
        }
        response = self.client.post("/ja/pootle/", post_dict)

        pootle_path = "/ja/pootle/test_plural_submit.po"
        submit_dict = {
            'target_f_0': 'just fish',
            'submit': 'Submit',
        }
        submit_dict.update(unit_dict(pootle_path))
        response = self.client.post(pootle_path + "/translate", submit_dict)

        self.assertContains(response, 'just fish')

        expectedcontent = 'msgid "singular"\nmsgid_plural "plural"\nmsgstr[0] "just fish"\n'
        response = self.client.get(pootle_path + '/download')
        store = Store.objects.get(pootle_path=pootle_path)
        self.assertTrue(store.file.read().find(expectedcontent) >= 0)

    def test_submit_fuzzy(self):
        """Tests that we can mark a unit as fuzzy."""
        # Fetch the page and check that the fuzzy checkbox is NOT checked.
        pootle_path = '/af/pootle/pootle.po'
        unit_d = unit_dict(pootle_path)
        response = self.client.get(pootle_path + '/translate')
        self.assertContains(response, '<input id="id_state" accesskey="f" type="checkbox" class="fuzzycheck" name="state" />')

        submit_dict = {
            'target_f_0': 'fuzzy translation',
            'state': 'on',
            'submit': 'Submit',
        }
        submit_dict.update(unit_d)
        response = self.client.post(pootle_path + "/translate", submit_dict)

        # Fetch the page again and check that the fuzzy checkbox IS checked.
        response = self.client.get(pootle_path + "/translate")
        self.assertContains(response, '<input checked="checked" name="state" accesskey="f"')

        store = Store.objects.get(pootle_path=pootle_path)
        self.assertTrue(store.units[0].isfuzzy())

        # Submit the translation again, without the fuzzy checkbox checked
        submit_dict = {
            'target_f_0': 'fuzzy translation',
            'state': '',
            'submit': 'Submit',
        }
        submit_dict.update(unit_d)
        response = self.client.post(pootle_path + "/translate", submit_dict)

        # Fetch the page once more and check that the fuzzy checkbox is NOT checked.
        response = self.client.get(pootle_path + "/translate")
        self.assertContains(response, '<input name="state" accesskey="f" value="200" class="fuzzycheck" type="checkbox" id="id_state" />')
        self.assertFalse(store.units[0].isfuzzy())

    def test_submit_translator_comments(self):
        """Tests that we can edit translator comments."""
        pootle_path = '/af/pootle/pootle.po'
        submit_dict = {
            'target_f_0': 'fish',
            'translator_comment': 'goodbye\nand thanks for all the fish',
            'submit': 'Submit',
        }
        submit_dict.update(unit_dict(pootle_path))
        response = self.client.post(pootle_path + "/translate", submit_dict)

        store = Store.objects.get(pootle_path=pootle_path)
        self.assertEqual(store.units[0].getnotes(), 'goodbye\nand thanks for all the fish')


class NonprivTests(PootleTestCase):
    def setUp(self):
        super(NonprivTests, self).setUp()
        self.client.login(username='nonpriv', password='nonpriv')

    def test_non_admin_rights(self):
        """checks that non privileged users cannot access admin pages"""
        response = self.client.get('/admin/')
        self.assertContains(response, '', status_code=403)

    def test_upload_suggestions(self):
        """Tests that we can upload when we only have suggest rights."""
        pocontent = wStringIO.StringIO('#: test.c\nmsgid "test"\nmsgstr "samaka"\n')
        pocontent.name = "pootle.po"

        post_dict = {
            'file': pocontent,
            'overwrite': 'merge',
            'do_upload': 'upload',
        }
        response = self.client.post("/af/pootle/", post_dict)

        # Check that the original file didn't take the new suggestion.
        # We test with 'in' since the header is added
        store = Store.objects.get(pootle_path="/af/pootle/pootle.po")
        self.assertFalse('msgstr "samaka"' in store.file.read())
        suggestions = [str(sug) for sug in store.findunit('test').get_suggestions()]
        self.assertTrue('samaka' in suggestions)
| lehmannro/pootle | local_apps/pootle_app/tests.py | Python | gpl-2.0 | 17,651 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""BibRank database models."""

# General imports.
from flask import g

from invenio.ext.sqlalchemy import db

# Create your models here.
from invenio.modules.accounts.models import User
from invenio.modules.editor.models import Bibdoc
from invenio.modules.records.models import Record as Bibrec
from invenio.modules.search.models import Collection


class RnkMETHOD(db.Model):

    """Represent a RnkMETHOD record."""

    __tablename__ = 'rnkMETHOD'

    id = db.Column(db.MediumInteger(9, unsigned=True), primary_key=True,
                   nullable=False)
    name = db.Column(db.String(20), unique=True, nullable=False,
                     server_default='')
    last_updated = db.Column(db.DateTime, nullable=False,
                             server_default='1900-01-01 00:00:00')

    def get_name_ln(self, ln=None):
        """Return localized method name."""
        try:
            if ln is None:
                ln = g.ln
            return self.name.filter_by(ln=g.ln, type='ln').one().value
        except:
            return self.name


class RnkMETHODDATA(db.Model):

    """Represent a RnkMETHODDATA record."""

    __tablename__ = 'rnkMETHODDATA'

    id_rnkMETHOD = db.Column(db.MediumInteger(9, unsigned=True),
                             db.ForeignKey(RnkMETHOD.id), primary_key=True)
    relevance_data = db.Column(db.iLargeBinary, nullable=True)


class RnkMETHODNAME(db.Model):

    """Represent a RnkMETHODNAME record."""

    __tablename__ = 'rnkMETHODNAME'

    id_rnkMETHOD = db.Column(db.MediumInteger(9, unsigned=True),
                             db.ForeignKey(RnkMETHOD.id), primary_key=True)
    ln = db.Column(db.Char(5), primary_key=True, server_default='')
    type = db.Column(db.Char(3), primary_key=True, server_default='sn')
    value = db.Column(db.String(255), nullable=False)

    method = db.relationship(RnkMETHOD, backref=db.backref('names',
                                                           lazy='dynamic'))


class RnkCITATIONDICT(db.Model):

    """Represent a RnkCITATIONDICT record."""

    __tablename__ = 'rnkCITATIONDICT'

    citee = db.Column(db.Integer(10, unsigned=True), primary_key=True)
    citer = db.Column(db.Integer(10, unsigned=True), primary_key=True)

    __table_args__ = (db.Index('reverse', citer, citee),
                      db.Model.__table_args__)


class RnkCITATIONDATAERR(db.Model):

    """Represent a RnkCITATIONDATAERR record."""

    __tablename__ = 'rnkCITATIONDATAERR'

    type = db.Column(db.Enum('multiple-matches', 'not-well-formed'),
                     primary_key=True)
    citinfo = db.Column(db.String(255), primary_key=True, server_default='')


class RnkCITATIONLOG(db.Model):

    """Represents a RnkCITATIONLOG record."""

    __tablename__ = 'rnkCITATIONLOG'

    id = db.Column(db.Integer(11, unsigned=True), primary_key=True,
                   autoincrement=True, nullable=False)
    citee = db.Column(db.Integer(10, unsigned=True), nullable=False)
    citer = db.Column(db.Integer(10, unsigned=True), nullable=False)
    type = db.Column(db.Enum('added', 'removed'), nullable=True)
    action_date = db.Column(db.DateTime, nullable=False)

    __table_args__ = (db.Index('citee', citee), db.Index('citer', citer),
                      db.Model.__table_args__)


class RnkCITATIONDATAEXT(db.Model):

    """Represent a RnkCITATIONDATAEXT record."""

    __tablename__ = 'rnkCITATIONDATAEXT'

    id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
                          db.ForeignKey(Bibrec.id), autoincrement=False,
                          primary_key=True, nullable=False, server_default='0')
    extcitepubinfo = db.Column(db.String(255), primary_key=True,
                               nullable=False, index=True)


class RnkAUTHORDATA(db.Model):

    """Represent a RnkAUTHORDATA record."""

    __tablename__ = 'rnkAUTHORDATA'

    aterm = db.Column(db.String(50), primary_key=True, nullable=True)
    hitlist = db.Column(db.iLargeBinary, nullable=True)


class RnkDOWNLOADS(db.Model):

    """Represent a RnkDOWNLOADS record."""

    __tablename__ = 'rnkDOWNLOADS'

    id = db.Column(db.Integer, primary_key=True, nullable=False,
                   autoincrement=True)
    id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
                          db.ForeignKey(Bibrec.id), nullable=True)
    download_time = db.Column(db.DateTime, nullable=True,
                              server_default='1900-01-01 00:00:00')
    client_host = db.Column(db.Integer(10, unsigned=True), nullable=True)
    id_user = db.Column(db.Integer(15, unsigned=True), db.ForeignKey(User.id),
                        nullable=True)
    id_bibdoc = db.Column(db.MediumInteger(9, unsigned=True),
                          db.ForeignKey(Bibdoc.id), nullable=True)
    file_version = db.Column(db.SmallInteger(2, unsigned=True), nullable=True)
    file_format = db.Column(db.String(50), nullable=True)

    bibrec = db.relationship(Bibrec, backref='downloads')
    bibdoc = db.relationship(Bibdoc, backref='downloads')
    user = db.relationship(User, backref='downloads')


class RnkPAGEVIEWS(db.Model):

    """Represent a RnkPAGEVIEWS record."""

    __tablename__ = 'rnkPAGEVIEWS'

    id = db.Column(db.MediumInteger, primary_key=True, nullable=False,
                   autoincrement=True)
    id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
                          db.ForeignKey(Bibrec.id), nullable=True,
                          primary_key=True)
    id_user = db.Column(db.Integer(15, unsigned=True), db.ForeignKey(User.id),
                        server_default='0', primary_key=True)
    client_host = db.Column(db.Integer(10, unsigned=True), nullable=True)
    view_time = db.Column(db.DateTime, primary_key=True,
                          server_default='1900-01-01 00:00:00')

    bibrec = db.relationship(Bibrec, backref='pageviews')
    user = db.relationship(User, backref='pageviews')


class RnkWORD01F(db.Model):

    """Represent a RnkWORD01F record."""

    __tablename__ = 'rnkWORD01F'

    id = db.Column(db.MediumInteger(9, unsigned=True), nullable=False,
                   primary_key=True, autoincrement=True)
    term = db.Column(db.String(50), nullable=True, unique=True)
    hitlist = db.Column(db.iLargeBinary, nullable=True)


class RnkWORD01R(db.Model):

    """Represent a RnkWORD01R record."""

    __tablename__ = 'rnkWORD01R'

    id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
                          db.ForeignKey(Bibrec.id), nullable=False,
                          primary_key=True)
    termlist = db.Column(db.LargeBinary, nullable=True)
    type = db.Column(db.Enum('CURRENT', 'FUTURE', 'TEMPORARY'),
                     nullable=False, server_default='CURRENT',
                     primary_key=True)

    bibrec = db.relationship(Bibrec, backref='word01rs')


class RnkEXTENDEDAUTHORS(db.Model):

    """Represent a RnkEXTENDEDAUTHORS record."""

    __tablename__ = 'rnkEXTENDEDAUTHORS'

    id = db.Column(db.Integer(10, unsigned=True), primary_key=True,
                   nullable=False, autoincrement=False)
    authorid = db.Column(db.BigInteger(10), primary_key=True, nullable=False,
                         autoincrement=False)


class RnkRECORDSCACHE(db.Model):

    """Represent a RnkRECORDSCACHE record."""

    __tablename__ = 'rnkRECORDSCACHE'

    id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
                          db.ForeignKey(Bibrec.id), nullable=True,
                          primary_key=True)
    authorid = db.Column(db.BigInteger(10), primary_key=True, nullable=False)


class RnkSELFCITES(db.Model):

    """Represent a RnkSELFCITES record."""

    __tablename__ = 'rnkSELFCITES'

    id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
                          db.ForeignKey(Bibrec.id), nullable=True,
                          primary_key=True)
    count = db.Column(db.Integer(10, unsigned=True), nullable=False)
    references = db.Column(db.Text, nullable=False)
    last_updated = db.Column(db.DateTime, nullable=False)


class RnkSELFCITEDICT(db.Model):

    """Represents a RnkSELFCITEDICT record."""

    __tablename__ = 'rnkSELFCITEDICT'

    citee = db.Column(db.Integer(10, unsigned=True), nullable=False,
                      primary_key=True, autoincrement=False)
    citer = db.Column(db.Integer(10, unsigned=True), nullable=False,
                      primary_key=True, autoincrement=False)
    last_updated = db.Column(db.DateTime, nullable=False)

    __table_args__ = (db.Index('reverse', citer, citee),
                      db.Model.__table_args__)


class CollectionRnkMETHOD(db.Model):

    """Represent a CollectionRnkMETHOD record."""

    __tablename__ = 'collection_rnkMETHOD'

    id_collection = db.Column(db.MediumInteger(9, unsigned=True),
                              db.ForeignKey(Collection.id), primary_key=True,
                              nullable=False)
    id_rnkMETHOD = db.Column(db.MediumInteger(9, unsigned=True),
                             db.ForeignKey(RnkMETHOD.id), primary_key=True,
                             nullable=False)
    score = db.Column(db.TinyInteger(4, unsigned=True), nullable=False,
                      server_default='0')

    collection = db.relationship(Collection, backref='rnkMETHODs')
    rnkMETHOD = db.relationship(RnkMETHOD, backref='collections')


__all__ = ('RnkMETHOD',
           'RnkMETHODDATA',
           'RnkMETHODNAME',
           'RnkCITATIONDICT',
           'RnkCITATIONDATAERR',
           'RnkCITATIONDATAEXT',
           'RnkCITATIONLOG',
           'RnkAUTHORDATA',
           'RnkDOWNLOADS',
           'RnkPAGEVIEWS',
           'RnkWORD01F',
           'RnkWORD01R',
           'RnkEXTENDEDAUTHORS',
           'RnkRECORDSCACHE',
           'RnkSELFCITES',
           'RnkSELFCITEDICT',
           'CollectionRnkMETHOD',
           )
| MSusik/invenio | invenio/modules/ranker/models.py | Python | gpl-2.0 | 10,662 |
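
A brief usage sketch for the models above (not part of the original file). It assumes a configured Invenio application context and relies on the standard Flask-SQLAlchemy `Model.query` interface that `db.Model` provides, plus the `names` dynamic backref declared on `RnkMETHODNAME.method`:

# Illustrative query sketch (assumes an active Invenio/Flask app context).
from invenio.modules.ranker.models import RnkMETHOD

def get_method_with_localized_name(method_name, ln='en'):
    """Fetch a ranking method and a localized display name for it, if any."""
    method = RnkMETHOD.query.filter_by(name=method_name).first()
    if method is None:
        return None, None
    # 'names' is the lazy='dynamic' backref from RnkMETHODNAME, so it can be
    # filtered further before hitting the database.
    localized = method.names.filter_by(ln=ln, type='ln').first()
    return method, localized.value if localized else method.name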
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.config import cConfig
from ctypes import *
import time
import random


class cStatistic:

    STATISTIC_URL = 'http://www.google-analytics.com/__utm.gif'
    STATISTIC_ID = 'UA-53463976-1'

    def callStartPlugin(self, sPluginName):
        oConfig = cConfig()
        bStatistic = oConfig.getSetting('statistic')

        #oRequestHandler2 = cRequestHandler("http://exodieno.free.fr/index.html")
        #oRequestHandler2.addHeaderEntry('Referer', 'http://www.google.com/')
        #sHtmlContent = oRequestHandler2.request()
        #aHeader = oRequestHandler2.getResponseHeader()
        #sReponseCookie = aHeader.getheader("Set-Cookie")

        if (bStatistic == 'False'):
            return

        try:
            oRequestHandler = cRequestHandler(self.STATISTIC_URL)
            oRequestHandler.addParameters('utmac', self.STATISTIC_ID)

            rndX = random.randint(1, 99999999 - 10000000) + 10000000
            rndY = random.randint(1, 999999999 - 100001000) + 100000000
            ts1 = float(time.time())
            ts2 = float(time.time())
            ts3 = float(time.time())
            ts4 = float(time.time())
            ts5 = float(time.time())

            sUtmccValue = '__utma=' + str(rndY) + '.' + str(rndX) + '.' + str(ts1) + '.' + str(ts2) + '.' + str(ts3) + '; '
            sUtmccValue = sUtmccValue + '+__utmz=' + str(rndY) + '.' + str(ts4) + '.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); '
            oRequestHandler.addParameters('utmcc', sUtmccValue)
            #oRequestHandler.addParameters('aip', '1')  # anonymizeIp
            oRequestHandler.addParameters('utmcs', 'UTF-8')
            oRequestHandler.addParameters('utmdt', 'Plugin Activity')
            oRequestHandler.addParameters('utmfl', '10.1 r102')
            #oRequestHandler.addParameters('utmhid', '1549554730')
            oRequestHandler.addParameters('utmhn', 'code.google.com')
            oRequestHandler.addParameters('utmje', '0')
            oRequestHandler.addParameters('utmn', str(random.randint(0, 0x7fffffff)))
            oRequestHandler.addParameters('utmp', str(sPluginName))
            oRequestHandler.addParameters('utmr', '-')
            oRequestHandler.addParameters('utmsc', '24-bit')
            oRequestHandler.addParameters('utmsr', '1920x1080')
            oRequestHandler.addParameters('utmu', 'qAAg')
            #oRequestHandler.addParameters('utmul', 'de')
            oRequestHandler.addParameters('utmwv', '4.8.6')

            oRequestHandler.request()
        except Exception, e:
            return
| Brahimbaz/venom-xbmc-addons-beta | plugin.video.vstream/resources/lib/statistic.py | Python | gpl-2.0 | 2,754 |
from sys import argv
import smbus
import dweepy
import time
import math
import RPi.GPIO as GPIO
from LSM9DS0 import *
import datetime

bus = smbus.SMBus(1)

RAD_TO_DEG = 57.29578
M_PI = 3.14159265358979323846
G_GAIN = 0.070  # [deg/s/LSB]  If you change the dps for gyro, you need to update this value accordingly
LP = 0.041      # Loop period = 41ms. This needs to match the time it takes each loop to run
AA = 0.80       # Complementary filter constant

script, filename = argv

# IO Setup
r2p1 = 4
r2p2 = 25
r1p1 = 23
r1p2 = 24


def relaysetup():
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(r1p1, GPIO.OUT)
    GPIO.setup(r1p2, GPIO.OUT)
    GPIO.setup(r2p1, GPIO.OUT)
    GPIO.setup(r2p2, GPIO.OUT)


def relay1(status):
    GPIO.output(r1p1, status)
    GPIO.output(r1p2, status)


def relay2(status):
    GPIO.output(r2p1, status)
    GPIO.output(r2p2, status)

tspeedx = 0.0
tspeedy = 0.0
tspeedz = 0.0
g = None


def get_dweet(add='hrishiopo'):
    global tspeedx, tspeedy, tspeedz, g
    try:
        dweet = dweepy.get_latest_dweet_for(add)[0]
        ax = float(dweet['content']['ax'])
        ay = float(dweet['content']['ay'])
        az = float(dweet['content']['az'])
        if(not g):
            g = (float(dweet['content']['xg']), float(dweet['content']['yg']), float(dweet['content']['zg']))
        ax -= g[0]
        ay -= g[1]
        az -= g[2]
    except (KeyError, TypeError):
        return


def writeACC(register, value):
    bus.write_byte_data(ACC_ADDRESS, register, value)
    return -1


def writeMAG(register, value):
    bus.write_byte_data(MAG_ADDRESS, register, value)
    return -1


def writeGRY(register, value):
    bus.write_byte_data(GYR_ADDRESS, register, value)
    return -1


def readACCx():
    acc_l = bus.read_byte_data(ACC_ADDRESS, OUT_X_L_A)
    acc_h = bus.read_byte_data(ACC_ADDRESS, OUT_X_H_A)
    acc_combined = (acc_l | acc_h << 8)
    return acc_combined if acc_combined < 32768 else acc_combined - 65536


def readACCy():
    acc_l = bus.read_byte_data(ACC_ADDRESS, OUT_Y_L_A)
    acc_h = bus.read_byte_data(ACC_ADDRESS, OUT_Y_H_A)
    acc_combined = (acc_l | acc_h << 8)
    return acc_combined if acc_combined < 32768 else acc_combined - 65536


def readACCz():
    acc_l = bus.read_byte_data(ACC_ADDRESS, OUT_Z_L_A)
    acc_h = bus.read_byte_data(ACC_ADDRESS, OUT_Z_H_A)
    acc_combined = (acc_l | acc_h << 8)
    return acc_combined if acc_combined < 32768 else acc_combined - 65536


def readMAGx():
    mag_l = bus.read_byte_data(MAG_ADDRESS, OUT_X_L_M)
    mag_h = bus.read_byte_data(MAG_ADDRESS, OUT_X_H_M)
    mag_combined = (mag_l | mag_h << 8)
    return mag_combined if mag_combined < 32768 else mag_combined - 65536


def readMAGy():
    mag_l = bus.read_byte_data(MAG_ADDRESS, OUT_Y_L_M)
    mag_h = bus.read_byte_data(MAG_ADDRESS, OUT_Y_H_M)
    mag_combined = (mag_l | mag_h << 8)
    return mag_combined if mag_combined < 32768 else mag_combined - 65536


def readMAGz():
    mag_l = bus.read_byte_data(MAG_ADDRESS, OUT_Z_L_M)
    mag_h = bus.read_byte_data(MAG_ADDRESS, OUT_Z_H_M)
    mag_combined = (mag_l | mag_h << 8)
    return mag_combined if mag_combined < 32768 else mag_combined - 65536


def readGYRx():
    gyr_l = bus.read_byte_data(GYR_ADDRESS, OUT_X_L_G)
    gyr_h = bus.read_byte_data(GYR_ADDRESS, OUT_X_H_G)
    gyr_combined = (gyr_l | gyr_h << 8)
    return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536


def readGYRy():
    gyr_l = bus.read_byte_data(GYR_ADDRESS, OUT_Y_L_G)
    gyr_h = bus.read_byte_data(GYR_ADDRESS, OUT_Y_H_G)
    gyr_combined = (gyr_l | gyr_h << 8)
    return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536


def readGYRz():
    gyr_l = bus.read_byte_data(GYR_ADDRESS, OUT_Z_L_G)
    gyr_h = bus.read_byte_data(GYR_ADDRESS, OUT_Z_H_G)
    gyr_combined = (gyr_l | gyr_h << 8)
    return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536

# initialise the accelerometer
writeACC(CTRL_REG1_XM, 0b01100111)  # z,y,x axis enabled, continuous update, 100Hz data rate
writeACC(CTRL_REG2_XM, 0b00100000)  # +/- 16G full scale

# initialise the magnetometer
writeMAG(CTRL_REG5_XM, 0b11110000)  # Temp enable, M data rate = 50Hz
writeMAG(CTRL_REG6_XM, 0b01100000)  # +/-12gauss
writeMAG(CTRL_REG7_XM, 0b00000000)  # Continuous-conversion mode

# initialise the gyroscope
writeGRY(CTRL_REG1_G, 0b00001111)  # Normal power mode, all axes enabled
writeGRY(CTRL_REG4_G, 0b00110000)  # Continuous update, 2000 dps full scale

gyroXangle = 0.0
gyroYangle = 0.0
gyroZangle = 0.0
CFangleX = 0.0
CFangleY = 0.0

count = 0
countmax = 500000
countgrav = 50
gx = 0.0
gy = 0.0
gz = 0.0
speedx = 0.0
speedy = 0.0
speedz = 0.0

logfile = open(filename, 'w')
logfile.write("GX,GY,GZ,AX,AY,AZ,SX,SY,SZ,TA,TS\n")
speed = 0

while True:
    a = datetime.datetime.now()
    count += 1
    if(count > countmax):
        count = countgrav

    # Read our accelerometer, gyroscope and magnetometer values
    ACCx = readACCx()
    ACCy = readACCy()
    ACCz = readACCz()
    GYRx = readGYRx()
    GYRy = readGYRy()
    GYRz = readGYRz()
    MAGx = readMAGx()
    MAGy = readMAGy()
    MAGz = readMAGz()

    ## Convert Accelerometer values to degrees
    AccXangle = (math.atan2(ACCy, ACCz) + M_PI) * RAD_TO_DEG
    AccYangle = (math.atan2(ACCz, ACCx) + M_PI) * RAD_TO_DEG

    # Convert Gyro raw to degrees per second
    rate_gyr_x = GYRx * G_GAIN
    rate_gyr_y = GYRy * G_GAIN
    rate_gyr_z = GYRz * G_GAIN

    # Calculate the angles from the gyro. LP = loop period
    gyroXangle += rate_gyr_x * LP
    gyroYangle += rate_gyr_y * LP
    gyroZangle += rate_gyr_z * LP

    # Change the rotation value of the accelerometer to -/+ 180 and move the Y axis '0' point to up.
    # Two different pieces of code are used depending on how your IMU is mounted.
    # If IMU is upside down:
    #
    # if AccXangle > 180:
    #     AccXangle -= 360.0
    # AccYangle -= 90
    # if (AccYangle > 180):
    #     AccYangle -= 360.0

    # If IMU is up the correct way, use these lines
    AccXangle -= 180.0
    if AccYangle > 90:
        AccYangle -= 270.0
    else:
        AccYangle += 90.0

    # Complementary filter used to combine the accelerometer and gyro values.
    CFangleX = AA * (CFangleX + rate_gyr_x * LP) + (1 - AA) * AccXangle
    CFangleY = AA * (CFangleY + rate_gyr_y * LP) + (1 - AA) * AccYangle

    # Calculate heading
    heading = 180 * math.atan2(MAGy, MAGx) / M_PI
    if heading < 0:
        heading += 360

    # Normalize accelerometer raw values.
    accXnorm = ACCx / math.sqrt(ACCx * ACCx + ACCy * ACCy + ACCz * ACCz)
    accYnorm = ACCy / math.sqrt(ACCx * ACCx + ACCy * ACCy + ACCz * ACCz)

    # Calculate pitch and roll
    pitch = math.asin(accXnorm)
    roll = -math.asin(accYnorm / math.cos(pitch))

    # Calculate the new tilt compensated values
    magXcomp = MAGx * math.cos(pitch) + MAGz * math.sin(pitch)
    magYcomp = MAGx * math.sin(roll) * math.sin(pitch) + MAGy * math.cos(roll) - MAGz * math.sin(roll) * math.cos(pitch)

    # Calculate tilt compensated heading
    tiltCompensatedHeading = 180 * math.atan2(magYcomp, magXcomp) / M_PI
    if tiltCompensatedHeading < 0:
        tiltCompensatedHeading += 360

    print ("\033[1;34;40mACCX Angle %5.2f ACCY Angle %5.2f\033[1;31;40m\tGRYX Angle %5.2f GYRY Angle %5.2f GYRZ Angle %5.2f \033[1;35;40m \tCFangleX Angle %5.2f \033[1;36;40m CFangleY Angle %5.2f \33[1;32;40m HEADING %5.2f \33[1;37;40m tiltCompensatedHeading %5.2f\033[0m " % (AccXangle, AccYangle, gyroXangle, gyroYangle, gyroZangle, CFangleX, CFangleY, heading, tiltCompensatedHeading))

    time.sleep(0.01)
    b = datetime.datetime.now()
    c = b - a
    print "Loop Time |", c.microseconds / 1000, "|",

    if(count < countgrav):
        print "Getting gravity..."
        gx = ((gx * count) + ACCx) / (count + 1)
        gy = ((gy * count) + ACCy) / (count + 1)
        gz = ((gz * count) + ACCz) / (count + 1)

    ACCx2 = float(ACCx - gx)
    ACCy2 = float(ACCy - gy)
    ACCz2 = float(ACCz - gz)

    speedx += (ACCx2) * (c.microseconds / 1000000.0)
    speedy += (ACCy2) * (c.microseconds / 1000000.0)
    speedz += (ACCz2) * (c.microseconds / 1000000.0)

    logfile.write("%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n" % (gx, gy, gz, ACCx, ACCy, ACCz, speedx, speedy, speedz, \
                  math.sqrt(speedx * speedx + speedy * speedy + speedz * speedz), math.sqrt(ACCx2 * ACCx2 + ACCy2 * ACCy2 + ACCz2 * ACCz2)))
| hrishioa/Navo | Raspi-Code/Lib/python-LSM9DS0-gryo-accel-compass/berryIMU.py | Python | gpl-2.0 | 8,179 |
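
The main loop above fuses gyro and accelerometer estimates with a first-order complementary filter, `CF = AA*(CF + rate*LP) + (1 - AA)*accAngle`: the gyro path is trusted over short timescales, the accelerometer over long ones. A standalone sketch of just that update, with synthetic inputs (not part of the original file; the constants mirror the file):

# Minimal complementary-filter sketch, isolated from the I2C code above.
AA = 0.80   # filter constant: weight given to the integrated gyro path
LP = 0.041  # loop period in seconds

def complementary_update(cf_angle, gyro_rate_dps, acc_angle_deg):
    """One filter step: integrate the gyro rate, then pull toward the
    accelerometer angle with weight (1 - AA)."""
    return AA * (cf_angle + gyro_rate_dps * LP) + (1 - AA) * acc_angle_deg

cf = 0.0
for gyro_rate, acc_angle in [(10.0, 0.5), (9.5, 0.9), (-2.0, 1.1)]:
    cf = complementary_update(cf, gyro_rate, acc_angle)
    print(cf)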
#!/usr/bin/env python

import ldap
import csv
import sys
import subprocess
import optparse
import datetime
import operator
from datetime import date

# Parameters
energy = 0

# Global values
totalCPU = 0
totalEnergy = 0


def getargs():
    today = date.today()
    usage = "usage: %prog [options]"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-b", "--debug", dest="debug", default=False, action="store_true",
                      help="Shows progress of program.")
    parser.add_option("-n", "--cputime", dest="cputime", default=False, action="store_true",
                      help="Report on used cpu time (hours), " +
                           "the default is to report on used wallclock time (hours).")
    parser.add_option("-m", "--energy", dest="energy", default=False, action="store_true",
                      help="Report on used energy (joules)")
    parser.add_option("-j", "--jobs", dest="jobs", default=False, action="store_true",
                      help="Report on number of jobs run, " +
                           "the default is to report on used wallclock time (hours).")
    parser.add_option("-s", "--startdate", dest="startdate",
                      default=str(datetime.date(today.year - 1, today.month, today.day)),
                      help="Only include accounting records from this date on, " +
                           "format yyyy-m-d.")
    parser.add_option("-e", "--enddate", dest="enddate", default=str(today),
                      help="Only include accounting records up to, " +
                           "and not including, this date, format yyyy-m-d.")
    parser.add_option("-u", "--user", dest="user", default=False, action="store_true",
                      help="Present accounting information for all users by their name.")
    parser.add_option("-d", "--uid", dest="uid", default=False, action="store_true",
                      help="Display user information based on uid, instead of full name.")
    parser.add_option("-r", "--research", dest="research", default=False, action="store_true",
                      help="Present accounting information for this " +
                           "comma separated list of research institutes. " +
                           "Specifying all will result in information " +
                           "aggregated per research institute for all research institutes. " +
                           "Specifying list will result in a list of known groups.")
    parser.add_option("-f", "--faculty", dest="faculty", default=False, action="store_true",
                      help="Present accounting information for this " +
                           "comma separated list of faculties. Specifying all " +
                           "will result in information aggregated per faculty for all faculties. " +
                           "Specifying list will result in a list of known groups.")
    parser.add_option("-o", "--sort", dest="sort", default=False, action="store_true",
                      help="Sort table on user, group, faculty or research " +
                           "institute, instead of on used time.")
    parser.add_option("-x", "--csv", dest="csv", default=False, action="store_true",
                      help="Show results in comma separated value format")
    parser.add_option("-p", "--password", dest="password", default="",
                      help="Supply db password as argument on command-line")
    parser.add_option("-t", "--time", dest="time", default='m',
                      help="Output time unit, default is in minutes.")
    (options, args) = parser.parse_args()

    if options.cputime and options.energy:
        parser.error("Options -n and -m are mutually exclusive.")
    if options.uid and options.user:
        parser.error("Options -u and -d are mutually exclusive.")
    try:
        startdate = options.startdate.split('-')
        enddate = options.enddate.split('-')
        options.startdate = datetime.date(int(startdate[0]), int(startdate[1]), int(startdate[2]))
        options.enddate = datetime.date(int(enddate[0]), int(enddate[1]), int(enddate[2]))
    except:
        parser.error("Wrong date specified for -s or -e, use the format YYYY-MM-DD")
    if len(args) != 0:
        parser.error("Unrecognised arguments supplied")
    if options.startdate > options.enddate:
        parser.error("Start date must be before the end date.")
    return options


def openPersonDB():
    DB = []
    try:
        with open('PersonDB.csv', 'rb') as f:
            csvfile = csv.reader(f, delimiter=';', quoting=csv.QUOTE_NONE)
            DB = [row for row in csvfile]
    except IOError:
        print "File PersonDB.csv does not exist. It is now created."
    return DB


def clusterLDAP(pw):
    l = ldap.initialize("ldap://172.23.47.249")
    try:
        l.protocol_version = ldap.VERSION3
        l.set_option(ldap.OPT_REFERRALS, 0)
        bind = l.simple_bind_s("cn=clusteradminperegrine ,o=asds", pw)
        base = "ou=Peregrine, o=asds"
        criteria = "objectClass=Person"
        attributes = ['uid', 'ou', 'fullName']
        result = l.search_s(base, ldap.SCOPE_SUBTREE, criteria, attributes)
        results = [entry for dn, entry in result if isinstance(entry, dict)]
        return results
    except:
        print "Invalid password"
        sys.exit()
    finally:
        l.unbind()


def updateDB(ldap):
    DB = openPersonDB()
    appendPerson = []
    removePerson = []
    for dict in ldap:
        found = False
        for person in DB:
            # If database is not empty check if uids match with ldap.
            if person[0] == '' and person[1] == '' and person[2] == '':
                DB.remove(person)
                break
            if dict['uid'][0] == person[0]:
                # Checks new full names. What if someone changes his/her name?
                if 'fullName' in dict and (person[2] == '' or person[2] == 'noFullName'):
                    print person[0] + " previously had no full name in LDAP, but now has a full name: " + dict['fullName'][0]
                    person[2] = dict['fullName'][0]
                    continue
                # Also check if ou fields match with ldap (someone could have a new ou field).
                if 'ou' in dict:
                    # Check if the ou field is still current, else remove the entry and put it in the changed list.
                    for row1 in DB:
                        if (row1[0] == dict['uid'][0] and row1[1] not in dict['ou']) and not (row1[1] == '' or row1[1] == 'unknown ou,UKN'):
                            print row1[1]
                            removePerson.append(row1)
                    # Checks for every field if it is in the DB.
                    appendFields = []
                    for i in range(0, len(dict['ou'])):
                        for row2 in DB:
                            # True if the uid and the field are the same.
                            if row2[0] == dict['uid'][0] and dict['ou'][i] == row2[1]:
                                break
                        # If there is no uid with this field then add this field.
                        else:
                            appendFields.append(dict['ou'][i])
                    if appendFields != []:
                        if 'ou' in dict and (person[1] == '' or person[1] == 'unknown ou,UKN'):
                            print person[0] + " has a new ou field in LDAP, but now has a known field: "
                            removePerson.append(person)
                            continue
                        for i in range(0, len(appendFields)):
                            appendPerson = ['', '', '']
                            appendPerson[0] = dict['uid'][0]
                            appendPerson[1] = str(appendFields[i])
                            appendPerson[2] = str(dict['fullName'][0])
                            print person[0] + " has a new field: " + appendFields[i]
                            DB.append(appendPerson)
                        appendFields = []
                break
            else:
                # Happens when the database is empty, all persons from ldap get added.
                pass
            # Used to break through a second for loop.
            if found == True:
                break
        else:
            # If the person in LDAP was not in the DB.
            appendPerson = ['', '', '']
            appendPerson[0] = str(dict['uid'][0])
            if 'ou' in dict:
                for i in range(0, len(dict['ou'])):
                    appendPerson = ['', '', '']
                    appendPerson[0] = str(dict['uid'][0])
                    appendPerson[1] = str(dict['ou'][i])
                    if 'fullName' in dict:
                        appendPerson[2] = str(dict['fullName'][0])
                    if 'fullName' not in dict:
                        appendPerson[2] = ''
                    DB.append(appendPerson)
                    print appendPerson[0] + " with field " + appendPerson[1] + ' added to DB by LDAP.'
            if 'ou' not in dict:
                appendPerson[1] = ''
                if 'fullName' in dict:
                    appendPerson[2] = dict['fullName'][0]
                if 'fullName' not in dict:
                    appendPerson[2] = ''
                DB.append(appendPerson)
                print appendPerson[0] + " with field " + appendPerson[1] + ' added to DB by LDAP.'

    # Create new file.
    with open('PersonDB.csv', 'w+') as f:
        f.write('')
    # Only remove persons from DB if necessary.
    if removePerson != []:
        for row in removePerson:
            if row[1] != '':
                print str(row[0]) + " with field: " + str(row[1]) + " is removed and put in changedDB.csv"
                DB.remove(row)
        date = datetime.date.today()
        with open('removedDB.csv', 'ab') as f:
            for row in removePerson:
                if not (row[1] == '' or row[1] == 'unknown ou,UKN'):
                    f.write(str(row[0]) + ';' + str(row[1]) + ';' + str(row[2]) + ';' + str(date) + '\n')
    # Rewrite the new DB file with all people and their updated fields to PersonDB.csv.
    with open('PersonDB.csv', 'ab') as f:
        for row in DB:
            f.write(str(row[0]) + ';' + str(row[1]) + ';' + str(row[2]) + '\n')


def CPUTime(s, e, timeunit):
    # s = startdate, e = enddate.
    sreport = subprocess.Popen(["sreport", "cluster", "AccountUtilizationByUser", "-t" + timeunit,
                                "start=" + s, "end=" + e, "format=Login,Used,Energy", "-n", "-p"],
                               stdout=subprocess.PIPE)
    (output, err) = sreport.communicate()
    cpu = output.split()
    sReport = []
    for i in cpu:
        sReport.append(i.split('|'))
    return sReport


def updateDBwithSReport(newDB):
    with open('PersonDB.csv', 'w+') as f:
        f.write('')
    for row in newDB:
        with open('PersonDB.csv', 'ab') as f:
            f.write(str(row[0]) + ";" + str(row[1]) + ";" + str(row[2]) + "\n")


def addCPUTimeToDB(sReport):
    DB = openPersonDB()
    global totalCPU
    totalCPU = int(sReport[0][1])
    newPersonList = []
    for srPerson in sReport:
        if srPerson[0] == "":
            continue
        pCount = 0  # Used for dealing with people that occur more than once in the DB.
        for row in DB:
            # If the person in sreport is also in the DB.
            if srPerson[0] == row[0]:
                count = 0
                for doubles in DB:
                    if srPerson[0] == doubles[0]:
                        count = count + 1
                # CPUtime
                try:
                    row[3] = float(row[3]) + (float(srPerson[1]) / count)
                except IndexError:
                    row.append(float(srPerson[1]) / count)
                # Energy
                try:
                    row[4] = float(row[4]) + (float(srPerson[2]) / count)
                except IndexError:
                    row.append(float(srPerson[2]) / count)
                if count > 1:
                    pCount = pCount + 1
                    if pCount == count:
                        break
                else:
                    break
        else:
            newPerson = [srPerson[0], 'unknown ou,UKN', 'noFullName', srPerson[1], srPerson[2]]
            DB.append(newPerson)
            print newPerson[0] + " with field " + newPerson[1] + " added to DB by sReport."
    updateDBwithSReport(DB)
    return DB


def reportPersonData(data, options):
    delim = ';'
    if options.csv:
        delim = ','
    for row in data:
        try:
            if row[3]:
                pass
        except IndexError:
            row.append(int(0))
        try:
            if row[4]:
                pass
        except IndexError:
            row.append(int(0))
    if options.sort:
        if options.user:
            sorted_data = sorted(data, key=operator.itemgetter(2))
            for row in sorted_data:
                print row[2] + delim + row[0] + delim + str(row[3]) + delim + str(row[4])
        else:  # if options.uid
            sorted_data = sorted(data, key=operator.itemgetter(0))
            for row in sorted_data:
                print row[0] + delim + row[2] + delim + str(row[3]) + delim + str(row[4])
    else:
        if options.energy:
            sorted_data = sorted(data, key=operator.itemgetter(4), reverse=True)
            for row in sorted_data:
                if row[4] != 0:
                    print row[0] + delim + row[1] + row[2] + delim + str(row[4])
        else:  # if options.cputime:
            sorted_data = sorted(data, key=operator.itemgetter(3), reverse=True)
            for row in sorted_data:
                if row[3] != 0:
                    print row[0] + delim + row[1] + delim + row[2] + delim + str(row[3])


def calcOuHistogramData(data, i):
    ouHistogramCPUTime = dict()
    for row in data:
        if row[1] in ouHistogramCPUTime:
            try:
                ouHistogramCPUTime[row[1]] = ouHistogramCPUTime[row[1]] + float(row[3 + i])
            except IndexError:
                pass
        else:
            try:
                ouHistogramCPUTime[row[1]] = float(row[3 + i])
            except IndexError:
                pass
    return ouHistogramCPUTime


def reportDepartmentData(ouHistogramData, options):
    delim = ";"
    if options.csv == True:
        delim = ","
    if options.sort == True:
        sorted_ouHistogramData = sorted(ouHistogramData.items(), key=operator.itemgetter(0))
    else:
        sorted_ouHistogramData = sorted(ouHistogramData.items(), key=operator.itemgetter(1), reverse=True)
    for row in sorted_ouHistogramData:
        if row[1] != 0:
            print row[0] + delim + str(row[1])


def calcFacultyData(ouHistogramData):
    facultyData = dict()
    # Finds faculty names and takes the faculty code from them:
    # e.g. Biomedical Engineering,UCM gets put in UCM.
    for row in ouHistogramData:
        for i in range(len(row)):
            # Stops searching for faculty codes after 4 characters.
            if i > 4:
                continue
            if row[len(row) - 1 - i] == ',':
                facultyCode = row[len(row) - i:]
                if facultyCode in facultyData:
                    facultyData[facultyCode] = facultyData[facultyCode] + ouHistogramData[row]
                else:
                    facultyData[facultyCode] = ouHistogramData[row]
    return facultyData


def reportFacultyData(facultyData, options, cpu):
    delim = ";"
    # If the time unit is not seconds, minutes or hours yet:
    if options.time == 'm' or options.time == 'minute':
        options.time = 'minutes'
    if options.time == 's' or options.time == 'second':
        options.time = 'seconds'
    if options.time == 'h' or options.time == 'hour':
        options.time = 'hours'
    if options.csv == True:
        delim = ","
    # Used for calculating missing data.
    totalFacultyData = 0
    if options.sort == True:
        sorted_facultyData = sorted(facultyData.items(), key=operator.itemgetter(0))
    else:
        sorted_facultyData = sorted(facultyData.items(), key=operator.itemgetter(1), reverse=True)
    # The actual report.
    for row in sorted_facultyData:
        totalFacultyData = totalFacultyData + row[1]
        if row[1] != 0:
            print row[0] + delim + str(row[1])
    # Report unclassified seconds, minutes or hours.
    if cpu == True:
        unknownCPU = totalCPU - totalFacultyData
        if unknownCPU > 5 or unknownCPU < -5:  # Prevents this from being printed when it is just rounding error.
            print str(unknownCPU) + " CPU " + options.time.lower() + " are unclassified"


def main():
    options = getargs()
    if options.debug:
        print "Connecting with LDAP."
    ldapDict = clusterLDAP(options.password)
    if ldapDict == []:
        print "LDAP failed, please provide a password with option -p and the password."
        return
    if options.debug:
        print "Retrieving data from sReport."
    cpu = CPUTime(str(options.startdate), str(options.enddate), str(options.time))
    try:
        assert cpu
    except AssertionError:
        print "Failed to import data from sReport."
    if options.debug:
        print "Updating DB with LDAP."
    updateDB(ldapDict)
    # Gives CPU time per user and adds new users.
    if options.debug:
        print "Updating DB with sReport."
    data = addCPUTimeToDB(cpu)
    # Print all cluster users names or uids and their cputime or energy used.
    if options.user or options.uid:
        reportPersonData(data, options)
    # CPU/energy for each department (e.g. "Kunstmatige Intelligentie,FWN").
    if options.research or options.faculty:
        if options.debug and (options.research or options.faculty):
            print "Calculating histograms."
        if options.cputime:
            ouHistogramCPUTime = calcOuHistogramData(data, False)
        if options.energy:
            ouHistogramEnergy = calcOuHistogramData(data, True)
        # Print each department and their CPU time or energy:
        if options.research:
            if options.debug and (options.research or options.faculty):
                print "Reporting data for departments."
            if options.cputime:
                reportDepartmentData(ouHistogramCPUTime, options)
            if options.energy:
                reportDepartmentData(ouHistogramEnergy, options)
        # CPU time or energy for each faculty code (e.g. FWN).
        if options.faculty:
            if options.debug and (options.research or options.faculty):
                print "Reporting data for faculties."
            if options.cputime:
                facultyCPUTime = calcFacultyData(ouHistogramCPUTime)
                reportFacultyData(facultyCPUTime, options, True)
            if options.energy:
                facultyEnergy = calcFacultyData(ouHistogramEnergy)
                reportFacultyData(facultyEnergy, options, False)


if __name__ == "__main__":
    main()
| rug-cit-ris/slurmacc | slurmacc.py | Python | gpl-2.0 | 16,164 |
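
One subtlety in CPUTime above is the parsing: with `-n -p`, sreport prints pipe-delimited rows without a header, `output.split()` yields one token per row (the selected fields contain no spaces), and each token is then split on '|'. A hedged illustration of the shapes involved, with invented values (not actual sreport output):

# Hypothetical sreport output for format=Login,Used,Energy -n -p:
#
#   |12345|678|           <- aggregate row: empty Login field
#   p200001|9000|120|
#   p200002|3345|58|
#
# After output.split() and the per-token split('|') this becomes:
#
#   [['', '12345', '678', ''],
#    ['p200001', '9000', '120', ''],
#    ['p200002', '3345', '58', '']]
#
# which is why addCPUTimeToDB reads the grand total from sReport[0][1] and
# skips rows whose first field is empty when attributing usage to users.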
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Author(s): Vendula Poncova <[email protected]>
#
from enum import Enum

from pyanaconda.core.configuration.base import Section


class TargetType(Enum):
    """Type of the installation target."""
    HARDWARE = "HARDWARE"
    IMAGE = "IMAGE"
    DIRECTORY = "DIRECTORY"


class TargetSection(Section):
    """The Installation Target section."""

    @property
    def type(self):
        """Type of the installation target."""
        return self._get_option("type", TargetType)

    @property
    def physical_root(self):
        """A path to the physical root of the target."""
        return self._get_option("physical_root")

    @property
    def system_root(self):
        """A path to the system root of the target."""
        return self._get_option("system_root")

    @property
    def is_hardware(self):
        """Are we installing on hardware?"""
        return self.type is TargetType.HARDWARE

    @property
    def is_image(self):
        """Are we installing on an image?"""
        return self.type is TargetType.IMAGE

    @property
    def is_directory(self):
        """Are we installing to a directory?"""
        return self.type is TargetType.DIRECTORY
| atodorov/anaconda | pyanaconda/core/configuration/target.py | Python | gpl-2.0 | 2,139 |
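
A small sketch of how such a section maps onto an ini-style configuration (illustrative; the section and option names mirror the properties above, but the sample layout and values are assumptions, not Anaconda's actual shipped configuration):

# Minimal sketch: resolving the same options with the standard library,
# analogous to what Section._get_option does for TargetSection.
from configparser import ConfigParser
from enum import Enum

class TargetType(Enum):
    HARDWARE = "HARDWARE"
    IMAGE = "IMAGE"
    DIRECTORY = "DIRECTORY"

SAMPLE = """
[Installation Target]
type = DIRECTORY
physical_root = /mnt/sysimage
system_root = /mnt/sysroot
"""

parser = ConfigParser()
parser.read_string(SAMPLE)
section = parser["Installation Target"]

# Enum members are declared with their string values, so the raw option
# string converts directly, mirroring _get_option("type", TargetType).
assert TargetType(section["type"]) is TargetType.DIRECTORY
print(section["physical_root"], section["system_root"])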
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import datetime
import requests
import logging
from requests.exceptions import HTTPError, Timeout
from qms_core.models import CumulativeStatus, CheckStatusErrorType
from ..check_result import CheckResult
from .baseservice_checker import BaseServiceChecker
import math
class TmsChecker(BaseServiceChecker):
def deg2num(self, lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return (xtile, ytile)
def __generate_alt_urls(self, tms_service):
url_pattern, subdomains = tms_service.get_url_pattern_and_subdomains()
urls = []
for subdomain in subdomains:
urls.append(
url_pattern % {'subdomain': subdomain}
)
return urls
def __generate_url(self, tms_service):
alt_urls = self.__generate_alt_urls(tms_service)
if alt_urls:
tms_url = alt_urls[random.randint(0, len(alt_urls)-1)]
else:
tms_url = tms_service.url
return tms_url
def check(self):
logger = logging.getLogger('qms_checking')
str_status_exception = 'EXCEPTION! '
str_status_http = ''
str_status_whole = 'RED'
str_exception_type = ''
str_exception_name = ''
result = CheckResult(geoservice_id=self.service.id,
geoservice_name=self.service.name,
geoservice_type=self.service.type)
result.cumulative_status = CumulativeStatus.FAILED
result.error_text = ''
startTime = datetime.datetime.utcnow()
try:
extent_center = self.service.extent.centroid if self.service.extent else None
x, y = 0, 0
tms_url = self.__generate_url(self.service)
if self.service.z_min is not None:
if extent_center:
x, y = self.deg2num(extent_center.y, extent_center.x, self.service.z_min)
test_url = tms_url.format(z=self.service.z_min, x=x, y=y)
elif self.service.z_max is not None:
if extent_center:
x, y = self.deg2num(extent_center.y, extent_center.x, self.service.z_max)
test_url = tms_url.format(z=self.service.z_max, x=x, y=y)
else:
# test_url = None
# result.cumulative_status = CumulativeStatus.FAILED
# result.error_text = 'Not set z_min and z_max for TMS'
# Try 0 0 0 tile now
if extent_center:
x, y = self.deg2num(extent_center.y, extent_center.x, 0)
test_url = tms_url.format(z=0, x=x, y=y)
if test_url:
response = requests.get(test_url, timeout=self.timeout)
str_status_http = f'{response.status_code}'
content_type = response.headers['content-type']
result.http_code = response.status_code
                # For now we simply treat the service as working if it returned
                # an image; a check for blank/empty tiles could be added later.
if response.status_code == 200:
if content_type == 'image/png' or content_type == 'image/jpeg':
result.cumulative_status = CumulativeStatus.WORKS
str_status_whole = 'GREEN'
else:
result.cumulative_status = CumulativeStatus.PROBLEMATIC
str_status_whole = 'YELLOW'
result.error_type = CheckStatusErrorType.INVALID_RESPONSE
result.error_text = 'service response is not image'
else:
result.cumulative_status = CumulativeStatus.PROBLEMATIC
str_status_whole = 'YELLOW'
result.error_text = 'Non 200 http code'
result.http_response = response.text
result.error_type = CheckStatusErrorType.INVALID_RESPONSE
str_status_exception = ''
        # if requests returned a web server error code
except HTTPError as error:
str_exception_type = 'HTTPError'
str_exception_name = str(error)
result.cumulative_status = CumulativeStatus.FAILED
result.error_text = str(error)
except Timeout as error:
str_exception_type = 'Timeout'
str_exception_name = str(error)
result.cumulative_status = CumulativeStatus.FAILED
result.error_type = CheckStatusErrorType.TIMEOUT_ERROR
except Exception as error:
str_exception_type = 'Exception'
str_exception_name = str(error)
result.cumulative_status = CumulativeStatus.FAILED
result.error_text = str(error)
finally:
_id = self.service.id
_type = self.service.type
duration_time = datetime.datetime.utcnow() - startTime
result.check_duration = duration_time.total_seconds()
            duration_seconds = "%.2f" % duration_time.total_seconds()
str_log = f'[{_id} {_type}] [{str_status_whole}] {duration_seconds} sec: http: ' \
f'{str_status_http} {str_status_exception} {str_exception_type} {str_exception_name}'
logger.info(str_log)
return result
| nextgis/quickmapservices_server | qms_server/qms_core/status_checker/service_checkers/tms_checker.py | Python | gpl-2.0 | 5,761 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
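    # South schema migration (auto-generated): forwards() creates the initial
    # tables for the 'web' app, backwards() drops them again.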
def forwards(self, orm):
# Adding model 'Feedback'
db.create_table(u'web_feedback', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)),
('kind', self.gf('django.db.models.fields.CharField')(max_length=4)),
('weight', self.gf('django.db.models.fields.IntegerField')()),
('notes', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('posted', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('posted_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='feedbacks_posted', to=orm['web.UserProfile'])),
))
db.send_create_signal(u'web', ['Feedback'])
# Adding model 'Image'
db.create_table(u'web_image', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('posted', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('posted_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='posted_images', to=orm['web.UserProfile'])),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('source_url', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('width', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
('height', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
('size', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)),
))
db.send_create_signal(u'web', ['Image'])
# Adding model 'UserProfile'
db.create_table(u'web_userprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='profile', unique=True, to=orm['auth.User'])),
('story', self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('last_updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['web.UserProfile'])),
('location', self.gf('django.contrib.gis.db.models.fields.PointField')()),
('distance', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal(u'web', ['UserProfile'])
# Adding model 'ProfileOrganization'
db.create_table(u'web_profileorganization', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['web.UserProfile'])),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['web.Organization'])),
('role', self.gf('django.db.models.fields.CharField')(max_length=64)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'web', ['ProfileOrganization'])
# Adding model 'Organization'
db.create_table(u'web_organization', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('logo', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['web.Image'], unique=True, null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=512)),
('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('last_updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['web.UserProfile'])),
))
db.send_create_signal(u'web', ['Organization'])
# Adding model 'Pet'
db.create_table(u'web_pet', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pet_type', self.gf('django.db.models.fields.CharField')(max_length=32)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('location', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='pets_located_here', null=True, to=orm['web.Location'])),
('story', self.gf('django.db.models.fields.CharField')(max_length=512)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('last_updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['web.UserProfile'])),
('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['web.UserProfile'])),
('thumbnail', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['web.Image'])),
('rescued_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='rescued_pets', null=True, to=orm['web.Organization'])),
('transport', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='manifest', null=True, to=orm['web.Transport'])),
))
db.send_create_signal(u'web', ['Pet'])
# Adding model 'PetAttribute'
db.create_table(u'web_petattribute', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pet', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['web.Pet'])),
('attribute', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['web.Attribute'])),
('value', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('last_updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['web.UserProfile'])),
))
db.send_create_signal(u'web', ['PetAttribute'])
# Adding model 'Attribute'
db.create_table(u'web_attribute', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('kind', self.gf('django.db.models.fields.CharField')(max_length=4)),
('attribute', self.gf('django.db.models.fields.CharField')(max_length=64)),
('display_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('description', self.gf('django.db.models.fields.CharField')(max_length=512)),
('special_choices', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('special_validation', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('last_updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['web.UserProfile'])),
))
db.send_create_signal(u'web', ['Attribute'])
# Adding model 'Location'
db.create_table(u'web_location', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('address', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('geo', self.gf('django.contrib.gis.db.models.fields.PointField')()),
('description', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
))
db.send_create_signal(u'web', ['Location'])
# Adding model 'SegmentRoles'
db.create_table(u'web_segmentroles', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('segment', self.gf('django.db.models.fields.related.OneToOneField')(related_name='volunteers', unique=True, to=orm['web.TransportSegment'])),
('role', self.gf('django.db.models.fields.CharField')(max_length=4)),
('priority', self.gf('django.db.models.fields.IntegerField')()),
('status', self.gf('django.db.models.fields.CharField')(max_length=4)),
))
db.send_create_signal(u'web', ['SegmentRoles'])
# Adding M2M table for field user on 'SegmentRoles'
m2m_table_name = db.shorten_name(u'web_segmentroles_user')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('segmentroles', models.ForeignKey(orm[u'web.segmentroles'], null=False)),
('userprofile', models.ForeignKey(orm[u'web.userprofile'], null=False))
))
db.create_unique(m2m_table_name, ['segmentroles_id', 'userprofile_id'])
# Adding model 'TransportSegment'
db.create_table(u'web_transportsegment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('sequence', self.gf('django.db.models.fields.IntegerField')()),
('pick_up_location', self.gf('django.db.models.fields.related.ForeignKey')(related_name='pickup+', to=orm['web.Location'])),
('drop_off_location', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dropoff+', to=orm['web.Location'])),
('pick_up_datetime', self.gf('django.db.models.fields.DateTimeField')()),
('drop_off_datetime', self.gf('django.db.models.fields.DateTimeField')()),
('duration', self.gf('django.db.models.fields.TimeField')()),
('miles', self.gf('django.db.models.fields.IntegerField')()),
('transport', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='segments', null=True, to=orm['web.Transport'])),
('status', self.gf('django.db.models.fields.CharField')(default='ONTM', max_length=4)),
('offset', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
('notes', self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('last_updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['web.UserProfile'])),
))
db.send_create_signal(u'web', ['TransportSegment'])
# Adding model 'Transport'
db.create_table(u'web_transport', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shipper', self.gf('django.db.models.fields.related.ForeignKey')(related_name='packages_shipped', to=orm['web.UserProfile'])),
('receiver', self.gf('django.db.models.fields.related.ForeignKey')(related_name='packages_received', to=orm['web.UserProfile'])),
('tracking_number', self.gf('django.db.models.fields.CharField')(default='PT02UHMD6OUND0B', unique=True, max_length=30)),
('notes', self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='NEW', max_length=4)),
('started_on', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('start_location', self.gf('django.db.models.fields.related.ForeignKey')(related_name='start_location+', null=True, to=orm['web.Location'])),
('finished_on', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('finish_location', self.gf('django.db.models.fields.related.ForeignKey')(related_name='finish_location+', null=True, to=orm['web.Location'])),
('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['web.UserProfile'])),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('last_updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['web.UserProfile'])),
))
db.send_create_signal(u'web', ['Transport'])
# Adding model 'TransportLogEntry'
db.create_table(u'web_transportlogentry', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shipment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='log_entries', to=orm['web.Transport'])),
('timestamp', self.gf('django.db.models.fields.DateTimeField')()),
('image', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['web.Image'], unique=True, null=True, blank=True)),
('text', self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('last_updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['web.UserProfile'])),
))
db.send_create_signal(u'web', ['TransportLogEntry'])
def backwards(self, orm):
# Deleting model 'Feedback'
db.delete_table(u'web_feedback')
# Deleting model 'Image'
db.delete_table(u'web_image')
# Deleting model 'UserProfile'
db.delete_table(u'web_userprofile')
# Deleting model 'ProfileOrganization'
db.delete_table(u'web_profileorganization')
# Deleting model 'Organization'
db.delete_table(u'web_organization')
# Deleting model 'Pet'
db.delete_table(u'web_pet')
# Deleting model 'PetAttribute'
db.delete_table(u'web_petattribute')
# Deleting model 'Attribute'
db.delete_table(u'web_attribute')
# Deleting model 'Location'
db.delete_table(u'web_location')
# Deleting model 'SegmentRoles'
db.delete_table(u'web_segmentroles')
# Removing M2M table for field user on 'SegmentRoles'
db.delete_table(db.shorten_name(u'web_segmentroles_user'))
# Deleting model 'TransportSegment'
db.delete_table(u'web_transportsegment')
# Deleting model 'Transport'
db.delete_table(u'web_transport')
# Deleting model 'TransportLogEntry'
db.delete_table(u'web_transportlogentry')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'web.attribute': {
'Meta': {'object_name': 'Attribute'},
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['web.UserProfile']"}),
'pets': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.Pet']", 'through': u"orm['web.PetAttribute']", 'symmetrical': 'False'}),
'special_choices': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'special_validation': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'web.feedback': {
'Meta': {'object_name': 'Feedback'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'posted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feedbacks_posted'", 'to': u"orm['web.UserProfile']"}),
'weight': ('django.db.models.fields.IntegerField', [], {})
},
u'web.image': {
'Meta': {'object_name': 'Image'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'posted': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posted_images'", 'to': u"orm['web.UserProfile']"}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'web.location': {
'Meta': {'object_name': 'Location'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'geo': ('django.contrib.gis.db.models.fields.PointField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'web.organization': {
'Meta': {'object_name': 'Organization'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['web.UserProfile']"}),
'logo': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['web.Image']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'web.pet': {
'Meta': {'object_name': 'Pet'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['web.UserProfile']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['web.UserProfile']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pets_located_here'", 'null': 'True', 'to': u"orm['web.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'pet_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'rescued_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'rescued_pets'", 'null': 'True', 'to': u"orm['web.Organization']"}),
'story': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['web.Image']"}),
'transport': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'manifest'", 'null': 'True', 'to': u"orm['web.Transport']"})
},
u'web.petattribute': {
'Meta': {'object_name': 'PetAttribute'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Attribute']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['web.UserProfile']"}),
'pet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Pet']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'web.profileorganization': {
'Meta': {'object_name': 'ProfileOrganization'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.UserProfile']"})
},
u'web.segmentroles': {
'Meta': {'object_name': 'SegmentRoles'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'segment': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'volunteers'", 'unique': 'True', 'to': u"orm['web.TransportSegment']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'+'", 'symmetrical': 'False', 'to': u"orm['web.UserProfile']"})
},
u'web.transport': {
'Meta': {'object_name': 'Transport'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['web.UserProfile']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'finish_location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finish_location+'", 'null': 'True', 'to': u"orm['web.Location']"}),
'finished_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['web.UserProfile']"}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages_received'", 'to': u"orm['web.UserProfile']"}),
'shipper': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages_shipped'", 'to': u"orm['web.UserProfile']"}),
'start_location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'start_location+'", 'null': 'True', 'to': u"orm['web.Location']"}),
'started_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'NEW'", 'max_length': '4'}),
'tracking_number': ('django.db.models.fields.CharField', [], {'default': "'PT0FCVPFGV4FM5C'", 'unique': 'True', 'max_length': '30'})
},
u'web.transportlogentry': {
'Meta': {'object_name': 'TransportLogEntry'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['web.Image']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['web.UserProfile']"}),
'shipment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'log_entries'", 'to': u"orm['web.Transport']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {})
},
u'web.transportsegment': {
'Meta': {'object_name': 'TransportSegment'},
'drop_off_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'drop_off_location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dropoff+'", 'to': u"orm['web.Location']"}),
'duration': ('django.db.models.fields.TimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['web.UserProfile']"}),
'miles': ('django.db.models.fields.IntegerField', [], {}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'offset': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'pick_up_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'pick_up_location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pickup+'", 'to': u"orm['web.Location']"}),
'sequence': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'ONTM'", 'max_length': '4'}),
'transport': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'segments'", 'null': 'True', 'to': u"orm['web.Transport']"})
},
u'web.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'distance': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['web.UserProfile']"}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'members'", 'symmetrical': 'False', 'through': u"orm['web.ProfileOrganization']", 'to': u"orm['web.Organization']"}),
'story': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['web'] | VentureCranial/PawTrain | app/web/migrations/0001_initial.py | Python | gpl-2.0 | 33,178 |
"""ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class shutit(ShutItModule):
def build(self, shutit):
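        # Build ShutIt from source: clone it into /opt, install its Python
        # dependencies, and put the checkout on PATH via .bashrc.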
shutit.send('mkdir -p /opt')
shutit.send('cd /opt')
shutit.send('git clone https://github.com/ianmiell/shutit.git')
shutit.send('cd shutit')
shutit.send('pip install -r requirements.txt')
shutit.add_to_bashrc('export PATH=$PATH:/opt/shutit')
return True
def module():
return shutit(
'shutit.tk.sd.shutit.shutit', 158844782.0299,
description='A ShutIt environment from which you can reliably build an image in delivery --bash, built from source',
maintainer='[email protected]',
depends=['shutit.tk.sd.git.git','shutit.tk.sd.curl.curl','shutit.tk.sd.python_pip.python_pip']
)
| ianmiell/shutit-distro | shutit/shutit.py | Python | gpl-2.0 | 743 |
#!/usr/bin/env python
"""
Reads an int from the command line or input prompt and draws the die.
Works with bash or Tk.
(* This does not work.)
"""
__author__ = 'Chris Horn <[email protected]>'
from cjh.config import Config
from cjh.die import Die
SHELL = Config().start_user_profile()
if SHELL.interface in ['Tk', 'wx']:
SHELL.center_window(height_=100, width_=150)
def main():
    """
    Get an int from the pipeline or from user input, and draw the die.
    """
    die = Die()
    die.value = int(SHELL.arg())
    die.draw_face(shellib=SHELL)
if __name__ == '__main__':
    main()
| hammerhorn/hammerhorn-jive | dice/draw_die.py | Python | gpl-2.0 | 606 |
#
# keyboard_layouts.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Country code mappings to keyboard model names, and keyboard variant names
# collected from Debian console-setup source package, version 1.88:
# http://packages.debian.org/source/wheezy/console-setup
# http://dev.kano.me/public/Keyboardnames.pl.txt
#
# Mapping of country names to keyboard layout codes,
# with additional names for natural country prompts (for example United Kingdom,
# England, UK, etc.)
layouts = {
'europe': {
'Albania': 'al',
'Andorra': 'ad',
'Austria': 'at',
'Belarus': 'by',
'Belgium': 'be',
'Bosnia': 'ba',
'Herzegovina': 'ba',
'Bulgaria': 'bg',
'Croatia': 'hr',
'Czech Republic': 'cz',
'Denmark': 'dk',
'Estonia': 'ee',
'Faroe Islands': 'fo',
'Finland': 'fi',
'France': 'fr',
'Germany': 'de',
'Greece': 'gr',
'Hungary': 'hu',
'Iceland': 'is',
'Italy': 'it',
'Ireland': 'ie',
'Latvia': 'lv',
'Lithuania': 'lt',
'Macedonia': 'mk',
'Malta': 'mt',
'Montenegro': 'me',
'Netherlands': 'nl',
'Norway': 'no',
'Poland': 'pl',
'Portugal': 'pt',
'Romania': 'ro',
'Russia': 'ru',
'Serbia': 'rs',
'Slovakia': 'sk',
'Slovenia': 'si',
'Spain': 'es',
'Sweden': 'se',
'Switzerland': 'ch',
'Turkey': 'tr',
'Ukraine': 'ua',
'United Kingdom': 'gb',
},
'asia': {
'Afghanistan': 'af',
'Arabic': 'ara',
'Armenia': 'am',
'Azerbaijan': 'az',
'Bangladesh': 'bd',
'Bhutan': 'bt',
'Cambodia': 'kh',
'China': 'cn',
'Georgia': 'ge',
'India': 'in',
'Iran': 'ir',
'Iraq': 'iq',
'Israel': 'il',
'Japan': 'jp',
'Kazakhstan': 'kz',
'Kyrgyzstan': 'kg',
'Korea': 'kr',
'Laos': 'la',
'Maldives': 'mv',
'Mongolia': 'mn',
'Myanmar': 'mm',
'Nepal': 'np',
'Pakistan': 'pk',
'Philippines': 'ph',
'Sri Lanka': 'lk',
'Syria': 'sy',
'Tajikistan': 'tj',
'Thailand': 'th',
'Turkmenistan': 'tm',
'Uzbekistan': 'uz',
'Vietnam': 'vn'
},
'africa': {
'Botswana': 'bw',
'Congo': 'cd',
'Ethiopia': 'et',
'Ghana': 'gh',
'Guinea': 'gn',
'Kenya': 'ke',
'Mali': 'ml',
'Morocco': 'ma',
'Nigeria': 'ng',
'Senegal': 'sn',
'South Africa': 'za',
'Tanzania': 'tz',
},
'america': {
'Argentina': 'es', # This was originally latam, but changed to match the Kano keyboard.
'Bolivia': 'latam',
'Brazil': 'br',
'Canada': 'ca',
'Chile': 'latam',
'Colombia': 'latam',
'Costa Rica': 'latam',
'Cuba': 'latam',
'Ecuador': 'latam',
'El Salvador': 'latam',
'Guatemala': 'latam',
'Guayana': 'latam',
'Haiti': 'latam',
'Honduras': 'latam',
'Mexico': 'latam',
'Nicaragua': 'latam',
'Panama': 'latam',
'Paraguay': 'latam',
'Peru': 'latam',
'Puerto Rico': 'latam',
'Republica Dominicana': 'latam',
'Uruguay': 'latam',
'United States': 'us',
'Venezuela': 'latam',
},
'australia': {
'Australia': 'gb',
'Maori': 'mao',
},
'others': {
'Braille': 'brai',
'Esperanto': 'epo',
}
}
variants = {
'af': [
('OLPC Dari', 'olpc-fa'),
('OLPC Pashto', 'olpc-ps'),
('OLPC Southern Uzbek', 'olpc-uz'),
('Pashto', 'ps'),
('Southern Uzbek', 'uz')
],
'am': [
('Alternative Eastern', 'eastern-alt'),
('Alternative Phonetic', 'phonetic-alt'),
('Eastern', 'eastern'),
('Phonetic', 'phonetic'),
('Western', 'western')
],
'ara': [
('Buckwalter', 'buckwalter'),
('azerty', 'azerty'),
('azerty/digits', 'azerty_digits'),
('digits', 'digits'),
('qwerty', 'qwerty'),
('qwerty/digits', 'qwerty_digits')
],
'at': [
('Eliminate dead keys', 'nodeadkeys'),
('Macintosh', 'mac'),
('Sun dead keys', 'sundeadkeys')
],
'az': [
('Cyrillic', 'cyrillic')
],
'ba': [
('US keyboard with Bosnian digraphs', 'unicodeus'),
('US keyboard with Bosnian letters', 'us'),
('Use Bosnian digraphs', 'unicode'),
('Use guillemets for quotes', 'alternatequotes')
],
'bd': [
('Probhat', 'probhat')
],
'be': [
('Alternative', 'oss'),
('Alternative, Sun dead keys', 'oss_sundeadkeys'),
('Alternative, latin-9 only', 'oss_latin9'),
('Eliminate dead keys', 'nodeadkeys'),
('ISO Alternate', 'iso-alternate'),
('Sun dead keys', 'sundeadkeys'),
('Wang model 724 azerty', 'wang')
],
'bg': [
('New phonetic', 'bas_phonetic'),
('Traditional phonetic', 'phonetic')
],
'br': [
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('Nativo', 'nativo'),
('Nativo for Esperanto', 'nativo-epo'),
('Nativo for USA keyboards', 'nativo-us')
],
'brai': [
('Left hand', 'left_hand'),
('Right hand', 'right_hand')
],
'by': [
('Latin', 'latin'),
('Legacy', 'legacy'),
],
'ca': [
('English', 'eng'),
('French (legacy)', 'fr-legacy'),
('French Dvorak', 'fr-dvorak'),
('Inuktitut', 'ike'),
('Ktunaxa', 'kut'),
('Multilingual', 'multix'),
('Multilingual, first part', 'multi'),
('Multilingual, second part', 'multi-2gr'),
('Secwepemctsin', 'shs')
],
'ch': [
('French', 'fr'),
('French (Macintosh)', 'fr_mac'),
('French, Sun dead keys', 'fr_sundeadkeys'),
('French, eliminate dead keys', 'fr_nodeadkeys'),
('German (Macintosh)', 'de_mac'),
('German, Sun dead keys', 'de_sundeadkeys'),
('German, eliminate dead keys', 'de_nodeadkeys'),
('Legacy', 'legacy')
],
'cn': [
('Tibetan', 'tib'),
('Tibetan (with ASCII numerals)', 'tib_asciinum'),
('Uyghur', 'uig')
],
'cz': [
('UCW layout (accented letters only)', 'ucw'),
('US Dvorak with CZ UCW support', 'dvorak-ucw'),
('With <\|> key', 'bksl'),
('qwerty', 'qwerty'),
('qwerty, extended Backslash', 'qwerty_bksl')
],
'de': [
('Dead acute', 'deadacute'),
('Dead grave acute', 'deadgraveacute'),
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('Lower Sorbian', 'dsb'),
('Lower Sorbian (qwertz)', 'dsb_qwertz'),
('Macintosh', 'mac'),
('Macintosh, eliminate dead keys', 'mac_nodeadkeys'),
('Neo 2', 'neo'),
('Romanian keyboard with German letters', 'ro'),
('Romanian keyboard with German letters, eliminate dead keys', 'ro_nodeadkeys'),
('Russian phonetic', 'ru'),
('Sun dead keys', 'sundeadkeys'),
('qwerty', 'qwerty')
],
'dk': [
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('Macintosh', 'mac'),
('Macintosh, eliminate dead keys', 'mac_nodeadkeys')
],
'ee': [
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('US keyboard with Estonian letters', 'us')
],
'epo': [
('displaced semicolon and quote (obsolete)', 'legacy')
],
'es': [
('Asturian variant with bottom-dot H and bottom-dot L', 'ast'),
('Catalan variant with middle-dot L', 'cat'),
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('Include dead tilde', 'deadtilde'),
('Macintosh', 'mac'),
('Sun dead keys', 'sundeadkeys')
],
'fi': [
('Classic', 'classic'),
('Classic, eliminate dead keys', 'nodeadkeys'),
('Macintosh', 'mac'),
('Northern Saami', 'smi')
],
'fo': [
('Eliminate dead keys', 'nodeadkeys')
],
'fr': [
('(Legacy) Alternative', 'latin9'),
('(Legacy) Alternative, Sun dead keys', 'latin9_sundeadkeys'),
('(Legacy) Alternative, eliminate dead keys', 'latin9_nodeadkeys'),
('Alternative', 'oss'),
('Alternative, Sun dead keys', 'oss_sundeadkeys'),
('Alternative, eliminate dead keys', 'oss_nodeadkeys'),
('Alternative, latin-9 only', 'oss_latin9'),
('Bepo, ergonomic, Dvorak way', 'bepo'),
('Bepo, ergonomic, Dvorak way, latin-9 only', 'bepo_latin9'),
('Breton', 'bre'),
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('Georgian AZERTY Tskapo', 'geo'),
('Macintosh', 'mac'),
('Occitan', 'oci'),
('Sun dead keys', 'sundeadkeys')
],
'gb': [
('Colemak', 'colemak'),
('Dvorak', 'dvorak'),
('Dvorak (UK Punctuation)', 'dvorakukp'),
('Extended - Winkeys', 'extd'),
('International (with dead keys)', 'intl'),
('Macintosh', 'mac'),
('Macintosh (International)', 'mac_intl')
],
'ge': [
('Ergonomic', 'ergonomic'),
('MESS', 'mess'),
('Ossetian', 'os'),
('Russian', 'ru')
],
'gh': [
('Akan', 'akan'),
('Avatime', 'avn'),
('Ewe', 'ewe'),
('Fula', 'fula'),
('GILLBT', 'gillbt'),
('Ga', 'ga'),
('Hausa', 'hausa'),
('Multilingual', 'generic')
],
'gr': [
('Eliminate dead keys', 'nodeadkeys'),
('Extended', 'extended'),
('Polytonic', 'polytonic'),
('Simple', 'simple')
],
'hr': [
('US keyboard with Croatian digraphs', 'unicodeus'),
('US keyboard with Croatian letters', 'us'),
('Use Croatian digraphs', 'unicode'),
('Use guillemets for quotes', 'alternatequotes')
],
'hu': [
('101/qwerty/comma/Dead keys', '101_qwerty_comma_dead'),
('101/qwerty/comma/Eliminate dead keys', '101_qwerty_comma_nodead'),
('101/qwerty/dot/Dead keys', '101_qwerty_dot_dead'),
('101/qwerty/dot/Eliminate dead keys', '101_qwerty_dot_nodead'),
('101/qwertz/comma/Dead keys', '101_qwertz_comma_dead'),
('101/qwertz/comma/Eliminate dead keys', '101_qwertz_comma_nodead'),
('101/qwertz/dot/Dead keys', '101_qwertz_dot_dead'),
('101/qwertz/dot/Eliminate dead keys', '101_qwertz_dot_nodead'),
('102/qwerty/comma/Dead keys', '102_qwerty_comma_dead'),
('102/qwerty/comma/Eliminate dead keys', '102_qwerty_comma_nodead'),
('102/qwerty/dot/Dead keys', '102_qwerty_dot_dead'),
('102/qwerty/dot/Eliminate dead keys', '102_qwerty_dot_nodead'),
('102/qwertz/comma/Dead keys', '102_qwertz_comma_dead'),
('102/qwertz/comma/Eliminate dead keys', '102_qwertz_comma_nodead'),
('102/qwertz/dot/Dead keys', '102_qwertz_dot_dead'),
('102/qwertz/dot/Eliminate dead keys', '102_qwertz_dot_nodead'),
('Eliminate dead keys', 'nodeadkeys'),
('Standard', 'standard'),
('qwerty', 'qwerty')
],
'ie': [
('CloGaelach', 'CloGaelach'),
('Ogham', 'ogam'),
('Ogham IS434', 'ogam_is434'),
('UnicodeExpert', 'UnicodeExpert')
],
'il': [
('Biblical Hebrew (Tiro)', 'biblical'),
('Phonetic', 'phonetic'),
('lyx', 'lyx')
],
'in': [
('Bengali', 'ben'),
('Bengali Probhat', 'ben_probhat'),
('English with RupeeSign', 'eng'),
('Gujarati', 'guj'),
('Gurmukhi', 'guru'),
('Gurmukhi Jhelum', 'jhelum'),
('Hindi Bolnagri', 'bolnagri'),
('Hindi Wx', 'hin-wx'),
('Kannada', 'kan'),
('Malayalam', 'mal'),
('Malayalam Lalitha', 'mal_lalitha'),
('Oriya', 'ori'),
('Tamil', 'tam'),
('Tamil Keyboard with Numerals', 'tam_keyboard_with_numerals'),
('Tamil TAB Typewriter', 'tam_TAB'),
('Tamil TSCII Typewriter', 'tam_TSCII'),
('Tamil Unicode', 'tam_unicode'),
('Telugu', 'tel'),
('Urdu, Alternative phonetic', 'urd-phonetic3'),
('Urdu, Phonetic', 'urd-phonetic'),
('Urdu, Winkeys', 'urd-winkeys')
],
'iq': [
('Kurdish, (F)', 'ku_f'),
('Kurdish, Arabic-Latin', 'ku_ara'),
('Kurdish, Latin Alt-Q', 'ku_alt'),
('Kurdish, Latin Q', 'ku')
],
'ir': [
('Kurdish, (F)', 'ku_f'),
('Kurdish, Arabic-Latin', 'ku_ara'),
('Kurdish, Latin Alt-Q', 'ku_alt'),
('Kurdish, Latin Q', 'ku'),
('Persian, with Persian Keypad', 'pes_keypad')
],
'is': [
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('Macintosh', 'mac'),
('Sun dead keys', 'Sundeadkeys')
],
'it': [
('Eliminate dead keys', 'nodeadkeys'),
('Georgian', 'geo'),
('Macintosh', 'mac'),
('US keyboard with Italian letters', 'us')
],
'jp': [
('Kana', 'kana'),
('Kana 86', 'kana86'),
('Macintosh', 'mac'),
('OADG 109A', 'OADG109A')
],
'ke': [
('Kikuyu', 'kik')
],
'kg': [
('Phonetic', 'phonetic')
],
'kr': [
('101/104 key Compatible', 'kr104')
],
'kz': [
('Kazakh with Russian', 'kazrus'),
('Russian with Kazakh', 'ruskaz')
],
'latam': [
('Eliminate dead keys', 'nodeadkeys'),
('Include dead tilde', 'deadtilde'),
('Sun dead keys', 'sundeadkeys')
],
'lk': [
('Tamil TAB Typewriter', 'tam_TAB'),
('Tamil Unicode', 'tam_unicode')
],
'lt': [
('IBM (LST 1205-92)', 'ibm'),
('LEKP', 'lekp'),
('LEKPa', 'lekpa'),
('Standard', 'std'),
('US keyboard with Lithuanian letters', 'us')
],
'lv': [
        ("Apostrophe (') variant", 'apostrophe'),
('F-letter (F) variant', 'fkey'),
('Tilde (~) variant', 'tilde')
],
'ma': [
('French', 'french'),
('Tifinagh', 'tifinagh'),
('Tifinagh alternative', 'tifinagh-alt'),
('Tifinagh alternative phonetic', 'tifinagh-alt-phonetic'),
('Tifinagh extended', 'tifinagh-extended'),
('Tifinagh extended phonetic', 'tifinagh-extended-phonetic'),
('Tifinagh phonetic', 'tifinagh-phonetic')
],
'me': [
('Cyrillic', 'cyrillic'),
('Cyrillic with guillemets', 'cyrillicalternatequotes'),
('Cyrillic, Z and ZHE swapped', 'cyrillicyz'),
('Latin qwerty', 'latinyz'),
('Latin unicode', 'latinunicode'),
('Latin unicode qwerty', 'latinunicodeyz'),
('Latin with guillemets', 'latinalternatequotes')
],
'mk': [
('Eliminate dead keys', 'nodeadkeys')
],
'ml': [
('English (USA International)', 'us-intl'),
('English (USA Macintosh)', 'us-mac'),
('Francais (France Alternative)', 'fr-oss'),
],
'mt': [
('Maltese keyboard with US layout', 'us')
],
'ng': [
('Hausa', 'hausa'),
('Igbo', 'igbo'),
('Yoruba', 'yoruba')
],
'nl': [
('Macintosh', 'mac'),
('Standard', 'std'),
('Sun dead keys', 'sundeadkeys')
],
'no': [
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('Macintosh', 'mac'),
('Macintosh, eliminate dead keys', 'mac_nodeadkeys'),
('Northern Saami', 'smi'),
('Northern Saami, eliminate dead keys', 'smi_nodeadkeys')
],
'ph': [
('Capewell-Dvorak (Baybayin)', 'capewell-dvorak-bay'),
('Capewell-Dvorak (Latin)', 'capewell-dvorak'),
('Capewell-QWERF 2006 (Baybayin)', 'capewell-qwerf2k6-bay'),
('Capewell-QWERF 2006 (Latin)', 'capewell-qwerf2k6'),
('Colemak (Baybayin)', 'colemak-bay'),
('Colemak (Latin)', 'colemak'),
('Dvorak (Baybayin)', 'dvorak-bay'),
('Dvorak (Latin)', 'dvorak'),
('QWERTY (Baybayin)', 'qwerty-bay')
],
'pk': [
('Arabic', 'ara'),
('CRULP', 'urd-crulp'),
('NLA', 'urd-nla'),
('Sindhi', 'snd')
],
'pl': [
('Dvorak', 'dvorak'),
('Dvorak, Polish quotes on key 1', 'dvorak_altquotes'),
('Dvorak, Polish quotes on quotemark key', 'dvorak_quotes'),
('Kashubian', 'csb'),
('Programmer Dvorak', 'dvp'),
('Russian phonetic Dvorak', 'ru_phonetic_dvorak'),
('qwertz', 'qwertz')
],
'pt': [
('Eliminate dead keys', 'nodeadkeys'),
('Macintosh', 'mac'),
('Macintosh, Sun dead keys', 'mac_sundeadkeys'),
('Macintosh, eliminate dead keys', 'mac_nodeadkeys'),
('Nativo', 'nativo'),
('Nativo for Esperanto', 'nativo-epo'),
('Nativo for USA keyboards', 'nativo-us'),
('Sun dead keys', 'sundeadkeys')
],
'ro': [
('Cedilla', 'cedilla'),
('Crimean Tatar (Dobruca-1 Q)', 'crh_dobruca1'),
('Crimean Tatar (Dobruca-2 Q)', 'crh_dobruca2'),
('Crimean Tatar (Turkish Alt-Q)', 'crh_alt'),
('Crimean Tatar (Turkish F)', 'crh_f'),
('Standard', 'std'),
('Standard (Cedilla)', 'std_cedilla'),
('Winkeys', 'winkeys')
],
'rs': [
('Latin', 'latin'),
('Latin Unicode', 'latinunicode'),
('Latin Unicode qwerty', 'latinunicodeyz'),
('Latin qwerty', 'latinyz'),
('Latin with guillemets', 'latinalternatequotes'),
('Pannonian Rusyn Homophonic', 'rue'),
('With guillemets', 'alternatequotes'),
('Z and ZHE swapped', 'yz')
],
'ru': [
('Bashkirian', 'bak'),
('Chuvash', 'cv'),
('Chuvash Latin', 'cv_latin'),
('DOS', 'dos'),
('Kalmyk', 'xal'),
('Komi', 'kom'),
('Legacy', 'legacy'),
('Mari', 'chm'),
('Ossetian, Winkeys', 'os_winkeys'),
('Ossetian, legacy', 'os_legacy'),
('Phonetic', 'phonetic'),
('Phonetic Winkeys', 'phonetic_winkeys'),
('Serbian', 'srp'),
('Tatar', 'tt'),
('Typewriter', 'typewriter'),
('Typewriter, legacy', 'typewriter-legacy'),
('Udmurt', 'udm'),
('Yakut', 'sah')
],
'se': [
('Dvorak', 'dvorak'),
('Eliminate dead keys', 'nodeadkeys'),
('Macintosh', 'mac'),
('Northern Saami', 'smi'),
('Russian phonetic', 'rus'),
('Russian phonetic, eliminate dead keys', 'rus_nodeadkeys'),
('Svdvorak', 'svdvorak')
],
'si': [
('US keyboard with Slovenian letters', 'us'),
('Use guillemets for quotes', 'alternatequotes')
],
'sk': [
('Extended Backslash', 'bksl'),
('qwerty', 'qwerty'),
('qwerty, extended Backslash', 'qwerty_bksl')
],
'sy': [
('Kurdish, (F)', 'ku_f'),
('Kurdish, Latin Alt-Q', 'ku_alt'),
('Kurdish, Latin Q', 'ku'),
('Syriac', 'syc'),
('Syriac phonetic', 'syc_phonetic')
],
'th': [
('Pattachote', 'pat'),
('TIS-820.2538', 'tis')
],
'tj': [
('Legacy', 'legacy')
],
'tm': [
('Alt-Q', 'alt')
],
'tr': [
('(F)', 'f'),
('Alt-Q', 'alt'),
('Crimean Tatar (Turkish Alt-Q)', 'crh_alt'),
('Crimean Tatar (Turkish F)', 'crh_f'),
('Crimean Tatar (Turkish Q)', 'crh'),
('International (with dead keys)', 'intl'),
('Kurdish, (F)', 'ku_f'),
('Kurdish, Latin Alt-Q', 'ku_alt'),
('Kurdish, Latin Q', 'ku'),
('Sun dead keys', 'sundeadkeys')
],
'ua': [
('Crimean Tatar (Turkish Alt-Q)', 'crh_alt'),
('Crimean Tatar (Turkish F)', 'crh_f'),
('Crimean Tatar (Turkish Q)', 'crh'),
('Homophonic', 'homophonic'),
('Legacy', 'legacy'),
('Phonetic', 'phonetic'),
('Standard RSTU', 'rstu'),
('Standard RSTU on Russian layout', 'rstu_ru'),
('Typewriter', 'typewriter'),
('Winkeys', 'winkeys')
],
'us': [
('Alternative international', 'alt-intl'),
('Cherokee', 'chr'),
('Classic Dvorak', 'dvorak-classic'),
('Colemak', 'colemak'),
('Dvorak', 'dvorak'),
('Dvorak alternative international (no dead keys)', 'dvorak-alt-intl'),
('Dvorak international (with dead keys)', 'dvorak-intl'),
('International (AltGr dead keys)', 'altgr-intl'),
('International (with dead keys)', 'intl'),
('Layout toggle on multiply/divide key', 'olpc2'),
('Left handed Dvorak', 'dvorak-l'),
('Macintosh', 'mac'),
('Programmer Dvorak', 'dvp'),
('Right handed Dvorak', 'dvorak-r'),
('Russian phonetic', 'rus'),
('Serbo-Croatian', 'hbs'),
('With EuroSign on 5', 'euro')
],
'uz': [
('Crimean Tatar (Turkish Alt-Q)', 'crh_alt'),
('Crimean Tatar (Turkish F)', 'crh_f'),
('Crimean Tatar (Turkish Q)', 'crh'),
('Latin', 'latin')
]
}
def get_continents():
return [
'Africa',
'America',
'Asia',
'Australia',
'Europe',
'Others'
]
def get_countries_for_continent(continent):
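    # Continent keys in the layouts dict are lower-case; unknown continents
    # yield an empty result.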
    return layouts.get(continent.lower(), {})
def sorted_countries(countries):
return sorted(countries)
| KanoComputing/kano-settings | kano_settings/system/keyboard_layouts.py | Python | gpl-2.0 | 21,738 |
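# Read a raster file path from standard input and print its band count using
# GDAL (Python 2 syntax: raw_input / print). gdal.Open returns None on
# failure, so any resulting error is re-raised below as ValueError.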
try:
import gdal
file = raw_input()
ds = gdal.Open(file)
print ds.RasterCount
except Exception as exception:
raise ValueError(exception)
| NurymKenzh/GeoServer | GeoServer/Python/GetRasterBandsCount.py | Python | gpl-2.0 | 177 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from mutagen.mp4 import MP4, MP4Cover
from picard import config, log
from picard.coverart.image import TagCoverArtImage, CoverArtImageError
from picard.file import File
from picard.metadata import Metadata
from picard.util import encode_filename
class MP4File(File):
EXTENSIONS = [".m4a", ".m4b", ".m4p", ".m4v", ".mp4"]
NAME = "MPEG-4 Audio"
_File = MP4
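    # Maps from MP4 atom names to Picard tag names, grouped by value type;
    # the __r_* dicts below are the reverse mappings used when saving.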
__text_tags = {
"\xa9ART": "artist",
"\xa9nam": "title",
"\xa9alb": "album",
"\xa9wrt": "composer",
"aART": "albumartist",
"\xa9grp": "grouping",
"\xa9day": "date",
"\xa9gen": "genre",
"\xa9lyr": "lyrics",
"\xa9cmt": "comment:",
"\xa9too": "encodedby",
"cprt": "copyright",
"soal": "albumsort",
"soaa": "albumartistsort",
"soar": "artistsort",
"sonm": "titlesort",
"soco": "composersort",
"sosn": "showsort",
"tvsh": "show",
"purl": "podcasturl",
}
__r_text_tags = dict([(v, k) for k, v in __text_tags.items()])
__bool_tags = {
"pcst": "podcast",
"cpil": "compilation",
"pgap": "gapless",
}
__r_bool_tags = dict([(v, k) for k, v in __bool_tags.items()])
__int_tags = {
"tmpo": "bpm",
}
__r_int_tags = dict([(v, k) for k, v in __int_tags.items()])
__freeform_tags = {
"----:com.apple.iTunes:MusicBrainz Track Id": "musicbrainz_recordingid",
"----:com.apple.iTunes:MusicBrainz Artist Id": "musicbrainz_artistid",
"----:com.apple.iTunes:MusicBrainz Album Id": "musicbrainz_albumid",
"----:com.apple.iTunes:MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
"----:com.apple.iTunes:MusicIP PUID": "musicip_puid",
"----:com.apple.iTunes:MusicBrainz Album Status": "releasestatus",
"----:com.apple.iTunes:MusicBrainz Album Release Country": "releasecountry",
"----:com.apple.iTunes:MusicBrainz Album Type": "releasetype",
"----:com.apple.iTunes:MusicBrainz Disc Id": "musicbrainz_discid",
"----:com.apple.iTunes:MusicBrainz TRM Id": "musicbrainz_trmid",
"----:com.apple.iTunes:MusicBrainz Work Id": "musicbrainz_workid",
"----:com.apple.iTunes:MusicBrainz Release Group Id": "musicbrainz_releasegroupid",
"----:com.apple.iTunes:MusicBrainz Release Track Id": "musicbrainz_trackid",
"----:com.apple.iTunes:Acoustid Fingerprint": "acoustid_fingerprint",
"----:com.apple.iTunes:Acoustid Id": "acoustid_id",
"----:com.apple.iTunes:ASIN": "asin",
"----:com.apple.iTunes:BARCODE": "barcode",
"----:com.apple.iTunes:PRODUCER": "producer",
"----:com.apple.iTunes:LYRICIST": "lyricist",
"----:com.apple.iTunes:CONDUCTOR": "conductor",
"----:com.apple.iTunes:ENGINEER": "engineer",
"----:com.apple.iTunes:MIXER": "mixer",
"----:com.apple.iTunes:DJMIXER": "djmixer",
"----:com.apple.iTunes:REMIXER": "remixer",
"----:com.apple.iTunes:ISRC": "isrc",
"----:com.apple.iTunes:MEDIA": "media",
"----:com.apple.iTunes:LABEL": "label",
"----:com.apple.iTunes:LICENSE": "license",
"----:com.apple.iTunes:CATALOGNUMBER": "catalognumber",
"----:com.apple.iTunes:SUBTITLE": "subtitle",
"----:com.apple.iTunes:DISCSUBTITLE": "discsubtitle",
"----:com.apple.iTunes:MOOD": "mood",
"----:com.apple.iTunes:SCRIPT": "script",
"----:com.apple.iTunes:LANGUAGE": "language",
"----:com.apple.iTunes:ARTISTS": "artists",
"----:com.apple.iTunes:WORK": "work",
"----:com.apple.iTunes:initialkey": "key",
}
__r_freeform_tags = dict([(v, k) for k, v in __freeform_tags.items()])
__other_supported_tags = ("discnumber", "tracknumber",
"totaldiscs", "totaltracks")
def _load(self, filename):
log.debug("Loading file %r", filename)
file = MP4(encode_filename(filename))
tags = file.tags
if tags is None:
file.add_tags()
metadata = Metadata()
for name, values in tags.items():
if name in self.__text_tags:
for value in values:
metadata.add(self.__text_tags[name], value)
elif name in self.__bool_tags:
metadata.add(self.__bool_tags[name], values and '1' or '0')
elif name in self.__int_tags:
for value in values:
                    metadata.add(self.__int_tags[name], str(value))
elif name in self.__freeform_tags:
for value in values:
value = value.decode("utf-8", "replace").strip("\x00")
metadata.add(self.__freeform_tags[name], value)
elif name == "----:com.apple.iTunes:fingerprint":
for value in values:
value = value.decode("utf-8", "replace").strip("\x00")
if value.startswith("MusicMagic Fingerprint"):
metadata.add("musicip_fingerprint", value[22:])
elif name == "trkn":
metadata["tracknumber"] = string_(values[0][0])
metadata["totaltracks"] = string_(values[0][1])
elif name == "disk":
metadata["discnumber"] = string_(values[0][0])
metadata["totaldiscs"] = string_(values[0][1])
elif name == "covr":
for value in values:
if value.imageformat not in (value.FORMAT_JPEG,
value.FORMAT_PNG):
continue
try:
coverartimage = TagCoverArtImage(
file=filename,
tag=name,
data=value,
)
except CoverArtImageError as e:
log.error('Cannot load image from %r: %s' %
(filename, e))
else:
metadata.append_image(coverartimage)
self._info(metadata, file)
return metadata
def _save(self, filename, metadata):
log.debug("Saving file %r", filename)
        file = MP4(encode_filename(filename))
tags = file.tags
if tags is None:
file.add_tags()
if config.setting["clear_existing_tags"]:
tags.clear()
for name, values in metadata.rawitems():
if name.startswith('lyrics:'):
name = 'lyrics'
if name in self.__r_text_tags:
tags[self.__r_text_tags[name]] = values
elif name in self.__r_bool_tags:
tags[self.__r_bool_tags[name]] = (values[0] == '1')
elif name in self.__r_int_tags:
try:
tags[self.__r_int_tags[name]] = [int(value) for value in values]
except ValueError:
pass
elif name in self.__r_freeform_tags:
values = [v.encode("utf-8") for v in values]
tags[self.__r_freeform_tags[name]] = values
elif name == "musicip_fingerprint":
tags["----:com.apple.iTunes:fingerprint"] = [b"MusicMagic Fingerprint%s" % v.encode('ascii') for v in values]
if "tracknumber" in metadata:
if "totaltracks" in metadata:
tags["trkn"] = [(int(metadata["tracknumber"]),
int(metadata["totaltracks"]))]
else:
tags["trkn"] = [(int(metadata["tracknumber"]), 0)]
if "discnumber" in metadata:
if "totaldiscs" in metadata:
tags["disk"] = [(int(metadata["discnumber"]),
int(metadata["totaldiscs"]))]
else:
tags["disk"] = [(int(metadata["discnumber"]), 0)]
covr = []
for image in metadata.images_to_be_saved_to_tags:
if image.mimetype == "image/jpeg":
covr.append(MP4Cover(image.data, MP4Cover.FORMAT_JPEG))
elif image.mimetype == "image/png":
covr.append(MP4Cover(image.data, MP4Cover.FORMAT_PNG))
if covr:
tags["covr"] = covr
self._remove_deleted_tags(metadata, tags)
file.save()
def _remove_deleted_tags(self, metadata, tags):
"""Remove the tags from the file that were deleted in the UI"""
for tag in metadata.deleted_tags:
real_name = self._get_tag_name(tag)
if real_name and real_name in tags:
if tag not in ("totaltracks", "totaldiscs"):
del tags[real_name]
@classmethod
def supports_tag(cls, name):
return (name in cls.__r_text_tags
or name in cls.__r_bool_tags
or name in cls.__r_freeform_tags
or name in cls.__other_supported_tags
or name.startswith('lyrics:')
or name in ('~length', 'musicip_fingerprint'))
def _get_tag_name(self, name):
if name.startswith('lyrics:'):
return 'lyrics'
if name in self.__r_text_tags:
return self.__r_text_tags[name]
elif name in self.__r_bool_tags:
return self.__r_bool_tags[name]
elif name in self.__r_int_tags:
return self.__r_int_tags[name]
elif name in self.__r_freeform_tags:
return self.__r_freeform_tags[name]
elif name == "musicip_fingerprint":
return "----:com.apple.iTunes:fingerprint"
elif name in ("tracknumber", "totaltracks"):
return "trkn"
elif name in ("discnumber", "totaldiscs"):
return "disk"
else:
return None
def _info(self, metadata, file):
super()._info(metadata, file)
if hasattr(file.info, 'codec_description') and file.info.codec_description:
metadata['~format'] = "%s (%s)" % (metadata['~format'], file.info.codec_description)
| samj1912/picard | picard/formats/mp4.py | Python | gpl-2.0 | 10,938 |
#!/usr/bin/env python
#
# Copyright 2009 Mike Wakerly <[email protected]>
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
from pykeg.core import importhacks
from pykeg.core import kegbot
__doc__ = kegbot.__doc__
if __name__ == '__main__':
kegbot.KegbotCoreApp.BuildAndRun()
| Alwnikrotikz/kegbot | pykeg/src/pykeg/bin/kegbot_core.py | Python | gpl-2.0 | 980 |
__author__ = 'const' | constd/spicy-brew | spicybrew/__init__.py | Python | gpl-2.0 | 20 |
#!/usr/bin/python
import pcap
import dpkt
import socket
import struct
import time
srcip = '10.24.0.109'
dstip = '10.21.2.192'
srcport = 54321
dstport = None
def checksum(data):
    # 16-bit one's-complement Internet checksum over a byte string
    s = 0
    n = len(data) % 2
    for i in range(0, len(data)-n, 2):
        s += ord(data[i]) + (ord(data[i+1]) << 8)
    # odd length: fold in the final byte, unshifted
    if n:
        s += ord(data[-1])
    while (s >> 16):
        s = (s & 0xFFFF) + (s >> 16)
    s = ~s & 0xffff
    return s
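# Illustrative self-check (added; not invoked by the script). The expected values
# are worked out by hand from the algorithm above.
def _checksum_demo():
    # pairs fold little-endian: 0x0201 + 0x0403 = 0x0604 -> one's complement 0xF9FB
    assert checksum('\x01\x02\x03\x04') == 0xf9fb
    # odd length: the trailing byte is added unshifted
    assert checksum('\x01\x02\x03') == (~(0x0201 + 0x03)) & 0xffff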
def make_ip(proto, srcip, dstip, ident=54321):
saddr = socket.inet_aton(srcip)
daddr = socket.inet_aton(dstip)
ihl_ver = (4 << 4) | 5
return struct.pack('!BBHHHBBH4s4s',ihl_ver, 0, 0,
ident, 0, 255, proto, 0, saddr, daddr)
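# Illustrative sketch (added; not invoked): the header from make_ip() is a fixed
# 20 bytes with version/IHL packed as 0x45. Total length and checksum are left at
# zero; with IP_HDRINCL the kernel typically fills those in. Addresses are arbitrary.
def _ip_header_demo():
    hdr = make_ip(socket.IPPROTO_TCP, '10.0.0.1', '10.0.0.2')
    assert len(hdr) == 20
    assert ord(hdr[0]) == 0x45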
def make_tcp(srcport, dstport, options='', seq=0, ackseq=1,
urg=False, ack=True, psh=False, rst=False, syn=False, fin=False,
window=14600):
    if(options != ''):
        # data offset = header length (20 bytes + options) in 32-bit words
        leng = (len(options)+20) >> 2
        offset_res = (leng << 4) | 0
    else:
        offset_res = (5 << 4) | 0
flags = (fin | (syn << 1) | (rst << 2) |
(psh <<3) | (ack << 4) | (urg << 5))
check = 0
urg_ptr = 0
tcp_header = struct.pack('!HHLLBBHHH' , srcport, dstport, seq, ackseq,
offset_res, flags, window, check, urg_ptr)
source_address = socket.inet_aton(srcip)
dest_address = socket.inet_aton(dstip)
placeholder = 0
protocol = socket.IPPROTO_TCP
tcp_length = len(tcp_header) + len(options)
    # pseudo-header (src, dst, zero, protocol, TCP length) used only for the checksum
    pseudo = struct.pack('!4s4sBBH' , source_address , dest_address ,
                      placeholder , protocol , tcp_length)
    pseudo = pseudo + tcp_header + options
    tcp_check = checksum(pseudo)
tcp_header = struct.pack('!HHLLBBH' , srcport, dstport, seq, ackseq,
offset_res, flags, window) + struct.pack('H' , tcp_check) \
+ struct.pack('H' , urg_ptr) + options
return tcp_header
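# Illustrative sketch (added; not invoked): a bare header from make_tcp() is the
# minimal 20 bytes, and 8 bytes of options grow it to 28, with the data offset
# nibble tracking the change. Ports here are arbitrary.
def _tcp_header_demo():
    # bare header: 20 bytes, data offset nibble = 5
    assert len(make_tcp(54321, 80)) == 20
    # with 8 bytes of (NOP) options: 28 bytes, data offset nibble = 7
    assert len(make_tcp(54321, 80, '\x01' * 8)) == 28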
def make_options(mss=1460, sack_perm = 2):
mss_kind = 2
mss_len = 4
nop = 1
sack_perm_kind = 4
options = struct.pack('!BBHBBBB', mss_kind, mss_len, mss, nop, nop,
sack_perm_kind, sack_perm)
return options
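# Illustrative sketch (added; not invoked): with the defaults, make_options() emits
# exactly MSS (kind=2, len=4, value=1460), two NOPs, and SACK-permitted (kind=4, len=2).
def _options_demo():
    opts = make_options()
    assert [ord(b) for b in opts] == [2, 4, 5, 180, 1, 1, 4, 2]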
def make_sack_option(length, left1, right1, left2=0, right2=0):
nop = 1
sack_kind = 5
if (left2 == 0):
sack = struct.pack('!BBBBLL', nop, nop, sack_kind, length, left1, right1)
else:
sack = struct.pack('!BBBBLLLL', nop, nop, sack_kind, length, left1,
right1, left2, right2)
return sack
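# Illustrative sketch (added; not invoked): a single SACK block is NOP + NOP +
# kind=5 + len + two 32-bit edges, 12 bytes in all. The edge values are arbitrary.
def _sack_option_demo():
    blk = make_sack_option(10, 1000, 2000)
    assert len(blk) == 12
    assert [ord(b) for b in blk[:4]] == [1, 1, 5, 10]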
def make_ack(seq, ackseq):
ack = make_tcp(srcport, dstport, options, seq, ackseq)
return ack
def make_sack(seq, ackseq, sack):
sack = make_tcp(srcport, dstport, sack, seq, ackseq)
return sack
class pkt_info:
def __init__(self):
self.flag = None
self.seq = None
self.ack = None
self.leng = None
self.src_port = None
def handle_packet(pkt):
eth = dpkt.ethernet.Ethernet(pkt)
if eth.type != dpkt.ethernet.ETH_TYPE_IP:
return
ip = eth.data
if ip.p != dpkt.ip.IP_PROTO_TCP:
return
tcp = ip.data
data = tcp.data
src_port = tcp.sport
dst_port = tcp.dport
src_ip = socket.inet_ntoa(ip.src)
dst_ip = socket.inet_ntoa(ip.dst)
if dst_port == 54321:
p = pkt_info()
p.src_port = src_port
if tcp.flags & dpkt.tcp.TH_SYN:
p.flag = 'Y';
if tcp.flags & dpkt.tcp.TH_FIN:
p.flag = 'F'
if tcp.flags & dpkt.tcp.TH_RST:
p.flag = 'R'
p.seq = tcp.seq
p.ack = tcp.ack
p.leng = ip.len - ip.hl*4 - tcp.off*4
return p
return
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
s.bind((srcip, 54321))
ip = make_ip(socket.IPPROTO_TCP,srcip, dstip)
pkt_cnt = 0
seq_base = 0
last_seq = 0
last_ack = 0
retrans_state = 0
last_leng = 0
options = ''
cap = pcap.pcap()
for ts, pkt in cap:
pi = handle_packet(pkt)
if pi == None:
continue
dstport = pi.src_port
    dst = (dstip, dstport)
if(pi.flag == 'Y'):
syn_options = make_options()
seq_base = pi.seq
syn_ack = make_tcp(srcport, dstport, syn_options, seq_base, pi.seq+1, \
0, 1, 0, 0, 1, 0)
s.sendto(ip+syn_ack, dst)
last_ack = pi.seq+1
if(pi.leng > 0):
pkt_cnt += 1
seq_base = pi.ack
if(retrans_state == 1):
            # send a new ack for the incoming pkt
if(pi.seq == last_ack):
ack = make_ack(seq_base, pi.seq+pi.leng)
s.sendto(ip+ack, dst)
last_ack = pi.seq + pi.leng
else:
                # send a dup-ack
ack = make_ack(seq_base, last_ack)
s.sendto(ip+ack, dst)
elif(pi.seq < last_seq):
ack = make_ack(seq_base, pi.seq+pi.leng)
s.sendto(ip+ack, dst)
last_ack = pi.seq + pi.leng
retrans_state = 1
else:
if(pi.seq-seq_base < 14600):
ack = make_ack(seq_base, pi.seq+pi.leng)
s.sendto(ip+ack, dst)
last_ack = pi.seq + pi.leng
elif(pi.seq - seq_base < 20441):
sack_block = make_sack_option(10, 14600+seq_base, 16060+seq_base)
sack = make_sack(seq_base, last_ack, sack_block)
s.sendto(ip+sack, dst)
last_seq = pi.seq
last_leng = pi.leng
if(pi.flag == 'F'):
rst = make_tcp(srcport, dstport, options, seq_base, pi.seq+1, 0, 0, 0, 1 )
s.sendto(ip+rst, dst)
        print 'resetting'
| dengqian/tapo_comp | raw_socket/sack.py | Python | gpl-2.0 | 5,609 |
#!/usr/bin/python3
import os
class TextTree(object):
def tostring(self, object):
strings = self.tostrings(object)
return ''.join(strings)
def tostrings(self, object):
strings = [object.name + '\n']
def add_item(item, ps1, ps2):
if isinstance(item, str):
strings.append(ps1 + item + '\n')
else:
				substrings = self.tostrings(item)  # recurse
strings.append(ps1 + substrings.pop(0))
strings.extend([ps2 + s for s in substrings])
items = list(object.items())
if items:
for i in range(len(items) - 1):
add_item(items[i], '|-- ', '| ')
add_item(items[-1], '`-- ', ' ')
return strings
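# Illustrative sketch (added; not part of the original tool): a minimal stand-in
# object with the same .name/.items() interface as ModuleFile, showing the tree
# rendering produced by TextTree above.
class _FakeModule(object):
	def __init__(self, name, items):
		self.name = name
		self._items = items
	def items(self):
		return list(self._items)
def _texttree_demo():
	# renders:
	#   pkg
	#   |-- ClassA
	#   `-- sub
	#       `-- ClassB
	tree = _FakeModule('pkg', ['ClassA', _FakeModule('sub', ['ClassB'])])
	return TextTree().tostring(tree)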
class ModuleFile(object):
def __init__(self, file):
assert os.path.isfile(file), 'Could not find file: %s' % file
self.file = file
self.name = os.path.basename(file)[:-3]
self.classes = []
for line in open(self.file):
line = line.strip()
if line.startswith('class') and line.endswith(':'):
self.classes.append(line[5:-1].strip())
def items(self):
return self.classes[:]
class ModuleDir(ModuleFile):
def __init__(self, dir):
assert os.path.isdir(dir), 'Could not find dir: %s' % dir
ModuleFile.__init__(self, dir + '/__init__.py')
self.dir = dir
self.name = os.path.basename(dir)
self.modules = []
paths = [dir + '/' + p for p in os.listdir(dir) if not p.startswith('_')]
for file in [f for f in paths if f.endswith('.py')]:
self.modules.append(ModuleFile(file))
for subdir in [d for d in paths if os.path.isdir(d)]:
self.modules.append(ModuleDir(subdir))
self.modules.sort(key=lambda m: m.name)
def items(self):
items = ModuleFile.items(self)
items.extend(self.modules)
return items
if __name__ == '__main__':
dir = ModuleDir('./zim')
print(TextTree().tostring(dir))
| jaap-karssenberg/zim-desktop-wiki | tools/class_tree.py | Python | gpl-2.0 | 1,774 |
uiStrings = {'Random Access':'~/HM-16.2/cfg/encoder_randomaccess_main.cfg',
'Low Delay':'~/HM-16.2/cfg/encoder_lowdelay_main.cfg',
'BasketballPass':'~/HM-16.2/cfg/per-sequence/BasketballPass.cfg',
'BQMall':'~/HM-16.2/cfg/per-sequence/BQMall.cfg',
'8KB':'8192',
'16KB':'16384',
'32KB':'32768',
'4MB':'4194304',
'8MB':'8388608',
'16MB':'16777216'}
| anamativi/profilingCallsCacheHM | UI/dic.py | Python | gpl-2.0 | 346 |
from Components.Sources.Source import Source
from Components.Converter import ServiceName
from Components.config import config
from Screens.InfoBar import InfoBar, MoviePlayer
from enigma import eServiceReference, iPlayableServicePtr
class SwitchService(Source):
def __init__(self, session):
Source.__init__(self)
self.session = session
self.info = None
self.res = ( False, _("Obligatory parameter sRef is missing") )
def handleCommand(self, cmd):
self.res = self.switchService(cmd)
def switchService(self, cmd):
print "[SwitchService] ref=%s" % cmd["sRef"]
if config.plugins.Webinterface.allowzapping.value:
from Screens.Standby import inStandby
if inStandby == None:
if cmd["sRef"] != None:
pc = config.ParentalControl.configured.value
if pc:
config.ParentalControl.configured.value = False
eref = eServiceReference(cmd["sRef"])
if cmd["title"] is not None:
eref.setName(cmd["title"])
isRec = eref.getPath()
isRec = isRec and isRec.startswith("/")
if not isRec:
# if this is not a recording and the movie player is open, close it
if isinstance(self.session.current_dialog, MoviePlayer):
self.session.current_dialog.lastservice = eref
self.session.current_dialog.close()
self.session.nav.playService(eref)
elif isRec:
# if this is a recording and the infobar is shown, open the movie player
if isinstance(self.session.current_dialog, InfoBar):
self.session.open(MoviePlayer, eref)
# otherwise just play it with no regard for the context
else:
self.session.nav.playService(eref)
if pc:
config.ParentalControl.configured.value = pc
name = cmd["sRef"]
if cmd["title"] is None:
service = self.session.nav.getCurrentService()
info = None
if isinstance(service, iPlayableServicePtr):
info = service and service.info()
ref = None
if info != None:
name = ref and info.getName(ref)
if name is None:
name = info.getName()
								name = name.replace('\xc2\x86', '').replace('\xc2\x87', '')
elif eref.getName() != "":
name = eref.getName()
return ( True, _("Active service is now '%s'") %name )
else:
return ( False, _("Obligatory parameter 'sRef' is missing") )
else:
return ( False, _("Cannot zap while device is in Standby") )
else:
return ( False, _("Zapping is disabled in WebInterface Configuration") )
result = property(lambda self: self.res)
| carlo0815/enigma2-plugins | webinterface/src/WebComponents/Sources/SwitchService.py | Python | gpl-2.0 | 2,511 |
"""
Encoding and encryption routines of ASCII strings to roman numerals
with some randomness thrown in.
Copyright (C) Anand B Pillai 2013.
LICENSE: GPL v2.
"""
import sys
import math
import random
import itertools
import operator
import uuid
numeral_map = zip(
(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
)
def int_to_roman(i):
result = []
for integer, numeral in numeral_map:
count = int(i / integer)
result.append(numeral * count)
i -= integer * count
return ''.join(result)
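# Illustrative spot checks (added; not invoked). These values follow directly from
# the numeral_map above.
def _int_to_roman_demo():
    assert int_to_roman(4) == 'IV'
    assert int_to_roman(14) == 'XIV'
    assert int_to_roman(1987) == 'MCMLXXXVII'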
def encode(instring, group=4):
""" Convert input string to roman numerals encoding """
random.seed(uuid.uuid4().hex)
bin_digits = (bin(ord(x)) for x in instring)
s = ''.join(x.zfill(8) for x in (y.replace('0b','') for y in bin_digits))
nums = [x for x in [s[i:i+group] for i in range(0, len(s), group)]]
romans = [int_to_roman(int(x, 2)) for x in nums]
roman_lens= ''.join(str(len(x)) for x in romans)
posns = sorted(random.sample(range(len(''.join(romans))), len(roman_lens)))
[romans.insert(x, roman_lens[i]) for i,x in enumerate(posns)]
output = ''.join(romans)
return output
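# Hypothetical inverse of encode(), added purely to illustrate the format: the
# interleaved length digits ('0'-'4') are distinguishable from the numeral letters,
# so they can be pulled back out in order and used to re-split the numeral stream.
def _roman_to_int(s):
    # brute-force inverse of int_to_roman for the 4-bit values used by encode()
    for i in range(16):
        if int_to_roman(i) == s:
            return i
    raise ValueError("not a recognized numeral: %r" % s)
def decode(encoded, group=4):
    lens = [int(c) for c in encoded if c.isdigit()]
    romans = ''.join(c for c in encoded if not c.isdigit())
    bits, pos = [], 0
    for l in lens:
        bits.append(bin(_roman_to_int(romans[pos:pos+l]))[2:].zfill(group))
        pos += l
    allbits = ''.join(bits)
    return ''.join(chr(int(allbits[i:i+8], 2)) for i in range(0, len(allbits), 8))
# e.g. decode(encode('hello')) == 'hello' for the default group size of 4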
def encrypt(instring, passwd):
""" Encrypt input string to roman numerals with a key """
random.seed(uuid.uuid4().hex)
bin_digits = (bin(ord(x)) for x in instring)
s = ''.join(x.zfill(8) for x in (y.replace('0b','') for y in bin_digits))
pass_digits = (bin(ord(x)) for x in passwd)
spass = ''.join(x.zfill(8) for x in (y.replace('0b','') for y in pass_digits))
    seed = list(str(int(spass, 2)))  # NOTE: computed but never used
spass_p = (spass*int(math.ceil(len(s)*1.0/len(spass))))[:len(s)]
str_mod = ''.join(str(i) for i in itertools.imap(operator.xor,
map(lambda x: int(x), list(s)),
map(lambda x: int(x), list(spass_p))))
nums = [x for x in [str_mod[i:i+4] for i in range(0, len(str_mod), 4)]]
romans = [int_to_roman(int(x, 2)) for x in nums if x]
roman_lens= ''.join(str(len(x)) for x in romans)
a, b = len(''.join(romans)) + len(roman_lens), len(roman_lens)
posns = sorted(random.sample(range(a), b))
[romans.insert(x, roman_lens[i]) for i,x in enumerate(posns)]
enc = ''.join(romans)
return enc
if __name__ == "__main__":
instring = raw_input("Enter string: ").strip()
passwd = raw_input("Enter password: ").strip()
print encrypt(instring, passwd)
| pythonhacker/funcode | encode_roman.py | Python | gpl-2.0 | 2,584 |
#Copyright (C) 2002-2015 The Board of Regents of the University of Wisconsin System
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""This module implements a map of keywords that have been applied to multiple Episodes in a Series"""
__author__ = "David K. Woods <[email protected]>"
DEBUG = False
if DEBUG:
print "LibraryMap DEBUG is ON!!"
# import Python's os and sys modules
import os, sys
# import Python's platform module
import platform
# import Python's string module
import string
# load wxPython for GUI
import wx
# load the GraphicsControl
import GraphicsControlClass
# Load the Printout Class
from KeywordMapPrintoutClass import MyPrintout
# Import Transana's Database Interface
import DBInterface
# Import Transana's Dialogs
import Dialogs
# Import Transana's Filter Dialog
import FilterDialog
# import Transana's Keyword Object
import KeywordObject
# import Transana's Globals
import TransanaGlobal
# Import Transana's Images
import TransanaImages
# import Transana Miscellaneous functions
import Misc
# Declare Control IDs
# Menu Item and Toolbar Item for File > Filter
M_FILE_FILTER = wx.NewId()
T_FILE_FILTER = wx.NewId()
# Menu Item and Toolbar Item for File > Save As
M_FILE_SAVEAS = wx.NewId()
T_FILE_SAVEAS = wx.NewId()
# Menu Item and Toolbar Item for File > Printer Setup
M_FILE_PRINTSETUP = wx.NewId()
T_FILE_PRINTSETUP = wx.NewId()
# Menu Item and Toolbar Item for File > Print Preview
M_FILE_PRINTPREVIEW = wx.NewId()
T_FILE_PRINTPREVIEW = wx.NewId()
# Menu Item and Toolbar Item for File > Print
M_FILE_PRINT = wx.NewId()
T_FILE_PRINT = wx.NewId()
# Menu Item and Toolbar Item for File > Exit
M_FILE_EXIT = wx.NewId()
T_FILE_EXIT = wx.NewId()
# Menu Item and Toolbar Item for Help > Help
M_HELP_HELP = wx.NewId()
T_HELP_HELP = wx.NewId()
# Series List Combo Box
ID_SERIESLIST = wx.NewId()
# Episode List Combo Box
ID_EPISODELIST = wx.NewId()
class LibraryMap(wx.Frame):
""" This is the main class for the Series Map application. """
def __init__(self, parent, title, seriesNum, seriesName, reportType, controlObject=None):
# reportType 1 is the Sequence Mode, showing relative position of keywords in the Episodes
# reportType 2 is the Bar Graph mode, showing a bar graph of total time for each keyword
# reportType 3 is the Percentage mode, showing percentage of total Episode length for each keyword
# Set the Cursor to the Hourglass while the report is assembled
TransanaGlobal.menuWindow.SetCursor(wx.StockCursor(wx.CURSOR_WAIT))
# It's always important to remember your ancestors.
self.parent = parent
# Remember the title
self.title = title
# Initialize the Report Number
self.reportNumber = 0
# Remember the Report Type
self.reportType = reportType
# Let's remember the Control Object, if one is passed in
self.ControlObject = controlObject
# If a Control Object has been passed in ...
if self.ControlObject != None:
# ... register this report with the Control Object (which adds it to the Windows Menu)
self.ControlObject.AddReportWindow(self)
# Create a connection to the database
DBConn = DBInterface.get_db()
# Create a cursor and execute the appropriate query
self.DBCursor = DBConn.cursor()
# Determine the screen size for setting the initial dialog size
rect = wx.Display(TransanaGlobal.configData.primaryScreen).GetClientArea() # wx.ClientDisplayRect()
width = rect[2] * .80
height = rect[3] * .80
# Create the basic Frame structure with a white background
self.frame = wx.Frame.__init__(self, parent, -1, title, pos=(10, 10), size=wx.Size(width, height), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL | wx.NO_FULL_REPAINT_ON_RESIZE)
self.SetBackgroundColour(wx.WHITE)
# Set the icon
transanaIcon = wx.Icon(os.path.join(TransanaGlobal.programDir, "images", "Transana.ico"), wx.BITMAP_TYPE_ICO)
self.SetIcon(transanaIcon)
# Initialize Media Length to 0
self.MediaLength = 0
# Initialize all the data Lists to empty
self.episodeList = []
self.filteredEpisodeList = []
self.clipList = []
self.clipFilterList = []
self.snapshotList = []
self.snapshotFilterList = []
self.unfilteredKeywordList = []
self.filteredKeywordList = []
# To be able to show only parts of an Episode Time Line, we need variables for the time boundaries.
self.startTime = 0
self.endTime = 0
self.keywordClipList = {}
self.configName = ''
# Initialize variables required to avoid crashes when the visualization has been cleared
self.graphicindent = 0
self.Bounds = [1, 1, 1, 1]
# Create a dictionary of the colors for each keyword.
self.keywordColors = {'lastColor' : -1}
# Get the Configuration values for the Series Map Options
self.barHeight = TransanaGlobal.configData.seriesMapBarHeight
self.whitespaceHeight = TransanaGlobal.configData.seriesMapWhitespace
self.hGridLines = TransanaGlobal.configData.seriesMapHorizontalGridLines
self.vGridLines = TransanaGlobal.configData.seriesMapVerticalGridLines
self.singleLineDisplay = TransanaGlobal.configData.singleLineDisplay
self.showLegend = TransanaGlobal.configData.showLegend
# We default to Color Output. When this was configurable, if a new Map was
# created in B & W, the colors never worked right afterwards.
self.colorOutput = True
# Get the number of lines per page for multi-page reports
self.linesPerPage = 66
# If we have a Series Keyword Sequence Map in multi-line mode ...
if (self.reportType == 1) and (not self.singleLineDisplay):
# ... initialize the Episode Name Keyword Lookup Table here.
self.epNameKWGKWLookup = {}
# Initialize the Episode Counter, used for vertical placement.
            self.episodeCount = 0
# We need to be able to look up Episode Lengths for the Bar Graph. Let's remember them.
self.episodeLengths = {}
# Remember the appropriate Episode information
self.seriesNum = seriesNum
self.seriesName = seriesName
# indicate that we're not working from a Clip. (The Series Maps are never Clip-based.)
self.clipNum = None
# You can't have a separate menu on the Mac, so we'll use a Toolbar
self.toolBar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_TEXT)
self.toolBar.AddTool(T_FILE_FILTER, TransanaImages.ArtProv_LISTVIEW.GetBitmap(), shortHelpString=_("Filter"))
self.toolBar.AddTool(T_FILE_SAVEAS, TransanaImages.SaveJPG16.GetBitmap(), shortHelpString=_('Save As'))
self.toolBar.AddTool(T_FILE_PRINTSETUP, TransanaImages.PrintSetup.GetBitmap(), shortHelpString=_('Set up Page'))
# Disable Print Setup for Right-To-Left languages
# if (TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft):
# self.toolBar.EnableTool(T_FILE_PRINTSETUP, False)
self.toolBar.AddTool(T_FILE_PRINTPREVIEW, TransanaImages.PrintPreview.GetBitmap(), shortHelpString=_('Print Preview'))
# Disable Print Preview on the PPC Mac and for Right-To-Left languages
if (platform.processor() == 'powerpc') or (TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft):
self.toolBar.EnableTool(T_FILE_PRINTPREVIEW, False)
self.toolBar.AddTool(T_FILE_PRINT, TransanaImages.Print.GetBitmap(), shortHelpString=_('Print'))
# Disable Print Setup for Right-To-Left languages
# if (TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft):
# self.toolBar.EnableTool(T_FILE_PRINT, False)
# create a bitmap button for the Move Down button
self.toolBar.AddTool(T_HELP_HELP, TransanaImages.ArtProv_HELP.GetBitmap(), shortHelpString=_("Help"))
self.toolBar.AddTool(T_FILE_EXIT, TransanaImages.Exit.GetBitmap(), shortHelpString=_('Exit'))
self.toolBar.Realize()
# Let's go ahead and keep the menu for non-Mac platforms
if not '__WXMAC__' in wx.PlatformInfo:
# Add a Menu Bar
menuBar = wx.MenuBar() # Create the Menu Bar
self.menuFile = wx.Menu() # Create the File Menu
self.menuFile.Append(M_FILE_FILTER, _("&Filter"), _("Filter report contents")) # Add "Filter" to File Menu
self.menuFile.Append(M_FILE_SAVEAS, _("Save &As"), _("Save image in JPEG format")) # Add "Save As" to File Menu
self.menuFile.Enable(M_FILE_SAVEAS, False)
self.menuFile.Append(M_FILE_PRINTSETUP, _("Page Setup"), _("Set up Page")) # Add "Printer Setup" to the File Menu
# Disable Print Setup for Right-To-Left languages
# if (TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft):
# self.menuFile.Enable(M_FILE_PRINTSETUP, False)
self.menuFile.Append(M_FILE_PRINTPREVIEW, _("Print Preview"), _("Preview your printed output")) # Add "Print Preview" to the File Menu
self.menuFile.Enable(M_FILE_PRINTPREVIEW, False)
self.menuFile.Append(M_FILE_PRINT, _("&Print"), _("Send your output to the Printer")) # Add "Print" to the File Menu
self.menuFile.Enable(M_FILE_PRINT, False)
self.menuFile.Append(M_FILE_EXIT, _("E&xit"), _("Exit the Series Map program")) # Add "Exit" to the File Menu
menuBar.Append(self.menuFile, _('&File')) # Add the File Menu to the Menu Bar
self.menuHelp = wx.Menu()
self.menuHelp.Append(M_HELP_HELP, _("&Help"), _("Help"))
menuBar.Append(self.menuHelp, _("&Help"))
self.SetMenuBar(menuBar) # Connect the Menu Bar to the Frame
# Link menu items and toolbar buttons to the appropriate methods
wx.EVT_MENU(self, M_FILE_FILTER, self.OnFilter) # Attach File > Filter to a method
wx.EVT_MENU(self, T_FILE_FILTER, self.OnFilter) # Attach Toolbar Filter to a method
wx.EVT_MENU(self, M_FILE_SAVEAS, self.OnSaveAs) # Attach File > Save As to a method
wx.EVT_MENU(self, T_FILE_SAVEAS, self.OnSaveAs) # Attach Toolbar Save As to a method
wx.EVT_MENU(self, M_FILE_PRINTSETUP, self.OnPrintSetup) # Attach File > Print Setup to a method
wx.EVT_MENU(self, T_FILE_PRINTSETUP, self.OnPrintSetup) # Attach Toolbar Print Setup to a method
wx.EVT_MENU(self, M_FILE_PRINTPREVIEW, self.OnPrintPreview) # Attach File > Print Preview to a method
wx.EVT_MENU(self, T_FILE_PRINTPREVIEW, self.OnPrintPreview) # Attach Toolbar Print Preview to a method
wx.EVT_MENU(self, M_FILE_PRINT, self.OnPrint) # Attach File > Print to a method
wx.EVT_MENU(self, T_FILE_PRINT, self.OnPrint) # Attach Toolbar Print to a method
wx.EVT_MENU(self, M_FILE_EXIT, self.CloseWindow) # Attach CloseWindow to File > Exit
wx.EVT_MENU(self, T_FILE_EXIT, self.CloseWindow) # Attach CloseWindow to Toolbar Exit
wx.EVT_MENU(self, M_HELP_HELP, self.OnHelp)
wx.EVT_MENU(self, T_HELP_HELP, self.OnHelp)
# Bind the form's EVT_CLOSE method
self.Bind(wx.EVT_CLOSE, self.OnClose)
# Determine the window boundaries
(w, h) = self.GetClientSizeTuple()
self.Bounds = (5, 5, w - 10, h - 25)
# Create the Graphic Area using the GraphicControlClass
# NOTE: EVT_LEFT_DOWN, EVT_LEFT_UP, and EVT_RIGHT_UP are caught in GraphicsControlClass and are passed to this routine's
# OnLeftDown and OnLeftUp (for both left and right) methods because of the "passMouseEvents" paramter
self.graphic = GraphicsControlClass.GraphicsControl(self, -1, wx.Point(self.Bounds[0], self.Bounds[1]),
(self.Bounds[2] - self.Bounds[0], self.Bounds[3] - self.Bounds[1]),
(self.Bounds[2] - self.Bounds[0], self.Bounds[3] - self.Bounds[1]),
passMouseEvents=True)
# Add a Status Bar
self.CreateStatusBar()
# Attach the Resize Event
wx.EVT_SIZE(self, self.OnSize)
# We'll detect mouse movement in the GraphicsControlClass from out here, as
# the KeywordMap object is the object that knows what the data is on the graphic.
self.graphic.Bind(wx.EVT_MOTION, self.OnMouseMotion)
# Prepare objects for use in Printing
self.printData = wx.PrintData()
self.printData.SetPaperId(wx.PAPER_LETTER)
# Center on the screen
TransanaGlobal.CenterOnPrimary(self)
# Show the Frame
self.Show(True)
# Populate the drawing
self.ProcessSeries()
self.DrawGraph()
# Trigger the load of the Default Filter, if one exists. An event of None signals we're loading the
        # Default config, and the OnFilter method will handle drawing the graph!
self.OnFilter(None)
# Restore Cursor to Arrow
TransanaGlobal.menuWindow.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
# Define the Method that implements Filter
def OnFilter(self, event):
""" Implement the Filter Dialog call for Series Maps """
if event == None:
loadDefault = True
else:
loadDefault = False
# Set the Cursor to the Hourglass while the report is assembled
TransanaGlobal.menuWindow.SetCursor(wx.StockCursor(wx.CURSOR_WAIT))
# Set up parameters for creating the Filter Dialog. Series Map Filter requires Series Number (as episodeNum) for the Config Save.
title = string.join([self.title, unicode(_("Filter Dialog"), 'utf8')], ' ')
# See if the Series Map wants the Clip Filter
clipFilter = (len(self.clipFilterList) > 0)
# See if there are Snapshots in the Snapshot Filter List
snapshotFilter = (len(self.snapshotFilterList) > 0)
# See if there are Keywords in the Filter List
keywordFilter = (len(self.unfilteredKeywordList) > 0)
# Series Map wants Keyword Color customization if it has keywords.
keywordColors = (len(self.unfilteredKeywordList) > 0)
# We want the Options tab
options = True
# reportType=5 indicates it is for a Series Sequence Map.
# reportType=6 indicates it is for a Series Bar Graph.
# reportType=7 indicates it is for a Series Percentage Map
reportType = self.reportType + 4
# The Series Keyword Sequence Map has all the usual parameters plus Time Range data and the Single Line Display option
if self.reportType in [1]:
# Create a Filter Dialog, passing all the necessary parameters.
dlgFilter = FilterDialog.FilterDialog(self,
-1,
title,
reportType=reportType,
loadDefault=loadDefault,
configName=self.configName,
reportScope=self.seriesNum,
episodeFilter=True,
episodeSort=True,
clipFilter=clipFilter,
snapshotFilter=snapshotFilter,
keywordFilter=keywordFilter,
keywordSort=True,
keywordColor=keywordColors,
options=options,
startTime=self.startTime,
endTime=self.endTime,
barHeight=self.barHeight,
whitespace=self.whitespaceHeight,
hGridLines=self.hGridLines,
vGridLines=self.vGridLines,
singleLineDisplay=self.singleLineDisplay,
showLegend=self.showLegend,
colorOutput=self.colorOutput)
elif self.reportType in [2, 3]:
# Create a Filter Dialog, passing all the necessary parameters.
dlgFilter = FilterDialog.FilterDialog(self,
-1,
title,
reportType=reportType,
loadDefault=loadDefault,
configName=self.configName,
reportScope=self.seriesNum,
episodeFilter=True,
episodeSort=True,
clipFilter=clipFilter,
snapshotFilter=snapshotFilter,
keywordFilter=keywordFilter,
keywordSort=True,
keywordColor=keywordColors,
options=options,
barHeight=self.barHeight,
whitespace=self.whitespaceHeight,
hGridLines=self.hGridLines,
vGridLines=self.vGridLines,
showLegend=self.showLegend,
colorOutput=self.colorOutput)
# Sort the Episode List
self.episodeList.sort()
# Inform the Filter Dialog of the Episodes
dlgFilter.SetEpisodes(self.episodeList)
# If we requested the Clip Filter ...
if clipFilter:
# We want the Clips sorted in Clip ID order in the FilterDialog. We handle that out here, as the Filter Dialog
# has to deal with manual clip ordering in some instances, though not here, so it can't deal with this.
self.clipFilterList.sort()
# Inform the Filter Dialog of the Clips
dlgFilter.SetClips(self.clipFilterList)
# if there are Snapshots ...
if snapshotFilter:
# ... populate the Filter Dialog with Snapshots
dlgFilter.SetSnapshots(self.snapshotFilterList)
# Keyword Colors must be specified before Keywords! So if we want Keyword Colors, ...
if keywordColors:
# If we're in grayscale mode, the colors are probably mangled, so let's fix them before
# we send them to the Filter dialog.
if not self.colorOutput:
# A shallow copy of the dictionary object should get the job done.
self.keywordColors = self.rememberedKeywordColors.copy()
# Inform the Filter Dialog of the colors used for each Keyword
dlgFilter.SetKeywordColors(self.keywordColors)
if keywordFilter:
# Inform the Filter Dialog of the Keywords
dlgFilter.SetKeywords(self.unfilteredKeywordList)
# Set the Cursor to the Arrow now that the filter dialog is assembled
TransanaGlobal.menuWindow.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
# Create a dummy error message to get our while loop started.
errorMsg = 'Start Loop'
# Keep trying as long as there is an error message
while errorMsg != '':
# Clear the last (or dummy) error message.
errorMsg = ''
if loadDefault:
# ... get the list of existing configuration names.
profileList = dlgFilter.GetConfigNames()
# If (translated) "Default" is in the list ...
# (NOTE that the default config name is stored in English, but gets translated by GetConfigNames!)
if unicode(_('Default'), 'utf8') in profileList:
# ... then signal that we need to load the config.
dlgFilter.OnFileOpen(None)
# Fake that we asked the user for a filter name and got an OK
result = wx.ID_OK
# If we're loading a Default profile, but there's none in the list, we can skip
# the rest of the Filter method by pretending we got a Cancel from the user.
else:
result = wx.ID_CANCEL
# If we're not loading a Default profile ...
else:
# ... we need to show the Filter Dialog here.
result = dlgFilter.ShowModal()
# Show the Filter Dialog and see if the user clicks OK
if result == wx.ID_OK:
# Get the Episode Data from the Filter Dialog
self.episodeList = dlgFilter.GetEpisodes()
# If we requested Clip Filtering ...
if clipFilter:
# ... then get the filtered clip data
self.clipFilterList = dlgFilter.GetClips()
if snapshotFilter:
self.snapshotFilterList = dlgFilter.GetSnapshots()
# Get the complete list of keywords from the Filter Dialog. We'll deduce the filter info in a moment.
# (This preserves the "check" info for later reuse.)
self.unfilteredKeywordList = dlgFilter.GetKeywords()
# If we requested Keyword Color data ...
if keywordColors:
# ... then get the keyword color data from the Filter Dialog
self.keywordColors = dlgFilter.GetKeywordColors()
# Reset the Filtered Keyword List
self.filteredKeywordList = []
                # Iterate through the entire Keyword List ...
for (kwg, kw, checked) in self.unfilteredKeywordList:
# ... and determine which keywords were checked.
if checked:
# Only the checked ones go into the filtered keyword list.
self.filteredKeywordList.append((kwg, kw))
# If we had an Options Tab, extract that data.
if options:
# Only the Series Keyword Sequence Map needs the Time Range options.
if self.reportType in [1]:
# Let's get the Time Range data.
# Start Time must be 0 or greater. Otherwise, don't change it!
if Misc.time_in_str_to_ms(dlgFilter.GetStartTime()) >= 0:
self.startTime = Misc.time_in_str_to_ms(dlgFilter.GetStartTime())
else:
errorMsg += _("Illegal value for Start Time.\n")
# If the Start Time is greater than the media length, reset it to 0.
if self.startTime >= self.MediaLength:
dlgFilter.startTime.SetValue(Misc.time_in_ms_to_str(0))
errorMsg += _("Illegal value for Start Time.\n")
# End Time must be at least 0. Otherwise, don't change it!
if (Misc.time_in_str_to_ms(dlgFilter.GetEndTime()) >= 0):
self.endTime = Misc.time_in_str_to_ms(dlgFilter.GetEndTime())
else:
errorMsg += _("Illegal value for End Time.\n")
# If the end time is 0 or greater than the media length, set it to the media length.
if (self.endTime == 0) or (self.endTime > self.MediaLength):
self.endTime = self.MediaLength
# Start time cannot equal end time (but this check must come after setting endtime == 0 to MediaLength)
if self.startTime == self.endTime:
errorMsg += _("Start Time and End Time must be different.")
# We need to alter the time values to prevent "division by zero" errors while the Filter Dialog is not modal.
self.startTime = 0
self.endTime = self.MediaLength
# If the Start Time is greater than the End Time, swap them.
if (self.endTime < self.startTime):
temp = self.startTime
self.startTime = self.endTime
self.endTime = temp
# Get the Bar Height and Whitespace Height for all versions of the Series Map
self.barHeight = dlgFilter.GetBarHeight()
self.whitespaceHeight = dlgFilter.GetWhitespace()
# we need to store the Bar Height and Whitespace values in the Configuration.
TransanaGlobal.configData.seriesMapBarHeight = self.barHeight
TransanaGlobal.configData.seriesMapWhitespace = self.whitespaceHeight
# Get the Grid Line data from the form
self.hGridLines = dlgFilter.GetHGridLines()
self.vGridLines = dlgFilter.GetVGridLines()
# Store the Grid Line data in the Configuration
TransanaGlobal.configData.seriesMapHorizontalGridLines = self.hGridLines
TransanaGlobal.configData.seriesMapVerticalGridLines = self.vGridLines
# Only the Series Keyword Sequence Graph needs the Single Line Display Option data.
if self.reportType in [1]:
# Get the singleLineDisplay value from the dialog
self.singleLineDisplay = dlgFilter.GetSingleLineDisplay()
# Remember the value.
TransanaGlobal.configData.singleLineDisplay = self.singleLineDisplay
# Get the showLegend value from the dialog
self.showLegend = dlgFilter.GetShowLegend()
# Remember the value. (This doesn't get saved.)
TransanaGlobal.configData.showLegend = self.showLegend
# Detect if the colorOutput value is actually changing.
if (self.colorOutput != dlgFilter.GetColorOutput()):
# If we're going from color to grayscale ...
if self.colorOutput:
# ... remember what the colors were before they get all screwed up by displaying
# the graphic without them.
self.rememberedKeywordColors = {}
self.rememberedKeywordColors = self.keywordColors.copy()
# Get the colorOutput value from the dialog
self.colorOutput = dlgFilter.GetColorOutput()
if errorMsg != '':
errorDlg = Dialogs.ErrorDialog(self, errorMsg)
errorDlg.ShowModal()
errorDlg.Destroy()
# Remember the configuration name for later reuse
self.configName = dlgFilter.configName
# Destroy the Filter Dialog. We're done with it.
dlgFilter.Destroy()
# Now we can draw the graph.
self.DrawGraph()
# Define the Method that implements Save As
def OnSaveAs(self, event):
self.graphic.SaveAs()
# Define the Method that implements Printer Setup
def OnPrintSetup(self, event):
# Let's use PAGE Setup here ('cause you can do Printer Setup from Page Setup.) It's a better system
# that allows Landscape on Mac.
pageSetupDialogData = wx.PageSetupDialogData(self.printData)
pageSetupDialogData.CalculatePaperSizeFromId()
pageDialog = wx.PageSetupDialog(self, pageSetupDialogData)
pageDialog.ShowModal()
self.printData = wx.PrintData(pageDialog.GetPageSetupData().GetPrintData())
pageDialog.Destroy()
# Define the Method that implements Print Preview
def OnPrintPreview(self, event):
lineHeight = self.CalcY(1) - self.CalcY(0)
printout = MyPrintout(self.title, self.graphic, multiPage=True, lineStart=self.CalcY(0) - int(lineHeight / 2.0), lineHeight=lineHeight)
printout2 = MyPrintout(self.title, self.graphic, multiPage=True, lineStart=self.CalcY(0) - int(lineHeight / 2.0), lineHeight=lineHeight)
self.preview = wx.PrintPreview(printout, printout2, self.printData)
if not self.preview.Ok():
self.SetStatusText(_("Print Preview Problem"))
return
theWidth = max(wx.Display(TransanaGlobal.configData.primaryScreen).GetClientArea()[2] - 180, 760) # wx.ClientDisplayRect()
theHeight = max(wx.Display(TransanaGlobal.configData.primaryScreen).GetClientArea()[3] - 200, 560) # wx.ClientDisplayRect()
frame2 = wx.PreviewFrame(self.preview, self, _("Print Preview"), size=(theWidth, theHeight))
frame2.Centre()
frame2.Initialize()
frame2.Show(True)
# Define the Method that implements Print
def OnPrint(self, event):
pdd = wx.PrintDialogData()
pdd.SetPrintData(self.printData)
printer = wx.Printer(pdd)
lineHeight = self.CalcY(1) - self.CalcY(0)
printout = MyPrintout(self.title, self.graphic, multiPage=True, lineStart=self.CalcY(0) - int(lineHeight / 2.0), lineHeight=lineHeight)
if not printer.Print(self, printout):
dlg = Dialogs.ErrorDialog(None, _("There was a problem printing this report."))
dlg.ShowModal()
dlg.Destroy()
# NO! REMOVED to prevent crash on 2nd print attempt following Filter Config.
# else:
# self.printData = printer.GetPrintDialogData().GetPrintData()
printout.Destroy()
def OnClose(self, event):
""" Handle the Close Event """
# If the report has a defined Control Object ...
if self.ControlObject != None:
# ... remove this report from the Menu Window's Window Menu
self.ControlObject.RemoveReportWindow(self.title, self.reportNumber)
# Inherit the parent Close event so things will, you know, close.
event.Skip()
# Define the Method that closes the Window on File > Exit
def CloseWindow(self, event):
# Close!
self.Close()
def OnHelp(self, event):
""" Implement the Filter Dialog Box's Help function """
# Define the Help Context
HelpContext = "Library Keyword Graphs"
# If a Help Window is defined ...
if TransanaGlobal.menuWindow != None:
# ... call Help!
TransanaGlobal.menuWindow.ControlObject.Help(HelpContext)
def OnSize(self, event):
""" Handle Resize Events by resizing the Graphic Control and redrawing the graphic """
(w, h) = self.GetClientSizeTuple()
if self.Bounds[1] == 5:
self.Bounds = (5, 5, w - 10, h - 25)
else:
self.Bounds = (5, 40, w - 10, h - 30)
self.DrawGraph()
def CalcX(self, XPos):
""" Determine the proper horizontal coordinate for the given time """
# We need to start by defining the legal range for the type of graph we're working with.
# The Sequence Map is tied to the start and end time variables.
if self.reportType == 1:
startVal = self.startTime
endVal = self.endTime
# The Bar Graph stretches from 0 to the time line Maximum variable
elif self.reportType == 2:
startVal = 0.0
if self.timelineMax == 0:
endVal = 1
else:
endVal = self.timelineMax
# The Percentage Graph ranges from 0 to 100!
elif self.reportType == 3:
startVal = 0.0
endVal = 100.0
# Specify a margin width
marginwidth = (0.06 * (self.Bounds[2] - self.Bounds[0]))
        # The Horizontal Adjustment is the global graphic indent
hadjust = self.graphicindent
        # The Scaling Factor is the active portion of the drawing area width divided by the displayed value range
        # The idea is to leave the left margin, self.graphicindent for Keyword Labels, and the right margin
if self.MediaLength > 0:
scale = (float(self.Bounds[2]) - self.Bounds[0] - hadjust - 2 * marginwidth) / (endVal - startVal)
else:
scale = 0.0
# The horizontal coordinate is the left margin plus the Horizontal Adjustment for Keyword Labels plus
# position times the scaling factor
res = marginwidth + hadjust + ((XPos - startVal) * scale)
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
# ... adjust for a right-to-left graph
return int(self.Bounds[2] - self.Bounds[0] - res)
# If we are in a Left-To-Right language ...
else:
# ... just return the calculated value
return int(res)
def FindTime(self, x):
""" Given a horizontal pixel position, determine the corresponding time value from
the video time line """
# determine the margin width
marginwidth = (0.06 * (self.Bounds[2] - self.Bounds[0]))
        # The Horizontal Adjustment is the global graphic indent
hadjust = self.graphicindent
        # The Scaling Factor is the active portion of the drawing area width divided by the displayed time span
        # The idea is to leave the left margin, self.graphicindent for Keyword Labels, and the right margin
if self.MediaLength > 0:
scale = (float(self.Bounds[2]) - self.Bounds[0] - hadjust - 2 * marginwidth) / (self.endTime - self.startTime)
else:
scale = 1.0
# The time is calculated by taking the total width, subtracting the margin values and horizontal indent,
# and then dividing the result by the scale factor calculated above
time = int((x - marginwidth - hadjust) / scale) + self.startTime
return time
def CalcY(self, YPos):
""" Determine the vertical position for a given keyword index """
# Spacing is the larger of (12 pixels for label text or the bar height) plus 2 for whitespace
spacing = max(12, self.barHeight) + self.whitespaceHeight
        # Top margin is 30 for titles plus two spacing units for the timeline
topMargin = 30 + (2 * spacing)
return int(spacing * YPos + topMargin)
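    # Worked example (added for illustration; the numbers are hypothetical, not
    # Transana defaults): with barHeight=20 and whitespaceHeight=2,
    #   spacing   = max(12, 20) + 2 = 22
    #   topMargin = 30 + (2 * 22)  = 74
    # so CalcY(3) = 22 * 3 + 74 = 140 pixels from the top.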
def FindKeyword(self, y):
""" Given a vertical pixel position, determine the corresponding Keyword data """
# NOTE: This method is only valid if self.reportType == 1, the Sequence Map.
# Other variations of the Series maps may use different key values for the dictionary.
if self.reportType != 1:
return None
# If the graphic is scrolled, the raw Y value does not point to the correct Keyword.
# Determine the unscrolled equivalent Y position.
(modX, modY) = self.graphic.CalcUnscrolledPosition(0, y)
# Now we need to get the keys for the Lookup Dictionary
keyVals = self.epNameKWGKWLookup.keys()
# We need the keys to be in order, so we can quit when we've found what we're looking for.
keyVals.sort()
# The single-line display and the multi-line display handle the lookup differently, of course.
# Let's start with the single-line display.
if self.singleLineDisplay:
# Initialize the return value to None in case nothing is found. The single-line version expects an Episode Name.
returnVal = None
# We also need a temporary value initialized to None. Our data structure returns complex data, from which we
# extract the desired value.
tempVal = None
# Iterate through the sorted keys. The keys are actually y values for the graph!
for yVal in keyVals:
# If we find a key value that is smaller than the unscrolled Graphic y position ...
if yVal <= modY:
# ... then we've found a candidate for what we're looking for. But we keep iterating,
# because we want the LARGEST yVal that's smaller than the graphic y value.
tempVal = self.epNameKWGKWLookup[yVal]
# Once our y values are too large ...
else:
# ... we should stop iterating through the (sorted) keys.
break
# If we found a valid data structure ...
if tempVal != None:
# ... we can extract the Episode name by looking at the first value of the first value of the first key.
returnVal = tempVal[tempVal.keys()[0]][0][0]
# Here, we handle the multi-line display of the Sequence Map.
else:
# Initialize the return value to a tuple of three Nones in case nothing is found.
# The multi-line version expects an Episode Name, Keyword Group, Keyword tuple.
returnVal = (None, None, None)
# Iterate through the sorted keys. The keys are actually y values for the graph!
for yVal in keyVals:
# If we find a key value that is smaller than the unscrolled Graphic y position ...
if yVal <= modY:
# ... then we've found a candidate for what we're looking for. But we keep iterating,
# because we want the LARGEST yVal that's smaller than the graphic y value.
returnVal = self.epNameKWGKWLookup[yVal]
# Once our y values are too large ...
else:
# ... we should stop iterating through the (sorted) keys.
break
# Return the value we found, or None
return returnVal
def GetScaleIncrements(self, MediaLength):
# The general rule is to try to get logical interval sizes with 8 or fewer time increments.
# You always add a bit (20% at the lower levels) of the time interval to the MediaLength
# because the final time is placed elsewhere and we don't want overlap.
# This routine covers from 1 second to 18 hours in length.
# media Length of 9 seconds or less = 1 second intervals
if MediaLength < 9001:
Num = int(round((MediaLength + 200) / 1000.0))
Interval = 1000
# media length of 18 seconds or less = 2 second intervals
elif MediaLength < 18001:
Num = int(round((MediaLength + 400) / 2000.0))
Interval = 2000
# media length of 30 seconds or less = 5 second intervals
elif MediaLength < 30001:
Num = int(round((MediaLength + 2000) / 5000.0))
Interval = 5000
# media length of 50 seconds or less = 5 second intervals
elif MediaLength < 50001:
Num = int(round((MediaLength + 1000) / 5000.0))
Interval = 5000
# media Length of 1:30 or less = 10 second intervals
elif MediaLength < 90001:
Num = int(round((MediaLength + 2000) / 10000.0))
Interval = 10000
# media length of 2:50 or less = 20 second intervals
elif MediaLength < 160001:
Num = int(round((MediaLength + 4000) / 20000.0))
Interval = 20000
# media length of 4:30 or less = 30 second intervals
elif MediaLength < 270001:
Num = int(round((MediaLength + 6000) / 30000.0))
Interval = 30000
# media length of 6:00 or less = 60 second intervals
elif MediaLength < 360001:
Num = int(round((MediaLength + 12000) / 60000.0))
Interval = 60000
# media length of 10:00 or less = 60 second intervals
elif MediaLength < 600001:
Num = int(round((MediaLength + 8000) / 60000.0))
Interval = 60000
# media length of 16:00 or less = 2 minute intervals
elif MediaLength < 960001:
Num = int(round((MediaLength + 24000) / 120000.0))
Interval = 120000
# media length of 40:00 or less = 5 minute intervals
elif MediaLength < 2400001:
Num = int(round((MediaLength + 60000) / 300000.0))
Interval = 300000
# media length if 1:10:00 or less get 10 minute intervals
elif MediaLength < 4200001:
Num = int(round((MediaLength + 80000) / 600000.0))
Interval = 600000
# media length if 3:00:00 or less get 30 minute intervals
elif MediaLength < 10800001:
Num = int(round((MediaLength + 240000) / 1800000.0))
Interval = 1800000
# media length if 4:00:00 or less get 30 minute intervals
elif MediaLength < 14400001:
Num = int(round((MediaLength + 60000) / 1800000.0))
Interval = 1800000
# media length if 9:00:00 or less get 60 minute intervals
elif MediaLength < 32400001:
Num = int(round((MediaLength + 120000) / 3600000.0))
Interval = 3600000
# Longer videos get 2 hour intervals
else:
Num = int(round((MediaLength + 240000) / 7200000.0))
Interval = 7200000
return Num, Interval
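    def _DemoScaleIncrements(self):
        """ Illustrative sketch (added; not called anywhere): spot checks of
            GetScaleIncrements() derived from the interval rules above. """
        # a 5-minute (300,000 ms) Episode gets 5 increments of 60 seconds
        assert self.GetScaleIncrements(300000) == (5, 60000)
        # a 9-second Episode gets 9 increments of 1 second
        assert self.GetScaleIncrements(9000) == (9, 1000)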
def ProcessSeries(self):
# Initialize Media Length to 0
self.MediaLength = 0
# Initialize all the data Lists to empty
self.episodeList = []
self.filteredEpisodeList = []
self.clipList = []
self.clipFilterList = []
self.snapshotList = []
self.snapshotFilterList = []
self.unfilteredKeywordList = []
self.filteredKeywordList = []
if self.reportType == 2:
epLengths = {}
# Get Series Number, Episode Number, Media File Name, and Length
SQLText = """SELECT e.EpisodeNum, e.EpisodeID, e.SeriesNum, e.MediaFile, e.EpLength, s.SeriesID
FROM Episodes2 e, Series2 s
WHERE s.SeriesNum = e.SeriesNum AND
s.SeriesNum = %s
ORDER BY EpisodeID """
# Adjust the query for sqlite if needed
SQLText = DBInterface.FixQuery(SQLText)
# Execute the query
self.DBCursor.execute(SQLText, (self.seriesNum, ))
for (EpisodeNum, EpisodeID, SeriesNum, MediaFile, EpisodeLength, SeriesID) in self.DBCursor.fetchall():
EpisodeID = DBInterface.ProcessDBDataForUTF8Encoding(EpisodeID)
SeriesID = DBInterface.ProcessDBDataForUTF8Encoding(SeriesID)
MediaFile = DBInterface.ProcessDBDataForUTF8Encoding(MediaFile)
self.episodeList.append((EpisodeID, SeriesID, True))
if (EpisodeLength > self.MediaLength):
self.MediaLength = EpisodeLength
self.endTime = self.MediaLength
# Remember the Episode's length
self.episodeLengths[(EpisodeID, SeriesID)] = EpisodeLength
# Get the list of Keywords to be displayed
SQLText = """SELECT ck.KeywordGroup, ck.Keyword
FROM Clips2 cl, ClipKeywords2 ck
WHERE cl.EpisodeNum = %s AND
cl.ClipNum = ck.ClipNum
GROUP BY ck.keywordgroup, ck.keyword
ORDER BY KeywordGroup, Keyword, ClipStart"""
# Adjust the query for sqlite if needed
SQLText = DBInterface.FixQuery(SQLText)
self.DBCursor.execute(SQLText, (EpisodeNum, ))
for (kwg, kw) in self.DBCursor.fetchall():
kwg = DBInterface.ProcessDBDataForUTF8Encoding(kwg)
kw = DBInterface.ProcessDBDataForUTF8Encoding(kw)
if not (kwg, kw) in self.filteredKeywordList:
self.filteredKeywordList.append((kwg, kw))
if not (kwg, kw, True) in self.unfilteredKeywordList:
self.unfilteredKeywordList.append((kwg, kw, True))
# Get the list of WHOLE SNAPSHOT Keywords to be displayed
SQLText = """SELECT ck.KeywordGroup, ck.Keyword
FROM Snapshots2 sn, ClipKeywords2 ck
WHERE sn.EpisodeNum = %s AND
sn.SnapshotNum = ck.SnapshotNum
GROUP BY ck.keywordgroup, ck.keyword
ORDER BY KeywordGroup, Keyword, SnapshotTimeCode"""
# Adjust the query for sqlite if needed
SQLText = DBInterface.FixQuery(SQLText)
self.DBCursor.execute(SQLText, (EpisodeNum, ))
for (kwg, kw) in self.DBCursor.fetchall():
kwg = DBInterface.ProcessDBDataForUTF8Encoding(kwg)
kw = DBInterface.ProcessDBDataForUTF8Encoding(kw)
if not (kwg, kw) in self.filteredKeywordList:
self.filteredKeywordList.append((kwg, kw))
if not (kwg, kw, True) in self.unfilteredKeywordList:
self.unfilteredKeywordList.append((kwg, kw, True))
# Get the list of SNAPSHOT CODING Keywords to be displayed
SQLText = """SELECT ck.KeywordGroup, ck.Keyword
FROM Snapshots2 sn, SnapshotKeywords2 ck
WHERE sn.EpisodeNum = %s AND
sn.SnapshotNum = ck.SnapshotNum
GROUP BY ck.keywordgroup, ck.keyword
ORDER BY KeywordGroup, Keyword, SnapshotTimeCode"""
# Adjust the query for sqlite if needed
SQLText = DBInterface.FixQuery(SQLText)
self.DBCursor.execute(SQLText, (EpisodeNum, ))
for (kwg, kw) in self.DBCursor.fetchall():
kwg = DBInterface.ProcessDBDataForUTF8Encoding(kwg)
kw = DBInterface.ProcessDBDataForUTF8Encoding(kw)
if not (kwg, kw) in self.filteredKeywordList:
self.filteredKeywordList.append((kwg, kw))
if not (kwg, kw, True) in self.unfilteredKeywordList:
self.unfilteredKeywordList.append((kwg, kw, True))
# Sort the Keyword List
self.unfilteredKeywordList.sort()
# Create the Keyword Placement lines to be displayed. We need them to be in ClipStart, ClipNum order so colors will be
# distributed properly across bands.
SQLText = """SELECT ck.KeywordGroup, ck.Keyword, cl.ClipStart, cl.ClipStop, cl.ClipNum, cl.ClipID, cl.CollectNum
FROM Clips2 cl, ClipKeywords2 ck
WHERE cl.EpisodeNum = %s AND
cl.ClipNum = ck.ClipNum
ORDER BY ClipStart, cl.ClipNum, KeywordGroup, Keyword"""
# Adjust the query for sqlite if needed
SQLText = DBInterface.FixQuery(SQLText)
self.DBCursor.execute(SQLText, (EpisodeNum, ))
for (kwg, kw, clipStart, clipStop, clipNum, clipID, collectNum) in self.DBCursor.fetchall():
kwg = DBInterface.ProcessDBDataForUTF8Encoding(kwg)
kw = DBInterface.ProcessDBDataForUTF8Encoding(kw)
clipID = DBInterface.ProcessDBDataForUTF8Encoding(clipID)
# If we're dealing with an Episode, self.clipNum will be None and we want all clips.
# If we're dealing with a Clip, we only want to deal with THIS clip!
if (self.clipNum == None) or (clipNum == self.clipNum):
self.clipList.append((kwg, kw, clipStart, clipStop, clipNum, clipID, collectNum, EpisodeID, SeriesID))
if not ((clipID, collectNum, True) in self.clipFilterList):
self.clipFilterList.append((clipID, collectNum, True))
# Create the WHOLE SNAPSHOT Keyword Placement lines to be displayed. We need them to be in SnapshotTimeCode, SnapshotNum order so colors will be
# distributed properly across bands.
SQLText = """SELECT ck.KeywordGroup, ck.Keyword, sn.SnapshotTimeCode, sn.SnapshotDuration, sn.SnapshotNum, sn.SnapshotID, sn.CollectNum
FROM Snapshots2 sn, ClipKeywords2 ck
WHERE sn.EpisodeNum = %s AND
sn.SnapshotNum = ck.SnapshotNum
ORDER BY SnapshotTimeCode, sn.SnapshotNum, KeywordGroup, Keyword"""
# Adjust the query for sqlite if needed
SQLText = DBInterface.FixQuery(SQLText)
self.DBCursor.execute(SQLText, (EpisodeNum, ))
for (kwg, kw, SnapshotTimeCode, SnapshotDuration, SnapshotNum, SnapshotID, collectNum) in self.DBCursor.fetchall():
kwg = DBInterface.ProcessDBDataForUTF8Encoding(kwg)
kw = DBInterface.ProcessDBDataForUTF8Encoding(kw)
SnapshotID = DBInterface.ProcessDBDataForUTF8Encoding(SnapshotID)
# If we're dealing with an Episode, self.clipNum will be None and we want all clips.
# If we're dealing with a Clip, we only want to deal with THIS clip!
if (self.clipNum == None):
self.snapshotList.append((kwg, kw, SnapshotTimeCode, SnapshotTimeCode + SnapshotDuration, SnapshotNum, SnapshotID, collectNum, EpisodeID, SeriesID))
if not ((SnapshotID, collectNum, True) in self.snapshotFilterList):
self.snapshotFilterList.append((SnapshotID, collectNum, True))
# Create the SNAPSHOT CODING Keyword Placement lines to be displayed. We need them to be in SnapshotTimeCode, SnapshotNum order so colors will be
# distributed properly across bands.
SQLText = """SELECT ck.KeywordGroup, ck.Keyword, sn.SnapshotTimeCode, sn.SnapshotDuration, sn.SnapshotNum, sn.SnapshotID, sn.CollectNum
FROM Snapshots2 sn, SnapshotKeywords2 ck
WHERE sn.EpisodeNum = %s AND
sn.SnapshotNum = ck.SnapshotNum
ORDER BY SnapshotTimeCode, sn.SnapshotNum, KeywordGroup, Keyword"""
# Adjust the query for sqlite if needed
SQLText = DBInterface.FixQuery(SQLText)
self.DBCursor.execute(SQLText, (EpisodeNum, ))
for (kwg, kw, SnapshotTimeCode, SnapshotDuration, SnapshotNum, SnapshotID, collectNum) in self.DBCursor.fetchall():
kwg = DBInterface.ProcessDBDataForUTF8Encoding(kwg)
kw = DBInterface.ProcessDBDataForUTF8Encoding(kw)
SnapshotID = DBInterface.ProcessDBDataForUTF8Encoding(SnapshotID)
# If we're dealing with an Episode, self.clipNum will be None and we want all clips.
# If we're dealing with a Clip, we only want to deal with THIS clip!
if (self.clipNum == None):
self.snapshotList.append((kwg, kw, SnapshotTimeCode, SnapshotTimeCode + SnapshotDuration, SnapshotNum, SnapshotID, collectNum, EpisodeID, SeriesID))
if not ((SnapshotID, collectNum, True) in self.snapshotFilterList):
self.snapshotFilterList.append((SnapshotID, collectNum, True))
# Sort the Keyword List
self.filteredKeywordList.sort()
def UpdateKeywordVisualization(self):
""" Update the Keyword Visualization following something that could have changed it. """
print "LibraryMap.UpdateKeywordVisualization(): This should NEVER get called!!"
# Clear the Clip List
self.clipList = []
# Clear the Filtered Clip List
self.clipFilterList = []
# Clear the graphic itself
self.graphic.Clear()
# Before we start, make a COPY of the keyword list so we can check for keywords that are no longer
# included on the Map and need to be deleted from the KeywordLists
delList = self.unfilteredKeywordList[:]
# Now let's create the SQL to get all relevant Clip and Clip Keyword records
SQLText = """SELECT ck.KeywordGroup, ck.Keyword, cl.ClipStart, cl.ClipStop, cl.ClipNum, cl.ClipID, cl.CollectNum, ep.EpisodeName
FROM Clips2 cl, ClipKeywords2 ck, Episodes2 ep
WHERE cl.EpisodeNum = %s AND
cl.ClipNum = ck.ClipNum AND
ep.EpisodeNum = cl.EpisodeNum
ORDER BY ClipStart, cl.ClipNum, KeywordGroup, Keyword"""
# Adjust the query for sqlite if needed
SQLText = DBInterface.FixQuery(SQLText)
# Execute the query
self.DBCursor.execute(SQLText, (self.episodeNum, ))
# Iterate through the results ...
for (kwg, kw, clipStart, clipStop, clipNum, clipID, collectNum, episodeName) in self.DBCursor.fetchall():
kwg = DBInterface.ProcessDBDataForUTF8Encoding(kwg)
kw = DBInterface.ProcessDBDataForUTF8Encoding(kw)
clipID = DBInterface.ProcessDBDataForUTF8Encoding(clipID)
episodeName = DBInterface.ProcessDBDataForUTF8Encoding(episodeName)
# If we're dealing with an Episode, self.clipNum will be None and we want all clips.
# If we're dealing with a Clip, we only want to deal with THIS clip!
if (self.clipNum == None) or (clipNum == self.clipNum):
                # If a Clip is not found in the clipList ...
                # (The query returns no Series name, so the Map's own Series name is used.)
                if not ((kwg, kw, clipStart, clipStop, clipNum, clipID, collectNum, episodeName, self.seriesName) in self.clipList):
                    # ... add it to the clipList ...
                    self.clipList.append((kwg, kw, clipStart, clipStop, clipNum, clipID, collectNum, episodeName, self.seriesName))
# ... and if it's not in the clipFilter List (which it probably isn't!) ...
if not ((clipID, collectNum, True) in self.clipFilterList):
# ... add it to the clipFilterList.
self.clipFilterList.append((clipID, collectNum, True))
# If the keyword is not in either of the Keyword Lists, ...
if not (((kwg, kw) in self.filteredKeywordList) or ((kwg, kw, False) in self.unfilteredKeywordList)):
# ... add it to both keyword lists.
self.filteredKeywordList.append((kwg, kw))
self.unfilteredKeywordList.append((kwg, kw, True))
# If the keyword is in query results, it should be removed from the list of keywords to be deleted.
# Check that list for either True or False versions of the keyword!
if (kwg, kw, True) in delList:
del(delList[delList.index((kwg, kw, True))])
if (kwg, kw, False) in delList:
del(delList[delList.index((kwg, kw, False))])
# Iterate through ANY keywords left in the list of keywords to be deleted ...
for element in delList:
# ... and delete them from the unfiltered Keyword List
del(self.unfilteredKeywordList[self.unfilteredKeywordList.index(element)])
# If the keyword is also in the filtered keyword list ...
if (element[0], element[1]) in self.filteredKeywordList:
# ... it needs to be deleted from there too!
del(self.filteredKeywordList[self.filteredKeywordList.index((element[0], element[1]))])
# Now that the underlying data structures have been corrected, we're ready to redraw the Keyword Visualization
self.DrawGraph()
def DrawGraph(self):
""" Actually Draw the Series Map """
self.keywordClipList = {}
# Series Keyword Sequence Map, if multi-line display is desired
if (self.reportType == 1) and (not self.singleLineDisplay):
epCount = 0
for (episodeName, seriesName, checked) in self.episodeList:
if checked:
epCount += 1
# Determine the graphic size needed for the number of episodes times the number of keywords plus two lines
# for each episode for the episode title and the blank line!
newheight = max(self.CalcY(epCount * (len(self.filteredKeywordList) + 2)), self.Bounds[3] - self.Bounds[1])
# Series Keyword Sequence Map's single-line display,
# Series Keyword Bar Graph, and Series Keyword Percentage Graph all need the data arranged the same way
else:
# Initialize a dictionary that will hold information about the bars we're drawing.
barData = {}
# We need to know how many Episodes we have on the graph. Initialize a counter
self.episodeCount = 0
for (episodeName, seriesName, checkVal) in self.episodeList:
if checkVal:
# Count all the Episodes that have been "checked" in the Filter.
self.episodeCount += 1
# Now we iterate through the CLIPS.
for (KWG, KW, Start, Stop, ClipNum, ClipName, CollectNum, episodeName, seriesName) in self.clipList:
# We make sure they are selected in the Filter, checking the Episode, Clips and Keyword selections
if ((episodeName, seriesName, True) in self.episodeList) and \
((ClipName, CollectNum, True) in self.clipFilterList) and \
((KWG, KW) in self.filteredKeywordList):
# Now we track the start and end times compared to the current display limits
if Start < self.startTime:
Start = self.startTime
if Start > self.endTime:
Start = self.endTime
if Stop > self.endTime:
Stop = self.endTime
if Stop < self.startTime:
Stop = self.startTime
# Set up the key we use to mark overlaps
overlapKey = (episodeName, KWG, KW)
# If Start and Stop are the same, the Clip is off the graph and should be ignored.
if Start != Stop:
# If the clip is ON the graph, let's check for overlap with other clips with the same keyword at the same spot
if not barData.has_key(overlapKey):
barData[overlapKey] = 0
# Add the bar length to the bar Data dictionary.
barData[overlapKey] += Stop - Start
# For the Series Keyword Bar Graph and the Series Keyword Percentage Graph ...
if self.reportType in [2, 3]:
# Now add the Clip to the keywordClipList. This holds all Keyword/Clip data in memory so it can be searched quickly
# This dictionary object uses the episode name and keyword pair as the key and holds a list of Clip data for all clips with that keyword.
# If the list for a given keyword already exists ...
if self.keywordClipList.has_key(overlapKey):
# Get the list of Clips that contain the current Keyword from the keyword / Clip List dictionary
overlapClips = self.keywordClipList[overlapKey]
# Iterate through the Clip List ...
for (objType, overlapStartTime, overlapEndTime, overlapClipNum, overlapClipName) in overlapClips:
# Let's look for overlap. Overlap artificially inflates the size of the bars, and must be eliminated.
overlapStart = Stop
overlapEnd = Start
if DEBUG and KWG == 'Transana Users' and KW == 'DavidW':
print "Start = %7d, overStart = %7s, Stop = %7s, overEnd = %7s" % (Start, overlapStartTime, Stop, overlapEndTime)
# Look for Start between overlapStartTime and overlapEndTime
if (Start >= overlapStartTime) and (Start < overlapEndTime):
overlapStart = Start
# Look for overlapStartTime between Start and Stop
if (overlapStartTime >= Start) and (overlapStartTime < Stop):
overlapStart = overlapStartTime
# Look for Stop between overlapStartTime and overlapEndTime
if (Stop > overlapStartTime) and (Stop <= overlapEndTime):
overlapEnd = Stop
# Look for overlapEndTime between Start and Stop
if (overlapEndTime > Start) and (overlapEndTime <= Stop):
overlapEnd = overlapEndTime
# If we've found an overlap, it will be indicated by Start being less than End!
if overlapStart < overlapEnd:
# We need to SUBTRACT the overlap time from the barData structure.
barData[overlapKey] -= overlapEnd - overlapStart
if DEBUG:
print "Bar Graph overlap found:", overlapKey, overlapEnd - overlapStart
# ... add the new Clip to the Clip List
self.keywordClipList[overlapKey].append(('Clip', Start, Stop, ClipNum, ClipName))
# If there is no entry for the given keyword ...
else:
# ... create a List object with the first clip's data for this Keyword Pair key
self.keywordClipList[overlapKey] = [('Clip', Start, Stop, ClipNum, ClipName)]
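            # Illustrative sketch, not part of the original code: when two bars
            # with the same overlapKey intersect, the four checks above reduce to
            #     overlapStart = max(Start, overlapStartTime)
            #     overlapEnd   = min(Stop, overlapEndTime)
            # For example, spans [10000, 50000] and [30000, 70000] ms intersect
            # on [30000, 50000], so 20000 ms would be subtracted from
            # barData[overlapKey] to avoid double-counting the shared span.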
# Now we iterate through the Snapshot List.
for (KWG, KW, Start, Stop, SnapshotNum, SnapshotName, CollectNum, episodeName, seriesName) in self.snapshotList:
# We make sure they are selected in the Filter, checking the Episode, Clips and Keyword selections
if ((episodeName, seriesName, True) in self.episodeList) and \
((SnapshotName, CollectNum, True) in self.snapshotFilterList) and \
((KWG, KW) in self.filteredKeywordList):
# Now we track the start and end times compared to the current display limits
if Start < self.startTime:
Start = self.startTime
if Start > self.endTime:
Start = self.endTime
if Stop > self.endTime:
Stop = self.endTime
if Stop < self.startTime:
Stop = self.startTime
# Set up the key we use to mark overlaps
overlapKey = (episodeName, KWG, KW)
# If Start and Stop are the same, the Clip is off the graph and should be ignored.
if Start != Stop:
# If the snapshot is ON the graph, let's check for overlap with other clips/snapshots with the same keyword at the same spot
if not barData.has_key(overlapKey):
barData[overlapKey] = 0
# Add the bar length to the bar Data dictionary.
barData[overlapKey] += Stop - Start
# For the Series Keyword Bar Graph and the Series Keyword Percentage Graph ...
if self.reportType in [2, 3]:
# Now add the Clip to the keywordClipList. This holds all Keyword/Clip data in memory so it can be searched quickly
# This dictionary object uses the episode name and keyword pair as the key and holds a list of Clip data for all clips with that keyword.
# If the list for a given keyword already exists ...
if self.keywordClipList.has_key(overlapKey):
# Get the list of Clips that contain the current Keyword from the keyword / Clip List dictionary
overlapClips = self.keywordClipList[overlapKey]
# Iterate through the Clip List ...
for (objType, overlapStartTime, overlapEndTime, overlapClipNum, overlapClipName) in overlapClips:
# Let's look for overlap. Overlap artificially inflates the size of the bars, and must be eliminated.
overlapStart = Stop
overlapEnd = Start
if DEBUG and KWG == 'Transana Users' and KW == 'DavidW':
print "Start = %7d, overStart = %7s, Stop = %7s, overEnd = %7s" % (Start, overlapStartTime, Stop, overlapEndTime)
# Look for Start between overlapStartTime and overlapEndTime
if (Start >= overlapStartTime) and (Start < overlapEndTime):
overlapStart = Start
# Look for overlapStartTime between Start and Stop
if (overlapStartTime >= Start) and (overlapStartTime < Stop):
overlapStart = overlapStartTime
# Look for Stop between overlapStartTime and overlapEndTime
if (Stop > overlapStartTime) and (Stop <= overlapEndTime):
overlapEnd = Stop
# Look for overlapEndTime between Start and Stop
if (overlapEndTime > Start) and (overlapEndTime <= Stop):
overlapEnd = overlapEndTime
# If we've found an overlap, it will be indicated by Start being less than End!
if overlapStart < overlapEnd:
# We need to SUBTRACT the overlap time from the barData structure.
barData[overlapKey] -= overlapEnd - overlapStart
if DEBUG:
print "Bar Graph overlap found:", overlapKey, overlapEnd - overlapStart
# ... add the new Snapshot to the Clip List
self.keywordClipList[overlapKey].append(('Snapshot', Start, Stop, SnapshotNum, SnapshotName))
# If there is no entry for the given keyword ...
else:
# ... create a List object with the first Snapshot's data for this Keyword Pair key
self.keywordClipList[overlapKey] = [('Snapshot', Start, Stop, SnapshotNum, SnapshotName)]
# once we're done with checking overlaps here, let's clear out this variable,
# as it may get re-used later for other purposes!
self.keywordClipList = {}
if self.showLegend:
newheight = max(self.CalcY(self.episodeCount + len(self.filteredKeywordList) + 2), self.Bounds[3] - self.Bounds[1])
else:
newheight = max(self.CalcY(self.episodeCount), self.Bounds[3] - self.Bounds[1])
# Now that we have all necessary information, let's create and populate the graphic
# Start by destroying the existing control and creating a new one with the correct Canvas Size
self.graphic.Destroy()
self.graphic = GraphicsControlClass.GraphicsControl(self, -1, wx.Point(self.Bounds[0], self.Bounds[1]),
(self.Bounds[2] - self.Bounds[0], self.Bounds[3] - self.Bounds[1]),
(self.Bounds[2] - self.Bounds[0], newheight + 3),
passMouseEvents=True)
# Put the header information on the graphic.
self.graphic.SetFontColour("BLACK")
if 'wxMac' in wx.PlatformInfo:
self.graphic.SetFontSize(17)
else:
self.graphic.SetFontSize(14)
self.graphic.AddTextCentered("%s" % self.title, (self.Bounds[2] - self.Bounds[0]) / 2, 1)
if 'wxMac' in wx.PlatformInfo:
self.graphic.SetFontSize(13)
else:
self.graphic.SetFontSize(10)
if 'unicode' in wx.PlatformInfo:
# Encode with UTF-8 rather than TransanaGlobal.encoding because this is a prompt, not DB Data.
prompt = unicode(_('Series: %s'), 'utf8')
else:
prompt = _('Series: %s')
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddTextRight(prompt % self.seriesName, self.Bounds[2] - self.Bounds[0] - 2, 2)
else:
self.graphic.AddText(prompt % self.seriesName, 2, 2)
if self.configName != '':
if 'unicode' in wx.PlatformInfo:
# Encode with UTF-8 rather than TransanaGlobal.encoding because this is a prompt, not DB Data.
prompt = unicode(_('Filter Configuration: %s'), 'utf8')
else:
prompt = _('Filter Configuration: %s')
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddTextRight(prompt % self.configName, self.Bounds[2] - self.Bounds[0] - 2, 16)
else:
self.graphic.AddText(prompt % self.configName, 2, 16)
# Initialize a Line Counter, used for vertical positioning
Count = 0
# We'll also need a lookup table for vertical values.
yValLookup = {}
# Initialize the Episode Name / Keyword Lookup table. The multi-line Series Keyword Sequence Map gets a blank first line.
if (self.reportType == 1) and (not self.singleLineDisplay):
self.epNameKWGKWLookup = {0 : ('', '', '')}
else:
self.epNameKWGKWLookup = {}
# Now iterate through the Episode list, adding the Episode Names and (if appropriate) the Keywords as an axis label
for (episodeName, seriesName, episodeShown) in self.episodeList:
if episodeShown:
# Add the Episode Name to the vertical axis
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddTextRight("%s" % episodeName, self.Bounds[2] - self.Bounds[0] - 4, self.CalcY(Count) - 7)
else:
self.graphic.AddText("%s" % episodeName, 4, self.CalcY(Count) - 7)
# if Keyword Series Sequence Map in multi-line mode ...
if (self.reportType == 1) and (not self.singleLineDisplay):
# ... add a blank lookup line for the blank line, as this line gets no data for that report.
self.epNameKWGKWLookup[self.CalcY(Count-1) - int((self.barHeight + self.whitespaceHeight)/2)] = ('', '', '')
# We want Grid Lines in light gray
self.graphic.SetColour('LIGHT GREY')
# if Keyword Series Sequence Map in multi-line mode, we draw Grid Lines and add Keywords to the Vertical Axis.
if (self.reportType == 1) and (not self.singleLineDisplay):
# Draw the top Grid Line, if appropriate
if self.hGridLines:
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddLines([(self.Bounds[2] - self.Bounds[0] - 10, self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2),
self.CalcX(self.endTime), self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2))])
else:
self.graphic.AddLines([(10, self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2), self.CalcX(self.endTime), self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2))])
gridLineCount = Count
Count += 1
# Iterate through the Keyword List from the Filter Dialog ...
for KWG, KW in self.filteredKeywordList:
# ... and add the Keywords to the Vertical Axis.
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddTextRight("%s : %s" % (KWG, KW), self.Bounds[2] - self.Bounds[0] - 10, self.CalcY(Count) - 7)
else:
self.graphic.AddText("%s : %s" % (KWG, KW), 10, self.CalcY(Count) - 7)
# Add this data to the Y Position Lookup dictionary.
yValLookup[(episodeName, KWG, KW)] = Count
# Add a Lookup Line for this episodeName, Keyword Group, Keyword combination
self.epNameKWGKWLookup[self.CalcY(Count) - int((self.barHeight + self.whitespaceHeight)/2)] = (episodeName, KWG, KW)
# Add Horizontal Grid Lines, if appropriate
if self.hGridLines and ((Count - gridLineCount) % 2 == 0):
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddLines([(self.Bounds[2] - self.Bounds[0] - 10, self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2),
self.CalcX(self.endTime), self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2))])
else:
self.graphic.AddLines([(10, self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2), self.CalcX(self.endTime), self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2))])
# Increment the counter for each Keyword
Count = Count + 1
# If it's NOT the multi-line Sequence Map, the Gridline rules are different, but still need to be handled.
else:
# Add Horizontal Grid Lines, if appropriate
if self.hGridLines and (Count % 2 == 1):
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddLines([(self.Bounds[2] - self.Bounds[0] - 4, self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2),
self.CalcX(self.timelineMax), self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2))])
else:
self.graphic.AddLines([(4, self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2), self.CalcX(self.timelineMax), self.CalcY(Count) + 6 + int(self.whitespaceHeight / 2))])
# Add this data to the Y Position Lookup dictionary.
yValLookup[episodeName] = Count
# Increment the counter for each Episode. (This produces a blank line in the Sequence Map, which is OK.)
Count += 1
# If multi-line Sequence Report, we're building the Episode Name / Keyword Lookup table here, otherwise it's later.
if (self.reportType == 1) and (not self.singleLineDisplay):
# Finish with a blank Lookup Line so the bottom of the chart doesn't give false positive information
self.epNameKWGKWLookup[self.CalcY(Count-1) - int((self.barHeight + self.whitespaceHeight)/2)] = ('', '', '')
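        # A hypothetical sketch of the finished lookup table (row names and
        # y-values invented for illustration). Keys are the top y-coordinate
        # of each row, so a mouse y-position resolves to the row with the
        # largest key that does not exceed it:
        #     self.epNameKWGKWLookup = {0:  ('', '', ''),
        #                               46: ('Episode 1', 'KWGroup', 'Keyword A'),
        #                               78: ('Episode 1', 'KWGroup', 'Keyword B')}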
# Reset the graphic color following drawing the Grid Lines
self.graphic.SetColour("BLACK")
# After we have the axis values specified but before we draw anything else, we determine the amount the
# subsequent graphics must be indented to adjust for the size of the text labels.
self.graphicindent = self.graphic.GetMaxWidth(start=3)
# Draw the Graph Time Line
# For the Sequence Map, the timeline is startTime to endTime.
if (self.reportType == 1):
# If the Media Length is known, display the Time Line
if self.MediaLength > 0:
self.DrawTimeLine(self.startTime, self.endTime)
# For the Sequence Map, we need to know the maximum Episode time, which is already stored under self.endTime.
self.timelineMax = self.endTime
# For the Series Keyword Bar Graph and the Series Keyword Percentage Graph, we need to know the maximum coded
# time and the episode length for each Episode.
# For the Bar Graph, we use the longer of Episode Length or Total Episode Coded Time.
        # For the Percentage Graph, we need to know the total amount of coded video for each Episode
elif self.reportType in [2, 3]:
# Initialize the time line maximum variable
self.timelineMax = 0
# Create a dictionary to store the episode times.
episodeTimeTotals = {}
# Start by iterating through the Episode List ...
for (episodeName, seriesName, checked) in self.episodeList:
if checked:
# Initialize the Episode's length to 0
episodeTimeTotals[episodeName] = 0
# Iterate through the Keyword List
for (kwg, kw) in self.filteredKeywordList:
# Check to see if we have data for this keyword in this Episode.
if barData.has_key((episodeName, kwg, kw)):
# If so, add the time to the Episode's total time.
episodeTimeTotals[episodeName] += barData[(episodeName, kwg, kw)]
# If this Episode is the longest we've dealt with so far ...
if episodeTimeTotals[episodeName] > self.timelineMax:
# ... note the new time line maximum.
self.timelineMax = episodeTimeTotals[episodeName]
# If we are building the Bar Graph, ...
if self.reportType == 2:
# ... we need to adjust the timelineMax value for the length of the whole Episode, if it's larger.
self.timelineMax = max(self.timelineMax, self.episodeLengths[(episodeName, seriesName)])
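        # Worked example with invented numbers: an Episode 600000 ms (10 min)
        # long with 480000 ms (8 min) of overlap-corrected coded time keeps
        # timelineMax = 600000 for the Bar Graph, since the Episode length is
        # larger; the Percentage Graph instead divides each bar by the 480000
        # ms coded total, so every Episode's bars sum to 100%.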
# The Series Keyword Bar Graph extends from 0 to the timeLineMax value we just determined.
if self.reportType == 2:
self.DrawTimeLine(0, self.timelineMax)
# The Series Keyword Percentage Graph extends from 0% to 100%
elif (self.reportType == 3):
self.DrawTimeLine(0, 100)
# Add the top Horizontal Grid Line, if appropriate
if self.hGridLines:
# We want Grid Lines in light gray
self.graphic.SetColour('LIGHT GREY')
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddLines([(self.Bounds[2] - self.Bounds[0] - 4, self.CalcY(-1) + 6 + int(self.whitespaceHeight / 2),
self.CalcX(self.timelineMax), self.CalcY(-1) + 6 + int(self.whitespaceHeight / 2))])
else:
self.graphic.AddLines([(4, self.CalcY(-1) + 6 + int(self.whitespaceHeight / 2), self.CalcX(self.timelineMax), self.CalcY(-1) + 6 + int(self.whitespaceHeight / 2))])
# Select the color palate for colors or gray scale as appropriate
if self.colorOutput:
colorSet = TransanaGlobal.keywordMapColourSet
colorLookup = TransanaGlobal.transana_colorLookup
else:
colorSet = TransanaGlobal.keywordMapGraySet
colorLookup = TransanaGlobal.transana_grayLookup
# Set the colourIndex tracker to the last color used.
colourindex = self.keywordColors['lastColor']
# Iterate through the keyword list in order ...
for (KWG, KW) in self.filteredKeywordList:
# ... and assign colors to Keywords
# If we want COLOR output ...
if self.colorOutput:
# If the color is already defined ...
if self.keywordColors.has_key((KWG, KW)):
# ... get the index for the color
colourindex = self.keywordColors[(KWG, KW)]
# If the color has NOT been defined ...
else:
# Load the keyword
tmpKeyword = KeywordObject.Keyword(KWG, KW)
# If the Default Keyword Color is in the set of defined colors ...
if tmpKeyword.lineColorName in colorSet:
# ... define the color for this keyword
self.keywordColors[(KWG, KW)] = colorSet.index(tmpKeyword.lineColorName)
# If the Default Keyword Color is NOT in the defined colors ...
elif tmpKeyword.lineColorName != '':
# ... add the color name to the colorSet List
colorSet.append(tmpKeyword.lineColorName)
# ... add the color's definition to the colorLookup dictionary
colorLookup[tmpKeyword.lineColorName] = (int(tmpKeyword.lineColorDef[1:3], 16), int(tmpKeyword.lineColorDef[3:5], 16), int(tmpKeyword.lineColorDef[5:7], 16))
# ... determine the new color's index
colourindex = colorSet.index(tmpKeyword.lineColorName)
# ... define the new color for this keyword
self.keywordColors[(KWG, KW)] = colourindex
# If there is no Default Keyword Color defined
else:
# ... get the index for the next color in the color list
colourindex = self.keywordColors['lastColor'] + 1
# If we're at the end of the list ...
if colourindex > len(colorSet) - 1:
# ... reset the list to the beginning
colourindex = 0
# ... remember the color index used
self.keywordColors['lastColor'] = colourindex
# ... define the new color for this keyword
self.keywordColors[(KWG, KW)] = colourindex
# If we want Grayscale output ...
else:
# ... get the index for the next color in the color list
colourindex = self.keywordColors['lastColor'] + 1
# If we're at the end of the list ...
if colourindex > len(colorSet) - 1:
# ... reset the list to the beginning
colourindex = 0
# ... remember the color index used
self.keywordColors['lastColor'] = colourindex
# ... define the new color for this keyword
self.keywordColors[(KWG, KW)] = colourindex
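        # Illustrative example (palette names invented): with
        # colorSet = ['Red', 'Green', 'Blue'] and lastColor == 2, the next
        # keyword without a usable default color wraps back to colourindex 0,
        # so colors recycle once the palette is exhausted.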
# If we're producing a Series Keyword Sequence Map ..
if (self.reportType == 1):
            # Some clip boundary lines for overlapping clips can get over-written, depending on the nature of the overlaps.
# Let's create a separate list of these lines, which we'll add to the END of the process so they can't get overwritten.
overlapLines = []
# Iterate through all the Clip/Keyword records in the Clip List ...
for (KWG, KW, Start, Stop, ClipNum, ClipName, CollectNum, episodeName, seriesName) in self.clipList:
# Check the clip against the Episode List, the Clip Filter List, the Snapshot Filter List, and the
# Keyword Filter list to see if it should be included in the report.
if ((episodeName, seriesName, True) in self.episodeList) and \
(((ClipName, CollectNum, True) in self.clipFilterList) or
                ((ClipName, CollectNum, True) in self.snapshotFilterList)) and \
((KWG, KW) in self.filteredKeywordList):
# We compare the Clip's Start Time with the Map's boundaries. We only want the portion of the clip
# that falls within the Map's upper and lower boundaries.
if Start < self.startTime:
Start = self.startTime
if Start > self.endTime:
Start = self.endTime
if Stop > self.endTime:
Stop = self.endTime
if Stop < self.startTime:
Stop = self.startTime
# If Start and Stop match, the clip is off the Map and can be ignored. Otherwise ...
if Start != Stop:
# ... we start drawing the clip's bar by setting the bar thickness.
self.graphic.SetThickness(self.barHeight)
# Initialize a variable for building the line's data record
tempLine = []
# Determine the vertical placement of the line, which requires a different lookup key for the
# single-line report than the multi-line report.
if self.singleLineDisplay:
yPos = self.CalcY(yValLookup[episodeName])
else:
yPos = self.CalcY(yValLookup[(episodeName, KWG, KW)])
# Add the line data
tempLine.append((self.CalcX(Start), yPos, self.CalcX(Stop), yPos))
# Determine the appropriate color for the keyword
colourindex = self.keywordColors[(KWG, KW)]
# Tell the graph to use the selected color, using the appropriate lookup table
self.graphic.SetColour(colorLookup[colorSet[colourindex]])
# Add the line data to the graph
self.graphic.AddLines(tempLine)
# We need to track the bar positions so that the MouseOver can display data correctly. We need to do it
# later for the multi-line report, but here for the single-line report.
if self.singleLineDisplay:
# The first stage of the lookup is the Y-coordinate. If there's not already an
# EpisodeNameKeywordGroupKeywordLookup record for this Y-Coordinate ...
if not self.epNameKWGKWLookup.has_key(self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)):
# ... create an empty dictionary object for the first part of the Lookup Line
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)] = {}
# The second stage of the lookup is the X range in a tuple. If the X range isn't already in the dictionary,
# then add an empty List object for the X range.
if not self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)].has_key((self.CalcX(Start), self.CalcX(Stop))):
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)][(self.CalcX(Start), self.CalcX(Stop))] = []
# Add a Lookup Line for this Y-coordinate and X range containing the Episode Name, the keyword data,
# and the Clip Length.
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)][(self.CalcX(Start), self.CalcX(Stop))].append((episodeName, KWG, KW, Stop - Start))
if DEBUG and KWG == 'Transana Users' and KW == 'DavidW':
print "Looking at %s (%d)" % (ClipName, CollectNum)
# We need to indicate where there is overlap in this map.
# We use a different key to mark overlaps depending on whether we're in singleLineDisplay mode or not.
if self.singleLineDisplay:
                            overlapKey = episodeName
else:
overlapKey = (episodeName, KWG, KW)
# Now add the Clip to the keywordClipList. This holds all Keyword/Clip data in memory so it can be searched quickly
# This dictionary object uses the keyword pair as the key and holds a list of Clip data for all clips with that keyword.
# If the list for a given keyword already exists ...
if self.keywordClipList.has_key(overlapKey):
# Get the list of Clips that contain the current Keyword from the keyword / Clip List dictionary
overlapClips = self.keywordClipList[overlapKey]
# Iterate through the Clip List ...
for (objType, overlapStartTime, overlapEndTime, overlapClipNum, overlapClipName) in overlapClips:
# Let's look for overlap
overlapStart = Stop
overlapEnd = Start
if DEBUG and KWG == 'Transana Users' and KW == 'DavidW':
print "Start = %7d, overStart = %7s, Stop = %7s, overEnd = %7s" % (Start, overlapStartTime, Stop, overlapEndTime)
# Look for Start between overlapStartTime and overlapEndTime
if (Start >= overlapStartTime) and (Start < overlapEndTime):
overlapStart = Start
# Look for overlapStartTime between Start and Stop
if (overlapStartTime >= Start) and (overlapStartTime < Stop):
overlapStart = overlapStartTime
# Look for Stop between overlapStartTime and overlapEndTime
if (Stop > overlapStartTime) and (Stop <= overlapEndTime):
overlapEnd = Stop
# Look for overlapEndTime between Start and Stop
if (overlapEndTime > Start) and (overlapEndTime <= Stop):
overlapEnd = overlapEndTime
# If we've found an overlap, it will be indicated by Start being less than End!
if overlapStart < overlapEnd:
# Draw a multi-colored line to indicate overlap
overlapThickness = int(self.barHeight/ 3) + 1
self.graphic.SetThickness(overlapThickness)
if self.colorOutput:
self.graphic.SetColour("GREEN")
else:
self.graphic.SetColour("WHITE")
tempLine = [(self.CalcX(overlapStart), yPos, self.CalcX(overlapEnd), yPos)]
self.graphic.AddLines(tempLine)
if self.colorOutput:
self.graphic.SetColour("RED")
else:
self.graphic.SetColour("BLACK")
tempLine = [(self.CalcX(overlapStart), yPos - overlapThickness+1, self.CalcX(overlapEnd), yPos - overlapThickness+1)]
self.graphic.AddLines(tempLine)
if self.colorOutput:
self.graphic.SetColour("BLUE")
else:
self.graphic.SetColour("GRAY")
tempLine = [(self.CalcX(overlapStart), yPos + overlapThickness, self.CalcX(overlapEnd), yPos + overlapThickness)]
self.graphic.AddLines(tempLine)
# Let's remember the clip start and stop boundaries, to be drawn at the end so they won't get over-written
overlapLines.append(((self.CalcX(overlapStart), yPos - (self.barHeight / 2), self.CalcX(overlapStart), yPos + (self.barHeight / 2)),))
overlapLines.append(((self.CalcX(overlapEnd), yPos - (self.barHeight / 2), self.CalcX(overlapEnd), yPos + (self.barHeight / 2)),))
# ... add the new Clip to the Clip List
self.keywordClipList[overlapKey].append(('Clip', Start, Stop, ClipNum, ClipName))
# If there is no entry for the given keyword ...
else:
# ... create a List object with the first clip's data for this Keyword Pair key.
self.keywordClipList[overlapKey] = [('Clip', Start, Stop, ClipNum, ClipName)]
# Iterate through all the Snapshot/Keyword records in the Snapshot List ...
for (KWG, KW, Start, Stop, SnapshotNum, SnapshotName, CollectNum, episodeName, seriesName) in self.snapshotList:
# Check the clip against the Episode List, the Snapshot Filter List, and the Keyword Filter list to see if
# it should be included in the report.
if ((episodeName, seriesName, True) in self.episodeList) and \
((SnapshotName, CollectNum, True) in self.snapshotFilterList) and \
((KWG, KW) in self.filteredKeywordList):
# We compare the Snapshot's Start Time with the Map's boundaries. We only want the portion of the clip
# that falls within the Map's upper and lower boundaries.
if Start < self.startTime:
Start = self.startTime
if Start > self.endTime:
Start = self.endTime
if Stop > self.endTime:
Stop = self.endTime
if Stop < self.startTime:
Stop = self.startTime
# If Start and Stop match, the clip is off the Map and can be ignored. Otherwise ...
if Start != Stop:
# ... we start drawing the clip's bar by setting the bar thickness.
self.graphic.SetThickness(self.barHeight)
# Initialize a variable for building the line's data record
tempLine = []
# Determine the vertical placement of the line, which requires a different lookup key for the
# single-line report than the multi-line report.
if self.singleLineDisplay:
yPos = self.CalcY(yValLookup[episodeName])
else:
yPos = self.CalcY(yValLookup[(episodeName, KWG, KW)])
# Add the line data
tempLine.append((self.CalcX(Start), yPos, self.CalcX(Stop), yPos))
# Determine the appropriate color for the keyword
colourindex = self.keywordColors[(KWG, KW)]
# Tell the graph to use the selected color, using the appropriate lookup table
self.graphic.SetColour(colorLookup[colorSet[colourindex]])
# Add the line data to the graph
self.graphic.AddLines(tempLine)
# We need to track the bar positions so that the MouseOver can display data correctly. We need to do it
# later for the multi-line report, but here for the single-line report.
if self.singleLineDisplay:
# The first stage of the lookup is the Y-coordinate. If there's not already an
# EpisodeNameKeywordGroupKeywordLookup record for this Y-Coordinate ...
if not self.epNameKWGKWLookup.has_key(self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)):
# ... create an empty dictionary object for the first part of the Lookup Line
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)] = {}
# The second stage of the lookup is the X range in a tuple. If the X range isn't already in the dictionary,
# then add an empty List object for the X range.
if not self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)].has_key((self.CalcX(Start), self.CalcX(Stop))):
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)][(self.CalcX(Start), self.CalcX(Stop))] = []
# Add a Lookup Line for this Y-coordinate and X range containing the Episode Name, the keyword data,
# and the Clip Length.
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)][(self.CalcX(Start), self.CalcX(Stop))].append((episodeName, KWG, KW, Stop - Start))
if DEBUG and KWG == 'Transana Users' and KW == 'DavidW':
print "Looking at %s (%d)" % (SnapshotName, CollectNum)
# We need to indicate where there is overlap in this map.
# We use a different key to mark overlaps depending on whether we're in singleLineDisplay mode or not.
if self.singleLineDisplay:
                            overlapKey = episodeName
else:
overlapKey = (episodeName, KWG, KW)
# Now add the Clip to the keywordClipList. This holds all Keyword/Clip data in memory so it can be searched quickly
# This dictionary object uses the keyword pair as the key and holds a list of Clip data for all clips with that keyword.
# If the list for a given keyword already exists ...
if self.keywordClipList.has_key(overlapKey):
# Get the list of Clips that contain the current Keyword from the keyword / Clip List dictionary
overlapClips = self.keywordClipList[overlapKey]
# Iterate through the Clip List ...
for (objType, overlapStartTime, overlapEndTime, overlapClipNum, overlapClipName) in overlapClips:
# Let's look for overlap
overlapStart = Stop
overlapEnd = Start
if DEBUG and KWG == 'Transana Users' and KW == 'DavidW':
print "Start = %7d, overStart = %7s, Stop = %7s, overEnd = %7s" % (Start, overlapStartTime, Stop, overlapEndTime)
# Look for Start between overlapStartTime and overlapEndTime
if (Start >= overlapStartTime) and (Start < overlapEndTime):
overlapStart = Start
# Look for overlapStartTime between Start and Stop
if (overlapStartTime >= Start) and (overlapStartTime < Stop):
overlapStart = overlapStartTime
# Look for Stop between overlapStartTime and overlapEndTime
if (Stop > overlapStartTime) and (Stop <= overlapEndTime):
overlapEnd = Stop
# Look for overlapEndTime between Start and Stop
if (overlapEndTime > Start) and (overlapEndTime <= Stop):
overlapEnd = overlapEndTime
# If we've found an overlap, it will be indicated by Start being less than End!
if overlapStart < overlapEnd:
# Draw a multi-colored line to indicate overlap
overlapThickness = int(self.barHeight/ 3) + 1
self.graphic.SetThickness(overlapThickness)
if self.colorOutput:
self.graphic.SetColour("GREEN")
else:
self.graphic.SetColour("WHITE")
tempLine = [(self.CalcX(overlapStart), yPos, self.CalcX(overlapEnd), yPos)]
self.graphic.AddLines(tempLine)
if self.colorOutput:
self.graphic.SetColour("RED")
else:
self.graphic.SetColour("BLACK")
tempLine = [(self.CalcX(overlapStart), yPos - overlapThickness+1, self.CalcX(overlapEnd), yPos - overlapThickness+1)]
self.graphic.AddLines(tempLine)
if self.colorOutput:
self.graphic.SetColour("BLUE")
else:
self.graphic.SetColour("GRAY")
tempLine = [(self.CalcX(overlapStart), yPos + overlapThickness, self.CalcX(overlapEnd), yPos + overlapThickness)]
self.graphic.AddLines(tempLine)
# Let's remember the clip start and stop boundaries, to be drawn at the end so they won't get over-written
overlapLines.append(((self.CalcX(overlapStart), yPos - (self.barHeight / 2), self.CalcX(overlapStart), yPos + (self.barHeight / 2)),))
overlapLines.append(((self.CalcX(overlapEnd), yPos - (self.barHeight / 2), self.CalcX(overlapEnd), yPos + (self.barHeight / 2)),))
# ... add the new Clip to the Clip List
self.keywordClipList[overlapKey].append(('Snapshot', Start, Stop, SnapshotNum, SnapshotName))
# If there is no entry for the given keyword ...
else:
# ... create a List object with the first clip's data for this Keyword Pair key.
self.keywordClipList[overlapKey] = [('Snapshot', Start, Stop, SnapshotNum, SnapshotName)]
# For the single-line display only ...
if self.singleLineDisplay:
# ... finish with a blank Lookup Line so the bottom of the chart doesn't give false positive information
self.epNameKWGKWLookup[self.CalcY(self.episodeCount) - int((self.barHeight + self.whitespaceHeight)/2)] = {}
self.epNameKWGKWLookup[self.CalcY(self.episodeCount) - int((self.barHeight + self.whitespaceHeight)/2)][(0, self.timelineMax)] = [('', '', '', 0)]
# let's add the overlap boundary lines now
self.graphic.SetThickness(1)
self.graphic.SetColour("BLACK")
for tempLine in overlapLines:
self.graphic.AddLines(tempLine)
            if '__WXMAC__' not in wx.PlatformInfo:
self.menuFile.Enable(M_FILE_SAVEAS, True)
# We can't enable Print Preview for Right-To-Left languages
if not (TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft):
self.menuFile.Enable(M_FILE_PRINTPREVIEW, True)
self.menuFile.Enable(M_FILE_PRINT, True)
        # For the Series Keyword Bar Graph and the Series Keyword Percentage Graph, which are VERY similar and therefore use the same
# infrastructure ...
elif self.reportType in [2, 3]:
# ... we first iterate through all the Episodes in the Episode List ...
for (episodeName, seriesName, checked) in self.episodeList:
# .. and check to see if the Episode should be included.
if checked:
# These graphs are cumulative bar charts. We need to track the starting place for the next bar.
barStart = 0
# Create the first part of the Lookup Line, an empty dictionary for the Y coordinate
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)] = {}
# Now we iterate through the Filtered Keyword List. (This gives us the correct presentation ORDER for the bars!)
for (kwg, kw) in self.filteredKeywordList:
# Now we check to see if there's DATA for this Episode / Keyword combination.
if barData.has_key((episodeName, kwg, kw)):
# Start by setting the bar thickness
self.graphic.SetThickness(self.barHeight)
# Initialize a temporary list for accumulating Bar data (not really necessary with this structure, but no harm done.)
tempLine = []
# If we're drawing the Series Keyword Bar Graph ...
if self.reportType == 2:
# ... the bar starts at the unadjusted BarStart position ...
xStart = self.CalcX(barStart)
# ... and ends at the start plus the width of the bar!
xEnd = self.CalcX(barStart + barData[(episodeName, kwg, kw)])
# The mouseover for this report is the unadjusted length of the bar
lookupVal = barData[(episodeName, kwg, kw)]
# If we're drawing the Series Keyword Percentage Graph ...
elif self.reportType == 3:
# This should just be a matter of adjusting barData for episodeTimeTotals[episodeName], which is the total
# coded time for each Episode.
# ... the bar starts at the adjusted BarStart position ...
xStart = self.CalcX(barStart * 100.0 / episodeTimeTotals[episodeName])
# ... and ends at the adjusted (start plus the width of the bar)!
xEnd = self.CalcX((barStart + barData[(episodeName, kwg, kw)]) * 100.0 / episodeTimeTotals[episodeName])
# The mouseover for this report is the adjusted length of the bar, which is the percentage value for the bar!
lookupVal = barData[(episodeName, kwg, kw)] * 100.0 / episodeTimeTotals[episodeName]
# Build the line to be displayed based on these calculated values
tempLine.append((xStart, self.CalcY(yValLookup[episodeName]), xEnd, self.CalcY(yValLookup[episodeName])))
# Determine the index for this Keyword's Color
colourindex = self.keywordColors[(kwg, kw)]
# Tell the graph to use the selected color, using the appropriate lookup table
self.graphic.SetColour(colorLookup[colorSet[colourindex]])
# Actually add the line to the graph's data structure
self.graphic.AddLines(tempLine)
# Add a Lookup Line for this Y-coordinate and X range containing the Episode Name, the keyword data,
                            # and the Clip's Lookup Value determined above. Note that this is a bit simpler than for the Sequence Map
                            # because we don't have to worry about overlaps. Thus, the lookup value can just be a tuple instead of having
                            # to be a list of tuples to accommodate overlapping clip/keyword values.
if (TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft):
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)][(self.Bounds[2] - self.Bounds[0] - xStart, self.Bounds[2] - self.Bounds[0] - xEnd)] = \
(episodeName, kwg, kw, lookupVal)
else:
self.epNameKWGKWLookup[self.CalcY(yValLookup[episodeName]) - int((self.barHeight + self.whitespaceHeight)/2)][(xStart, xEnd)] = (episodeName, kwg, kw, lookupVal)
# The next bar should start where this bar ends. No need to adjust for the Percentage Graph -- that's handled
# when actually placing the bars.
barStart += barData[(episodeName, kwg, kw)]
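            # Hypothetical walk-through: with bar lengths of 30000, 20000 and
            # 50000 ms, barStart advances 0 -> 30000 -> 50000 -> 100000, so
            # each keyword's bar begins exactly where the previous one ended,
            # producing one stacked horizontal bar per Episode.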
# Finish with a blank Lookup Line so the bottom of the chart doesn't give false positive information
self.epNameKWGKWLookup[self.CalcY(self.episodeCount) - int((self.barHeight + self.whitespaceHeight)/2)] = {}
self.epNameKWGKWLookup[self.CalcY(self.episodeCount) - int((self.barHeight + self.whitespaceHeight)/2)][(0, self.timelineMax)] = ('', '', '', 0)
# Enable tracking of mouse movement over the graphic
self.graphic.Bind(wx.EVT_MOTION, self.OnMouseMotion)
# Add Legend. The multi-line Series Keyword Sequence Map doesn't get a legend, nor does any report where the showLegend option
# is turned off.
if (((self.reportType == 1) and self.singleLineDisplay) or (self.reportType in [2, 3])) and self.showLegend:
# Skip two lines from the bottom of the report.
Count +=2
# Let's place the legend at 1/3 of the way across the report horizontally.
startX = int((self.Bounds[2] - self.Bounds[0]) / 3.0)
# Let's place the legend below the report content.
startY = self.CalcY(Count)
            # To draw a box around the legend, we'll need to track its end coordinates too.
endX = startX
endY = startY
# For GetTextExtent to work right, we have to make sure the font is set in the graphic context.
# First, define a font for the current font settings
font = wx.Font(self.graphic.fontsize, self.graphic.fontfamily, self.graphic.fontstyle, self.graphic.fontweight)
# Set the font for the graphics context
self.graphic.SetFont(font)
# Add a label for the legend
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
self.graphic.AddTextRight(_("Legend:"), self.Bounds[2] - self.Bounds[0] - startX, self.CalcY(Count - 1) - 7)
else:
self.graphic.AddText(_("Legend:"), startX, self.CalcY(Count - 1) - 7)
endX = startX + 14 + self.graphic.GetTextExtent(_("Legend:"))[0]
# We'll use a 14 x 12 block to show color. Set the line thickness
self.graphic.SetThickness(12)
            # Iterate through the filtered keyword list (which gives the sorted keyword list) ...
for (kwg, kw) in self.filteredKeywordList:
# Determine the color index for this keyword
colourindex = self.keywordColors[(kwg, kw)]
# Set the color of the line, using the color lookup for the appropriate color set
self.graphic.SetColour(colorLookup[colorSet[colourindex]])
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
# Add the color box to the graphic
self.graphic.AddLines([(self.Bounds[2] - self.Bounds[0] - startX, self.CalcY(Count), self.Bounds[2] - self.Bounds[0] - (startX + 14), self.CalcY(Count) + 14)])
# Add the text associating the keyword with the colored line we just created
self.graphic.AddTextRight("%s : %s" % (kwg, kw), self.Bounds[2] - self.Bounds[0] - startX + 12, self.CalcY(Count) - 7)
else:
# Add the color box to the graphic
self.graphic.AddLines([(startX, self.CalcY(Count), startX + 14, self.CalcY(Count) + 14)])
# Add the text associating the keyword with the colored line we just created
self.graphic.AddText("%s : %s" % (kwg, kw), startX + 18, self.CalcY(Count) - 7)
# If the new text extends past the current right-hand boundary ...
if endX < startX + 14 + self.graphic.GetTextExtent("%s : %s" % (kwg, kw))[0]:
# ... note the new right-hand boundary for the box that outlines the legend
endX = startX + 14 + self.graphic.GetTextExtent("%s : %s" % (kwg, kw))[0]
# Note the new bottom boundary for the box that outlines the legend
endY = self.CalcY(Count) + 14
# Increment the line counter
Count += 1
# Set the line color to black and the line thickness to 1 for the legend bounding box
self.graphic.SetColour("BLACK")
self.graphic.SetThickness(1)
# If we are in a Right-To-Left Language ...
if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft:
# Draw the legend bounding box, based on the dimensions we've been tracking.
self.graphic.AddLines([(self.Bounds[2] - self.Bounds[0] - (startX - 6), startY - 24, self.Bounds[2] - self.Bounds[0] - (endX + 6), startY - 24),
(self.Bounds[2] - self.Bounds[0] - (endX + 6), startY - 24, self.Bounds[2] - self.Bounds[0] - (endX + 6), endY - 4),
(self.Bounds[2] - self.Bounds[0] - (endX + 6), endY - 4, self.Bounds[2] - self.Bounds[0] - (startX - 6), endY - 4),
(self.Bounds[2] - self.Bounds[0] - (startX - 6), endY - 4, self.Bounds[2] - self.Bounds[0] - (startX - 6), startY - 24)])
else:
# Draw the legend bounding box, based on the dimensions we've been tracking.
self.graphic.AddLines([(startX - 6, startY - 24, endX + 6, startY - 24), (endX + 6, startY - 24, endX + 6, endY - 4),
(endX + 6, endY - 4, startX - 6, endY - 4), (startX - 6, endY - 4, startX - 6, startY - 24)])
def DrawTimeLine(self, startVal, endVal):
""" Draw the time line on the Series Map graphic """
# Set the line thickness to 3
self.graphic.SetThickness(3)
        # Add a horizontal line from X = start to end at Y = -2, which will be above the data area of the graph
self.graphic.AddLines([(self.CalcX(startVal), self.CalcY(-2), self.CalcX(endVal), self.CalcY(-2))])
# Add Time markers
self.graphic.SetThickness(1)
if 'wxMac' in wx.PlatformInfo:
self.graphic.SetFontSize(11)
else:
self.graphic.SetFontSize(8)
# Add the starting point
X = startVal
# Add the line indicator
self.graphic.AddLines([(self.CalcX(X), self.CalcY(-2) + 1, self.CalcX(X), self.CalcY(-2) + 6)])
# The Percentage Graph needs a Percent label. Otherwise, convert to a time representation.
if self.reportType == 3:
XLabel = "%d%%" % X
else:
XLabel = Misc.TimeMsToStr(X)
# Add the time label.
self.graphic.AddTextCentered(XLabel, self.CalcX(X), self.CalcY(-2) + 5)
# Add the ending point
X = endVal
# Add the line indicator
self.graphic.AddLines([(self.CalcX(X), self.CalcY(-2) + 1, self.CalcX(X), self.CalcY(-2) + 6)])
# The Percentage Graph needs a Percent label. Otherwise, convert to a time representation.
if self.reportType == 3:
XLabel = "%d%%" % X
else:
XLabel = Misc.TimeMsToStr(X)
# Add the time label.
self.graphic.AddTextCentered(XLabel, self.CalcX(X), self.CalcY(-2) + 5)
# Add the first and last Vertical Grid Lines, if appropriate
if self.vGridLines:
# Determine how far down on the graph the vertical axis lines should go.
if self.reportType == 1:
vGridBottom = self.graphic.canvassize[1] - (int(1.75 * max(12, self.barHeight)) + self.whitespaceHeight)
else:
vGridBottom = self.CalcY(self.episodeCount - 1) + 7 + int(self.whitespaceHeight / 2)
# We want Grid Lines in light gray
self.graphic.SetColour('LIGHT GREY')
# Add the line for the Start Value
self.graphic.AddLines([(self.CalcX(self.startTime), self.CalcY(0) - 6 - int(self.whitespaceHeight / 2), self.CalcX(self.startTime), vGridBottom)])
# Add the line for the End Value
self.graphic.AddLines([(self.CalcX(endVal), self.CalcY(0) - 6 - int(self.whitespaceHeight / 2), self.CalcX(endVal), vGridBottom)])
# Reset the graphic color following drawing the Grid Lines
self.graphic.SetColour("BLACK")
# Determine the frequency of scale marks for the time line.
# If we're showing the Percentage Graph ...
if self.reportType == 3:
# We'll use marks at every 20%
numMarks = 5
interval = 20.0
# Otherwise ...
else:
# We'll use the same logic as the Visualization's Time Axis
(numMarks, interval) = self.GetScaleIncrements(endVal - startVal)
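        # For instance (illustrative values only), a 60000 ms span might come
        # back from GetScaleIncrements() as numMarks = 6 and interval =
        # 10000.0, placing a tick every 10 seconds; the exact figures depend
        # on that routine's rounding rules.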
# using the incrementation values we just determined ...
for loop in range(1, numMarks):
# ... add the intermediate time marks
X = int(round(float(loop) * interval) + startVal)
# Add the line indicator
self.graphic.AddLines([(self.CalcX(X), self.CalcY(-2) + 1, self.CalcX(X), self.CalcY(-2) + 6)])
# The Percentage Graph needs a Percent label. Otherwise, convert to a time representation.
if self.reportType == 3:
XLabel = "%d%%" % X
else:
XLabel = Misc.TimeMsToStr(X)
# Add the time label.
self.graphic.AddTextCentered(XLabel, self.CalcX(X), self.CalcY(-2) + 5)
# Add Vertical Grid Lines, if appropriate
if self.vGridLines:
# We want Grid Lines in light gray
self.graphic.SetColour('LIGHT GREY')
# Add the Vertical Grid Line
self.graphic.AddLines([(self.CalcX(X), self.CalcY(0) - 6 - int(self.whitespaceHeight / 2), self.CalcX(X), vGridBottom)])
# Reset the graphic color following drawing the Grid Lines
self.graphic.SetColour("BLACK")
def GetKeywordCount(self):
""" Returns the number of keywords in the filtered Keyword List and the size of the image that results """
return (len(self.filteredKeywordList), len(self.filteredKeywordList) * (self.barHeight + self.whitespaceHeight) + 4)
def OnMouseMotion(self, event):
""" Process the movement of the mouse over the Series Map. """
# Get the mouse's current position
x = event.GetX()
y = event.GetY()
# For the Series Keyword Sequence Map ...
if (self.reportType == 1):
# Based on the mouse position, determine the time in the video timeline
time = self.FindTime(x)
# Based on the mouse position, determine what keyword is being pointed to
# We use a different key to mark overlaps depending on whether we're in singleLineDisplay mode or not.
overlapKey = self.FindKeyword(y)
# First, let's make sure we're actually on the data portion of the graph
if (time > 0) and (time < self.MediaLength) and (overlapKey != None) and (overlapKey != '') and (overlapKey != ('', '', '')):
if self.singleLineDisplay:
if 'unicode' in wx.PlatformInfo:
# Encode with UTF-8 rather than TransanaGlobal.encoding because this is a prompt, not DB Data.
prompt = unicode(_("Episode: %s, Time: %s"), 'utf8')
else:
prompt = _("Episode: %s, Time: %s")
# Set the Status Text to indicate the current Episode value
self.SetStatusText(prompt % (overlapKey, Misc.time_in_ms_to_str(time)))
else:
if 'unicode' in wx.PlatformInfo:
# Encode with UTF-8 rather than TransanaGlobal.encoding because this is a prompt, not DB Data.
prompt = unicode(_("Episode: %s, Keyword: %s : %s, Time: %s"), 'utf8')
else:
prompt = _("Episode: %s, Keyword: %s : %s, Time: %s")
# Set the Status Text to indicate the current Keyword and Time values
self.SetStatusText(prompt % (overlapKey[0], overlapKey[1], overlapKey[2], Misc.time_in_ms_to_str(time)))
if (self.keywordClipList.has_key(overlapKey)):
# initialize the string that will hold the names of clips being pointed to
clipNames = ''
# Get the list of Clips that contain the current Keyword from the keyword / Clip List dictionary
clips = self.keywordClipList[overlapKey]
# For the single-line display ...
if self.singleLineDisplay:
# Initialize a string for the popup to show
clipNames = ''
currentRow = None
# Get a list of the Lookup dictionary keys. These keys are top Y-coordinate values
keyvals = self.epNameKWGKWLookup.keys()
# Sort the keys
keyvals.sort()
# Iterate through the keys
for yVal in keyvals:
# We need the largest key value that doesn't exceed the Mouse's Y coordinate
if yVal < y:
currentRow = self.epNameKWGKWLookup[yVal]
# Once the key val exceeds the Mouse position, we can stop looking.
else:
break
# Initialize the Episode Name, Keyword Group, and Keyword variables.
epName = KWG = KW = ''
# If we have a data record to look at ...
if currentRow != None:
# Iterate through all the second-level lookup keys, the X ranges ...
for key in currentRow.keys():
# If the horizontal mouse coordinate falls in the X range of a record ...
if (x >= key[0]) and (x < key[1]):
# ... iterate through the records ...
for clipKWRec in currentRow[key]:
# ... extract the Lookup data for the record ...
(epName, KWG, KW, length) = clipKWRec
# ... if it's not the first record in the list, add a comma separator ...
if clipNames != '':
clipNames += ', '
# ... and add the lookup data to the mouseover text string variable
clipNames += "%s : %s (%s)" % (KWG, KW, Misc.time_in_ms_to_str(length))
# If we have the Series Keyword Sequence Map multi-line display ...
else:
# Iterate through the Clip List ...
for (objType, startTime, endTime, clipNum, clipName) in clips:
# If the current Time value falls between the Clip's StartTime and EndTime ...
if (startTime < time) and (endTime > time):
# ... calculate the length of the Clip ...
clipLen = endTime - startTime
# ... and add the Clip Name and Length to the list of Clips with this Keyword at this Time
# First, see if the list is empty.
if clipNames == '':
# If so, just add the keyword name and time
clipNames = "%s (%s)" % (clipName, Misc.time_in_ms_to_str(clipLen))
else:
# ... add the keyword to the end of the list
clipNames += ', ' + "%s (%s)" % (clipName, Misc.time_in_ms_to_str(clipLen))
# If any clips are found for the current mouse position ...
if (clipNames != ''):
# ... add the Clip Names to the ToolTip so they will show up on screen as a hint
self.graphic.SetToolTipString(clipNames)
else:
# ... set the status text to a blank
self.SetStatusText('')
# The Series Keyword Bar Graph and the Series Keyword Percentage Graph both work the same way
elif self.reportType in [2, 3]:
# Initialize the current Row to None, in case we don't find data under the cursor
currentRow = None
# Get a list of the Lookup dictionary keys. These keys are top Y-coordinate values
keyvals = self.epNameKWGKWLookup.keys()
# Sort the keys
keyvals.sort()
# Iterate through the keys
for yVal in keyvals:
# We need the largest key value that doesn't exceed the Mouse's Y coordinate
if yVal < y:
currentRow = self.epNameKWGKWLookup[yVal]
# Once the key val exceeds the Mouse position, we can stop looking.
else:
break
# Initialize the Episode Name, Keyword Group, and Keyword variables.
epName = KWG = KW = ''
# If we have a data record to look at ...
if currentRow != None:
# Iterate through all the second-level lookup keys, the X ranges ...
for key in currentRow.keys():
# If the horizontal mouse coordinate falls in the X range of a record ...
if (x >= key[0]) and (x < key[1]):
# ... extract the Lookup data for the record. There aren't overlapping records to deal with here.
(epName, KWG, KW, length) = currentRow[key]
# If a data record was found ...
if KWG != '':
if 'unicode' in wx.PlatformInfo:
# Encode with UTF-8 rather than TransanaGlobal.encoding because this is a prompt, not DB Data.
prompt = unicode(_("Episode: %s, Keyword: %s : %s"), 'utf8')
else:
prompt = _("Episode: %s, Keyword: %s : %s")
# ... set the Status bar text:
self.SetStatusText(prompt % (epName, KWG, KW))
# If we have a Series Keyword Bar Graph ...
if self.reportType == 2:
# ... report Keyword info and Clip Length.
self.graphic.SetToolTipString("%s : %s (%s)" % (KWG, KW, Misc.time_in_ms_to_str(length)))
# If we have a Series Keyword Percentage Graph ...
elif self.reportType == 3:
# ... report Keyword and Percentage information
self.graphic.SetToolTipString("%s : %s (%3.1f%%)" % (KWG, KW, length))
# If we've got no data ...
else:
# ... reflect that in the Status Text.
self.SetStatusText('')
def OnLeftDown(self, event):
""" Left Mouse Button Down event """
# Pass the event to the parent
event.Skip()
def OnLeftUp(self, event):
""" Left Mouse Button Up event. Triggers the load of a Clip. """
# Note if the Control key is pressed
ctrlPressed = wx.GetKeyState(wx.WXK_CONTROL)
# Pass the event to the parent
event.Skip()
# Get the mouse's current position
x = event.GetX()
y = event.GetY()
# Based on the mouse position, determine the time in the video timeline
time = self.FindTime(x)
# Based on the mouse position, determine what keyword is being pointed to
kw = self.FindKeyword(y)
# Create an empty Dictionary Object for tracking Clip data
clipNames = {}
# First, let's make sure we're actually on the data portion of the graph
if (time > 0) and (time < self.MediaLength) and (kw != None) and (self.keywordClipList.has_key(kw)):
# If we have a Series Keyword Sequence Map ...
# (The Bar Graph and Percentage Graph do not have defined Click behaviors!)
if self.reportType == 1:
if 'unicode' in wx.PlatformInfo:
prompt = unicode(_("Episode: %s, Keyword: %s : %s, Time: %s"), 'utf8')
else:
prompt = _("Episode: %s, Keyword: %s : %s, Time: %s")
# Set the Status Text to indicate the current Keyword and Time values
self.SetStatusText(prompt % (kw[0], kw[1], kw[2], Misc.time_in_ms_to_str(time)))
# Get the list of Clips that contain the current Keyword from the keyword / Clip List dictionary
clips = self.keywordClipList[kw]
# Iterate through the Clip List ...
for (objType, startTime, endTime, clipNum, clipName) in clips:
# If the current Time value falls between the Clip's StartTime and EndTime ...
if (startTime <= time) and (endTime >= time):
# Check to see if this is a duplicate Clip
if clipNames.has_key(clipName) and (clipNames[clipName] != clipNum):
# If so, we need to count the number of duplicates.
# NOTE: This is not perfect. If the Clip Name is a shorter version of another Clip Name, the count
# will be too high.
tmpList = clipNames.keys()
# Initialize the counter to 1 so our end number will be 1 higher than the number counted
cnt = 1
# iterate through the list
for cl in tmpList:
# If we have a match ...
if cl.find(clipName) > -1:
# ... increment the counter
cnt += 1
# Add the clipname and counter to the Clip Names dictionary
clipNames["%s (%d)" % (clipName, cnt)] = (objType, clipNum)
else:
# Add the Clip Name as a Dictionary key pointing to the Clip Number
clipNames[clipName] = (objType, clipNum)
# If only 1 Item is found ...
if len(clipNames) == 1:
# ... load that clip by looking up the clip's number
self.parent.KeywordMapLoadItem(clipNames[clipNames.keys()[0]][0], clipNames[clipNames.keys()[0]][1], ctrlPressed)
# If left-click, close the Series Map. If not, don't!
if event.LeftUp():
# Close the Series Map
self.CloseWindow(event)
        # If more than one Clip is found ...
elif len(clipNames) > 1:
# Use a wx.SingleChoiceDialog to allow the user to make the choice between multiple clips here.
dlg = wx.SingleChoiceDialog(self, _("Which Clip would you like to load?"), _("Select a Clip"),
clipNames.keys(), wx.CHOICEDLG_STYLE)
                # If the user selects an item and clicks OK ...
if dlg.ShowModal() == wx.ID_OK:
# ... load the selected clip
self.parent.KeywordMapLoadItem(clipNames[dlg.GetStringSelection()][0], clipNames[dlg.GetStringSelection()][1], ctrlPressed)
# Destroy the SingleChoiceDialog
dlg.Destroy()
# If left-click, close the Series Map. If not, don't!
if event.LeftUp():
# Close the Series Map
self.CloseWindow(event)
# If the user selects Cancel ...
else:
# ... destroy the SingleChoiceDialog, but that's all
dlg.Destroy()
| sanyaade-mediadev/Transana | LibraryMap.py | Python | gpl-2.0 | 137,482 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2014 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# ([email protected], [email protected]) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## informes.py - Window that launches the reports as PDF.
###################################################################
## NOTES:
##
###################################################################
## Changelog:
## 29 November 2005 -> Started
## 30 November 2005 -> 50% functional
###################################################################
## TODO:
## - Show filtering options for the data to be printed
##
###################################################################
import pygtk
pygtk.require('2.0')
import os
import sys
import tempfile
from formularios import utils
from informes.geninformes import give_me_the_name_baby
def abrir_pdf(pdf):
"""
Ejecuta una aplicación para visualizar el pdf en pantalla.
Si es MS-Windows tiro de adobre acrobat (a no ser que encuentre
algo mejor, más portable y empotrable). Si es UNIX-like lanzo
el evince, que es más completito que el xpdf.
Ni que decir tiene que todo eso debe instalarse aparte.
"""
if not pdf or pdf is None:
return
    # TODO: Big problem. With the ampersand sending it to the background,
    # sh always returns 0 as the command's exit status, so there is no way
    # to know when it ran correctly and when it did not.
if os.name == 'posix':
        # NOTE(Diego): I changed this because of a dependency problem and evince
if not ((not os.system('evince "%s" &' % pdf)) or \
(not os.system('acroread "%s" &' % pdf)) or \
(not os.system('xdg-open "%s" &' % pdf)) or \
(not os.system('gnome-open "%s" &' % pdf)) or \
(not os.system('xpdf "%s" &' % pdf))):
utils.dialogo_info(titulo = "VISOR PDF NO ENCONTRADO",
texto = "No se encontró evince, acroread ni xpdf en el sistema.\nNo fue posible mostrar el archivo %s." % (pdf))
else:
        # NOTE: This is not platform independent:
os.startfile(pdf) # @UndefinedVariable
def abrir_csv(csv, ventana_padre = None):
"""
Si la plataforma es MS-Windows abre el archivo con la aplicación
predeterminada para los archivos CSV (por desgracia me imagino que
MS-Excel). Si no, intenta abrirlo con OpenOffice.org Calc.
"""
    # TODO: Big problem. With the ampersand sending it to the background, sh always returns 0 as the command's exit status,
    # so there is no way to know when it ran correctly and when it did not.
    if sys.platform != 'win32': # More general than os.name (which returns
                                # "nt" on the office Windows 2000 machines).
try:
res = os.system('xdg-open "%s" &' % csv)
assert res == 0
except AssertionError:
if not ( (not os.system('oocalc2 "%s" || oocalc "%s" &'%(csv,csv)))
or (not os.system('oocalc "%s" &' % csv))
):
utils.dialogo_info(titulo = "OOO NO ENCONTRADO",
texto = "No se encontró OpenOffice.org en el sistema.\nNo fue posible mostrar el archivo %s." % (csv),
padre = ventana_padre)
else:
        # NOTE: This is not platform independent:
os.startfile(csv) # @UndefinedVariable
def mandar_a_imprimir_con_ghostscript(fichero, rotate = False):
"""
Lanza un trabajo de impresión a través de acrobat reader.
Usa parámetros no documentados y oficialmente no soportados
por acrobat. Esta función es temporal, hasta que encuentre
un visor/impresor de PDF desde línea de comandos.
Win-only. No funciona en posix ni aún teniendo el reader
para esa plataforma (creo).
NO USAR CON ROLLOS: No cuadra bien la etiqueta y además deja abierta la
ventana después.
Impresora CAB harcoded (y además no es el nombre por defecto de la
impresora).
¡MENTIRA COCHINA! Lo hace a través de Ghostscript.
"""
if rotate:
from lib.PyPDF2 import PyPDF2
fichrotado = os.path.join(tempfile.gettempdir(),
"gs_rotated_%s.pdf" % give_me_the_name_baby()
)
rotado = PyPDF2.PdfFileWriter()
original = PyPDF2.PdfFileReader(open(fichero, "rb"))
for page in range(original.getNumPages()):
rotado.addPage(original.getPage(page).rotateClockwise(270))
rotado.write(open(fichrotado, "wb"))
fichero = fichrotado
    # NOTE: Path to the reader hardcoded !!!
    # comando = """"C:\\Archivos de programa\\Adobe\\Acrobat 6.0\\Reader\\AcroRd32.exe" /t "%s" GEMINI2 """ % (fichero)
    # comando = """start /B AcroRd32 /t "%s" CAB """ % (fichero)
    # ## OBSOLETE. We now use TSC printers
# comando = """gswin32c.exe -dQueryUser=3 -dNoCancel -dNOPAUSE -dBATCH"""\
# """ -sDEVICE=mswinpr2 -sOutputFile="%%printer%%CAB" %s """ % (
# fichero)
comando = """gswin32c.exe -dQueryUser=3 -dNoCancel -dNOPAUSE -dBATCH"""\
""" -sDEVICE=mswinpr2 """\
"""-sOutputFile="%%printer%%TSC TTP-246M Pro" %s """ % (fichero)
    # NOTE: Requires that:
    # 1.- The CAB printer is the default one in the Windows printers
    # "folder".
    # 2.- It has the proper configuration by default (landscape, label
    # size, etc.)
    # 3.- gs is on the PATH (adding C:\Archivos de programa...\bin to the
    # PATH environment variable in the advanced properties of My Computer.)
if os.system(comando):
print "No se pudo hacer la impresión directa. Lanzo el visor."
abrir_pdf(fichero)
def mandar_a_imprimir_con_foxit(fichero):
"""
Lanza un trabajo de impresión a través de foxit reader o
LPR si el sistema es UNIX.
OJO: Siempre manda a la impresora por defecto.
"""
import time
    time.sleep(1) # Pause so that the PDF is not still missing from disk.
if os.name == "posix":
comando = """lpr %s""" % (fichero)
else:
        # NOTE: Path to the reader hardcoded !!!
comando = """"C:\Archivos de programa\Foxit Software\Foxit Reader\Foxit Reader.exe" /p %s """ % (fichero)
# print comando
if os.system(comando):
print "No se pudo hacer la impresión directa con:\n%s\n\nLanzo el visor." % comando
abrir_pdf(fichero)
def get_ruta_ghostscript():
"""
Devuelve la ruta al ejecutable gswin32.exe.
Si no lo encuentra, devuelve None.
"""
ruta = None
ruta_por_defecto = os.path.join("C:\\", "Archivos de programa", "gs", "gs8.54", "bin", "gswin32c.exe")
if os.path.exists(ruta_por_defecto):
ruta = ruta_por_defecto
else:
pass
        # TODO: It should look for the path with os.walk and the like.
return ruta
def imprimir_con_gs(fichero, impresora = None, blanco_y_negro = False):
"""
Imprime el fichero (PDF o PostScript) a través de GhostScript.
Si impresora es None, imprime sin intervención del usuario en la impresora
por defecto.
¡SOLO PARA SISTEMAS MS-WINDOWS!
"""
if os.name == "posix":
abrir_pdf(fichero)
else:
        # Noting down the printers in service here, even though it is not used.
impresoras = {'oficina': ("RICOH Aficio 1224C PCL 5c", "OFICINA"), # @UnusedVariable
'etiquetas': ("TSC TTP-246M Pro", "CAB",
"CAB MACH 4 200DPI", "GEMINI2")}
# XXX
ruta_a_gs = get_ruta_ghostscript()
if ruta_a_gs == None:
print "informes.py (imprimir_con_gs): GhostScript no encontrado."
abrir_pdf(fichero)
else:
if impresora == None:
por_defecto = " -dQueryUser=3 "
impresora = ""
else:
por_defecto = ""
impresora = ' -sOutputFile="\\spool\%s" ' % (impresora)
if blanco_y_negro:
blanco_y_negro = " -dBitsPerPixel=1 "
else:
blanco_y_negro = ""
comando = r'""%s" %s -dNOPAUSE -dBATCH -sDEVICE=mswinpr2 %s -dNoCancel %s "%s""' \
% (ruta_a_gs, por_defecto, impresora, blanco_y_negro, fichero)
try:
salida = os.system(comando)
except:
salida = -1
            if salida != 0 and salida != 1: # gs returns 1 if the user hits Cancel.
print "informes.py (imprimir_con_gs): No se pudo imprimir. "\
"Lanzo el visor."
abrir_pdf(fichero)
            if salida == 1: # If the user cancels printing, maybe they want
                            # to see it on screen.
abrir_pdf(fichero)
def que_simpatico_es_el_interprete_de_windows(comando, parametro):
"""
El os.system llama a cmd /C y/o /K, y el cmd.exe es muy simpático y se
comporta como le da la gana. No sabe ni escapar los espacios de sus propias rutas,
por lo que como intentes ejecutar algo dentro de Archivos de programa... total,
que hay que encerrar todo entre comillas y otra vez entre comillas.
mi nota: PUTAMIERDA
ver: http://jason.diamond.name/weblog/2005/04/14/dont-quote-me-on-this
"""
command = '"%s" "%s"' % (comando, parametro)
if sys.platform[:3] == 'win':
command = '"%s"' % command
os.system(command)
## ---------------------- Main routine ------------------------
if __name__=='__main__':
    if len(sys.argv) < 2:
print "ERROR: No se pasó el nombre de ningún informe"
sys.exit(0)
from informes import geninformes
informe = ' '.join(sys.argv[1:])
if informe == 'Clientes y consumo':
nombrepdf = geninformes.pedidosCliente()
elif informe == 'Albaranes por cliente':
nombrepdf = geninformes.albaranesCliente()
elif informe == 'Compras':
nombrepdf = geninformes.compras()
elif informe == 'Ventas':
nombrepdf = geninformes.ventas()
elif informe == 'Vencimientos pendientes de pago':
# nombrepdf = geninformes.vecimientosPendientesDePago()
utils.dialogo_info('FUNCIONALIDAD NO IMPLEMENTADA', 'Este informe aún no se puede generar.')
sys.exit(0)
elif informe == 'Vencimientos pendientes de pagar':
utils.dialogo_info('FUNCIONALIDAD NO IMPLEMENTADA', 'Este informe aún no se puede generar.')
sys.exit(0)
# nombrepdf = geninformes.()
#===========================================================================
# elif informe == 'Productos bajo mínimo':
# nombrepdf = geninformes.productosBajoMinimos()
# elif informe == 'Albaranes por facturar':
# nombrepdf = geninformes.albaranesPorFacturar()
#===========================================================================
elif informe == 'Albaranes facturados':
nombrepdf = geninformes.albaranesFacturados()
elif informe == 'Existencias':
nombrepdf = geninformes.existencias()
elif informe == 'Incidencias':
nombrepdf = geninformes.incidencias()
elif informe == 'Informes de laboratorio':
utils.dialogo_info('FUNCIONALIDAD NO IMPLEMENTADA', 'Este informe aún no se puede generar.')
sys.exit(0)
# nombrepdf = geninformes.()
elif informe == 'Comparativa de cobros y pagos':
utils.dialogo_info('FUNCIONALIDAD NO IMPLEMENTADA', 'Este informe aún no se puede generar.')
sys.exit(0)
# nombrepdf = geninformes.()
else:
print "El informe %s no existe" % informe
sys.exit(0)
abrir_pdf(nombrepdf)
    #os.unlink(nombrepdf)
    # If I delete it, there will not even be time for evince to open it. Let
    # it get overwritten the next time the same report is run, and that is it.
    # (Until I come up with something better)
| pacoqueen/ginn | ginn/formularios/reports.py | Python | gpl-2.0 | 13,563 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_exams.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| Lapin-Blanc/django_exams | manage.py | Python | gpl-2.0 | 255 |
from restaurant.models import Resa, Fournisseur, Fourniture, Plat, Menu, ResaResto
from django.contrib import admin
class ResaRestoAdmin(admin.ModelAdmin):
exclude = ('reservationEcrasee', 'nbPassagers', 'nbPiquesNiques')
admin.site.register(Resa)
admin.site.register(Fournisseur)
admin.site.register(Fourniture)
admin.site.register(Plat)
admin.site.register(Menu)
admin.site.register(ResaResto, ResaRestoAdmin)
| simonpessemesse/seguinus | restaurant/admin.py | Python | gpl-2.0 | 434 |
from bs4 import BeautifulSoup
import re
import requests
import os
link_list = [ ]
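# Query EDGAR's company-browse endpoint for CNAT's 10-Q filings in XML form;
# each <filinghref> element of the response points at one filing index page.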
base_url = "http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=CNAT&type=10-Q&datea=20140930&owner=exclude&output=xml&count=10"
r = requests.get(base_url)
data = r.text
soup = BeautifulSoup(data)
for link in soup.find_all('filinghref'):
l1 = str(link)
l2 = l1.replace('<filinghref>','')
link_list = l2.replace('</filinghref>','')
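    # NOTE: link_list is reassigned on every pass, so only the URL of the
    # last <filinghref> in the response survives this loop.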
r = requests.get(link_list)
data = r.text
soup = BeautifulSoup(data)
listr = [ ]
for link in soup.find_all('a'):
listr.append(link)
for l in listr:
print l
listr[9]
new_link = listr[9]
n2 = str(new_link)
n3 = n2.replace('<a href="','http://www.sec.gov')
n4 = n3.partition('"')
n5 = n4[0]
source_doc = str(n5)
print source_doc
| drkbyte/edgar | edgar.py | Python | gpl-2.0 | 764 |
#!/usr/bin/python
"""
Author: Deokwooo Jung [email protected]
======================================================================
Learning and Visualizing the BMS sensor-time-weather data structure
======================================================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historical weather data.
The fundamental timelet for analysis is 15 min, referred to as Q.
** currently use H (Hour) as the fundamental timelet, need to change later **
The following analysis steps are designed to be executed.
Data Pre-processing
--------------------------
- Data Retrieval and Standardization
- Outlier Detection
- Interpolation
Data Summarization
--------------------------
- Data Transformation
- Sensor Clustering
Model Discovery Bayesian Network
--------------------------
- Automatic State Classification
- Structure Discovery and Analysis
"""
# General Modules
# To force float point division
from __future__ import division
# Custom libraries
from quasar_reader import *
import datetime as dt
import mytool as mt
from shared_constants import *
import time
##################################################################
# Processing Configuration Settings
##################################################################
# TODO: this whole section of selecting a building should be wrapped in a parameter.
# Building keyword.
bldg_key = 'GW2'
pname_key = '_POWER_'
# Setting the analysis period, where ANS_START_T and ANS_END_T are the
# starting and the ending timestamps.
ANS_START_T = dt.datetime(2013, 6, 1, 0)
ANS_END_T = dt.datetime(2013, 12, 30, 0)
# Setting for analysis time interval where all BEMS and weather data is aligned
# for a slotted time line quantized by TIMELET_INV.
TIMELET_INV = dt.timedelta(minutes=15)
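# With 15-minute slots this gives 96 aligned samples per day.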
print TIMELET_INV, 'time slot interval is set for this data set !!'
print '-------------------------------------------------------------------'
print "Clean up old output..."
mt.remove_all_files(FIG_DIR)
FILEHASH = \
{'2e43475f-5048-4153-4531-5f4143544956': 'GW2.CG_PHASE1_ACTIVE_POWER_M'
,'2e43475f-5048-4153-4532-5f504f574552': 'GW2.CG_PHASE2_POWER_FACTOR_M'
,'2e43475f-5048-4153-4533-5f5245414354': 'GW2.CG_PHASE3_REACTIVE_POWER_M'
,'2e43475f-5048-4153-4531-5f504f574552': 'GW2.CG_PHASE1_POWER_FACTOR_M'
,'2e43475f-5048-4153-4532-5f5245414354': 'GW2.CG_PHASE2_REACTIVE_POWER_M'
,'2e43475f-5359-5354-454d-5f4143544956': 'GW2.CG_SYSTEM_ACTIVE_POWER_M'
,'2e43475f-5048-4153-4531-5f5245414354': 'GW2.CG_PHASE1_REACTIVE_POWER_M'
,'2e43475f-5048-4153-4533-5f4143544956': 'GW2.CG_PHASE3_ACTIVE_POWER_M'
,'2e43475f-5359-5354-454d-5f504f574552': 'GW2.CG_SYSTEM_POWER_FACTOR_M'
,'2e43475f-5048-4153-4532-5f4143544956': 'GW2.CG_PHASE2_ACTIVE_POWER_M'
,'2e43475f-5048-4153-4533-5f504f574552': 'GW2.CG_PHASE3_POWER_FACTOR_M'
,'2e43475f-5359-5354-454d-5f5245414354': 'GW2.CG_SYSTEM_REACTIVE_POWER_M'}
#----------------------------- DATA PRE-PROCESSING -----------------------------
# Retrieving a set of sensors with specified key
print '#' * 80
print 'DATA PRE-PROCESSING FROM QUASAR WITH KEY ', bldg_key, '...'
print '#' * 80
start_time = int(time.mktime(ANS_START_T.timetuple()))
end_time = int(time.mktime(ANS_END_T.timetuple()))
read_sensor_data(FILEHASH, start_time, end_time, TIMELET_INV, bldg_key, pname_key)
| TinyOS-Camp/DDEA-DEV | DDEA-QUASAR/ddea.py | Python | gpl-2.0 | 3,464 |
# coding: utf-8
# -------------------------------------------------------------------------------
# Name: sfp_names
# Purpose: Identify human names in content fetched.
#
# Author: Steve Micallef <[email protected]>
#
# Created: 24/03/2014
# Copyright: (c) Steve Micallef
# Licence: GPL
# -------------------------------------------------------------------------------
import re
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_names(SpiderFootPlugin):
"""Name Extractor:Footprint:Attempt to identify human names in fetched content."""
# Default options
opts = {
'algotune': 50,
'emailtoname': True
}
# Option descriptions
optdescs = {
'algotune': "A value between 0-100 to tune the sensitivity of the name finder. Less than 40 will give you a lot of junk, over 50 and you'll probably miss things but will have less false positives.",
'emailtoname': "Convert e-mail addresses in the form of firstname.surname@target to names?"
}
results = dict()
d = None
n = None
fq = None
def builddict(self, files):
wd = dict()
for f in files:
wdct = open(self.sf.myPath() + "/ext/ispell/" + f, 'r')
dlines = wdct.readlines()
for w in dlines:
w = w.strip().lower()
wd[w.split('/')[0]] = True
return wd.keys()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = dict()
d = self.builddict(["english.0", "english.2", "english.4",
"british.0", "british.2", "british.4",
"american.0", "american.2", "american.4"])
self.n = self.builddict(["names.list"])
self.fq = ["north", "south", "east", "west", "santa", "san", "blog", "sao"]
# Take dictionary words out of the names list to keep things clean
self.d = list(set(d) - set(self.n))
# Clear / reset any other class member variables here
# or you risk them persisting between threads.
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
# * = be notified about all events.
def watchedEvents(self):
return ["TARGET_WEB_CONTENT", "EMAILADDR"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["HUMAN_NAME"]
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
if eventName == "EMAILADDR" and self.opts['emailtoname']:
if "." in eventData.split("@")[0]:
name = " ".join(map(unicode.capitalize, eventData.split("@")[0].split(".")))
# Notify other modules of what you've found
evt = SpiderFootEvent("HUMAN_NAME", name, self.__name__, event)
self.notifyListeners(evt)
return None
# Stage 1: Find things that look (very vaguely) like names
        rx = re.compile("([A-Z][a-zàáâãäåèéêëìíîïòóôõöùúûüýÿ]+)\s+.?.?\s?([A-Z][àáâãäåèéêëìíîïòóôõöùúûüýÿa-zA-Z\'\-]+)")
m = re.findall(rx, eventData)
for r in m:
# Start off each match as 0 points.
p = 0
notindict = False
# Shouldn't encounter "Firstname's Secondname"
first = r[0].lower()
if first[len(first) - 2] == "'" or first[len(first) - 1] == "'":
continue
# Strip off trailing ' or 's
secondOrig = r[1].replace("'s", "")
secondOrig = secondOrig.rstrip("'")
second = r[1].lower().replace("'s", "")
second = second.rstrip("'")
# If both words are not in the dictionary, add 75 points.
if first not in self.d and second not in self.d:
p += 75
notindict = True
# If the first word is a known popular first name, award 50 points.
if first in self.n:
p += 50
# If either word is 2 characters, subtract 50 points.
if len(first) == 2 or len(second) == 2:
p -= 50
            # If the first word is in our false-positive prefix list, knock out more points.
if first in self.fq:
p -= 50
            # If the first word is in the dictionary but the second isn't,
            # subtract 20 points.
            if not notindict:
                if first in self.d and second not in self.d:
                    p -= 20
                # If the second word is in the dictionary but the first isn't,
                # subtract 40 points.
                if first not in self.d and second in self.d:
                    p -= 40
name = r[0] + " " + secondOrig
if p > self.opts['algotune']:
# Notify other modules of what you've found
evt = SpiderFootEvent("HUMAN_NAME", name, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_names class
| Reality9/spiderfoot | modules/sfp_names.py | Python | gpl-2.0 | 5,507 |
# Copyright (C) 2011 Michal Zielinski ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import select
import time
import graphics
import freeciv.client
from freeciv import progress
from freeciv import monitor
import features
from freeciv.client import _freeciv as freeciv
import dialogs
import common
import window
import actions
import diplomacy
import city
import key
import misc
net_socket = -1
client = None
main = None
features.add_feature('debug.outwindow', default=False, type=bool)
class ConnectionError(Exception):
pass
@freeciv.register
def ui_main():
freeciv.func.init_things()
freeciv.func.init_mapcanvas_and_overview()
main()
def tick():
pass
@freeciv.register
def ui_init():
common.init()
@freeciv.register
def add_net_input(sockid):
global net_socket
net_socket = sockid
@freeciv.register
def remove_net_input():
global net_socket
net_socket = -1
@freeciv.register
def real_output_window_append(text, tags, connid):
if client:
client.console_line(text)
else:
print 'outwindow: %s\n' % text,
@freeciv.register
def real_meswin_dialog_update():
count = freeciv.func.meswin_get_num_messages()
client.update_meswin([ freeciv.func.meswin_get_message(i) for i in xrange(count) ])
@freeciv.register
def real_economy_report_dialog_update():
client.update_taxes()
@freeciv.register
def update_turn_done_button(restore):
client._set_turn_button_state(restore)
@freeciv.register
def handle_authentication_req(type, prompt):
client.handle_authentication_req(prompt)
@freeciv.register
def popup_notify_goto_dialog(title, text, a, b):
client.popup_notify(text)
class Client(object):
def __init__(self, no_quit=False):
global client
client = self
self.no_quit = no_quit
self.next_time = time.time()
self.cursor_pos = (0, 0)
self.draw_patrol_lines = False
self.out_window_callback = None
self.turn_button_flip = False
self.meetings = {}
self.additional_server_line_callback = None
def server_line_callback(self, line):
if self.additional_server_line_callback:
self.additional_server_line_callback(line)
def tick(self):
if self.next_time >= time.time():
sleep_time = freeciv.func.real_timer_callback()
self.next_time = time.time() + sleep_time
freeciv.func.call_idle_callbacks()
if net_socket != -1:
r, w, x = select.select([net_socket], [], [], 0.01)
if r:
freeciv.func.input_from_server(net_socket)
else:
time.sleep(0.03)
#window.draw_cursor(cursor_pos)
def console_line(self, text):
if self.out_window_callback:
self.out_window_callback(text)
monitor.log('outwindow', text)
if features.get('debug.outwindow'):
print '[OutWindow]', text
def end_turn(self):
freeciv.func.key_end_turn()
def chat(self, text):
freeciv.func.send_chat(text)
def draw_map(self, surf, pos):
mapview = freeciv.func.get_mapview_store()
surf.blit(mapview, pos)
def get_mapview_store(self):
return freeciv.func.get_mapview_store()
def draw_overview(self, surf, pos, scale):
dest_surf = window.overview_surface.scale(scale)
surf.blit(dest_surf, pos)
def overview_click(self, x, y):
freeciv.func.py_overview_click(x, y)
def set_map_size(self, size):
freeciv.func.map_canvas_resized(*size)
def update_map_canvas_visible(self):
freeciv.func.update_map_canvas_visible()
def get_overview_size(self):
return window.overview_surface.get_size()
def mouse_motion(self, pos):
window.mouse_pos = self.cursor_pos = pos
x, y = pos
freeciv.func.update_line(x, y)
freeciv.func.control_mouse_cursor_pos(x, y)
def unit_select_dialog_popup(self, units):
print 'unit_select_dialog', units
def key_event(self, type, keycode):
key.key(type, keycode)
def update_meswin(self, lines):
print 'meswin updated'
print '\t' + '\n\t'.join(map(repr, lines))
def connect_to_server(self, username, host, port):
bufflen = 512
buff = ' ' * bufflen
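        # buff is a preallocated scratch string the C side fills in with an
        # error message on failure; the padding/NULs are stripped below.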
result = freeciv.func.connect_to_server(username, host, port, buff, bufflen)
buff = buff.rstrip(' ').rstrip('\0')
if result == -1:
raise ConnectionError(buff)
def escape(self):
if self.draw_patrol_lines:
self.key_event(graphics.const.KEYDOWN, graphics.const.K_ESCAPE)
self.draw_patrol_lines = False
else:
self.quit()
def disable_menus(self):
print 'disable_menus'
def get_unit_in_focus(self):
units = freeciv.func.get_units_in_focus()
if units:
return actions.Unit(units[0])
else:
return None
def disconnect(self):
if not self.no_quit:
self.chat('/quit')
freeciv.func.disconnect_from_server()
global client
client = None
def get_tax_values(self):
lux = freeciv.func.get_tax_value(True)
science = freeciv.func.get_tax_value(False)
tax = 100 - lux - science
return tax, lux, science
def set_tax_values(self, tax, lux, science):
freeciv.func.set_tax_values(tax, lux, science)
def get_gold(self):
return freeciv.func.get_gold_amount()
def get_gold_income(self):
return freeciv.func.get_gold_income()
def get_current_tech(self):
return freeciv.func.get_current_tech()
def get_techs(self, level=11):
return map(Tech, freeciv.func.get_techs(level))
def get_all_techs(self):
return map(Tech, freeciv.func.get_advances())
def get_current_year_name(self):
return freeciv.func.get_current_year_name()
def get_governments(self):
return map(Gov, freeciv.func.get_governments())
def get_cities(self):
return map(city.City, freeciv.func.get_cities())
def _set_turn_button_state(self, restore):
if not freeciv.func.get_turn_done_button_state():
return
if (restore and self.turn_button_flip) or not restore:
self.turn_button_flip = not self.turn_button_flip
self.set_turn_button_state(not self.turn_button_flip)
def set_turn_button_state(self, enabled):
pass
def authenticate(self, password):
freeciv.func.authenticate(password)
def get_players(self):
return diplomacy.get_players()
def get_player_with_id(self, id):
return diplomacy.Player(freeciv.func.player_by_number(id))
def get_playing(self):
return diplomacy.Player(freeciv.func.get_playing())
def toggle_full_labels(self):
freeciv.func.request_toggle_city_full_bar()
def get_caravan_options(self, unit, homecity, destcity):
can_establish, can_trade, can_wonder = \
freeciv.func.py_get_caravan_options(unit.handle, homecity.handle, destcity.handle)
return can_establish, can_trade, can_wonder
def save_and_get_name(self, callback):
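        # Watch the server console output for the "Game saved as <name>" line
        # that /save produces, then hand the extracted name to callback.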
def line_callback(line):
prefix = 'Game saved as '
if line.startswith(prefix):
self.additional_server_line_callback = None
callback(line[len(prefix):].strip())
self.additional_server_line_callback = line_callback
self.chat('/save')
class Gov(object):
def __init__(self, (index, name, changable)):
self.name = name
self.index = index
self.changable = changable
def change_to(self):
freeciv.func.change_government(self.index)
class Tech(object):
def __init__(self, (index, name, steps)):
self.index = index
self.name = name
self.steps = steps
def set_as_goal(self):
freeciv.func.set_tech_goal(self.index)
#def get_research_state(self):
# return freeciv.func.get_invention_state(self.index)
def set_as_current(self):
freeciv.func.set_tech_research(self.index)
def get_nations(group=-1):
return [
(freeciv.func.get_name_of_nation_id(i),
freeciv.func.city_style_of_nation_id(i), i)
for i in freeciv.func.get_list_of_nations_in_group(group) ]
def get_nations_groups():
nation_group_count = freeciv.func.nation_group_count()
nations_groups = []
for group_id in range(nation_group_count):
nations_groups.append(freeciv.func.get_name_of_nation_group_id(group_id))
nations_groups.append('All')
return nations_groups
def set_nationset(value):
freeciv.func.option_str_set_by_name('nationset', value)
def get_nation_name(i):
return freeciv.func.get_name_of_nation_id(i)
| javaxubuntu/freeciv-android | lib/freeciv/client/__init__.py | Python | gpl-2.0 | 9,292 |
from __future__ import unicode_literals
from django.db import models
from wagtail.admin.edit_handlers import (
FieldPanel, MultiFieldPanel, PageChooserPanel
)
from wagtail.documents.edit_handlers import DocumentChooserPanel
class AbstractLinkFields(models.Model):
"""Abstract class for link fields."""
    link_document = models.ForeignKey('wagtaildocs.Document', blank=True,
                                      null=True, related_name='+',
                                      on_delete=models.SET_NULL)
    link_external = models.URLField('External link', blank=True, null=True)
    link_page = models.ForeignKey('wagtailcore.Page', blank=True,
                                  null=True, related_name='+',
                                  on_delete=models.SET_NULL)
panels = [
DocumentChooserPanel('link_document'),
FieldPanel('link_external'),
PageChooserPanel('link_page')
]
@property
def link(self):
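        # Resolution order: an internal page wins, then a document, then the
        # raw external URL.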
if self.link_page:
return self.link_page.url
elif self.link_document:
return self.link_document.url
else:
return self.link_external
class Meta:
abstract = True
class AbstractRelatedLink(AbstractLinkFields):
"""Abstract class for related links."""
title = models.CharField(max_length=256, help_text='Link title')
panels = [
FieldPanel('title'),
MultiFieldPanel(AbstractLinkFields.panels, 'Link')
]
class Meta:
abstract = True
| kingsdigitallab/shakespeare400-django | cms/models/links.py | Python | gpl-2.0 | 1,587 |
#!/usr/bin/python2
import os
import sys
if 'hostusb' in os.environ:
sys.stderr.write('hostusb: cannot migrate VM with host usb devices\n')
sys.exit(2)
| EdDev/vdsm | vdsm_hooks/hostusb/before_vm_migrate_source.py | Python | gpl-2.0 | 161 |
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .._matchesfilterbase import MatchesFilterBase
#-------------------------------------------------------------------------
#
# MatchesFilter
#
#-------------------------------------------------------------------------
class MatchesFilter(MatchesFilterBase):
"""Rule that checks against another filter"""
name = _('Sources matching the <filter>')
description = _("Matches sources matched by the specified filter name")
namespace = 'Source'
| sam-m888/gprime | gprime/filters/rules/source/_matchesfilter.py | Python | gpl-2.0 | 1,694 |
class Utils(object):
def __init__(self):
pass
| opendatakosovo/kosovo-assembly-transcript-api | kata/utils/utils.py | Python | gpl-2.0 | 59 |
#!/usr/bin/python -tt
"""A Python program to analyse Sumo and OVNIS outpuy.
Try running this program from the command line:
python analyse.py
"""
import os
# import csv
import matplotlib
matplotlib.use('Agg') # headless mode
import matplotlib.pyplot as plt
import numpy as np
# import time
# from matplotlib.mlab import csv2rec
# import math
# from math import exp, pow
import pandas as pd
from optparse import OptionParser
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
linestyles = ['-', '--', '-.', ':']
markers = []
for m in Line2D.markers:
try:
if len(m) == 1 and m != ' ':
markers.append(m)
except TypeError:
pass
styles = markers + [
r'$\lambda$',
r'$\bowtie$',
r'$\circlearrowleft$',
r'$\clubsuit$',
r'$\checkmark$']
# colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
colors = ['c', 'b', 'r', 'y', 'g', 'm']
average_span = 60
# Define a main() function.
def main():
parser = OptionParser()
parser.add_option('--inputDirs',
help=("input dirs."), type="string", dest="inputDirs")
parser.add_option('--inputDir',
help=("input dir."), type="string", dest="inputDir")
parser.add_option('--inputFile',
help=("input file."), type="string", dest="inputFile")
parser.add_option('--inputFiles',
help=("input files."), type="string", dest="inputFiles")
parser.add_option('--labels',
help=("Label titles"), type="string", dest='labelTitles')
parser.add_option('--outputFile',
help=("outputFile."), type="string", dest="outputFile")
parser.add_option('--outputDir',
help=("outputDir."), type="string", dest="outputDir")
parser.add_option('--startX',
help=("Start time"), type="int", dest='startX')
parser.add_option('--endX', help=("End time"), type="int", dest='endX')
parser.add_option('--stepSize',
help=("Step size"), type="int", dest='stepSize')
parser.add_option('--scenario',
help=("Scenario "), type="string", dest='scenario')
(options, args) = parser.parse_args()
print options
args = {}
args["startX"] = options.startX
args["endX"] = options.endX
args['stepSize'] = options.stepSize
args['xLabel'] = 'time'
scenario = options.scenario or "Kirchberg"
if options.inputDirs:
print "inputDirs", options.inputDirs
process_compare_dirs(options.inputDirs, "average.txt",
scenario)
if options.inputDir:
process_average_calculation(options.inputDir, scenario)
if options.inputFile:
filename = options.inputFile
if "communities.csv" in filename:
process_communities(filename, options.outputDir)
# if "edges_error" in filename:
# process_edges_error(filename, options.outputDir, scenario)
if "tripinfo" in filename or "output_log_routing_end" in filename:
print "TODO uncomment"
process_trips(filename, options.outputDir, scenario)
def read_average_stats(filepath, groupnames, statnames):
    result = {group: {stat: list() for stat in statnames}
              for group in groupnames}
f = open(filepath, 'r')
groupname = None
lines = f.readlines()
for line in lines:
line = line.strip()
if line in groupnames:
groupname = line
if groupname:
data = line.split("\t")
if len(data) > 0:
statname = data[0]
if statname in statnames:
print groupname, statname, data
result[groupname][statname] = float(data[1])
# groupname = None
return result
def plot_total_with_ranges(ax, percents, values, color, label, markerstyle):
columns = ["min", "mean", "max"]
styles = ['--','-','--']
labels = [None,label,None]
linewidths = [1, 2, 1]
for col, style, lw, lbl in zip(columns, styles, linewidths, labels):
if col == "mean":
ax.plot(percents, values[col], label=lbl, lw=lw,
color=color, linestyle=style,
marker=markerstyle, markersize=8, markevery=2, markeredgecolor=color,
markerfacecolor='none', markeredgewidth=2)
else :
ax.plot(percents, values[col], label=lbl, lw=lw,
color=color, linestyle=style)
def plot_total_without_ranges(ax, percents, values, color, label, markerstyle):
columns = ["std"]
styles = ['-']
labels = [label]
linewidths = [2]
for col, style, lw, lbl in zip(columns, styles, linewidths, labels):
ax.plot(percents, values[col], label=lbl, lw=lw,
color=color, linestyle=style,
marker=markerstyle, markersize=8, markevery=2, markeredgecolor=color,
markerfacecolor='none', markeredgewidth=2)
def process_compare_dirs(inputdir, filename, scenario):
percents = range(0, 105, 5)
groupnames = []
titlenames = []
# groupnames = ["total", "shortest", "probabilistic"]
# titlenames = ["All vehicles", "Shortest-time", "Probabilistic"]
# titlenames = ["All vehicles", "TrafficEQ-ST", "TrafficEQ-P"]
if scenario == "Highway":
groupnames += ["main", "bypass"]
titlenames += ["Main", "Bypass"]
elif scenario == "Kirchberg":
groupnames += ["routedist#0", "routedist#1", "routedist#2"]
titlenames += ["Kennedy", "Adenauer", "Thuengen"]
statnames = ["mean", "min", "max"]
statnames = ["std"]
    values = {group: {stat: list() for stat in statnames}
              for group in groupnames}
for percent in percents:
percent_shortest = 100-percent
dirname = "%03d-%03d" % (percent_shortest, percent)
filepath = os.path.join(inputdir, dirname, filename)
result = read_average_stats(filepath, groupnames, statnames)
for groupname, groupstats in result.items():
for statname, stat in groupstats.items():
val = None if stat == 0 or stat == [] else stat
values[groupname][statname].append(val)
title = "Penetration rate"
measure = "std" # "mean"
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i, groupname in enumerate(groupnames):
color = colors[i % len(colors)]
style = styles[(i*3) % len(styles)]
if "std" == measure:
plot_total_without_ranges(ax, percents, values[groupname], color, titlenames[i], style)
else:
plot_total_with_ranges(ax, percents, values[groupname], color, titlenames[i], style)
# if groupname == "routedist#0":
# print values[groupname]
# plot_total_with_ranges(ax, percents, values[groupname], color, titlenames[i], style)
# else:
# ax.plot(percents, values[groupname]["mean"], label=titlenames[i], lw=2,
# color=color, markeredgecolor=color,
# linestyle='-', marker=style, markersize=8, markevery=i+1,
# markerfacecolor='none', markeredgewidth=2)
outputfile = os.path.join(inputdir, "plot_" + title + "_lines" + ".pdf")
leg = ax.legend(loc='lower left', fancybox=True)
leg.get_frame().set_alpha(0.5)
# ax.yaxis.set_ticks(np.arange(200, 400, 10))
ax.xaxis.set_ticks(np.arange(percents[0], percents[len(percents)-1], 5))
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
ax.set_xlabel("Percentage of TrafficEQ-P vehicles")
if measure == "std":
plot_title = "Standard deviation of travel times (seconds)"
elif measure == "mean":
plot_title = "Average travel time (seconds)"
ax.set_ylabel(plot_title)
plt.grid()
plt.savefig(outputfile)
epsfilename = os.path.join(inputdir, "plot_" + title + "_lines" + ".eps")
print "plotting to " + epsfilename
plt.savefig(epsfilename)
outputfile = os.path.join(inputdir, "penetration_rate_"+measure+".csv")
fout = open(outputfile, 'w')
fout.write("Percentage\tTotal\tShortest\tProbabilistic")
total, shortest, probabilistic = 0, 0, 0
for i, percent in enumerate(percents):
if "total" in values:
total = values["total"][measure][i]
if "shortest" in values:
shortest = values["shortest"][measure][i] \
if values["shortest"][measure][i] is not None else 0
if "probabilistic" in values:
probabilistic = values["probabilistic"][measure][i] \
if values["probabilistic"][measure][i] is not None else 0
fout.write("%d\t%.2f\t%.2f\t%.2f\n" % (percent, total,
shortest, probabilistic))
# markeredgecolor=color, markerfacecolor='none', markevery=5, label=ylabel)
def floor_number(number, span):
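    # Round number down to the nearest multiple of span,
    # e.g. floor_number(127, 60) == 120.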
return (number // span) * span
def read_df_stats(f, averages, groupnames, statnames, indexes):
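    # Layout per block: a header line, a blank line, then one row per group
    # with the group name in column 1 and the wanted stats at `indexes`.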
f.readline() # skip header line
f.readline() # skip empty
for groupname in groupnames:
data = f.readline().strip().lower().split("\t")
if not data or len(data) < 2:
return
name = data[1]
for statname, index in zip(statnames, indexes):
averages[name][statname].append(
float(data[index]))
def read_stats(inputdir, filepattern, statnames, indexes,
routenames, route_column,
strategynames, strategy_column):
    sum_averages = {stat: list() for stat in statnames}
    route_averages = {route: {stat: list() for stat in statnames}
                      for route in routenames}
    strategy_averages = {strategy: {stat: list() for stat in statnames}
                         for strategy in strategynames}
for root, dirs, files in os.walk(inputdir):
for filename in files:
if filepattern in filename:
filepath = os.path.join(root, filename)
with open(filepath) as f:
f.readline() # skip line
# read sum averages
for statname in statnames:
data = f.readline().strip().lower().split("\t")
sum_averages[statname].append(float(data[1]))
# read strategy averages
header_line = f.readline() # read header line
if strategy_column in header_line:
read_df_stats(f, strategy_averages, strategynames,
statnames, indexes)
# read route averages
elif route_column in header_line:
read_df_stats(f, route_averages, routenames,
statnames, indexes)
line = f.readline()
while route_column not in line:
line = f.readline()
read_df_stats(f, route_averages, routenames,
statnames, indexes)
return sum_averages, route_averages, strategy_averages
def write_average_stats(filepath, statnames, averages, route_averages,
strategy_averages,
error_statnames, error_averages,
links, error_link):
print "writing to .. ", filepath
outf = open(filepath, 'w')
outf.write("total\n")
outf.write("statistic\tmean\tstd\tmin\tmax\n")
# averages
for name in statnames:
outf.write("%s\t%.2f\t%.2f\t%.2f\t%.2f\n" % (
name, np.mean(averages[name]), np.std(averages[name]),
np.amin(averages[name]), np.amax(averages[name])))
# route averages
for route, route_stats in route_averages.items():
outf.write("%s\n" % route)
for name in statnames:
if name in statnames:
outf.write("%s\t%.2f\t%.2f\t%.2f\t%2f\n" % (
name, np.mean(route_stats[name]),
np.std(route_stats[name]),
np.amin(route_stats[name]),
np.amax(route_stats[name])))
# strategy averages
for strategy, stats in strategy_averages.items():
outf.write("%s\n" % strategy)
for name in statnames:
if len(stats[name]) == 0:
stats[name].append(0)
outf.write("%s\t%.2f\t%.2f\t%.2f\t%.2f\n" % (
name, np.mean(stats[name]), np.std(stats[name]),
np.amin(stats[name]), np.amax(stats[name])))
for name in error_statnames:
if len(error_averages[name]) > 0:
outf.write("%s\t%.2f\t%.2f\t%.2f\t%.2f\n" % (
name, np.mean(error_averages[name]),
np.std(error_averages[name]),
np.amin(error_averages[name]), np.amax(error_averages[name])))
for name in links:
outf.write("%s\t%.2f\t%.2f\t%.2f\t%.2f\n" % (
name, np.mean(error_link[name]), np.std(error_link[name]),
np.amin(error_link[name]), np.amax(error_link[name])))
def read_error_stats(inputdir, filepattern, statnames):
averages = {name: list() for name in statnames}
for root, dirs, files in os.walk(inputdir):
for filename in files:
if filepattern in filename:
filepath = os.path.join(root, filename)
with open(filepath) as f:
for statname in statnames:
data = f.readline().strip().lower().split("\t")
averages[statname].append(float(data[1]))
return averages
def read_links(f):
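    # The first row interleaves Time with five columns per link; the link id
    # sits in the first column of each group, i.e. at positions 1, 6, 11, ...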
line = f.readline().strip().lower().split("\t")
start = 1
step = 5
links = [line[i] for i in range(start, len(line), step)]
headers = []
headers.append("Time")
for linkid in links:
headers.append(linkid+"_name")
headers.append(linkid+"_static")
headers.append(linkid+"_perfect")
headers.append(linkid+"_vanet")
headers.append(linkid+"_blank")
return links, headers
def read_link_errors(inputdir, filepattern):
links = []
headers = []
averages = {}
for root, dirs, files in os.walk(inputdir):
for filename in files:
if filepattern in filename:
filepath = os.path.join(root, filename)
if len(links) == 0:
links, headers = read_links(open(filepath))
averages = {name: list() for name in links}
df = pd.read_csv(filepath, sep="\t", names=headers)
print "Read link errors", filepath
for linkid in links:
diff = np.abs(df[linkid+"_vanet"] - df[linkid+"_perfect"])
ape = diff / df[linkid+"_perfect"]
if len(ape) > 0:
mape = np.mean(ape)
# print linkid, " mape\t", mape
averages[linkid].append(mape)
return averages, links
def process_average_calculation(inputdir, scenario):
filepattern = "tripstats.txt"
# statnames = ["mean", "sum", "std", "min", "max", "count"]
statnames = ["count", "sum", "mean", "std", "min", "max"]
indexes = [2, 3, 4, 5, 6, 7]
if scenario == "Kirchberg":
routenames = ["routedist#0", "routedist#1", "routedist#2"]
else:
routenames = ["main", "bypass"]
strategy_column = "routingStrategy"
strategynames = ["shortest", "probabilistic"]
route_column = "routeId"
averages, route_averages, strategy_averages = read_stats(
inputdir, filepattern, statnames, indexes, routenames, route_column,
strategynames, strategy_column)
# print averages, route_averages, strategy_averages
error_statnames = ["MAPE"]
error_averages = read_error_stats(inputdir, "tripstats_error_Diff.txt",
error_statnames)
error_links, links = read_link_errors(inputdir,
"output_log_edges_error_static")
# write total averages
filepath = os.path.join(inputdir, "average.txt")
write_average_stats(filepath, statnames, averages, route_averages,
strategy_averages, error_statnames, error_averages,
links, error_links)
def process_communities(filename, outputdir):
df = pd.read_csv(filename, sep="\t")
title = "Average speed"
plt.figure(1)
plt.scatter(df['step'], df['timeMeanSpeed'])
outputfile = os.path.join(outputdir, "plot_" + title + ".pdf")
plt.savefig(outputfile)
def clean_file(filename, is_header=False, new_header=None):
    # Copy `filename` to "<filename>_clean", keeping only rows whose column
    # count matches the header, and return the new path plus the number of
    # rows skipped. `new_header` replaces (or, when the file has no header,
    # supplies) the header row; previously the no-header path left
    # number_of_columns at 0 and silently skipped every row.
    clean_filename = filename + '_clean'
    number_of_columns = 0
    skipped_lines = 0
    fout = open(clean_filename, 'w')
    with open(filename, 'r') as f:
        header_line = None
        if is_header:
            header_line = next(f)
        if new_header:
            header_line = new_header
        if header_line is not None:
            number_of_columns = len(header_line.split('\t'))
            fout.write(header_line)
        for data_line in f:
            if len(data_line.strip().split('\t')) != number_of_columns:
                skipped_lines += 1
                continue
            fout.write(data_line)
    fout.close()
    return clean_filename, skipped_lines
def write_group_stats(df, groupby_col, groupby_col2, xlabel, ylabel,
route_names, outputdir, skipped_lines):
filename = os.path.join(outputdir, "tripstats.txt")
outfile = open(filename, 'w')
outfile.write("skipped_lines\t%d\n" % skipped_lines)
meanTT = np.mean(df[ylabel])
sumTT = np.sum(df[ylabel])
stdTT = np.std(df[ylabel])
minTT = np.min(df[ylabel])
maxTT = np.max(df[ylabel])
count = len(df[ylabel])
outfile.write("Count\t%.2f\n" % count)
outfile.write("Sum TT\t%.2f\n" % sumTT)
outfile.write("Mean TT\t%.2f\n" % meanTT)
outfile.write("Std TT\t%.2f\n" % stdTT)
outfile.write("Min TT\t%.2f\n" % minTT)
outfile.write("Max TT\t%.2f\n" % maxTT)
# group by routing strategies
grouped = df.groupby(groupby_col2)
strategies = grouped.aggregate(
{ylabel: [np.size, sum, np.mean, np.std, np.amin, np.amax]}
).reset_index()
strategies.to_csv(outfile, sep='\t')
outfile.write("\n")
# gropup by route
grouped = df.groupby(groupby_col)
ylabel2 = "staticCost"
routes = grouped.aggregate(
{ylabel: [np.size, sum, np.mean, np.std, np.amin, np.amax],
ylabel2: [np.mean, np.sum, np.std]}).reset_index()
print "Writing to file %s" % outfile
routes.to_csv(outfile, sep='\t')
outfile.write("\n")
def write_stats(df, xlabel, ylabel, mape, outputdir, skipped_lines):
filename = os.path.join(outputdir, "tripstats_error_"+ylabel+".txt")
outfile = open(filename, 'w')
outfile.write("MAPE\t%.2f\n" % mape)
outfile.write("skipped_lines\t%d\n" % skipped_lines)
meanTT = np.mean(df[ylabel])
sumTT = np.sum(df[ylabel])
stdTT = np.std(df[ylabel])
minTT = np.min(df[ylabel])
maxTT = np.max(df[ylabel])
outfile.write("Mean TT\t%.2f\n" % meanTT)
outfile.write("Sum TT\t%.2f\n" % sumTT)
outfile.write("Std TT\t%.2f\n" % stdTT)
outfile.write("Min TT\t%.2f\n" % minTT)
outfile.write("Max TT\t%.2f\n" % maxTT)
outfile.close()
def get_group_axes_ranges(grouped, xlabel, ylabel):
xmin = 0
ymin = 0
xmax = 0
ymax = 0
for name, group in grouped:
x = max(group[xlabel])
if x > xmax:
xmax = x
y = max(group[ylabel])
if y > ymax:
ymax = y
margin = ymax * 0.1
return [xmin, xmax, ymin, ymax + margin]
def get_axes_ranges(df, xlabel, ylabels):
xmin = 0
ymin = 0
xmax = 0
ymax = 0
print(df.head())
x = max(df[xlabel])
if x > xmax:
xmax = x
for ylabel in ylabels:
y = max(df[ylabel])
if y > ymax:
ymax = y
return [xmin, xmax, ymin, ymax]
def plot_lines(df, xlabel, ylabels, title, xtitle, ytitle, outputdir):
colors = ['c', 'b', 'r', 'y', 'g', 'm']
fig = plt.figure()
[xmin, xmax, ymin, ymax] = get_axes_ranges(df, xlabel, ylabels)
axes = []
subplots = 1
for j in range(subplots):
axes.append(fig.add_subplot(
1, subplots, j+1, axisbg='white')) # height, width, chart #
axes[j].set_ylim([ymin, ymax])
axes[j].set_xlim([xmin, xmax])
axes[j].locator_params(nbins=4)
for i, ylabel in enumerate(ylabels):
color = colors[i % len(colors)]
print i, ylabel, color
x = df[xlabel]
y = df[ylabel]
style = styles[(i) % len(styles)]
# linestyle = linestyles[i % len(linestyles)]
axes[j].plot(x, y, linestyle='-', marker=style, color=color,
markersize=8, markeredgecolor=color,
markerfacecolor='none', markevery=5, label=ylabel)
axes[j].legend(loc='lower right')
axes[j].set_xlabel(xtitle)
axes[0].set_ylabel(ytitle)
outputfile = outputdir + "plot_" + title + "_lines" + ".pdf"
plt.savefig(outputfile)
def calculate_mape(df):
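    # Mean Absolute Percentage Error: mean(Error / Perfect), assuming the
    # "Error" column already holds absolute deviations from "Perfect".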
mape = 0
df["APE"] = df["Error"] / df["Perfect"]
mape = np.mean(df["APE"])
return mape
def process_edges_error(filename, outputdir, scenario):
if scenario == "Kirchberg":
new_header = "Time\tVehicle Id\tSize Vanet\tSize Perfect\tError\t" \
"Perfect\tVanet\tDiff\tStatic\troute Kennedy\tKennedy\tKennedy Error\t" \
"Kennedy Perfect\tKennedy Vanet\tKennedy Diff\tKennedy Static\t" \
"route Adenauer\tAdenauer\tAdenauer Error\tAdenauer Perfect\t" \
"Adenauer Vanet\tAdenauer Diff\tAdenauer Static\troute Thuengen\t" \
"Thuengen\tThuengen Error\tThuengen Perfect\tThuengen Vanet\t" \
"Thuengen Diff\tThuengen Static\n"
else:
new_header = "Time\tVehicle Id\tSize Vanet\tSize Perfect\tError\t" \
"Perfect\tVanet\tDiff\tStatic\troute Bypass\tBypass\tBypass Error\t" \
"Bypass Perfect\tBypass Vanet\tBypass Diff\tBypass Static\t" \
"route Main\tMain\tMain Error\t" \
"Main Perfect\tMain Vanet\tMain Diff\tMain Static\n"
filename, skipped_lines = clean_file(filename, False, new_header)
xlabel = "Time"
df = pd.read_csv(filename, sep="\t", index_col=False)
print("processing" , filename)
print("df", df.head())
xtitle = 'Time (seconds)'
ytitle = 'Duration (seconds)'
title = "Total travel times"
ylabels = ["Perfect", "Vanet"]
ylabel = "Diff"
df[xlabel] = df[xlabel].convert_objects(convert_numeric=True)
df[ylabel] = df[ylabel].convert_objects(convert_numeric=True)
for label in ylabels:
df[label] = df[label].convert_objects(convert_numeric=True)
plot_lines(df, xlabel, ylabels, title, xtitle, ytitle, outputdir)
mape = calculate_mape(df)
write_stats(df, xlabel, ylabel, mape, outputdir, skipped_lines)
"""
if scenario == "Kirchberg":
ylabels = ["Kennedy Perfect", "Kennedy Vanet"]
title = "Kennedy travel times"
ylabel = "Kennedy Diff"
else:
title = "Main travel times"
ylabels = ["Main Perfect", "Main Vanet"]
ylabel = "Main Diff"
plot_lines(df, xlabel, ylabels, title, xtitle, ytitle, outputdir)
write_stats(df, xlabel, ylabel, outputdir, skipped_lines)
if scenario == "Kirchberg":
ylabels = ["Adenauer Perfect", "Adenauer Vanet"]
title = "Adenauer travel times"
ylabel = "Adenauer Diff"
else:
ylabels = ["Bypass Perfect", "Bypass Vanet"]
title = "Bypass travel times"
ylabel = "Bypass Diff"
plot_lines(df, xlabel, ylabels, title, xtitle, ytitle, outputdir)
write_stats(df, xlabel, ylabel, outputdir, skipped_lines)
if scenario == "Kirchberg":
title = "Thuengen travel times"
ylabels = ["Thuengen Perfect", "Thuengen Vanet"]
ylabel = "Thuengen Diff"
plot_lines(df, xlabel, ylabels, title, xtitle, ytitle, outputdir)
write_stats(df, xlabel, ylabel, outputdir, skipped_lines)
title = "Error Travel times"
if scenario == "Kirchberg":
ylabels = ["Kennedy Diff", "Adenauer Diff", "Thuengen Diff"]
else:
ylabels = ["Main Diff", "Bypass Diff"]
plot_lines(df, xlabel, ylabels, title, xtitle, ytitle, outputdir)
title = "Perfect Travel times"
if scenario == "Kirchberg":
ylabels = ["Kennedy Perfect", "Adenauer Perfect", "Thuengen Perfect"]
else:
ylabels = ["Main Perfect", "Bypass Perfect"]
plot_lines(df, xlabel, ylabels, title, xtitle, ytitle, outputdir)
title = "Vanet Travel times"
if scenario == "Kirchberg":
ylabels = ["Kennedy Vanet", "Adenauer Vanet", "Thuengen Vanet"]
else:
ylabels = ["Main Vanet", "Bypass Vanet"]
plot_lines(df, xlabel, ylabels, title, xtitle, ytitle, outputdir)
"""
def process_trips(filename, outputdir, scenario):
filename, skipped_lines = clean_file(filename, True, None)
df = pd.read_csv(filename, sep="\t")
route_names = {"2069.63": "Kennedy", "2598.22": "Adenauer",
"2460.76": "Thuengen", "1262.43": "Kennedy",
"1791.02": "Adenauer", "1653.56": "Thuengen",
"routedist#0": "Kennedy",
"routedist#1": "Adenauer",
"routedist#2": "Thuengen",
"main": "Main", "bypass": "Bypass"}
# tripinfo
# arrival waitSteps vType depart routeLength vaporized duration arrivalSpeed devices departPos departDelay departLane departSpeed arrivalPos rerouteNo id arrivalLane
xlabel = "arrival"
ylabel = "duration"
title = "Trip duration"
xtitle = 'Time (seconds)'
ytitle = 'Duration (seconds)'
if "tripinfo" in filename:
groupby_col = "routeLength"
df[groupby_col] = df[groupby_col].map(lambda x: '%.2f' % x)
else:
groupby_col = "routeId"
groupby_col2 = "routingStrategy"
df[xlabel] = df[xlabel].convert_objects(convert_numeric=True)
df[ylabel] = df[ylabel].convert_objects(convert_numeric=True)
# plot_scatterplot(df, groupby_col, xlabel, ylabel, title, xtitle,
# ytitle, route_names, outputdir)
# plot_group_lines_v(df, groupby_col, xlabel, ylabel, title, xtitle,
# ytitle, route_names, outputdir)
plot_group_lines_a(df, groupby_col, xlabel, ylabel, title, xtitle,
ytitle, route_names, outputdir)
# plot_group_lines_a(df, groupby_col, xlabel, 'travelTime',
# 'Travel time on routes', xtitle, ytitle,
# route_names, outputdir)
# plot_group_lines(df, groupby_col, xlabel, ylabel, title, xtitle,
# ytitle, route_names, outputdir)
write_group_stats(df, groupby_col, groupby_col2, xlabel, ylabel,
route_names, outputdir, skipped_lines)
def plot_group_lines_v(df, groupby_col, xlabel, ylabel, title, xtitle, ytitle,
route_names, outputdir):
grouped = df.groupby(groupby_col)
fig = plt.figure()
num_groups = len(grouped)
[xmin, xmax, ymin, ymax] = get_group_axes_ranges(grouped, xlabel, ylabel)
axes = []
for i, value in enumerate(grouped):
name, group = value
color = colors[i % len(colors)]
print i, name, color
x = group[xlabel]
y = group[ylabel]
axes.append(fig.add_subplot(
num_groups, 1, i+1, axisbg='white')) # height, width, chart #
axes[i].set_ylim([ymin, ymax])
axes[i].set_xlim([xmin, xmax])
axes[i].locator_params(nbins=4)
style = styles[(i) % len(styles)]
# linestyle = linestyles[i % len(linestyles)]
axes[i].plot(x, y, linestyle='-', marker=style, color=color,
markersize=8, markeredgecolor=color,
markerfacecolor='none', markevery=5,
label=route_names[name])
axes[i].legend(loc='lower right')
axes[i].set_xlabel(xtitle)
axes[0].set_ylabel(ytitle)
outputfile = os.path.join(outputdir,
"plot_groups_v_" + title + "_lines" + ".pdf")
plt.savefig(outputfile)
def plot_group_lines_a(df, groupby_col, xlabel, ylabel, title, xtitle,
ytitle, route_names, outputdir):
grouped = df.groupby(groupby_col)
fig = plt.figure()
[xmin, xmax, ymin, ymax] = get_group_axes_ranges(grouped, xlabel, ylabel)
axes = []
axes.append(fig.add_subplot(2, 1, 1, axisbg='white'))
axes[0].set_ylim([ymin, 800])
axes[0].set_xlim([xmin, xmax])
ylabel2 = 'vehiclesOnRoute'
[xmin, xmax, ymin, ymax] = get_group_axes_ranges(grouped, xlabel, ylabel2)
axes.append(plt.subplot(2, 1, 2, axisbg='white', sharex=axes[0]))
axes[1].set_ylim([ymin, 100])
axes[1].set_xlim([xmin, xmax])
# axes[0].locator_params(nbins=4)
for i, value in enumerate(grouped):
name, group = value
print "group", type(group)
# smooth
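        # floor_number() and average_span are assumed defined earlier in this
        # module: timestamps are bucketed into bins of average_span seconds
        # and each bin is replaced by its mean, smoothing the plotted series.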
group['Time2'] = floor_number(group[xlabel], average_span)
smoothed = group.groupby('Time2').aggregate(np.mean).reset_index()
# print smoothed.head()
color = colors[i % len(colors)]
x = group[xlabel]
y = group[ylabel]
y2 = group['vehiclesOnRoute']
# smoothed
x = smoothed[xlabel]
y = smoothed[ylabel]
y2 = smoothed['vehiclesOnRoute']
style = styles[(i*2+3) % len(styles)]
# linestyle = linestyles[i % len(linestyles)]
print i, name, color, style
axes[0].plot(x, y, linestyle='-', marker=style, color=color, lw=1,
markeredgewidth=2, markersize=8, markeredgecolor=color,
markerfacecolor='none', markevery=5,
label=route_names[name])
axes[1].plot(x, y2, linestyle='-', marker=style, color=color,
markersize=8, markeredgecolor=color,
markerfacecolor='none', markevery=5)
# axes[0].legend(loc='lower right')
# axes[0].set_xlabel(xtitle)
axes[0].set_ylabel(ytitle)
leg = axes[0].legend(loc='upper left', fancybox=True)
leg.get_frame().set_alpha(0.5)
axes[1].set_xlabel(xtitle)
axes[1].set_ylabel("Number of vehicles")
outputfile = outputdir + "plot_groups_a_" + title + "_lines" + ".pdf"
print "plotting to {}".format(outputfile)
plt.savefig(outputfile)
outputfile = outputdir + "plot_groups_a_" + title + "_lines" + ".eps"
print "plotting to {}".format(outputfile)
plt.savefig(outputfile)
def plot_group_lines(df, groupby_col, xlabel, ylabel, title, xtitle,
ytitle, route_names, outputdir):
grouped = df.groupby(groupby_col)
fig = plt.figure()
num_groups = len(grouped)
[xmin, xmax, ymin, ymax] = get_group_axes_ranges(grouped, xlabel, ylabel)
axes = []
for i, value in enumerate(grouped):
name, group = value
color = colors[i % len(colors)]
print i, name, color
x = group[xlabel]
y = group[ylabel]
axes.append(fig.add_subplot(
            1, num_groups, i+1, axisbg='white'))  # add_subplot(rows, cols, index)
axes[i].set_ylim([ymin, ymax])
axes[i].set_xlim([xmin, xmax])
axes[i].locator_params(nbins=4)
axes[i].plot(x, y, color, linewidth=1, label=route_names[name])
axes[i].legend(loc='lower right')
axes[i].set_xlabel(xtitle)
axes[0].set_ylabel(ytitle)
outputfile = outputdir + "plot_groups_" + title + "_lines" + ".pdf"
plt.savefig(outputfile)
def plot_scatterplot(df, groupby_col, xlabel, ylabel, title, xtitle,
ytitle, route_names, outputdir):
grouped = df.groupby(groupby_col)
plt.figure(1)
for i, value in enumerate(grouped):
name, group = value
if name in route_names:
color = colors[i % len(colors)]
print "plotting group\t", i, name, color, type(group[xlabel])
plt.scatter(group[xlabel], group[ylabel], s=20, c=color,
marker='.', label=route_names[name], lw=0)
[xmin, xmax, ymin, ymax] = get_group_axes_ranges(grouped, xlabel, ylabel)
plt.xlim([xmin, xmax])
plt.xlabel(xtitle)
plt.ylabel(ytitle)
plt.legend(loc='lower right')
outputfile = outputdir + "plot_" + title + "_scatterplot" + ".pdf"
plt.savefig(outputfile)
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| agacia/ovnis | python/analyse.py | Python | gpl-2.0 | 32,722 |
import os
def set_directory(directory):
os.chdir(directory)
def get_available_abbreviations():
return os.listdir('../database/')
def get_abbreviation_list(list_name):
    abbreviations = dict()
    with open(list_name, 'r') as f:
        for line in f:
            # one comma-separated record per line
            a, r, l = line.strip().split(',')
            abbreviations[a] = (r, l)
    return abbreviations
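# Assumed file layout: one comma-separated (abbreviation, replacement,
# language) record per line, e.g.:
#   btw,by the way,en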
set_directory('../database/')
print(get_available_abbreviations())
for a, (r, l) in get_abbreviation_list('dummy.txt').items():
    print(a, r, l)
| Thalmann/AbbreviationGame | abbreviations/DAL.py | Python | gpl-2.0 | 475 |
# Sketch - A Python-based interactive drawing program
# Copyright (C) 1999, 2000, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from math import pi
from Tkinter import IntVar, DoubleVar, Entry, Label, Button, Frame
import Sketch.Scripting
from Sketch import _, SolidPattern, StandardColors, PolyBezier, CreatePath, \
Point, Polar
from Sketch.UI.sketchdlg import SKModal
#
#
#
def create_star_path(corners, step, radius):
# create a star-like polygon.
angle = step * 2 * pi / corners
# create an empty path and append the line segments
path = CreatePath()
for i in range(corners):
p = Polar(radius, angle * i + pi / 2)
path.AppendLine(p)
# close the path.
path.AppendLine(path.Node(0))
path.ClosePath()
return path
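# For instance, create_star_path(5, 2, 100) connects every second corner of a
# regular pentagon, tracing the classic five-pointed star (pentagram).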
#
# A modal dialog that asks for the parameters
#
# SKModal is the baseclass Sketch uses for modal dialogs. It provides
# some standard functionality for all modal dialogs.
#
# The intended use of a sub-class of SKModal is to instantiate it and
# call its RunDialog method.
#
# RunDialog pops up the dialog and returns when the user either cancels
# the dialog or presses the OK button. Its return value is None if the
# dialog was canceled or whatever object was passed to the close_dlg
# method to close the dialog in response to the click on the OK-button.
# See the method ok below.
#
class CreateStarDlg(SKModal):
title = _("Create Star")
def __init__(self, master, **kw):
# This constructor is here just for illustration purposes; it's
# not really needed here, as it simply passes all parameters on
# to the base class' constructor.
#
# The parameter master is the window this dialog belongs to. It
# should normally be the top-level application window.
apply(SKModal.__init__, (self, master), kw)
def build_dlg(self):
# The SKModal constructor automatically calls this method to
# create the widgets in the dialog.
#
# self.top is the top-level window of the dialog. All widgets of
# the dialog must contained in it.
top = self.top
# The rest is normal Tkinter code.
self.var_corners = IntVar(top)
self.var_corners.set(5)
label = Label(top, text = _("Corners"), anchor = 'e')
label.grid(column = 0, row = 0, sticky = 'ew')
entry = Entry(top, textvariable = self.var_corners, width = 15)
entry.grid(column = 1, row = 0, sticky = 'ew')
self.var_steps = IntVar(top)
self.var_steps.set(2)
label = Label(top, text = _("Steps"), anchor = 'e')
label.grid(column = 0, row = 1, sticky = 'ew')
entry = Entry(top, textvariable = self.var_steps, width = 15)
entry.grid(column = 1, row = 1, sticky = 'ew')
self.var_radius = DoubleVar(top)
self.var_radius.set(100)
label = Label(top, text = _("Radius"), anchor = 'e')
label.grid(column = 0, row = 2, sticky = 'ew')
entry = Entry(top, textvariable = self.var_radius, width = 15)
entry.grid(column = 1, row = 2, sticky = 'ew')
but_frame = Frame(top)
but_frame.grid(column = 0, row = 3, columnspan = 2)
button = Button(but_frame, text = _("OK"), command = self.ok)
button.pack(side = 'left', expand = 1)
# The self.cancel method is provided by the base class and
# cancels the dialog.
button = Button(but_frame, text = _("Cancel"), command = self.cancel)
button.pack(side = 'right', expand = 1)
def ok(self, *args):
# This method is bound to the OK-button. Its purpose is to
# collect the values of the various edit fields and pass them as
# one parameter to the close_dlg method.
#
# close_dlg() saves its parameter and closes the dialog.
corners = self.var_corners.get()
steps = self.var_steps.get()
radius = self.var_radius.get()
self.close_dlg((corners, steps, radius))
def create_star(context):
# Instantiate the modal dialog...
dlg = CreateStarDlg(context.application.root)
# ... and run it.
result = dlg.RunDialog()
if result is not None:
        # if the result is not None, the user pressed OK. Now construct
# the star-path...
corners, steps, radius = result
path = create_star_path(corners, steps, radius)
# ... and create the bezier object. The parameter to the
# constructor must be a tuple of paths
bezier = PolyBezier((path,))
# Set the line color to blue, the line width to 4pt
bezier.SetProperties(line_pattern = SolidPattern(StandardColors.blue),
line_width = 4)
# and insert it into the document
context.main_window.PlaceObject(bezier)
Sketch.Scripting.AddFunction('create_star', _("Create Star"),
create_star,
script_type = Sketch.Scripting.AdvancedScript)
| shumik/skencil-c | Script/create_star.py | Python | gpl-2.0 | 5,806 |
# -*- coding: utf-8 -*-
"""
Method of Characteristics Nozzle Design Code
@author: Peter Senior
email: [email protected]
"""
import numpy
from numpy import sin, tan, arcsin, arctan
import matplotlib.pyplot as plt
def mu(M):
"""Calculates and returns the value of mu for a given mach number"""
return arcsin(1 / M)
def nu(M, gamma = 1.4):
"""Calculates and returns the value of nu (the Prantl-Meyer angle) based
upon the Mach number at a point"""
nux = (((gamma + 1) / (gamma - 1)) ** 0.5) * arctan(((gamma - 1) * (M ** 2
- 1) / (gamma + 1)) ** 0.5) - arctan((M ** 2 - 1) ** 0.5)
return nux
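# Sanity check: for gamma = 1.4, nu(2.0) is about 0.4604 rad (26.38 degrees),
# matching the standard Prandtl-Meyer tables.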
def R_over_Re(M, Nu_exit, Eps, gamma=1.4):
    Rx = (1 - (((2 / (gamma + 1)) * (1 + (gamma - 1) * M ** 2 / 2)) **
    ((gamma + 1) / (2 * (gamma - 1))) * sin(phi(M, Nu_exit, gamma))) / Eps) ** 0.5
    return Rx
def phi(M, Nu_exit, gamma=1.4):
    return Nu_exit - nu(M, gamma) + mu(M)
def MoC_Plug_Nozzle_Geometry(Me, gamma=1.4, N=50):
"""Creates plug nozzle geometry using Me as the exit mach number, gamma
defaulting to 1.4, and N as the solution resolution, defaulting to 50. The
expansion point is located at 0, 1. The returned coordinates are
nondimensional"""
#Constants
Me = float(Me)
M = numpy.linspace(1, Me, N)
Eps = (1 / Me) * ((2 / (gamma + 1)) * (1 + (gamma - 1) * Me ** 2 / 2)) ** (
(gamma + 1) / (2 * (gamma - 1)))
    Nu_exit = nu(Me, gamma)
    R = R_over_Re(M, Nu_exit, Eps, gamma)
    X = (1 - R) / tan(phi(M, Nu_exit, gamma))
return [list(X), list(R)]
def Plot_Geo(Me, gamma=1.4, N=50):
"""Plots the created geometry"""
coords = MoC_Plug_Nozzle_Geometry(Me, gamma, N)
plt.plot(coords[0], coords[1])
plt.plot(0,1, marker='o')
return
def Write_Geometry(Me, gamma=1.4, N=50, scale=1):
"""Creates plug nozzle geometry and writes it to the file 'geo.txt'"""
x = MoC_Plug_Nozzle_Geometry(Me, gamma, N)
z = numpy.zeros(len(x[0]))
f = open('geo.txt', 'w')
for i in range(len(x[0])):
f.write('{} {} {}'.format(x[0][i]*scale, x[1][i]*scale, z[i]*scale))
f.write('\n')
f.close()
return
| Italianmoose/Year-3-IP | Nozzle/Method-of-Characteristics/Plug/Plug MoC code v2.1.py | Python | gpl-2.0 | 2,107 |
# Copyright (C) 2013-2014 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek, James Forcier and Reed Koser
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from helpers.command import Command
@Command('inspect', ['handler', 'is_admin', 'nick'])
def cmd(send, msg, args):
"""'Inspect' a bot attribute."""
nick, is_admin, handler = args['nick'], args['is_admin'], args['handler']
if not is_admin(nick):
send("The inspect command may only be used by admins.")
return
if not hasattr(handler, msg):
send("That attribute was not found in the handler.")
return
send(str(getattr(handler, msg)))
| sckasturi/saltlake | commands/inspect.py | Python | gpl-2.0 | 1,307 |
#! /usr/bin/env python
# coding: utf8
#
# Modular REST API dispatcher in Python (dispytch)
#
# Copyright (C) 2015 Denis Pompilio (jawa) <[email protected]>
# Copyright (C) 2015 Cyrielle Camanes (cycy) <[email protected]>
#
# This file is part of dispytch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Munin requests module
"""
import logging
from . import infos
from . import rrd_utils
_log = logging.getLogger("dispytch")
def handle_request_list(arguments):
"""Handle "list" request
    :param dict arguments: Dictionary of arguments
    :return: Dictionary of available data
:rtype: dict
"""
target = arguments.get('target')
if target:
available = {target: infos.config.get_node(target)}
else:
available = {'nodes_list': infos.config.nodes}
return (None, available)
def handle_request_byid(munin_args):
"""Handle "by-id" request
    :param dict munin_args: Dictionary of arguments built by the Munin module
    :return: Dictionary of fetched data
:rtype: dict
"""
# Find specified id from configuration
if not munin_args.get('target'):
raise ValueError('missing node from request')
node = infos.config.get_node(munin_args['target'])
if not node:
raise ValueError('unknown requested node')
_log.debug("selected munin node: {0}".format(node['__id']))
series = rrd_utils.get_munin_entry_metrics(
node['__datadir'], node['__id'],
munin_args.get('datatype'), munin_args.get('cf'),
munin_args.get('start'), munin_args.get('stop'))
graph_info = node.get('graphs', {}).get(munin_args.get('datatype'))
return (graph_info, series)
def handle_request_byip(munin_args):
"""Handle "by-ip" request
    :param dict munin_args: Dictionary of arguments
    :return: Dictionary of fetched data
:rtype: dict
"""
# Find id with specified ip from configuration
ipaddr = munin_args.get('target')
if ipaddr is None:
raise ValueError('missing IP from request')
node = infos.config.get_node_by_ip(munin_args['target'])
if not node:
raise ValueError('unknown requested IP')
_log.debug("selected munin node: {0}".format(node['__id']))
series = rrd_utils.get_munin_entry_metrics(
node['__datadir'], node['__id'],
munin_args.get('datatype'), munin_args.get('cf'),
munin_args.get('start'), munin_args.get('stop'))
graph_info = node.get('graphs', {}).get(munin_args.get('datatype'))
return (graph_info, series)
# Reference known methods to handle
KNOWN_METHODS = {
'list': handle_request_list,
'by-id': handle_request_byid,
'by-ip': handle_request_byip,
}
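# Hypothetical dispatch sketch (the real caller lives in the dispytch core;
# the names below are illustrative, not part of this module):
#
#   handler = KNOWN_METHODS.get(method)
#   if handler is None:
#       raise ValueError('unsupported method: {0}'.format(method))
#   graph_info, series = handler(munin_args)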
| outini/dispytch | modules/munin/requests.py | Python | gpl-2.0 | 3,387 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
path = [ ".", "..", "../..", "../../..", "../../../..", "../../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
print("Running test with sliced format.")
TestUtil.clientServerTest()
print("Running test with 1.0 encoding.")
TestUtil.clientServerTest(additionalClientOptions="--Ice.Default.EncodingVersion=1.0",
additionalServerOptions="--Ice.Default.EncodingVersion=1.0")
print("Running test with sliced format and AMD server.")
TestUtil.clientServerTest(server="test.Ice.slicing.objects.AMDServer")
print("Running test with 1.0 encoding and AMD server.")
TestUtil.clientServerTest(server="test.Ice.slicing.objects.AMDServer",
additionalClientOptions="--Ice.Default.EncodingVersion=1.0",
additionalServerOptions="--Ice.Default.EncodingVersion=1.0")
| sbesson/zeroc-ice | java/test/Ice/slicing/objects/run.py | Python | gpl-2.0 | 1,516 |
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('FE_vs_T_at_n_plot_hand_data')
T = data[:,0]
fe = data[:,2]
gw = data[:,3]
plt.plot(T, fe)
plt.axhline(0)
plt.xlabel('T')
plt.ylabel('Free energy difference')
plt.figure()
plt.plot(T, gw)
plt.xlabel('T')
plt.ylabel('width')
plt.show()
| droundy/deft | papers/fuzzy-fmt/FE_vs_T_at_n_plot_hand_data.py | Python | gpl-2.0 | 309 |
from pycdas.portal.cdas import *
import time, sys, traceback
startServer = False
portal = None
request_port = 5670
response_port = 5671
host = "cldra"
server = "localhost"
if host == "webmap":
dataset = "file:/att/gpfsfs/ffs2004/ppl/tpmaxwel/cdas/cache/collections/NCML/merra_mon_ua.xml"
var = "ua"
elif host == "cldra":
dataset = "file:/home/tpmaxwel/.cdas/cache/collections/NCML/CIP_MERRA_ua_mon.ncml"
var = "ua"
else:
dataset = "http://esgf.nccs.nasa.gov/thredds/dodsC/CMIP5/NASA/GISS/historical/E2-H_historical_r1i1p1/tas_Amon_GISS-E2-H_historical_r1i1p1_185001-190012.nc"
var = "tas"
cldraGISSdset = "file:///home/tpmaxwel/.cdas/cache/collections/NCML/GISS_E2H_r1i1p1.ncml"
localGISSdset = "file:///Users/tpmaxwel/.cdas/cache/collections/NCML/giss_r1i1p1.xml"
currentGISSDataset = localGISSdset
try:
if startServer:
portal = CDASPortal()
portal.start_CDAS()
time.sleep(20)
else:
portal = CDASPortal(ConnectionMode.CONNECT, server, request_port, response_port)
response_manager = portal.createResponseManager()
t0 = time.time()
datainputs = '[domain=[{"name":"d0","lat":{"start":30,"end":40,"system":"indices"},"lon":{"start":30,"end":40,"system":"indices"},"lev":{"start":10,"end":10,"system":"indices"},"time":{"start":0,"end":100,"system":"indices"}}],variable=[{"uri":"' + currentGISSDataset + '","name":"tas:v1","domain":"d0"}],operation=[{"name":"CDSpark.max","input":"v1","axes":"xt","filter":"DJF"}]]'
# datainputs = '[domain=[{"name":"d0","lat":{"start":30,"end":40,"system":"indices"},"lon":{"start":30,"end":40,"system":"indices"},"lev":{"start":10,"end":10,"system":"indices"},"time":{"start":0,"end":100,"system":"indices"}}],variable=[{"uri":"file:///home/tpmaxwel/.cdas/cache/collections/NCML/GISS_E2H_r1i1p1.ncml","name":"tas:v1","domain":"d0"}],operation=[{"name":"CDSpark.max","input":"v1","axes":"xy"}]]'
# datainputs = '[domain=[{"name":"d0"}],variable=[{"uri":"' + dataset + '","name":"ua:v1","domain":"d0"}],operation=[{"name":"python.numpyModule.ave","input":"v1","axes":"xt","filter":"DJF"}]]'
print "Sending request on port {0}, server {1}: {2}".format( portal.request_port, server, datainputs ); sys.stdout.flush()
rId = portal.sendMessage( "execute", [ "CDSpark.workflow", datainputs, ""] )
responses = response_manager.getResponses(rId)
print "Completed OP in time {0}".format( time.time()-t0 ); sys.stdout.flush()
print "Responses = " + str(responses)
except Exception, err:
traceback.print_exc()
finally:
portal.shutdown()
| nasa-nccs-cds/CDAS2 | python/test/localPerformanceTests/zonalAveDemo.py | Python | gpl-2.0 | 2,568 |
from django.db.models import F
from . import PostingEndpoint, PostingMiddleware
class UpdateStatsMiddleware(PostingMiddleware):
def save(self, serializer):
self.update_category(self.thread.category, self.thread)
self.update_thread(self.thread, self.post)
self.update_user(self.user)
def update_category(self, category, thread):
if self.mode == PostingEndpoint.START:
category.threads = F('threads') + 1
if self.mode != PostingEndpoint.EDIT:
category.set_last_thread(thread)
category.posts = F('posts') + 1
category.update_all = True
def update_thread(self, thread, post):
if self.mode == PostingEndpoint.START:
thread.set_first_post(post)
if self.mode != PostingEndpoint.EDIT:
thread.set_last_post(post)
if self.mode == PostingEndpoint.REPLY:
thread.replies = F('replies') + 1
thread.update_all = True
def update_user(self, user):
if self.mode == PostingEndpoint.START:
user.threads = F('threads') + 1
user.update_fields.append('threads')
if self.mode != PostingEndpoint.EDIT:
user.posts = F('posts') + 1
user.update_fields.append('posts')
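        # Note: F('posts') + 1 turns the increment into a database-side
        # UPDATE ... SET posts = posts + 1, avoiding the race condition of a
        # Python-side read-modify-write across the counters updated above.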
| 1905410/Misago | misago/threads/api/postingendpoint/updatestats.py | Python | gpl-2.0 | 1,287 |
# -*- coding: utf-8 -*-
# Copyright 2008-2015 Jaap Karssenberg <[email protected]>
'''The ExportLinker object translates links in zim pages to URLs
for the export content
'''
import logging
logger = logging.getLogger('zim.exporter')
#~ import base64
from .layouts import ExportLayout
from zim.formats import BaseLinker
from zim.fs import File, Dir, PathLookupError
from zim.config import data_file
from zim.notebook import interwiki_link, encode_filename, HRef, PageNotFoundError
from zim.parsing import link_type, is_win32_path_re, url_decode, url_encode
from zim.formats import BaseLinker
class ExportLinker(BaseLinker):
'''This object translate links in zim pages to (relative) URLs.
This is used when exporting data to resolve links.
Relative URLs start with "./" or "../" and should be interpreted
in the same way as in HTML. Both URLs and relative URLs are
already URL encoded
@todo: info on formats to know how to set "usebase"
'''
def __init__(self, notebook, layout, source=None, output=None,
usebase=False, document_root_url=None
):
		'''Constructor
@param notebook: the source L{Notebook} for resolving links
@param layout: the L{ExportLayout} for resolving target files
@param source: is the L{Path} of the source page being exported
@param output: is a L{File} object for the destination file
@param usebase: if C{True} the format allows returning relative paths
@param document_root_url: optional URL for the document root
'''
self.notebook = notebook
self.layout = layout
self.source = source
self.output = output
if output:
self.base = output.dir
else:
self.base = None
self.usebase = usebase
self.document_root_url = document_root_url
#~ self._icons = {} # memorize them because the occur often in one page
## Methods used while exporting to resolve links etc. ##
def link(self, link):
        '''Returns a URL for a link in a zim page
This method is used to translate links of any type.
@param link: link to be translated
        @returns: url, uri, or relative path, depending on the
        context of this linker
'''
# Determines the link type and dispatches any of the "link_*" methods
assert isinstance(link, basestring)
type = link_type(link)
methodname = '_link_' + type
if hasattr(self, methodname):
href = getattr(self, methodname)(link)
else:
href = link
#~ print "Linker:", link, '-->', href, '(%s)' % type
return href
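        # For example, a 'mailto:...' link dispatches to _link_mailto below,
        # while a plain page name (link_type 'page') goes through _link_page.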
def img(self, src):
        '''Returns a URL for image file 'src' '''
return self._link_file(src)
#~ def icon(self, name):
#~ '''Returns an url for an icon'''
#~ if not name in self._icons:
#~ path = 'icons/%s.png' % name
#~ self._icons[name] = self.resource(path)
#~ return self._icons[name]
def resource(self, path):
        '''Return a URL for template resources'''
dir = self.layout.resources_dir()
file = dir.file(path)
return self.file_object(file)
def resolve_source_file(self, link):
'''Find the source file for an attachment
Used e.g. by the latex format to find files for equations to
be inlined. Do not use this method to resolve links, the file
given here might be temporary and is not guaranteed to be
available after the export.
@returns: a L{File} object or C{None} if no file was found
'''
return self.notebook.resolve_file(link, self.source)
def resolve_dest_file(self, link):
'''Find the destination file for an attachment
@returns: a L{File} object
'''
return self._resolve_file(link)
def page_object(self, path):
        '''Turn a L{Path} object into a relative link or URI'''
try:
file = self.layout.page_file(path)
except PathLookupError:
return '' # Link outside of current export ?
else:
if file == self.output:
return '#' + path.name # single page layout ?
else:
return self.file_object(file)
def file_object(self, file):
        '''Turn a L{File} object into a relative link or URI'''
if self.base and self.usebase \
and file.ischild(self.layout.relative_root):
relpath = file.relpath(self.base, allowupward=True)
if not relpath.startswith('.'):
relpath = './' + relpath
return relpath
elif self.notebook.document_root \
and self.document_root_url \
and file.ischild(self.notebook.document_root):
relpath = file.relpath(self.notebook.document_root)
return self.document_root_url + relpath
else:
return file.uri
## Methods below are internal, not used by format or template ##
def _link_page(self, link):
try:
if self.source:
path = self.notebook.pages.resolve_link(
self.source, HRef.new_from_wiki_link(link)
)
else:
path = self.notebook.pages.lookup_from_user_input(link)
except ValueError:
return ''
else:
return self.page_object(path)
def _link_file(self, link):
file = self._resolve_file(link)
return self.file_object(file)
def _resolve_file(self, link):
# TODO checks here are copy of notebook.resolve_file - should be single function
# to determine type of file link: attachment / document / other
# or generic function that takes attachment folder & document folder as args
filename = link.replace('\\', '/')
if filename.startswith('~') or filename.startswith('file:/'):
file = File(filename)
elif filename.startswith('/'):
if self.notebook.document_root:
file = self.notebook.document_root.file(filename)
else:
file = File(filename)
elif is_win32_path_re.match(filename):
if not filename.startswith('/'):
filename = '/' + filename # make absolute on Unix
file = File(filename)
else:
if self.source:
dir = self.layout.attachments_dir(self.source)
else:
dir = self.layout.relative_root
file = File((dir.path, filename)) # XXX LocalDir --> File -- will need get_abspath to resolve
return file
def _link_mailto(self, link):
if link.startswith('mailto:'):
return link
else:
return 'mailto:' + link
def _link_interwiki(self, link):
href = interwiki_link(link)
if href and href != link:
return self.link(href) # recurs
else:
logger.warn('No URL found for interwiki link "%s"', link)
return None
def _link_notebook(self, link):
if link.startswith('zim+'):
link = link[4:]
if '?' in link:
link, path = link.split('?')
# FIXME: code below is not robust because we don't know the
# storage mode of linked notebook...
path = url_decode(path) # was already encoded by interwiki_link()
path = encode_filename(path).replace(' ', '_')
return link + '/' + url_encode(path) + '.txt'
else:
return link
class StubLayout(ExportLayout):
'''Stub implementation of L{ExportLayout} that is used by
L{StaticExportLinker}
'''
def __init__(self, notebook, resources_dir):
self.notebook = notebook
self.resources_dir = resources_dir
def page_file(self, page):
try:
page = self.notebook.get_page(page)
if hasattr(page, 'source') and isinstance(page.source, File):
return page.source
else:
return None
except PageNotFoundError:
return None
def attachments_dir(self, page):
return self.notebook.get_attachments_dir(page)
def resources_dir(self):
return self.resources_dir
#~ def data_uri(file):
#~ if file.basename.endswith('.png'):
#~ mime = 'image/png'
#~ else:
#~ mime = file.get_mimetype()
#~ data64 = u''.join(base64.encodestring(file.raw()).splitlines())
#~ return u'data:%s;base64,%s' % (mime, data64)
class StaticExportLinker(ExportLinker):
'''This linker can be used when exporting a page to e.g. html
without a file to write the html to. All links are resolved
statically to the source files.
'''
def __init__(self, notebook, resources_dir=None, source=None):
layout = StubLayout(notebook, resources_dir)
ExportLinker.__init__(self, notebook, layout, source=source)
#~ def icon(self, name):
#~ if not name in self._icons:
#~ path = 'icons/%s.png' % name
#~ if self.layout.resources_dir:
#~ file = self.layout.resources_dir.file(path)
#~ if file.exists():
#~ self._icons[name] = data_uri(file)
#~ if not name in self._icons:
#~ file = data_file('pixmaps/%s.png' % name)
#~ if file.exists():
#~ self._icons[name] = data_uri(file)
#~ else:
#~ self._icons[name] = file.uri
#~ return self._icons[name]
| Osndok/zim-desktop-wiki | zim/export/linker.py | Python | gpl-2.0 | 8,234 |
# color.py color output for the status and qseries commands
#
# Copyright (C) 2007 Kevin Christen <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''colorize output from some commands
This extension modifies the status and resolve commands to add color
to their output to reflect file status, the qseries command to add
color to reflect patch status (applied, unapplied, missing), and to
diff-related commands to highlight additions, removals, diff headers,
and trailing whitespace.
Other effects in addition to color, like bold and underlined text, are
also available. By default, the terminfo database is used to find the
terminal codes used to change color and effect. If terminfo is not
available, then effects are rendered with the ECMA-48 SGR control
function (aka ANSI escape codes).
Default effects may be overridden from your configuration file::
[color]
status.modified = blue bold underline red_background
status.added = green bold
status.removed = red bold blue_background
status.deleted = cyan bold underline
status.unknown = magenta bold underline
status.ignored = black bold
# 'none' turns off all effects
status.clean = none
status.copied = none
qseries.applied = blue bold underline
qseries.unapplied = black bold
qseries.missing = red bold
diff.diffline = bold
diff.extended = cyan bold
diff.file_a = red bold
diff.file_b = green bold
diff.hunk = magenta
diff.deleted = red
diff.inserted = green
diff.changed = white
diff.trailingwhitespace = bold red_background
resolve.unresolved = red bold
resolve.resolved = green bold
bookmarks.current = green
branches.active = none
branches.closed = black bold
branches.current = green
branches.inactive = none
tags.normal = green
tags.local = black bold
rebase.rebased = blue
rebase.remaining = red bold
shelve.age = cyan
shelve.newest = green bold
shelve.name = blue bold
histedit.remaining = red bold
The available effects in terminfo mode are 'blink', 'bold', 'dim',
'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
'underline'. How each is rendered depends on the terminal emulator.
Some may not be available for a given terminal type, and will be
silently ignored.
Note that on some systems, terminfo mode may cause problems when using
color with the pager extension and less -R. less with the -R option
will only display ECMA-48 color codes, and terminfo mode may sometimes
emit codes that less doesn't understand. You can work around this by
either using ansi mode (or auto mode), or by using less -r (which will
pass through all terminal control codes, not just color control
codes).
Because there are only eight standard colors, this module allows you
to define color names for other color slots which might be available
for your terminal type, assuming terminfo mode. For instance::
color.brightblue = 12
color.pink = 207
color.orange = 202
to set 'brightblue' to color slot 12 (useful for 16 color terminals
that have brighter colors defined in the upper eight) and, 'pink' and
'orange' to colors in 256-color xterm's default color cube. These
defined colors may then be used as any of the pre-defined eight,
including appending '_background' to set the background to that color.
By default, the color extension will use ANSI mode (or win32 mode on
Windows) if it detects a terminal. To override auto mode (to enable
terminfo mode, for example), set the following configuration option::
[color]
mode = terminfo
Any value other than 'ansi', 'win32', 'terminfo', or 'auto' will
disable color.
'''
import os
from mercurial import commands, dispatch, extensions, ui as uimod, util
from mercurial import templater, error
from mercurial.i18n import _
testedwith = 'internal'
# start and stop parameters for effects
_effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
'italic': 3, 'underline': 4, 'inverse': 7,
'black_background': 40, 'red_background': 41,
'green_background': 42, 'yellow_background': 43,
'blue_background': 44, 'purple_background': 45,
'cyan_background': 46, 'white_background': 47}
def _terminfosetup(ui, mode):
'''Initialize terminfo data and the terminal if we're in terminfo mode.'''
global _terminfo_params
# If we failed to load curses, we go ahead and return.
if not _terminfo_params:
return
# Otherwise, see what the config file says.
if mode not in ('auto', 'terminfo'):
return
_terminfo_params.update((key[6:], (False, int(val)))
for key, val in ui.configitems('color')
if key.startswith('color.'))
try:
curses.setupterm()
except curses.error, e:
_terminfo_params = {}
return
for key, (b, e) in _terminfo_params.items():
if not b:
continue
if not curses.tigetstr(e):
# Most terminals don't support dim, invis, etc, so don't be
# noisy and use ui.debug().
ui.debug("no terminfo entry for %s\n" % e)
del _terminfo_params[key]
if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
# Only warn about missing terminfo entries if we explicitly asked for
# terminfo mode.
if mode == "terminfo":
ui.warn(_("no terminfo entry for setab/setaf: reverting to "
"ECMA-48 color\n"))
_terminfo_params = {}
def _modesetup(ui, coloropt):
global _terminfo_params
auto = coloropt == 'auto'
always = not auto and util.parsebool(coloropt)
if not always and not auto:
return None
formatted = always or (os.environ.get('TERM') != 'dumb' and ui.formatted())
mode = ui.config('color', 'mode', 'auto')
realmode = mode
if mode == 'auto':
if os.name == 'nt' and 'TERM' not in os.environ:
            # looks like a cmd.exe console; use the win32 API or nothing
realmode = 'win32'
else:
realmode = 'ansi'
if realmode == 'win32':
_terminfo_params = {}
if not w32effects:
if mode == 'win32':
# only warn if color.mode is explicitly set to win32
ui.warn(_('warning: failed to set color mode to %s\n') % mode)
return None
_effects.update(w32effects)
elif realmode == 'ansi':
_terminfo_params = {}
elif realmode == 'terminfo':
_terminfosetup(ui, mode)
if not _terminfo_params:
if mode == 'terminfo':
## FIXME Shouldn't we return None in this case too?
            # only warn if color.mode is explicitly set to terminfo
ui.warn(_('warning: failed to set color mode to %s\n') % mode)
realmode = 'ansi'
else:
return None
if always or (auto and formatted):
return realmode
return None
try:
import curses
# Mapping from effect name to terminfo attribute name or color number.
# This will also force-load the curses module.
_terminfo_params = {'none': (True, 'sgr0'),
'standout': (True, 'smso'),
'underline': (True, 'smul'),
'reverse': (True, 'rev'),
'inverse': (True, 'rev'),
'blink': (True, 'blink'),
'dim': (True, 'dim'),
'bold': (True, 'bold'),
'invisible': (True, 'invis'),
'italic': (True, 'sitm'),
'black': (False, curses.COLOR_BLACK),
'red': (False, curses.COLOR_RED),
'green': (False, curses.COLOR_GREEN),
'yellow': (False, curses.COLOR_YELLOW),
'blue': (False, curses.COLOR_BLUE),
'magenta': (False, curses.COLOR_MAGENTA),
'cyan': (False, curses.COLOR_CYAN),
'white': (False, curses.COLOR_WHITE)}
except ImportError:
_terminfo_params = False
_styles = {'grep.match': 'red bold',
'grep.linenumber': 'green',
'grep.rev': 'green',
'grep.change': 'green',
'grep.sep': 'cyan',
'grep.filename': 'magenta',
'grep.user': 'magenta',
'grep.date': 'magenta',
'bookmarks.current': 'green',
'branches.active': 'none',
'branches.closed': 'black bold',
'branches.current': 'green',
'branches.inactive': 'none',
'diff.changed': 'white',
'diff.deleted': 'red',
'diff.diffline': 'bold',
'diff.extended': 'cyan bold',
'diff.file_a': 'red bold',
'diff.file_b': 'green bold',
'diff.hunk': 'magenta',
'diff.inserted': 'green',
'diff.trailingwhitespace': 'bold red_background',
'diffstat.deleted': 'red',
'diffstat.inserted': 'green',
'histedit.remaining': 'red bold',
'ui.prompt': 'yellow',
'log.changeset': 'yellow',
'rebase.rebased': 'blue',
'rebase.remaining': 'red bold',
'resolve.resolved': 'green bold',
'resolve.unresolved': 'red bold',
'shelve.age': 'cyan',
'shelve.newest': 'green bold',
'shelve.name': 'blue bold',
'status.added': 'green bold',
'status.clean': 'none',
'status.copied': 'none',
'status.deleted': 'cyan bold underline',
'status.ignored': 'black bold',
'status.modified': 'blue bold',
'status.removed': 'red bold',
'status.unknown': 'magenta bold underline',
'tags.normal': 'green',
'tags.local': 'black bold'}
def _effect_str(effect):
'''Helper function for render_effects().'''
bg = False
if effect.endswith('_background'):
bg = True
effect = effect[:-11]
attr, val = _terminfo_params[effect]
if attr:
return curses.tigetstr(val)
elif bg:
return curses.tparm(curses.tigetstr('setab'), val)
else:
return curses.tparm(curses.tigetstr('setaf'), val)
def render_effects(text, effects):
'Wrap text in commands to turn on each effect.'
if not text:
return text
if not _terminfo_params:
start = [str(_effects[e]) for e in ['none'] + effects.split()]
start = '\033[' + ';'.join(start) + 'm'
stop = '\033[' + str(_effects['none']) + 'm'
else:
start = ''.join(_effect_str(effect)
for effect in ['none'] + effects.split())
stop = _effect_str('none')
return ''.join([start, text, stop])
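# For example, render_effects('error', 'red bold') in ECMA-48 mode returns
# '\033[0;31;1merror\033[0m'.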
def extstyles():
for name, ext in extensions.extensions():
_styles.update(getattr(ext, 'colortable', {}))
def configstyles(ui):
for status, cfgeffects in ui.configitems('color'):
if '.' not in status or status.startswith('color.'):
continue
cfgeffects = ui.configlist('color', status)
if cfgeffects:
good = []
for e in cfgeffects:
if not _terminfo_params and e in _effects:
good.append(e)
elif e in _terminfo_params or e[:-11] in _terminfo_params:
good.append(e)
else:
ui.warn(_("ignoring unknown color/effect %r "
"(configured in color.%s)\n")
% (e, status))
_styles[status] = ' '.join(good)
class colorui(uimod.ui):
def popbuffer(self, labeled=False):
if self._colormode is None:
return super(colorui, self).popbuffer(labeled)
if labeled:
return ''.join(self.label(a, label) for a, label
in self._buffers.pop())
return ''.join(a for a, label in self._buffers.pop())
_colormode = 'ansi'
def write(self, *args, **opts):
if self._colormode is None:
return super(colorui, self).write(*args, **opts)
label = opts.get('label', '')
if self._buffers:
self._buffers[-1].extend([(str(a), label) for a in args])
elif self._colormode == 'win32':
for a in args:
win32print(a, super(colorui, self).write, **opts)
else:
return super(colorui, self).write(
*[self.label(str(a), label) for a in args], **opts)
def write_err(self, *args, **opts):
if self._colormode is None:
return super(colorui, self).write_err(*args, **opts)
label = opts.get('label', '')
if self._colormode == 'win32':
for a in args:
win32print(a, super(colorui, self).write_err, **opts)
else:
return super(colorui, self).write_err(
*[self.label(str(a), label) for a in args], **opts)
def label(self, msg, label):
if self._colormode is None:
return super(colorui, self).label(msg, label)
effects = []
for l in label.split():
s = _styles.get(l, '')
if s:
effects.append(s)
effects = ' '.join(effects)
if effects:
return '\n'.join([render_effects(s, effects)
for s in msg.split('\n')])
return msg
def templatelabel(context, mapping, args):
if len(args) != 2:
# i18n: "label" is a keyword
raise error.ParseError(_("label expects two arguments"))
thing = templater._evalifliteral(args[1], context, mapping)
# apparently, repo could be a string that is the favicon?
repo = mapping.get('repo', '')
if isinstance(repo, str):
return thing
label = templater.stringify(args[0][0](context, mapping, args[0][1]))
label = templater.runtemplate(context, mapping,
templater.compiletemplate(label, context))
thing = templater.stringify(thing)
label = templater.stringify(label)
return repo.ui.label(thing, label)
def uisetup(ui):
if ui.plain():
return
if not isinstance(ui, colorui):
colorui.__bases__ = (ui.__class__,)
ui.__class__ = colorui
def colorcmd(orig, ui_, opts, cmd, cmdfunc):
mode = _modesetup(ui_, opts['color'])
colorui._colormode = mode
if mode:
extstyles()
configstyles(ui_)
return orig(ui_, opts, cmd, cmdfunc)
extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
templater.funcs['label'] = templatelabel
def extsetup(ui):
commands.globalopts.append(
('', 'color', 'auto',
# i18n: 'always', 'auto', and 'never' are keywords and should
# not be translated
_("when to colorize (boolean, always, auto, or never)"),
_('TYPE')))
if os.name != 'nt':
w32effects = None
else:
import re, ctypes
_kernel32 = ctypes.windll.kernel32
_WORD = ctypes.c_ushort
_INVALID_HANDLE_VALUE = -1
class _COORD(ctypes.Structure):
_fields_ = [('X', ctypes.c_short),
('Y', ctypes.c_short)]
class _SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short)]
class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', _COORD),
('dwCursorPosition', _COORD),
('wAttributes', _WORD),
('srWindow', _SMALL_RECT),
('dwMaximumWindowSize', _COORD)]
_STD_OUTPUT_HANDLE = 0xfffffff5L # (DWORD)-11
_STD_ERROR_HANDLE = 0xfffffff4L # (DWORD)-12
_FOREGROUND_BLUE = 0x0001
_FOREGROUND_GREEN = 0x0002
_FOREGROUND_RED = 0x0004
_FOREGROUND_INTENSITY = 0x0008
_BACKGROUND_BLUE = 0x0010
_BACKGROUND_GREEN = 0x0020
_BACKGROUND_RED = 0x0040
_BACKGROUND_INTENSITY = 0x0080
_COMMON_LVB_REVERSE_VIDEO = 0x4000
_COMMON_LVB_UNDERSCORE = 0x8000
# http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
w32effects = {
'none': -1,
'black': 0,
'red': _FOREGROUND_RED,
'green': _FOREGROUND_GREEN,
'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
'blue': _FOREGROUND_BLUE,
'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
'bold': _FOREGROUND_INTENSITY,
'black_background': 0x100, # unused value > 0x0f
'red_background': _BACKGROUND_RED,
'green_background': _BACKGROUND_GREEN,
'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
'blue_background': _BACKGROUND_BLUE,
'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
_BACKGROUND_BLUE),
'bold_background': _BACKGROUND_INTENSITY,
'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only
'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
}
passthrough = set([_FOREGROUND_INTENSITY,
_BACKGROUND_INTENSITY,
_COMMON_LVB_UNDERSCORE,
_COMMON_LVB_REVERSE_VIDEO])
stdout = _kernel32.GetStdHandle(
_STD_OUTPUT_HANDLE) # don't close the handle returned
if stdout is None or stdout == _INVALID_HANDLE_VALUE:
w32effects = None
else:
csbi = _CONSOLE_SCREEN_BUFFER_INFO()
if not _kernel32.GetConsoleScreenBufferInfo(
stdout, ctypes.byref(csbi)):
# stdout may not support GetConsoleScreenBufferInfo()
# when called from subprocess or redirected
w32effects = None
else:
origattr = csbi.wAttributes
ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
re.MULTILINE | re.DOTALL)
def win32print(text, orig, **opts):
label = opts.get('label', '')
attr = origattr
def mapcolor(val, attr):
if val == -1:
return origattr
elif val in passthrough:
return attr | val
elif val > 0x0f:
return (val & 0x70) | (attr & 0x8f)
else:
return (val & 0x07) | (attr & 0xf8)
# determine console attributes based on labels
for l in label.split():
style = _styles.get(l, '')
for effect in style.split():
attr = mapcolor(w32effects[effect], attr)
# hack to ensure regexp finds data
if not text.startswith('\033['):
text = '\033[m' + text
# Look for ANSI-like codes embedded in text
m = re.match(ansire, text)
try:
while m:
for sattr in m.group(1).split(';'):
if sattr:
attr = mapcolor(int(sattr), attr)
_kernel32.SetConsoleTextAttribute(stdout, attr)
orig(m.group(2), **opts)
m = re.match(ansire, m.group(3))
finally:
# Explicitly reset original attributes
_kernel32.SetConsoleTextAttribute(stdout, origattr)
| spraints/for-example | hgext/color.py | Python | gpl-2.0 | 19,828 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import Skype4Py
import psycopg2
import sys
import time
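# .dbconfig.dat is assumed to hold a single psycopg2/libpq DSN string, e.g.
# "dbname=skype user=bot host=localhost" (example values, not from this repo).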
connection_string=open('./.dbconfig.dat','r').read()
cn=psycopg2.connect(connection_string)
cr=cn.cursor()
cr.execute('INSERT INTO skype_net_send("to", message) VALUES (%s, %s);',(sys.argv[1],sys.argv[2]))
cn.commit()
cr.close()
cn.close() | tech22info/skype-secretary | daemon/skype_netsend.py | Python | gpl-2.0 | 339 |
# Create your views here.
from django.views.generic import ListView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from galery.models import CarouArt, AutreIllu, Logo, Faction, Sectorial, Photo, Fig
from galery.serializer import PhotoSerializer
class Main_carousel(ListView):
"""
    This class provides quick access to the article list and returns other useful data for the page.
"""
model=CarouArt
context_object_name="articles"
template_name="galery/carousel.html"
def get_queryset(self):
"""modify standard data recovery"""
temp = CarouArt.objects.all().order_by('-date')
return temp
def get_context_data(self, **kwargs):
"""recover and modify the context data to add the list of categories"""
context = super(Main_carousel, self).get_context_data(**kwargs)
#add the new context data
context['illus'] = AutreIllu.objects.filter(in_carousel=True)
context['logos'] = Logo.objects.filter(display_carousel=True)
return context
class Main_galery(ListView):
"""
    This class gathers all the data for the galery side bar
"""
model=Faction
context_object_name="factions"
template_name="galery/galery.html"
def get_queryset(self):
"""modify standard data recovery"""
return Faction.objects.all().order_by('name')
def get_context_data(self, **kwargs):
"""add sectorial and figs data"""
context = super(Main_galery, self).get_context_data(**kwargs)
factions = context['factions']
total = []
for f in factions:
temp = [f, ]
sector = Sectorial.objects.filter(factions = f).order_by('name')
for s in sector:
figus = Fig.objects.filter(sectorial = s).order_by('name')
temp.append([s, figus])
total.append(temp)
context['sidemenus'] = total
return context
@api_view(['GET', ])
def pics_list(request, faction = None, secto = None, fig = None):
"""
List all pictures
"""
if faction == None and secto == None and fig == None:
pics = Photo.objects.all()
elif faction != None and secto == None and fig == None:
pics = Photo.objects.filter(faction = faction)
elif faction != None and secto != None and fig == None:
pics = Photo.objects.filter(faction = faction, sectorial = secto)
elif faction != None and secto != None and fig != None:
pics = Photo.objects.filter(faction = faction, sectorial = secto, fig = fig)
serializer = PhotoSerializer(pics, many = True)
return Response(serializer.data)
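# Note: the URL patterns are assumed to pass faction/secto/fig as primary-key
# values, so filter(faction=faction) filters the foreign key by pk;
# combinations not handled above (e.g. secto without faction) are assumed
# unreachable through the routing, otherwise `pics` would be unbound.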
| sylvainnizac/Djangoctopus | galery/views.py | Python | gpl-2.0 | 2,685 |
#!/usr/bin/env python
###########################################################################
# obd_sensors.py
#
# Copyright 2004 Donour Sizemore ([email protected])
# Copyright 2009 Secons Ltd. (www.obdtester.com)
#
# This file is part of pyOBD.
#
# pyOBD is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pyOBD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyOBD; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
###########################################################################
def hex_to_int(str):
i = eval("0x" + str, {}, {})
return i
def maf(code):
code = hex_to_int(code)
return code * 0.00132276
def throttle_pos(code):
code = hex_to_int(code)
return code * 100.0 / 255.0
def intake_m_pres(code): # raw OBD value is kPa; the table below reports psi
    code = hex_to_int(code)
    return code * 0.14504  # 1 kPa = 0.14504 psi
def rpm(code):
code = hex_to_int(code)
return code / 4
def speed(code):
code = hex_to_int(code)
return code / 1.60934
def percent_scale(code):
code = hex_to_int(code)
return code * 100.0 / 255.0
def timing_advance(code):
code = hex_to_int(code)
return (code - 128) / 2.0
def sec_to_min(code):
code = hex_to_int(code)
return code / 60
def temp(code):
code = hex_to_int(code)
return code - 40
def cpass(code):
#fixme
return code
def fuel_trim_percent(code):
code = hex_to_int(code)
return (code - 128.0) * 100.0 / 128
def dtc_decrypt(code):
#first byte is byte after PID and without spaces
num = hex_to_int(code[:2]) #A byte
res = []
if num & 0x80: # is mil light on
mil = 1
else:
mil = 0
# bit 0-6 are the number of dtc's.
num = num & 0x7f
res.append(num)
res.append(mil)
numB = hex_to_int(code[2:4]) #B byte
for i in range(0,3):
res.append(((numB>>i)&0x01)+((numB>>(3+i))&0x02))
numC = hex_to_int(code[4:6]) #C byte
numD = hex_to_int(code[6:8]) #D byte
for i in range(0,7):
res.append(((numC>>i)&0x01)+(((numD>>i)&0x01)<<1))
res.append(((numD>>7)&0x01)) #EGR SystemC7 bit of different
return res
def hex_to_bitstring(str):
bitstring = ""
for i in str:
# silly type safety, we don't want to eval random stuff
if type(i) == type(''):
v = eval("0x%s" % i)
if v & 8 :
bitstring += '1'
else:
bitstring += '0'
if v & 4:
bitstring += '1'
else:
bitstring += '0'
if v & 2:
bitstring += '1'
else:
bitstring += '0'
if v & 1:
bitstring += '1'
else:
bitstring += '0'
return bitstring
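# For example, hex_to_bitstring("A1") returns "10100001".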
class Sensor:
def __init__(self, shortName, sensorName, sensorcommand, sensorValueFunction, u):
self.shortname = shortName
self.name = sensorName
self.cmd = sensorcommand
self.value= sensorValueFunction
self.unit = u
SENSORS = [
Sensor("pids" , " Supported PIDs", "0100", hex_to_bitstring ,"" ),
Sensor("dtc_status" , "Status Since DTC Cleared", "0101", dtc_decrypt ,"" ),
Sensor("dtc_ff" , "DTC Causing Freeze Frame", "0102", cpass ,"" ),
Sensor("fuel_status" , " Fuel System Status", "0103", cpass ,"" ),
Sensor("load" , " Calculated Load Value", "01041", percent_scale ,"" ),
Sensor("temp" , " Coolant Temperature", "0105", temp ,"C" ),
Sensor("short_term_fuel_trim_1", " Short Term Fuel Trim", "0106", fuel_trim_percent,"%" ),
Sensor("long_term_fuel_trim_1" , " Long Term Fuel Trim", "0107", fuel_trim_percent,"%" ),
Sensor("short_term_fuel_trim_2", " Short Term Fuel Trim", "0108", fuel_trim_percent,"%" ),
Sensor("long_term_fuel_trim_2" , " Long Term Fuel Trim", "0109", fuel_trim_percent,"%" ),
Sensor("fuel_pressure" , " Fuel Rail Pressure", "010A", cpass ,"" ),
Sensor("manifold_pressure" , "Intake Manifold Pressure", "010B", intake_m_pres ,"psi" ),
Sensor("rpm" , " Engine RPM", "010C1", rpm ,"" ),
Sensor("speed" , " Vehicle Speed", "010D1", speed ,"MPH" ),
Sensor("timing_advance" , " Timing Advance", "010E", timing_advance ,"degrees"),
Sensor("intake_air_temp" , " Intake Air Temp", "010F", temp ,"C" ),
Sensor("maf" , " Air Flow Rate (MAF)", "0110", maf ,"lb/min" ),
Sensor("throttle_pos" , " Throttle Position", "01111", throttle_pos ,"%" ),
Sensor("secondary_air_status" , " Secondary Air Status", "0112", cpass ,"" ),
Sensor("o2_sensor_positions" , " Location of O2 sensors", "0113", cpass ,"" ),
Sensor("o211" , " O2 Sensor: 1 - 1", "0114", fuel_trim_percent,"%" ),
Sensor("o212" , " O2 Sensor: 1 - 2", "0115", fuel_trim_percent,"%" ),
Sensor("o213" , " O2 Sensor: 1 - 3", "0116", fuel_trim_percent,"%" ),
Sensor("o214" , " O2 Sensor: 1 - 4", "0117", fuel_trim_percent,"%" ),
Sensor("o221" , " O2 Sensor: 2 - 1", "0118", fuel_trim_percent,"%" ),
Sensor("o222" , " O2 Sensor: 2 - 2", "0119", fuel_trim_percent,"%" ),
Sensor("o223" , " O2 Sensor: 2 - 3", "011A", fuel_trim_percent,"%" ),
Sensor("o224" , " O2 Sensor: 2 - 4", "011B", fuel_trim_percent,"%" ),
Sensor("obd_standard" , " OBD Designation", "011C", cpass ,"" ),
Sensor("o2_sensor_position_b" ," Location of O2 sensors" , "011D", cpass ,"" ),
Sensor("aux_input" , " Aux input status", "011E", cpass ,"" ),
Sensor("engine_time" , " Time Since Engine Start", "011F", sec_to_min ,"min" ),
Sensor("engine_mil_time" , " Engine Run with MIL on", "014D", sec_to_min ,"min" ),
]
#___________________________________________________________
def test():
for i in SENSORS:
print i.name, i.value("F")
if __name__ == "__main__":
test()
| lcintron/PyO-X | obd_sensors.py | Python | gpl-2.0 | 7,188 |
# Copyright (C) 2018 Andrew Vitu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from gramps.gen.plug import Gramplet
from collections import defaultdict
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
class HouseTimelineGramplet(Gramplet):
def init(self):
self.house_width = 40
self.set_tooltip(_("Double-click name for details"))
# self.set_text(_("No Family Tree loaded."))
def on_load(self):
self.no_wrap()
tag = self.gui.buffer.create_tag("fixed")
tag.set_property("font", "Courier 8")
if len(self.gui.data) != 1:
self.gui.data[:] = ["001", None]
def db_changed(self):
self.connect(self.dbstate.db,'person-add', self.update)
self.connect(self.dbstate.db,'person-update', self.update)
self.connect(self.dbstate.db,'person-delete', self.update)
def save_update_options(self, widget=None):
style = self.get_option(_("House Icon Style"))
self.gui.data[:] = [style.get_value()]
self.update()
def build_options(self):
from gramps.gen.plug.menu import EnumeratedListOption
# Add types:
style_list = EnumeratedListOption(_("House Icon Style"), self.gui.data[0])
for item in [("001", _("Standard")),
("002", _("Small")),
("003", _("Unicode")),
("004", _("None")),
]:
style_list.add_item(item[0], item[1])
self.add_option(style_list)
def main(self):
self.set_text(_("Processing...") + "\n")
yield True
self.sorted_residents = {}
self.clear_text()
address_count = 0
self.residents_range = []
# get details of people dbhandle, name, address
for p in self.dbstate.db.iter_people():
person_handle = p.handle
primary_name = p.get_primary_name()
person_name = primary_name.get_name()
person_addr = p.get_address_list()
if person_addr:
address_count += 1
for item in person_addr:
# address format from db is:
# [0] street, [1] locality, [2] city, [3] pcode, [4] state/county, [5] country, [6] phone
address = item.get_text_data_list()
date = item.get_date_object()
self.build_parent_address_dict(address,date,person_handle,person_name)
if address_count == 0:
self.set_text(_("There are no individuals with Address data. Please add Address data to people."))
self.build_house()
start, end = self.gui.buffer.get_bounds()
self.gui.buffer.apply_tag_by_name("fixed", start, end)
self.append_text("", scroll_to="begin")
yield False
def build_parent_address_dict(self, address, date, person_handle, person_name):
"""
        Builds self.sorted_residents, the address + person_handle mapping.
        The collection is grouped by locality/city (group_key) and keyed by
        the formatted address (address_key).
"""
# group key represents a group of similar locality/city.
group_key = address[1] + address[2]
# address key is the actual property address.
address_key = self.format_address_key(address)
if group_key not in self.sorted_residents:
self.sorted_residents[group_key] = {address_key: [[date.get_ymd(),person_handle,person_name]]}
elif group_key in self.sorted_residents:
if address_key not in self.sorted_residents[group_key]:
self.sorted_residents[group_key][address_key] = [[date.get_ymd(),person_handle,person_name]]
elif address_key in self.sorted_residents[group_key]:
self.sorted_residents[group_key][address_key] += [[date.get_ymd(),person_handle,person_name]]
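        # Illustrative shape of self.sorted_residents (names hypothetical):
        #   {"SpringfieldIL":
        #       {"742 Evergreen Terrace Springfield IL ":
        #           [["1989-12-17", person_handle, "Homer Simpson"], ...]}}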
def format_address_key(self, address):
"""
Builds a formatted Address string that can be used as a Key.
"""
key = ""
for k in address:
if len(k) > 0:
key += k + " "
return key
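    # e.g. ['12 High St', '', 'York', 'YO1', '', 'UK', ''] -> '12 High St York YO1 UK '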
def build_house(self):
"""
Outputs sorted details from self.sorted_residents.
"""
gl_location = _("Location")
gl_time = _("Time In Family")
gl_first_resident = _("First Resident")
gl_last_resident = _("Last Resident")
gl_timeline = _("Timeline")
gl_unknown = _("Unknown")
gl_total_residents = _("Total Known Residents")
for resident in self.sorted_residents.items():
# sort by house number
for item in sorted(resident[1].items()):
# sort residents of an address by date.
sorted_dates = sorted(item[1],key=lambda k: k[0][0])
residents_reversed = sorted(sorted_dates, reverse=True)
# we need a list of distinct handles to get the correct resident count per address.
distinct_handles = []
for h in sorted_dates:
if h[1] not in distinct_handles:
distinct_handles.append(h[1])
first_year = int(sorted_dates[0][0][0])
last_year = int(sorted_dates[-1][0][0])
                time_in_family = "%d years" % (last_year - first_year) if first_year != 0 else gl_unknown
self.append_text("=========================\n")
self.render_house(self.gui.data[0])
self.append_text("{0}: {1}\n".format(gl_location,item[0]) +
("{0}: {1} years\n".format(gl_time,time_in_family)) +
("{0}: {1} - {2}\n".format(gl_first_resident,sorted_dates[0][0][0],sorted_dates[0][2])) +
("{0}: {1} - {2}\n".format(gl_last_resident,sorted_dates[-1][0][0],sorted_dates[-1][2])) +
("{0}: {1}\n".format(gl_total_residents,len(distinct_handles))) +
" \n")
self.append_text("{0}:\n".format(gl_timeline))
# for each person that is a resident, display the date and name with link.
for detail in sorted_dates:
# if a person has two address details, display the person living there as a range between two dates.
first_handle = detail
last_handle = next(handle for handle in residents_reversed if handle[1] == first_handle[1])
if (detail[0][0] != last_handle[0][0] and detail[1] not in self.residents_range):
self.append_text("{0} -> {1} - ".format(first_handle[0][0],last_handle[0][0]))
self.link(detail[2],"Person",detail[1])
self.append_text("\n")
self.residents_range.append(first_handle[1])
elif detail[1] not in self.residents_range:
self.append_text("{0} - ".format(detail[0][0]))
self.link(detail[2],"Person",detail[1])
self.append_text("\n")
def render_house(self,house_type):
"""
Renders various types of ASCII houses.
"""
if house_type == "001":
self.append_text(
" ~~~\n" +
" __[]________\n" +
"/____________\\ \n" +
"| | \n" +
"| [)(] [)(] | \n" +
"| __ | \n" +
"| | | | \n" +
"|____|__|____| \n" +
" \n"
)
elif house_type == "002":
self.append_text(
" .___. \n" +
"/ \___\\ \n" +
"|_|_#_| \n" +
" \n"
)
elif house_type == "003":
self.append_text(
"⌂ \n"
) | sam-m888/addons-source | HouseTimelineGramplet/housetimeline.py | Python | gpl-2.0 | 8,826 |
suite = {
"name" : "mx",
"libraries" : {
# ------------- Libraries -------------
"JACOCOAGENT" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jacoco/jacocoagent-0.7.1-1.jar"],
"sha1" : "2f73a645b02e39290e577ce555f00b02004650b0",
},
"JACOCOREPORT" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jacoco/jacocoreport-0.7.1-2.jar"],
"sha1" : "a630436391832d697a12c8f7daef8655d7a1efd2",
},
"FINDBUGS_DIST" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/findbugs-3.0.0.zip",
"http://sourceforge.net/projects/findbugs/files/findbugs/3.0.0/findbugs-3.0.0.zip/download",
],
"sha1" : "6e56d67f238dbcd60acb88a81655749aa6419c5b",
},
"SIGTEST" : {
"urls" : [
"http://hg.netbeans.org/binaries/A7674A6D78B7FEA58AF76B357DAE6EA5E3FDFBE9-apitest.jar",
],
"sha1" : "a7674a6d78b7fea58af76b357dae6ea5e3fdfbe9",
},
"CODESNIPPET-DOCLET" : {
"urls" : [
"http://repo1.maven.org/maven2/org/apidesign/javadoc/codesnippet-doclet/0.9/codesnippet-doclet-0.9.jar",
],
"sha1" : "4de316ba4d1e646dc44f92e4b1c4501d79afe595",
},
"JUNIT" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/junit-4.11.jar",
"https://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11.jar",
],
"sha1" : "4e031bb61df09069aeb2bffb4019e7a5034a4ee0",
"eclipse.container" : "org.eclipse.jdt.junit.JUNIT_CONTAINER/4",
"sourceUrls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/junit-4.11-sources.jar",
"https://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11-sources.jar",
],
"sourceSha1" : "28e0ad201304e4a4abf999ca0570b7cffc352c3c",
"dependencies" : ["HAMCREST"],
"licence" : "CPL",
"maven" : {
"groupId" : "junit",
"artifactId" : "junit",
"version" : "4.11",
}
},
"CHECKSTYLE_6.0" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/checkstyle-6.0-all.jar",
"jar:http://sourceforge.net/projects/checkstyle/files/checkstyle/6.0/checkstyle-6.0-bin.zip/download!/checkstyle-6.0/checkstyle-6.0-all.jar",
],
"sha1" : "2bedc7feded58b5fd65595323bfaf7b9bb6a3c7a",
"licence" : "LGPLv21",
"maven" : {
"groupId" : "com.puppycrawl.tools",
"artifactId" : "checkstyle",
"version" : "6.0",
}
},
"CHECKSTYLE_6.15" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/checkstyle-6.15-all.jar",
"http://sourceforge.net/projects/checkstyle/files/checkstyle/6.15/checkstyle-6.15-all.jar",
],
"sha1" : "db9ade7f4ef4ecb48e3f522873946f9b48f949ee",
"licence" : "LGPLv21",
"maven" : {
"groupId" : "com.puppycrawl.tools",
"artifactId" : "checkstyle",
"version" : "6.15",
}
},
"HAMCREST" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/hamcrest-core-1.3.jar",
"https://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
"sha1" : "42a25dc3219429f0e5d060061f71acb49bf010a0",
"sourceUrls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/hamcrest-core-1.3-sources.jar",
"https://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3-sources.jar",
],
"sourceSha1" : "1dc37250fbc78e23a65a67fbbaf71d2e9cbc3c0b",
"licence" : "BSD-new",
"maven" : {
"groupId" : "org.hamcrest",
"artifactId" : "hamcrest-core",
"version" : "1.3",
}
},
"JMH" : {
"sha1" : "7e1577cf6e1f1326b78a322d206fa9412fd41ae9",
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jmh/jmh-runner-1.11.2.jar"],
"sourceSha1" : "12a67f0dcdfe7e43218bf38c1d7fd766122a3dc7",
"sourceUrls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jmh/jmh-runner-1.11.2-sources.jar"],
},
},
"licenses" : {
"GPLv2-CPE" : {
"name" : "GNU General Public License, version 2, with the Classpath Exception",
"url" : "http://openjdk.java.net/legal/gplv2+ce.html"
},
"BSD-new" : {
"name" : "New BSD License (3-clause BSD license)",
"url" : "http://opensource.org/licenses/BSD-3-Clause"
},
"CPL" : {
"name" : "Common Public License Version 1.0",
"url" : "http://opensource.org/licenses/cpl1.0.txt"
},
"LGPLv21" : {
"name" : "GNU Lesser General Public License, version 2.1",
"url" : "http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html"
},
},
"projects" : {
"com.oracle.mxtool.junit" : {
"subDir" : "java",
"sourceDirs" : ["src"],
"dependencies" : [
"JUNIT",
],
"javaCompliance" : "1.8",
},
"com.oracle.mxtool.compilerserver" : {
"subDir" : "java",
"sourceDirs" : ["src"],
"javaCompliance" : "1.7",
},
},
}
| olpaw/mx | mx.mx/suite.py | Python | gpl-2.0 | 5,084 |
__author__ = 'Rizzy'
import os
selection = os.listdir('D:/')
for each in selection:
if 'maya' in each.lower():
print each
| riasatmanzoor/maya-tools | test.py | Python | gpl-2.0 | 136 |
import json
import re
def main():
with open('replacedict.json') as data_file:
repl_dict = json.load(data_file)
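    # replacedict.json is assumed to map regex patterns to replacement values,
    # e.g. {"[0-9]+\\.[0-9]+": "FLOAT"}; each pattern is searched in the text below.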
with open('somethingtoreplace.txt') as data_file:
str_file = data_file.read()
    for item, value in repl_dict.items():
        print '{} : {}'.format(item, value)
        # re.search takes the pattern first, then the string to scan
        searchObj = re.search(item, str_file, flags=0)
        if searchObj:
            print "searchObj.group() : ", searchObj.group()
            # numbered groups exist only if the pattern defines capture groups
            for i in range(1, searchObj.re.groups + 1):
                print "searchObj.group({}) : ".format(i), searchObj.group(i)
#print str_file
#scientific_num = re.compile("^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$")
if __name__ == "__main__":
main()
| ma-tri-x/pytry | replace.py | Python | gpl-2.0 | 713 |
from vsip.view import FVector
from vsip.cuda import dda
from vsip.cuda.module import Function, Module
import numpy
from struct import pack, unpack
def _pack_arguments(*args):
"""Pack arguments from `args` into a single `struct` and return
that, together with a list of objects to be destroyed post-call."""
arg_data = []
cleanup = []
format = ""
for i, arg in enumerate(args):
if isinstance(arg, int):
arg_data.append(arg)
format += 'l'
elif isinstance(arg, float):
arg_data.append(arg)
format += 'd'
elif isinstance(arg, numpy.number):
arg_data.append(arg)
format += arg.dtype.char
elif isinstance(arg, FVector):
data = dda.FVData(arg)
arg_data.append(data)
format += "P"
cleanup.append(data)
elif isinstance(arg, buffer):
arg_data.append(arg)
format += "s"
else:
raise TypeError('invalid type on parameter %d (%s)' %(i + 1, type(arg)))
buf = pack(format, *arg_data)
return buf, cleanup
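# Illustrative use (sketch): packing an int and a float yields struct format 'ld':
#   buf, cleanup = _pack_arguments(2, 0.5)
#   assert len(buf) == struct.calcsize('ld')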
def _function_call(func, *args, **kwds):
grid = kwds.pop("grid", (1,1))
stream = kwds.pop("stream", None)
block = kwds.pop("block", None)
shared = kwds.pop("shared", None)
texrefs = kwds.pop("texrefs", [])
time_kernel = kwds.pop("time_kernel", False)
if kwds:
raise ValueError(
"extra keyword arguments: %s"
% (",".join(kwds.iterkeys())))
if block is None:
raise ValueError, "must specify block size"
func.set_block_shape(*block)
buf, cleanup = _pack_arguments(*args)
func.param_setv(0, buf)
func.param_set_size(len(buf))
if shared is not None:
func.set_shared_size(shared)
for texref in texrefs:
func.param_set_texref(texref)
if stream is None:
        if time_kernel:
            # NOTE: assumes a Context object with synchronize() is available
            # from the CUDA backend; it is not imported at the top of this file.
            Context.synchronize()
from time import time
start_time = time()
func.launch_grid(*grid)
if time_kernel:
Context.synchronize()
return time()-start_time
else:
assert not time_kernel, "Can't time the kernel on an asynchronous invocation"
func.launch_grid_async(grid[0], grid[1], stream)
Function.__call__ = _function_call
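# Usage sketch (assumes a loaded Module `mod` exposing get_function, as in PyCUDA):
#   func = mod.get_function("scale")
#   func(vec, 2.0, block=(128, 1, 1), grid=(1, 1))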
#Function.prepare = function_prepare
#Function.prepared_call = function_prepared_call
#Function.prepared_timed_call = function_prepared_timed_call
#Function.prepared_async_call = function_prepared_async_call
#Function.__getattr__ = function___getattr__
| maxywb/vsipl | sourceryvsipl++-x86-3.1/src/vsipl++/python/cuda/__init__.py | Python | gpl-2.0 | 2,621 |
#!/usr/bin/env python
"""
Copyright (C) Sarah Mount, 2010.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Sarah Mount <[email protected]>'
__date__ = 'Jan 2011'
if __name__ == '__main__':
import benchmarkr
benchmarkr.main()
| snim2/benchmarkr | benchmarkr/__main__.py | Python | gpl-2.0 | 821 |
'''
Created on Jul 29, 2015
@author: krzysztof
'''
import unittest
import UserInputParse as uip
class Test(unittest.TestCase):
def setUp(self):
self.inp1 = ["-s", "/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/combined.cpp",
"-d", "/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/inp1.csv"]
self.inp2 = ["--source=/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/toCSV",
"--destination=/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/toCSV/inp2.csv"]
self.inp3 = ["--recursive",
"--source=/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/toCSV",
"--destination=/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/toCSV/inp3.csv"]
self.comp1 = {"source": "/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/combined.cpp",
"dest": "/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/inp1.csv",
"recursive": False}
self.comp2 = {"source": "/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/toCSV",
"dest": "/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/toCSV/inp2.csv",
"recursive": False}
self.comp3 = {"source": "/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/toCSV",
"dest": "/home/krzysztof/Programming/Eclipse/Python/CppPyDoc/tests/testFiles/toCSV/inp3.csv",
"recursive": True}
def tearDown(self):
pass
def test_inp1_fileAsInput(self):
data = uip.UserInputParse(self.inp1)
source, dest, *cmd = data.getCmd()
self.assertEqual(source, [self.comp1["source"]], "source:Comp: {} != {}".format(source, self.comp1["source"]))
self.assertEqual(dest, self.comp1["dest"], "dest:Comp {} != {}".format(dest, self.comp1["dest"]))
self.assertEqual(cmd[0], self.comp1["recursive"], "cmd[0]:Comp: {} != {}".format(cmd[0], self.comp1["recursive"]))
def test_inp2_dirAsInput(self):
data = uip.UserInputParse(self.inp2)
source, dest, *cmd = data.getCmd()
self.assertEqual(source, [self.comp2["source"]], "source:Comp: {} != {}".format(source, self.comp2["source"]))
self.assertEqual(dest, self.comp2["dest"], "dest:Comp {} != {}".format(dest, self.comp2["dest"]))
self.assertEqual(cmd[0], self.comp2["recursive"], "cmd[0]:Comp: {} != {}".format(cmd[0], self.comp2["recursive"]))
def test_inp3_dirAsInput_Recursive(self):
data = uip.UserInputParse(self.inp3)
source, dest, *cmd = data.getCmd()
self.assertEqual(source, [self.comp3["source"]], "source:Comp: {} != {}".format(source, self.comp3["source"]))
self.assertEqual(dest, self.comp3["dest"], "dest:Comp: {} != {}".format(dest, self.comp3["dest"]))
self.assertEqual(cmd[0], self.comp3["recursive"], "cmd[0]:Comp: {} != {}".format(cmd[0], self.comp3["recursive"]))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | rCorvidae/CppPyDoc | tests/unitTestUserInputParse.py | Python | gpl-2.0 | 3,258 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
"""
@author: chris.hill , <im.weittor#gmail.com>
@copyright: (C) 2014 weittor
@license: GNU General Public License version 2.0 (GPLv2)
@version: 0.1
@contact:
@var:
@type:
@param:
@return:
@rtype:
@note:
@attention:
@bug:
@warning:
@see:
"""
import textwrap
graph_text = '''
As a system administrator, you run across numerous challenges and problems. Managing users, disk space, processes, devices, and backups can cause many system administrators to lose their hair, good humor, or sanity. Shell scripts can help, but they often have frustrating limitations. This is where a full-featured scripting language, such as Python, can turn a tedious task into an easy and, dare I say it, fun one.
'''
print 'Filled without dedent:\n'
print textwrap.fill(graph_text, width=50)
print
dedented_text = textwrap.dedent(graph_text).strip()
print 'Dedented, filled at several widths:\n'
for width in [ 45, 70 ]:
print '%d Columns:\n' %width
print textwrap.fill(dedented_text, width=width)
print
print 'Dedented with hanging indent:\n'
print textwrap.fill(dedented_text,initial_indent='',subsequent_indent=' ' * 4,width=50,)
| weittor/python-100 | module_textwrap.py | Python | gpl-2.0 | 1,184 |
# -*- mode: python; -*-
##
## Script to build an indexed representation of a GFF file for efficient
## retrieval of genes
##
import os
import sys
import time
import glob
import shelve
from collections import defaultdict
# Add misopy path
miso_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, miso_path)
import misopy
import misopy.gff_utils as gff_utils
import misopy.pickle_utils as pickle_utils
import misopy.Gene as gene_utils
import misopy.misc_utils as misc_utils
print(misopy)
COMPRESS_PREFIX = misc_utils.COMPRESS_PREFIX
def compress_event_name(event_name,
prefix=COMPRESS_PREFIX):
event_hash = hash(event_name)
compressed_event_name = "%s_%s" %(prefix, event_hash)
return compressed_event_name
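# e.g. compress_event_name("ENSG00000100345.isoform1") -> "<COMPRESS_PREFIX>_<hash>",
# a short stand-in used to keep pickled filenames filesystem-safe.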
def serialize_genes(gff_genes,
gff_filename,
output_dir,
compress_id=False):
"""
Output genes into pickle files by chromosome, by gene.
If asked, use compressed IDs (hashes) of the 'ID=' field in the GFF.
"""
genes_by_chrom = defaultdict(dict)
# Split up genes by chromosome
for gene_id, gene_info in gff_genes.iteritems():
gene_obj = gene_info["gene_object"]
gene_hierarchy = gene_info["hierarchy"]
genes_by_chrom[gene_obj.chrom][gene_id] = \
{'gene_object': gene_obj,
'hierarchy': gene_hierarchy}
if compress_id:
gene_compressed_id = compress_event_name(gene_id)
# Store compressed ID
genes_by_chrom[gene_obj.chrom][gene_id]['compressed_id'] \
= gene_compressed_id
# Mapping from gene IDs to pickled filename
gene_id_to_filename = {}
# Mapping from compressed IDs (hashes) to gene IDs
compressed_id_to_gene_id = {}
# Serialize all the genes in each chromosome into their
# own directory
for chrom, chrom_genes in genes_by_chrom.iteritems():
if chrom.startswith("chr"):
chrom_dir_name = chrom
else:
# Add chr-prefix for ease of finding directory
# in downstream steps.
chrom_dir_name = "chr%s" %(str(chrom))
# Make directory for chromosome if it doesn't already exist
chrom_dir = os.path.join(output_dir, chrom_dir_name)
if not os.path.isdir(chrom_dir):
print "Making directory: %s" %(chrom_dir)
os.makedirs(chrom_dir)
t1 = time.time()
# Serialize each gene into a separate file
num_genes = len(genes_by_chrom[chrom])
for gene_id, gene_info in genes_by_chrom[chrom].iteritems():
gene_compressed_id = None
if compress_id:
gene_compressed_id = \
genes_by_chrom[chrom][gene_id]['compressed_id']
gene_filename = \
os.path.abspath(os.path.join(chrom_dir,
"%s.pickle" \
%(gene_compressed_id)))
else:
gene_filename = \
os.path.abspath(os.path.join(chrom_dir,
"%s.pickle" %(gene_id)))
# Write each gene/event's pickle file
pickle_utils.write_pickled_file({gene_id:
genes_by_chrom[chrom][gene_id]},
gene_filename)
# Record what filename was associated with this gene ID
gene_id_to_filename[gene_id] = gene_filename
# Record compressed ID (hash) to gene ID
if gene_compressed_id is not None:
compressed_id_to_gene_id[gene_compressed_id] = gene_id
t2 = time.time()
print " - Chromosome serialization took %.2f seconds" %(t2 - t1)
# Shelve the mapping from gene ids to filenames
shelved_filename = os.path.join(output_dir,
"genes_to_filenames.shelve")
shelved_data = shelve.open(shelved_filename)
for k, v in gene_id_to_filename.iteritems():
shelved_data[k] = v
shelved_data.close()
# Shelve the mapping from compressed gene ids to gene ids
shelved_filename = os.path.join(output_dir,
"compressed_ids_to_genes.shelve")
shelved_data = shelve.open(shelved_filename)
for k, v in compressed_id_to_gene_id.iteritems():
shelved_data[k] = v
shelved_data.close()
# Output a list of genes in ordinary GFF format
genes_filename = os.path.join(output_dir, "genes.gff")
print "Outputting gene records in GFF format..."
print " - Output file: %s" %(genes_filename)
with open(gff_filename) as gff_in:
with open(genes_filename, "w") as gff_out:
for line in gff_in:
if line.startswith("#"): continue
record_type = line.strip().split("\t")[2]
if record_type == "gene":
gff_out.write(line)
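# Resulting on-disk layout (sketch):
#   output_dir/
#     chrN/<gene_id or compressed id>.pickle   one pickle per gene
#     genes_to_filenames.shelve                gene ID -> pickle path
#     compressed_ids_to_genes.shelve           hash -> gene ID (when compress_id)
#     genes.gff                                plain GFF of the gene records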
def index_gff(gff_filename, output_dir,
compress_id=False):
"""
Index the given GFF and placed the indexed representation
in the output directory.
"""
print "Indexing GFF..."
if compress_id:
print " - Using compressed IDs to create indexed filenames."
# First check that the GFF is not already indexed
indexed_files = glob.glob(os.path.join(output_dir, "chr*"))
if len(indexed_files) >= 1:
print "%s appears to already be indexed. Aborting." \
%(gff_filename)
return
print " - GFF: %s" %(gff_filename)
print " - Outputting to: %s" %(output_dir)
overall_t1 = time.time()
t1 = time.time()
gff_genes = gene_utils.load_genes_from_gff(gff_filename)
t2 = time.time()
print " - Loading of genes from GFF took %.2f seconds" %(t2 - t1)
t1 = time.time()
serialize_genes(gff_genes,
gff_filename,
output_dir,
compress_id=compress_id)
t2 = time.time()
print " - Serialization of genes from GFF took %.2f seconds" %(t2 - t1)
overall_t2 = time.time()
print "Indexing of GFF took %.2f seconds." %(overall_t2 - overall_t1)
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--index", dest="index_gff", nargs=2, default=None,
help="Index the given GFF. Takes as arguments as GFF filename "
"and an output directory.")
parser.add_option("--compress-id", dest="compress_id", default=False,
action="store_true",
help="Use the compressed version of the GFF \'ID=\' "
"field rather than the ID itself when creating "
".miso output filenames.")
(options, args) = parser.parse_args()
if options.index_gff != None:
gff_filename = \
os.path.abspath(os.path.expanduser(options.index_gff[0]))
output_dir = \
os.path.abspath(os.path.expanduser(options.index_gff[1]))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
index_gff(gff_filename, output_dir,
compress_id=options.compress_id)
else:
print "Indexer of GFF files for use with MISO."
print "Need to pass --index, for example:\n"
print "index_gff --index annotation.gff indexed_annotation/"
if __name__ == '__main__':
main()
| Xinglab/rmats2sashimiplot | src/MISO/misopy/index_gff.py | Python | gpl-2.0 | 7,529 |
# pylint: disable=global-statement
from PyQt4.QtCore import QT_VERSION_STR
import string
import sys
import traceback
from kang import VERSION
__mainWindow = None
__showedexmess = set()
__debug = False
def init(mainWindow, debug=False):
"""
Initialize the module
"""
global __mainWindow
global __debug
__mainWindow = mainWindow
__debug = debug
sys.excepthook = _excepthook
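# Typical wiring (sketch): call once at startup, before any Qt work:
#   exceptionHandler.init(mainWindow, debug=False)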
def _excepthook(excType, excValue, tracebackobj):
"""
Global function to catch unhandled exceptions.
@param excType exception type
@param excValue exception value
@param tracebackobj traceback object
"""
try:
tb = traceback.format_exception(excType, excValue, tracebackobj)
exmess = ''.join(tb)
sys.stderr.write(exmess)
if __mainWindow and not (exmess in __showedexmess):
__showedexmess.add(exmess)
msg = _formatMessage(exmess)
__mainWindow.signalException(msg)
    except:
        # the handler itself must never raise; only re-raise when debugging
        if __debug:
            raise
def _formatMessage(exmess):
"""
Format the exception message
"""
msg = '==========================================================================\n'
msg += 'Kang Version:\t %s\n' % VERSION
msg += 'Python Version:\t %s\n' % unicode(string.replace(sys.version, '\n', ' - '))
msg += 'PyQt Version:\t %s\n' % unicode(QT_VERSION_STR)
msg += 'Operating System: %s\n' % unicode(sys.platform)
regex = __mainWindow.regexMultiLineEdit.toPlainText()
if regex:
msg += '=== REGEX ============================================================\n'
msg += unicode(regex)
if not msg.endswith('\n'):
msg += '\n'
rstr = __mainWindow.stringMultiLineEdit.toPlainText()
if rstr:
msg += '=== STRING ===========================================================\n'
msg += unicode(rstr)
if not msg.endswith('\n'):
msg += '\n'
replace = __mainWindow.replaceTextEdit.toPlainText()
if replace:
msg += '=== REPLACE ==========================================================\n'
msg += unicode(replace)
if not msg.endswith('\n'):
msg += '\n'
if exmess:
msg += '=== EXCEPTION ========================================================\n'
msg += unicode(exmess)
if not msg.endswith('\n'):
msg += '\n'
return msg
| geckoblu/kang | kang/modules/exceptionHandler.py | Python | gpl-2.0 | 2,411 |
__module_name__ = "Tinyurl service plugin"
__module_version__ = "1.0"
__module_description__ = "Turns a long URL into a much shorter one."
__author__ = "Mikica Ivosevic"
import hexchat
import urllib2
def get_short_url(url):
res = urllib2.urlopen('http://tinyurl.com/api-create.php?url=' + url)
return res.read()
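# e.g. get_short_url('http://www.google.com') returns a 'http://tinyurl.com/...' URL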
def shorturl(word, word_eol, userdata):
command = word[0]
if len(word) < 2:
print("Usage eg: /shorturl http://www.google.com")
else:
if command == "shorturl":
            print 'Short url: ' + get_short_url(word[1])
    # EAT_ALL stops HexChat from also treating /shorturl as an unknown command
    return hexchat.EAT_ALL
hexchat.hook_command("shorturl",shorturl)
hexchat.prnt(__module_name__ + ' version ' + __module_version__ + ' loaded.')
| mikicaivosevic/hexchat-shorturl-addon | short_url.py | Python | gpl-2.0 | 653 |
# vim: expandtab sw=4 ts=4 sts=4:
#
# Copyright © 2003 - 2018 Michal Čihař <[email protected]>
#
# This file is part of python-gammu <https://wammu.eu/python-gammu/>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import gammu.data
class DataTest(unittest.TestCase):
def test_connections(self):
self.assertTrue("at" in gammu.data.Connections)
def test_errors(self):
self.assertTrue("ERR_INSTALL_NOT_FOUND" in gammu.data.Errors)
self.assertEqual(gammu.data.ErrorNumbers[73], "ERR_NETWORK_ERROR")
| gammu/python-gammu | test/test_data.py | Python | gpl-2.0 | 1,209 |
#!/usr/bin/python3
# Opening Facebook data files
with open('friends.htm', 'r') as file:
friend_file = [line.strip() for line in file][0]
with open('events.htm', 'r') as file:
event_file = [line.strip() for line in file][0]
with open('messages.htm', 'r') as file:
messages_file_line_list = [line.strip() for line in file]
with open('pokes.htm', 'r') as file:
poke_file = [line.strip() for line in file][0]
###########################################################################
"""Counts the number of messages"""
###########################################################################
# Counts the number of messages
messages_count = 0
for line in messages_file_line_list:
messages_count += line.count('UTC')
print("Number of messages:", messages_count)
###########################################################################
"""Counts the number of conversation threads"""
###########################################################################
# Counts the number of different conversation threads
thread_count = 0
for line in messages_file_line_list:
thread_count += line.count('<div class="thread">')
print("Number of conversations:", thread_count)
###########################################################################
"""Messages Over Time Analysis"""
###########################################################################
# For Creating Exhaustive Date List
import datetime
# Creates list of all times
times_list = []
for line in messages_file_line_list:
span_split_line = line.split("span")
for string in span_split_line:
if '"meta">' in string:
times_list.append(string.split('"meta">')[1].split('</')[0])
# Creates a list of dates
date_list = []
for time in times_list:
if time.split(" at ")[0] in date_list:
pass
else:
date_list.append(time.split(" at ")[0])
# Creates ISO date list
iso_date_list = []
months = {"january":"01", "february":"02", "march":"03", "april":"04",
"may":"05", "june":"06", "july":"07", "august":"08",
"september":"09", "october":"10", "november":"11", "december":"12"}
for date in date_list:
iso_date = []
# Finds Year
iso_date.append(date.split(" ")[3])
# Finds Month
month = date.split(" ")[2].lower()
iso_date.append(months[month])
# Finds Date
if int(date.split(" ")[1]) < 10:
iso_date.append("0"+date.split(" ")[1])
else:
iso_date.append(date.split(" ")[1])
# Adds to main list
if "".join(iso_date) in iso_date_list:
pass
else:
iso_date_list.append("".join(iso_date))
# Counts how many messages per day
date_count_list = [0 for item in date_list]
for time in times_list:
date_count_list[date_list.index(time.split(" at ")[0])] += 1
# Groups date which are the same together
unique_date_list = []
for date in iso_date_list:
if date in unique_date_list:
pass
else:
unique_date_list.append(date)
# Creating a message count list for all unique dates
unique_date_list = sorted(unique_date_list, key=int)
unique_date_count_list = [0 for item in unique_date_list]
for date in iso_date_list:
unique_date_count_list[unique_date_list.index(date)] += date_count_list[iso_date_list.index(date)]
# Creates a list of every date in usage window
all_dates = []
for date_item in unique_date_list:
date_int = int(str(datetime.datetime.strptime(date_item, '%Y%m%d').date()).replace("-",""))
if date_int < 20040204: # the facebook start date (see issue 2)
pass
else:
date1 = str(date_int)
break
date2 = unique_date_list[len(unique_date_list)-1]
start = datetime.datetime.strptime(date1, '%Y%m%d')
end = datetime.datetime.strptime(date2, '%Y%m%d')
step = datetime.timedelta(days = 1)
while start <= end:
all_dates.append(str(start.date()).replace("-",""))
start += step
# A count last which accounts for inactive dates
all_date_count_list = []
for date in all_dates:
if date in unique_date_list:
all_date_count_list.append(unique_date_count_list[unique_date_list.index(date)])
else:
all_date_count_list.append(0)
# Writes data to file
target = open('date_data.txt', 'w')
target.truncate()
for date in all_dates:
target.write("%s %s \n" % (date, all_date_count_list[all_dates.index(date)]))
target.close()
print("Created message date data")
# Checks all messages counted - I lose 4 messages
count_check = 0
for item in all_date_count_list:
count_check += int(item)
print("Actual messages:", count_check)
###########################################################################
"""Message at Time Analysis"""
###########################################################################
# Makes use of times_list and date_count_list created previously
# Creates list of all times
all_times = []
for hour in range(0,24):
for mins in range(0,60):
hourv, minv = "", ""
if hour < 10:
hourv = "0"+str(hour)
else:
hourv = hour
if mins < 10:
minv = "0"+str(mins)
else:
minv = mins
all_times.append(str(hourv)+str(minv))
# Creates a list of dates
time_list = []
for time in times_list:
time_list.append(time.split(" at ")[1].split(" ")[0].replace(":",""))
unique_time_list = []
for time in time_list:
if time in unique_time_list:
pass
else:
unique_time_list.append(time)
unique_time_list = sorted(unique_time_list, key=int)
unique_time_count_list = [0 for item in unique_time_list]
for time in time_list:
unique_time_count_list[unique_time_list.index(time)] += 1
# Build per-minute counts across the whole day, recording never-used times
all_unique_time_list = []
all_unique_time_count_list = []
never_messaged_times =[]
# binary_message_list = [] for heatmapping
for time in all_times:
all_unique_time_list.append(time)
if time in unique_time_list:
all_unique_time_count_list.append(unique_time_count_list[unique_time_list.index(time)])
# binary_message_list.append(1) for heatmapping
else:
time_parts = list(str(time))
time_parts.insert(2,":")
time = "".join(time_parts)
never_messaged_times.append(time)
all_unique_time_count_list.append(0)
# binary_message_list.append(0) for heat mapping
print("Never sent a message at the following %d times: %s" % (len(never_messaged_times), ", ".join(never_messaged_times)))
# Build axis labels: only whole hours divisible by three get an "HH:MM" label
time_labs=[]
for time in all_unique_time_list:
if (list(str(time))[2]+list(str(time))[3] == "00") and (int(time)%3 == 0):
time_parts = list(str(time))
time_parts.insert(2, ":")
time_labs.append("".join(time_parts))
else:
time_labs.append("")
target = open('time_data.txt', 'w')
target.truncate()
for x in range(0, len(all_unique_time_list)):  # include the final minute of the day
time_parts = list(str(all_unique_time_list[x]))
time_parts.insert(2,":")
time = "".join(time_parts)
target.write("%s %s %s \n" % (time, all_unique_time_count_list[x], time_labs[x])) # binary_message_list[x]
target.close()
print("Created message time data")
###########################################################################
"""Poke Analysis"""
###########################################################################
# Extracting List of Pokes
poke_list = []
poke_file_data = poke_file.split("Pokes")[len(poke_file.split("Pokes"))-1].split("<li>")
for item in poke_file_data:
if "poked" in item:
poke_list.append(item.replace('<div class="meta">', ', ').replace(' UTC+01</div></li>',''))
else:
pass
# Cleaning up data
last_item = poke_list[len(poke_list)-1]
poke_list.pop(len(poke_list)-1)
poke_list.append(last_item.split('</ul></div><div class="footer">')[0])
temp_list = []
for item in poke_list:
temp_list.append(item.split('</div>')[0])
poke_list = temp_list
print("Number of pokes:", len(poke_list))
###########################################################################
"""Friends Analysis"""
###########################################################################
all_friend_data = friend_file.split('<h2>Friends</h2>')[len(friend_file.split('<h2>Friends</h2>'))-1]
# Current Friends
current_friend_data = all_friend_data.split('<h2>Sent Friend Requests</h2>')[0].split('<li>')
temp_list = []
for friend in current_friend_data:
temp_list.append(friend.replace("</li>","").replace("</ul>",""))
temp_list.pop(0)
current_friend_data = temp_list
print("Number of current friends:", len(current_friend_data))
# Sent Friend Requests
sent_friend_data = all_friend_data.split('<h2>Sent Friend Requests</h2>')[1].split('<h2>Received Friend Requests</h2>')[0].split('<li>')
temp_list = []
for friend in sent_friend_data:
temp_list.append(friend.replace("</li>","").replace("</ul>",""))
temp_list.pop(0)
sent_friend_data = temp_list
print("Number of sent friend requests:", len(sent_friend_data))
# Received Friend Requests
received_friend_data = all_friend_data.split('<h2>Received Friend Requests</h2>')[1].split('<h2>Removed Friends</h2>')[0].split('<li>')
temp_list = []
for friend in received_friend_data:
    temp_list.append(friend.replace("</li>","").replace("</ul>",""))
temp_list.pop(0)
received_friend_data = temp_list
print("Number of received friend requests:", len(received_friend_data))
# Removed Friends
removed_friend_data = all_friend_data.split('<h2>Removed Friends</h2>')[1].split('<li>')
temp_list = []
for friend in removed_friend_data:
temp_list.append(friend.replace("</li>","").replace("</ul>",""))
temp_list.pop(0)
removed_friend_data = temp_list
last_item = removed_friend_data[len(removed_friend_data)-1].split('</div><div class="footer">')[0]
removed_friend_data.pop(len(removed_friend_data)-1)
removed_friend_data.append(last_item)
print("Number of removed friends:", len(removed_friend_data))
###########################################################################
"""Event Analysis"""
###########################################################################
event_data = event_file.split('<h2>Events</h2>')[len(event_file.split('<h2>Events</h2>'))-1].split('<li>')
event_data.pop(0)
temp_list = []
for event in event_data:
temp_list.append(event.split('<p class="meta">'))
event_data = temp_list
temp_list = []
for event in event_data:
temp_list.append([event[0]]+event[1].replace('</p></li>','').split('<br />'))
event_data = temp_list
last_item = [event_data[len(event_data)-1][0], event_data[len(event_data)-1][1], event_data[len(event_data)-1][2].split('</ul></div><div class="footer">')[0]]
event_data.pop(len(event_data)-1)
event_data.append(last_item)
print("Number of events:", len(event_data))
# See Issue 1
# day_list = ["Monday,","Tuesday,","Wednesday,","Thursday,","Friday,","Saturday,","Sunday,"]
# for event in temp_list:
# for word in event.split(" "):
# if word in day_list:
# if event.split(" ").index(word)-1 == 0:
# print(event.split(event.split(" ")[event.split(" ").index(word)]))
# break
# else:
# print(event.split(event.split(" ")[event.split(" ").index(word)-1]))
# break
target = open('event_data.txt', 'w')
target.truncate()
for event in event_data:
target.write("%s \n" % event[2].replace(" r","R"))
target.close()
| Foggalong/Facebook-Data-Analyser | scrape.py | Python | gpl-2.0 | 10,780 |
""" A model of an Infrastructure Cluster in CFME
:var page: A :py:class:`cfme.web_ui.Region` object describing common elements on the
Cluster pages.
"""
from functools import partial
from navmazing import NavigateToSibling, NavigateToAttribute
from cfme.fixtures import pytest_selenium as sel
from utils.appliance.implementations.ui import navigate_to, navigator, CFMENavigateStep
from utils.appliance import Navigatable
from cfme.web_ui import Quadicon, Region, listaccordion as list_acc, toolbar as tb, flash, \
paginator, match_location
from utils.pretty import Pretty
from utils.wait import wait_for
from utils.api import rest_api
details_page = Region(infoblock_type='detail')
cfg_btn = partial(tb.select, 'Configuration')
pol_btn = partial(tb.select, 'Policy')
match_page = partial(match_location, controller='ems_cluster',
title='Clusters')
# todo: since Cluster always requires provider, it will use only one way to get to Cluster Detail's
# page. But we need to fix this in the future.
class Cluster(Pretty, Navigatable):
""" Model of an infrastructure cluster in cfme
Args:
name: Name of the cluster.
provider: provider this cluster is attached to.
Note:
If given a provider_key, it will navigate through ``Infrastructure/Providers`` instead
of the direct path through ``Infrastructure/Clusters``.
"""
pretty_attrs = ['name', 'provider']
def __init__(self, name, provider, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
        # display names look like "<cluster> in <datacenter>"; keep the cluster part
        self._short_name = self.name.split(' in ')[0].strip()
self.provider = provider
self.quad_name = 'cluster'
col = rest_api().collections
self._id = [cl.id for cl in col.clusters.all if cl.name == self._short_name
and cl.ems_id == self.provider.id][-1]
def delete(self, cancel=True):
"""
Deletes a cluster from CFME
Args:
cancel: Whether to cancel the deletion, defaults to True
"""
navigate_to(self, 'Details')
cfg_btn('Remove from the VMDB', invokes_alert=True)
sel.handle_alert(cancel=cancel)
def wait_for_delete(self):
wait_for(lambda: not self.exists, fail_condition=False,
message="Wait cluster to disappear", num_sec=500, fail_func=sel.refresh)
def wait_for_appear(self):
wait_for(lambda: self.exists, fail_condition=False,
message="Wait cluster to appear", num_sec=1000, fail_func=sel.refresh)
def get_detail(self, *ident):
""" Gets details from the details infoblock
The function first ensures that we are on the detail page for the specific cluster.
Args:
*ident: An InfoBlock title, followed by the Key name, e.g. "Relationships", "Images"
Returns: A string representing the contents of the InfoBlock's value.
"""
navigate_to(self, 'Details')
return details_page.infoblock.text(*ident)
@property
def exists(self):
        try:
            navigate_to(self, 'Details')
            quad = Quadicon(self.name, self.quad_name)
            return sel.is_displayed(quad)
        except sel.NoSuchElementException:
            return False
@property
def id(self):
"""extracts cluster id for this cluster"""
return self._id
@property
def short_name(self):
"""returns only cluster's name exactly how it is stored in DB (without datacenter part)"""
return self._short_name
def run_smartstate_analysis(self):
navigate_to(self, 'Details')
tb.select('Configuration', 'Perform SmartState Analysis', invokes_alert=True)
sel.handle_alert(cancel=False)
flash.assert_message_contain('Cluster / Deployment Role: scan successfully initiated')
@navigator.register(Cluster, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
from cfme.web_ui.menu import nav
nav._nav_to_fn('Compute', 'Infrastructure', 'Clusters')(None)
def resetter(self):
tb.select("Grid View")
sel.check(paginator.check_all())
sel.uncheck(paginator.check_all())
@navigator.register(Cluster, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.click(Quadicon(self.obj.name, self.obj.quad_name))
def am_i_here(self):
return match_page(summary="{} (Summary)".format(self.obj.name))
@navigator.register(Cluster, 'DetailsFromProvider')
class DetailsFromProvider(CFMENavigateStep):
def step(self):
navigate_to(self.obj.provider, 'Details')
list_acc.select('Relationships', 'Show all managed Clusters', by_title=True, partial=False)
sel.click(Quadicon(self.obj.name, self.obj.quad_name))
def am_i_here(self):
return match_page(summary="{} (Summary)".format(self.obj.name))
| kzvyahin/cfme_tests | cfme/infrastructure/cluster.py | Python | gpl-2.0 | 5,046 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import absolute_import
import httpretty
from mock import patch
from invenio.testsuite import InvenioTestCase
class DataCiteTasksTest(InvenioTestCase):
def setUp(self):
self.app.config['CFG_DATACITE_DOI_PREFIX'] = "10.1234"
from invenio.modules.pidstore.models import PersistentIdentifier
self.pid = PersistentIdentifier.create("doi", "10.1234/invenio.1234")
def tearDown(self):
from invenio.modules.pidstore.models import PersistentIdentifier
PersistentIdentifier.query.filter_by(
pid_value=self.pid.pid_value
).delete()
def patch_get_record(self, get_record_patch):
from invenio.modules.records.api import Record
r = Record(
json={
self.app.config['PIDSTORE_DATACITE_RECORD_DOI_FIELD']:
'10.1234/invenio.1234',
'recid': 1,
},
master_format='marc'
)
get_record_patch.return_value = r
@patch('invenio.modules.records.api.get_record')
@httpretty.activate
def test_sync_registered(self, get_record_patch):
self.patch_get_record(get_record_patch)
httpretty.register_uri(
httpretty.GET,
"https://mds.datacite.org/doi/10.1234/invenio.1234",
body='http://invenio-software.org/record/1234',
status=200
)
from invenio.modules.pidstore.tasks import datacite_sync
from invenio.modules.pidstore.models import PersistentIdentifier
pid = PersistentIdentifier.get("doi", "10.1234/invenio.1234")
assert pid.status == self.app.config['PIDSTORE_STATUS_NEW']
datacite_sync(1)
pid = PersistentIdentifier.get("doi", "10.1234/invenio.1234")
assert pid.status == self.app.config['PIDSTORE_STATUS_REGISTERED']
| MSusik/invenio | invenio/modules/pidstore/testsuite/test_tasks_datacite.py | Python | gpl-2.0 | 2,632 |
from nestly import Nest
from nestly.scons import SConsWrap, name_targets
import os
import os.path
import numpy
import appconfig
config = appconfig.read('config.yaml')
# Base folder
nest = Nest()
wrap = SConsWrap(nest, os.path.join(config['cluster']['folder'],
config['cluster']['algorithms']['srmcl']['folder']))
env = Environment(ENV=os.environ)
# Used for resolving what type of execution environment will be used.
exec_env = appconfig.ExecutionEnvironment(ARGUMENTS, supported_env=['pbs', 'sge', 'local'])
# Variation
# don't include root as we don't want it embedded in this nest hierarchy
hic_paths = appconfig.get_precedents(config['map_folder'], config['hic2ctg'], prepend_root=False, tips_only=True)
wrap.add('hic_path', hic_paths)
#
# TODO, this needs to be placed in mapping
#
@wrap.add_target('make_graph')
@name_targets
def make_graph(outdir, c):
# add the root back in because we need to refer to the file
ref_path = os.path.join(config['map_folder'], c['hic_path'])
hic_bam = str(os.path.join(ref_path, config['hic2ctg']))
source = hic_bam
targets = appconfig.prepend_paths(outdir, ['edges.csv', 'nodes.csv'])
action = exec_env.resolve_action({
'pbs': 'bin/pbsrun_GRAPH.sh -s $SOURCE.abspath $TARGETS.abspath',
'sge': 'bin/sgerun_GRAPH.sh -s $SOURCE.abspath $TARGETS.abspath',
'local': 'bin/bamToEdges.py -s $SOURCE.abspath $TARGETS.abspath'
})
return 'edges', 'nodes', env.Command(targets, source, action)
@wrap.add_target('make_cluster_input')
@name_targets
def make_cluster_input(outdir, c):
sources = [str(c['make_graph']['edges']), str(c['make_graph']['nodes'])]
base_out = appconfig.prepend_paths(outdir, config['cluster']['input'])[0]
targets = [base_out, base_out + '.nodemap']
action = exec_env.resolve_action({
'pbs': 'bin/pbsrun_MKMETIS.sh {0[ctg_minlen]} $SOURCES.abspath $TARGETS.abspath'.format(config),
'sge': 'bin/sgerun_MKMETIS.sh {0[ctg_minlen]} $SOURCES.abspath $TARGETS.abspath'.format(config),
'local': 'bin/edgeToMetis.py --fmt metis -m {0[ctg_minlen]} $SOURCES.abspath $TARGETS.abspath'.format(config)
})
return 'output', 'nodemap', env.Command(targets, sources, action)
params = config['cluster']['algorithms']['srmcl']
wrap.add('inflation', numpy.linspace(params['infl']['min'], params['infl']['max'], params['infl']['steps']))
#wrap.add('balance', numpy.linspace(params['bal']['min'], params['bal']['max'], params['bal']['steps']))
# These are added for future sweep possibility. Defaults for now
wrap.add('balance', [0.5])
wrap.add('penalty', [1.25])
wrap.add('redundancy', [0.6])
wrap.add('quality', [0])
@wrap.add_target('do_cluster')
@name_targets
def do_cluster(outdir, c):
# TODO run over both weighted/unweighted?
source = c['make_cluster_input']['output']
target = appconfig.prepend_paths(outdir, config['cluster']['output'] + '.metis')
action = exec_env.resolve_action({
'pbs': 'bin/pbsrun_SRMCL.sh -b {0[balance]} -i {0[inflation]} $SOURCE.abspath $TARGET.abspath'.format(c),
'sge': 'bin/sgerun_SRMCL.sh -b {0[balance]} -i {0[inflation]} $SOURCE.abspath $TARGET.abspath'.format(c),
'local': 'bin/srmcl -b {0[balance]} -i {0[inflation]} -o $TARGET.abspath $SOURCE.abspath'.format(c)
})
return 'output', env.Command(target, source, action)
@wrap.add_target('do_convert')
@name_targets
def do_convert(outdir, c):
# TODO run over both weighted/unweighted?
sources = [c['make_cluster_input']['nodemap'], c['do_cluster']['output']]
target = appconfig.prepend_paths(outdir, config['cluster']['output'])
action = exec_env.resolve_action({
'pbs': 'bin/metisClToMCL.py $SOURCES.abspath $TARGET.abspath'.format(c),
'sge': 'bin/metisClToMCL.py $SOURCES.abspath $TARGET.abspath'.format(c),
'local': 'bin/metisClToMCL.py $SOURCES.abspath $TARGET.abspath'.format(c)
})
return 'output', env.Command(target, sources, action)
@wrap.add_target('do_score')
def do_score(outdir, c):
cl_out = c['do_convert']['output']
ref_path = os.path.join(config['map_folder'], c['hic_path'])
ttable = appconfig.search_up(ref_path, config['truth_table'])
if ttable is None:
raise RuntimeError('Could not find an accompanying truth table for associated run {0}'.format(c['hic_path']))
# this target consumes truth table and clustering output
source = [ttable, cl_out]
# this target creates 3 output files
target = ['{0}.{1}'.format(cl_out, suffix) for suffix in ['f1', 'vm', 'bc']]
action = exec_env.resolve_action({
'pbs': 'bin/pbsrun_SCORE.sh $SOURCES.abspath',
'sge': 'bin/sgerun_SCORE.sh $SOURCES.abspath',
'local': 'bin/all_scores.sh $SOURCES.abspath'
})
return env.Command(target, source, action)
wrap.add_controls(Environment())
| koadman/proxigenomics | simulation/pipeline/SConstruct_srmcl.py | Python | gpl-2.0 | 4,892 |
import django.core.handlers.wsgi
import logging
from django.template import loader,Context,RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from ebaysdk import finding
from ebaysdk.exception import ConnectionError
from ebayapi.api import *
from ebayapi import api as ebayapi
from retail import Supplier,ShopInfo,getSupplierFromEbayInfo
import retailtype
from error import *
from lxml import etree
import urllib, random, json, datetime
import zuser
from StringIO import StringIO
def getEbayInfo(request):
    token = getToken(request)  # populates request.session['ebayinfo'] as a side effect
if ('ebayinfo' in request.session):
return request.session.get('ebayinfo',{})
else:
return {}
def getTokenFromEbayInfo(ebayinfo):
return ebayinfo['token']
def ebay_ajax_prefix(handler):
def rst_handler(request,*args,**kargs):
token = getToken(request)
if token:
ebayinfo = getEbayInfo(request)
return handler(request,ebayinfo,*args,**kargs)
else:
return returnError("Not authorised")
return rst_handler
def ebay_view_prefix(handler):
def rst_handler(request,*args,**kargs):
token = getToken(request)
if token:
ebayinfo = getEbayInfo(request)
return handler(request,*args,**kargs)
else:
context = Context({})
return (render_to_response("ebaylogin.html",context,context_instance=RequestContext(request)))
return rst_handler
def GetXSLT(xslt_context,xslt_template):
xslt_template = loader.get_template(xslt_template)
xslt_str = xslt_template.render(xslt_context)
xslt_doc = etree.parse(StringIO(xslt_str))
xslt = etree.XSLT(xslt_doc)
return xslt
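# Example (sketch, mirrors ebayordersajax below):
#   xslt = GetXSLT(Context({}), 'xslt/EbayOrdersJSON.xslt')
#   rst = unicode(xslt(xml_doc))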
def checkSuccess(doc):
ack = doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
    return 'Success' in ack.text
def getItem(itemid,token):
item = GetItem(itemid,token)
xml_doc = etree.parse(StringIO(item))
ack = xml_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if('Success' in ack.text):
title = xml_doc.xpath("//xs:Title",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
price = xml_doc.xpath("//xs:ConvertedCurrentPrice",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
        return {'label':title,'value':price}
    else:
        return None
# We will save the referrer so that we can route back
def auth(request):
# We first need to check whether this session is already linked to some ebay shop or not.
token = getToken(request)
if token:
return HttpResponseRedirect('/admin/')
else:
if ('HTTP_REFERER' in request.META):
request.session['continue'] = request.META['HTTP_REFERER']
sessionid = GetSessionID(request)
xml_doc = etree.parse(StringIO(sessionid))
ack = xml_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if('Success' in ack.text):
session = xml_doc.xpath("//xs:SessionID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
ebayinfo = request.session.get('ebayinfo',{})
ebayinfo['session'] = session.text
request.session['ebayinfo'] = ebayinfo
args = urllib.quote_plus("zre="+request.META['HTTP_HOST'])
token = GetToken(args,session.text)
return token
else:
return HttpResponse(ack.text)
def logoutebay(request):
request.session['ebayinfo'] = None
return HttpResponseRedirect('/admin/')
def authsuccess(request):
return HttpResponseRedirect('http://' + request.GET['zre'])
def authfail(request):
return HttpResponseRedirect('http://' + request.GET['zre'])
# This private function gets the ebay token if it exists in the current session. It will try fetch one if ebay is connected. It returns None if failed to get a token.
def getToken(request):
if (not 'ebayinfo' in request.session) or (not request.session['ebayinfo']):
request.session['ebayinfo'] = {}
ebayinfo = request.session.get('ebayinfo',{})
user = zuser.getCurrentUser(request)
# we are going to fetch the token if it does not exist yet
token = ""
if (('token' in ebayinfo) and (ebayinfo['token'])):
token = ebayinfo['token']
else:
if ('session' in ebayinfo):
token = FetchToken(request,ebayinfo['session'])
xml_doc = etree.parse(StringIO(token))
ack = xml_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if('Success' in ack.text):
token = xml_doc.xpath("//xs:eBayAuthToken",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
token = token.text
else:
msg = xml_doc.xpath("//xs:LongMessage",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
ebayerror = msg.text
ebayinfo['error'] = ebayerror
# should not update ebayinfo in request.session
# request.session['ebayinfo'] = ebayinfo
logging.info("Can not get token from ebay id:" + token)
if (not token): # can not get token from session
if user:
usr = user
if (usr and usr.ebaytoken):
token = usr.ebaytoken
# By the above computation we have tried to get the token
if (token):
ebayinfo['token'] = token
else:
logging.info("Can not get session for ebay auth")
return None
# so far we might need to update the token of the current user
if user:
usr = user
if (usr):
usr.ebaytoken = token
usr.put()
logging.info("ebayinfo:" + json.dumps(ebayinfo))
if ('token' in ebayinfo) and ebayinfo['token']:
request.session['ebayinfo'] = ebayinfo
# here we try to get as much info as possible from a ebay token
if((not 'id' in ebayinfo) or (not 'email' in ebayinfo)):
user = GetUserInfo(token)
user_doc = etree.parse(StringIO(user))
ack = user_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if ('Success' in ack.text):
email = user_doc.xpath("//xs:Email",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
ebayinfo['email'] = email.text
uid = user_doc.xpath("//xs:UserID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
ebayinfo['id'] = uid.text
else:
request.session['ebayinfo'] = {}
logging.info("Can not find email address in ebayinfo")
return None
if((not 'store' in ebayinfo) or (not 'logo' in ebayinfo) or (not 'category' in ebayinfo)):
store = GetStore(token)
store_doc = etree.parse(StringIO(store))
ack = store_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if ('Success' in ack.text):
name = store_doc.xpath("//xs:Store/xs:Name",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
ebayinfo['store'] = name.text
logo = store_doc.xpath("//xs:Store/xs:Logo/xs:URL",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})
if logo:
ebayinfo['logo'] = logo[0].text
else:
ebayinfo['logo'] = None
cgs = {}
categories = store_doc.xpath("//xs:Store/xs:CustomCategories/xs:CustomCategory",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})
for category in categories:
name = category.xpath("./xs:Name",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
id = category.xpath("./xs:CategoryID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
cgs[id] = {'name':name,'children':{}}
childcategories = category.xpath("./xs:ChildCategory",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})
for child in childcategories:
name = child.xpath("./xs:Name",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
cid = child.xpath("./xs:CategoryID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
cgs[id]['children'][cid] = {'name':name}
ebayinfo['categories'] = cgs
else:
request.session['ebayinfo'] = {}
logging.info("Can not find shopinfo in ebayinfo:" + store)
return None
request.session['ebayinfo'] = ebayinfo
currentSite().setebayinfo(json.dumps(ebayinfo))
return ebayinfo['token']
else:
return None
####
# Renders the eBay orders page for a selected day; the order data itself is
# fetched by ebayordersajax below.
####
@ebay_view_prefix
def ebayorders(request):
tt = datetime.datetime.utcnow()
context = Context({"ORDER_GROUP":[tt]})
return (render_to_response("ebayorders.html",context,context_instance=RequestContext(request)))
@ebay_ajax_prefix
def ebayordersajax(request,ebayinfo):
token = getTokenFromEbayInfo(ebayinfo)
year = request.GET['year']
month = request.GET['month']
day = request.GET['day']
tt = datetime.datetime(year=int(year),month=int(month),day=int(day))
ft = tt - datetime.timedelta(hours=120)
tt = tt.strftime("%Y-%m-%dT%H:%M:%S.000Z")
ft = ft.strftime("%Y-%m-%dT%H:%M:%S.000Z")
xml_doc_str = GetOrders(token,ft,tt)
xml_doc = etree.parse(StringIO(xml_doc_str))
xslt = GetXSLT(Context({}),'xslt/EbayOrdersJSON.xslt')
xrst = xslt(xml_doc)
rst = unicode(xrst)
return HttpResponse(rst)
def relist(ebayinfo,item):
token = ebayinfo['token']
config = {'SELLER_ID':ebayinfo['id']}
config['INITIAL'] = item.description
config['ITEM'] = item
config['EXTRA'] = ShopInfo.all().filter("type =","ebay").order("name")
format = loader.get_template("ebay/format.html")
content = format.render(Context(config))
ebayitem = GetItem(item.ebayid,token)
xml_doc = etree.parse(StringIO(ebayitem))
ack = xml_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if('Success' in ack.text):
sellingstatus = xml_doc.xpath("//xs:SellingStatus/xs:ListingStatus",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
if (sellingstatus == "Completed"):
revise = RelistItemSimple(item,token,content)
xml_doc = etree.parse(StringIO(revise))
ack = xml_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if('Success' in ack.text):
ebayid = xml_doc.xpath("//xs:ItemID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
                item.ebayid = ebayid  # store the new listing id returned by the relist
item.put()
return (HttpResponse(revise,mimetype = "text/xml"),item)
else:
return (returnError("Related ebay item is still active"),item)
else:
return (HttpResponse(ebayitem,mimetype = "text/xml"),item)
####
# This function appends general information after the item description.
# It replaces everything after the <!-- below is embeded code --> tag.
####
def format(ebayinfo,itemid):
token = ebayinfo['token']
id = ebayinfo['id']
config = {'SELLER_ID':id}
item = GetItem(itemid,token)
xml_doc = etree.parse(StringIO(item))
ack = xml_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if('Success' in ack.text):
description = xml_doc.xpath("//xs:Description",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
refid = xml_doc.xpath("//xs:SKU",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})
if (not refid):
return returnError('SKU Not Provided')
else:
refid = refid[0].text
# refid = xml_doc.xpath("//xs:ItemID",
# namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
name = xml_doc.xpath("//xs:Title",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
price = xml_doc.xpath("//xs:ConvertedCurrentPrice",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
galleryurl = xml_doc.xpath("//xs:GalleryURL",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
infourl = xml_doc.xpath("//xs:ViewItemURL",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
ebaycategory = xml_doc.xpath("//xs:PrimaryCategory/xs:CategoryID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
category = xml_doc.xpath("//xs:StoreCategoryID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
sndcategory = xml_doc.xpath("//xs:StoreCategory2ID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
sellingstatus = xml_doc.xpath("//xs:SellingStatus/xs:ListingStatus",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
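        # Everything after the marker was added by this wizard on a previous
        # pass; keep only the seller's original description.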
topd = description.text.split("<!-- below is embeded code -->")
config['INITIAL'] = topd[0]
config['EXTRA'] = ShopInfo.all().filter("type =","ebay").order("name")
# save the item
iteminfo = {'refid':refid,'name':name
,'price':float(price),'cost':float(price),'galleryurl':galleryurl
,'infourl':infourl,'category':category,'sndcategory':sndcategory
,'description':topd[0],'ebayid':itemid,'ebaycategory':ebaycategory
,'specification':"{}"}
item = retailtype.getItem(refid)
supplier = getSupplierFromEbayInfo(ebayinfo)
if item:
iteminfo['specification'] = item.specification
# FIXME: We do not update galleryurl back to ebay gallery url at the moment.
# iteminfo['galleryurl'] = item.galleryurl
item.ebayid = itemid
supplier = item.parent()
zitem = supplier.saveItem(iteminfo)
config['ITEM'] = zitem
format = loader.get_template("ebay/format.html")
content = format.render(Context(config))
if (sellingstatus != "Completed"):
revise = ReviseItemSimple(item,token,content)
xml_doc = etree.parse(StringIO(revise))
ack = xml_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if('Success' in ack.text):
return (HttpResponse(revise,mimetype = "text/xml"),item)
else:
return (HttpResponse(revise,mimetype = "text/xml"),None)
else:
revise = RelistItemSimple(item,token,content)
xml_doc = etree.parse(StringIO(revise))
ack = xml_doc.xpath("//xs:Ack",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
if('Success' in ack.text):
ebayid = xml_doc.xpath("//xs:ItemID",
namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
                zitem.ebayid = ebayid
zitem.put()
return (HttpResponse(revise,mimetype = "text/xml"),item)
else:
return (HttpResponse(revise,mimetype = "text/xml"),None)
else:
return (HttpResponse(item,mimetype = "text/xml"),None)
####
# This function will append general information after the item description
# It will replace everything after the <!-- below is embeded code --> tag
####
def sync(ebayinfo,item):
token = ebayinfo['token']
id = ebayinfo['id']
config = {'SELLER_ID':id}
description = item.description
name = item.name
config['INITIAL'] = description
config['ITEM'] = item
config['EXTRA'] = ShopInfo.all().filter("type =","ebay").order("name")
format = loader.get_template("ebay/format.html")
content = format.render(Context(config))
if (not item.ebayid):
revise = ReviseItemBySKU(item.refid,name,token,content)
else:
revise = ReviseItem(item,token,content)
return HttpResponse(revise,mimetype = "text/xml")
def getactivelist(request):
token = getToken(request)
page = 1
if ("page" in request.GET):
page = int(request.GET['page'])
xml_doc = None
if token:
if 'itemid' in request.GET:
rid = request.GET['itemid']
iteminfo = GetItem(rid,token)
xml_doc = etree.parse(StringIO(iteminfo))
xslt = GetXSLT(Context({}),'xslt/MyeBaySelling.xslt')
list_content = etree.tostring(xslt(xml_doc.getroot()))
else:
my_ebay_selling = GetMyeBaySelling(token,page)
xml_doc = etree.parse(StringIO(my_ebay_selling))
total = xml_doc.xpath("//xs:TotalNumberOfPages",namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0]
            total = int(total.text)
xslt = GetXSLT(Context({'pages':range(total+1)[1:]}),'xslt/MyeBaySelling2.xslt')
list_content = etree.tostring(xslt(xml_doc.getroot()))
return list_content
else:
return None
def getinactivelist(request):
token = getToken(request)
page = 1
if ("page" in request.GET):
page = int(request.GET['page'])
xml_doc = None
if token:
if 'itemid' in request.GET:
rid = request.GET['itemid']
iteminfo = GetItem(rid,token)
xml_doc = etree.parse(StringIO(iteminfo))
xslt = GetXSLT(Context({}),'xslt/MyeBaySelling.xslt')
list_content = etree.tostring(xslt(xml_doc.getroot()))
else:
my_ebay_selling = GetMyeBaySellingInactive(token,page)
xml_doc = etree.parse(StringIO(my_ebay_selling))
total = xml_doc.xpath("//xs:TotalNumberOfPages",namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})
list_content = "" #none item if there is no TotalNumberOfPages provided
if(total):
                total = int(total[0].text)
xslt = GetXSLT(Context({'pages':range(total+1)[1:]}),'xslt/MyeBaySelling2.xslt')
list_content = etree.tostring(xslt(xml_doc.getroot()))
return list_content
else:
return None
@ebay_view_prefix
def fetchcategory(request):
query = request.GET['term']
token = getToken(request)
rslt = GetCategories(request,token,query)
xml_doc = etree.parse(StringIO(rslt))
suggests = xml_doc.xpath("//xs:SuggestedCategory",namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})
items = []
for suggest in suggests:
id = suggest.xpath("./xs:Category/xs:CategoryID",namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
label = suggest.xpath("./xs:Category/xs:CategoryName",namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})[0].text
label = [label]
parents = suggest.xpath("./xs:Category/xs:CategoryParentName",namespaces={'xs':"urn:ebay:apis:eBLBaseComponents"})
for parent in parents:
label.append(parent.text)
label = "->".join(label)
items.append({'label':label,'value':id})
return HttpResponse(json.dumps(items),mimetype="text/plain")
| zoyoe/ectool | zoyoeec/core/ebay.py | Python | gpl-2.0 | 18,098 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('Departamento', '0001_initial'),
('Persona', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tipo_status',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tipo_status', models.CharField(max_length=50)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Trabajador',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('codigo_trabajador', models.CharField(max_length=50, null=True)),
('fecha_ingreso', models.DateField()),
('fecha_retiro', models.DateField(blank=True)),
('cargo', models.ForeignKey(to='Departamento.Cargo')),
('persona', models.ForeignKey(to='Persona.Persona')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Trabajador_status',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('fecha_inicio_status', models.DateField(auto_now_add=True)),
('tipo_status', models.ForeignKey(to='Trabajador.Tipo_status')),
('trabajador', models.ForeignKey(to='Trabajador.Trabajador')),
],
options={
},
bases=(models.Model,),
),
]
| jrmendozat/mtvm | Trabajador/migrations/0001_initial.py | Python | gpl-2.0 | 1,826 |
#!/usr/bin/env python
# vim: expandtab sw=4 ts=4 sts=4:
#
# Copyright © 2003 - 2018 Michal Čihař <[email protected]>
#
# This file is part of python-gammu <https://wammu.eu/python-gammu/>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
import gammu
def main():
# Global debug level
gammu.SetDebugFile(sys.stderr)
gammu.SetDebugLevel("textall")
state_machine = gammu.StateMachine()
state_machine.ReadConfig()
    # Use the global debug stub regardless of the configuration
c = state_machine.GetConfig(0)
c["UseGlobalDebugFile"] = True
state_machine.SetConfig(0, c)
state_machine.Init()
manufacturer = state_machine.GetManufacturer()
model = state_machine.GetModel()
imei = state_machine.GetIMEI()
firmware = state_machine.GetFirmware()
print("Phone infomation:")
print("{:<15}: {}".format("Manufacturer", manufacturer))
print("{:<15}: {} ({})".format("Model", model[0], model[1]))
print("{:<15}: {}".format("IMEI", imei))
print("{:<15}: {}".format("Firmware", firmware[0]))
if __name__ == "__main__":
main()
| gammu/python-gammu | examples/debugging.py | Python | gpl-2.0 | 1,756 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_resized.forms
import core.core
import content.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Commit',
fields=[
('commitid', models.CharField(primary_key=True, serialize=False, default=core.core._createId, max_length=16)),
('body', models.TextField(default='', max_length=500)),
('body_html', models.TextField(null=True, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('show', models.BooleanField(default=True)),
],
options={
'ordering': ['-created'],
},
),
migrations.CreateModel(
name='Post',
fields=[
('postid', models.CharField(primary_key=True, serialize=False, default=core.core._createId, max_length=16)),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100)),
('body', models.TextField(max_length=3000)),
('body_html', models.TextField(null=True, blank=True)),
('image', django_resized.forms.ResizedImageField(null=True, upload_to=content.models.Post.get_image, blank=True)),
('draft', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('last_commited', models.DateTimeField(null=True)),
('commit_number', models.IntegerField(default=0)),
('show', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Sub',
fields=[
('slug', models.SlugField(primary_key=True, serialize=False, max_length=10)),
('image', models.ImageField(upload_to=content.models.Sub.get_image)),
('created', models.DateTimeField(auto_now_add=True)),
('last_commited', models.DateTimeField(auto_now_add=True)),
('follower_number', models.IntegerField(default=0)),
],
options={
'ordering': ['-last_commited'],
},
),
migrations.CreateModel(
name='SubFollow',
fields=[
('sub_followid', models.CharField(primary_key=True, serialize=False, blank=True, max_length=33)),
],
),
]
| ellipticaldoor/dfiid | project/content/migrations/0001_initial.py | Python | gpl-2.0 | 2,604 |
#XML-RPC Introspection Client -Chapter 8- xmlrpci.py
# You have to provide these introspection methods yourself; see the documentation if you are interested.
import xmlrpc.client, sys
url = 'http://localhost:51423'
s = xmlrpc.client.ServerProxy(url)
print('Gathering avilable methods...')
methods = s.listMethods()
while True:
print('\n\nAvailable Method:')
for i in range(len(methods)):
print('%2d: %s' % (i + 1, methods[i]))
selection = input('Select one(q to quit):')
if selection == 'q':
break
item = int(selection) - 1
print("Details for %s\n" % methods[item])
for sig in s.methodSignature(methods[item]):
print("Args: %s; Return: %s" % (", ".join(sig[1:]), sig[0]))
print("Help:", s.methodHelp(methods[item]))
| jiangfire/python-network-py3code | chapter8/xmlrpci.py | Python | gpl-2.0 | 743 |
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.db.models import get_model
from ..models import DispatchContainerRegister
from ..classes import ReturnController
@login_required
def return_items(request, **kwargs):
""" Return items from the producer to the source."""
msg = None
items = request.GET.get('items').split(',')
user_container_list = []
dispatch_container_register = DispatchContainerRegister.objects.get(id=items[0])
previous = dispatch_container_register
current = None
defaults = {}
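    # All selected containers must belong to a single producer; the first
    # item establishes the producer that the loop below checks against.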
container_model = get_model(dispatch_container_register.container_app_label, dispatch_container_register.container_model_name)
if not container_model:
raise TypeError('Dispatch Container model \'{0}\' does not exist. Got from DispatchContainerRegister of id \'{1}\'.'.format(dispatch_container_register.container_app_label+','+dispatch_container_register.container_model_name, dispatch_container_register.id))
for item in items:
current = DispatchContainerRegister.objects.get(id=item)
if current.producer != previous.producer:
raise TypeError('All items to be returned must be in the same producer. Got \'{0}\' and \'{1}\'.'.format(current, previous))
defaults[current.container_identifier_attrname] = current.container_identifier
user_container_list.append(container_model.objects.get(**defaults))
previous = current
defaults = {}
producer = current.producer
msg = ReturnController('default', producer.name).return_selected_items(user_container_list)
messages.add_message(request, messages.INFO, msg)
return render_to_response(
'return_items.html', {'producer': producer, },
context_instance=RequestContext(request)
)
| botswana-harvard/edc-dispatch | edc_dispatch/views/return_items.py | Python | gpl-2.0 | 1,951 |
################################################################################
# Copyright (C) 2015 Surfacingx #
# #
# This Program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2, or (at your option) #
# any later version. #
# #
# This Program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with XBMC; see the file COPYING. If not, write to #
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #
# http://www.gnu.org/copyleft/gpl.html #
################################################################################
import xbmc, xbmcaddon, xbmcgui, xbmcplugin, os, sys, xbmcvfs, glob
import shutil
import urllib2,urllib
import re
import uservar
from datetime import date, datetime, timedelta
from resources.libs import extract, downloader, notify, loginit, debridit, traktit, skinSwitch, uploadLog, wizard as wiz
ADDON_ID = uservar.ADDON_ID
ADDONTITLE = uservar.ADDONTITLE
ADDON = wiz.addonId(ADDON_ID)
VERSION = wiz.addonInfo(ADDON_ID,'version')
ADDONPATH = wiz.addonInfo(ADDON_ID,'path')
ADDONID = wiz.addonInfo(ADDON_ID,'id')
DIALOG = xbmcgui.Dialog()
DP = xbmcgui.DialogProgress()
HOME = xbmc.translatePath('special://home/')
PROFILE = xbmc.translatePath('special://profile/')
KODIHOME = xbmc.translatePath('special://xbmc/')
ADDONS = os.path.join(HOME, 'addons')
KODIADDONS = os.path.join(KODIHOME, 'addons')
USERDATA = os.path.join(HOME, 'userdata')
PLUGIN = os.path.join(ADDONS, ADDON_ID)
PACKAGES = os.path.join(ADDONS, 'packages')
ADDONDATA = os.path.join(USERDATA, 'addon_data', ADDON_ID)
FANART = os.path.join(ADDONPATH,'fanart.jpg')
ICON = os.path.join(ADDONPATH,'icon.png')
ART = os.path.join(ADDONPATH,'resources', 'art')
SKIN = xbmc.getSkinDir()
BUILDNAME = wiz.getS('buildname')
DEFAULTSKIN = wiz.getS('defaultskin')
DEFAULTNAME = wiz.getS('defaultskinname')
DEFAULTIGNORE = wiz.getS('defaultskinignore')
BUILDVERSION = wiz.getS('buildversion')
BUILDLATEST = wiz.getS('latestversion')
BUILDCHECK = wiz.getS('lastbuildcheck')
DISABLEUPDATE = wiz.getS('disableupdate')
AUTOCLEANUP = wiz.getS('autoclean')
AUTOCACHE = wiz.getS('clearcache')
AUTOPACKAGES = wiz.getS('clearpackages')
AUTOTHUMBS = wiz.getS('clearthumbs')
AUTOFEQ = wiz.getS('autocleanfeq')
AUTONEXTRUN = wiz.getS('nextautocleanup')
TRAKTSAVE = wiz.getS('traktlastsave')
REALSAVE = wiz.getS('debridlastsave')
LOGINSAVE = wiz.getS('loginlastsave')
KEEPTRAKT = wiz.getS('keeptrakt')
KEEPREAL = wiz.getS('keepdebrid')
KEEPLOGIN = wiz.getS('keeplogin')
INSTALLED = wiz.getS('installed')
EXTRACT = wiz.getS('extract')
EXTERROR = wiz.getS('errors')
NOTIFY = wiz.getS('notify')
NOTEDISMISS = wiz.getS('notedismiss')
NOTEID = wiz.getS('noteid')
BACKUPLOCATION = ADDON.getSetting('path') if not ADDON.getSetting('path') == '' else HOME
MYBUILDS = os.path.join(BACKUPLOCATION, 'The_One_Builds', '')
NOTEID = 0 if NOTEID == "" else int(NOTEID)
AUTOFEQ = int(AUTOFEQ) if AUTOFEQ.isdigit() else 0
TODAY = date.today()
TOMORROW = TODAY + timedelta(days=1)
TWODAYS = TODAY + timedelta(days=2)
THREEDAYS = TODAY + timedelta(days=3)
ONEWEEK = TODAY + timedelta(days=7)
KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
EXCLUDES = uservar.EXCLUDES
BUILDFILE = uservar.BUILDFILE
UPDATECHECK = uservar.UPDATECHECK if str(uservar.UPDATECHECK).isdigit() else 1
NEXTCHECK = TODAY + timedelta(days=UPDATECHECK)
NOTIFICATION = uservar.NOTIFICATION
ENABLE = uservar.ENABLE
HEADERMESSAGE = uservar.HEADERMESSAGE
AUTOUPDATE = uservar.AUTOUPDATE
WIZARDFILE = uservar.WIZARDFILE
AUTOINSTALL = uservar.AUTOINSTALL
REPOID = uservar.REPOID
REPOADDONXML = uservar.REPOADDONXML
REPOZIPURL = uservar.REPOZIPURL
COLOR1 = uservar.COLOR1
COLOR2 = uservar.COLOR2
WORKING = True if wiz.workingURL(BUILDFILE) == True else False
FAILED = False
###########################
#### Check Updates ######
###########################
def checkUpdate():
BUILDNAME = wiz.getS('buildname')
BUILDVERSION = wiz.getS('buildversion')
link = wiz.openURL(BUILDFILE).replace('\n','').replace('\r','').replace('\t','')
match = re.compile('name="%s".+?ersion="(.+?)".+?con="(.+?)".+?anart="(.+?)"' % BUILDNAME).findall(link)
if len(match) > 0:
version = match[0][0]
icon = match[0][1]
fanart = match[0][2]
wiz.setS('latestversion', version)
if version > BUILDVERSION:
if DISABLEUPDATE == 'false':
wiz.log("[Check Updates] [Installed Version: %s] [Current Version: %s] Opening Update Window" % (BUILDVERSION, version), xbmc.LOGNOTICE)
notify.updateWindow(BUILDNAME, BUILDVERSION, version, icon, fanart)
else: wiz.log("[Check Updates] [Installed Version: %s] [Current Version: %s] Update Window Disabled" % (BUILDVERSION, version), xbmc.LOGNOTICE)
else: wiz.log("[Check Updates] [Installed Version: %s] [Current Version: %s]" % (BUILDVERSION, version), xbmc.LOGNOTICE)
else: wiz.log("[Check Updates] ERROR: Unable to find build version in build text file", xbmc.LOGERROR)
def checkSkin():
wiz.log("[Build Check] Invalid Skin Check Start")
DEFAULTSKIN = wiz.getS('defaultskin')
DEFAULTNAME = wiz.getS('defaultskinname')
DEFAULTIGNORE = wiz.getS('defaultskinignore')
gotoskin = False
if not DEFAULTSKIN == '':
if os.path.exists(os.path.join(ADDONS, DEFAULTSKIN)):
if DIALOG.yesno(ADDONTITLE, "[COLOR %s]It seems that the skin has been set back to [COLOR %s]%s[/COLOR]" % (COLOR2, COLOR1, SKIN[5:].title()), "Would you like to set the skin back to:[/COLOR]", '[COLOR %s]%s[/COLOR]' % (COLOR1, DEFAULTNAME)):
gotoskin = DEFAULTSKIN
gotoname = DEFAULTNAME
else: wiz.log("Skin was not reset", xbmc.LOGNOTICE); wiz.setS('defaultskinignore', 'true'); gotoskin = False
else: wiz.setS('defaultskin', ''); wiz.setS('defaultskinname', ''); DEFAULTSKIN = ''; DEFAULTNAME = ''
if DEFAULTSKIN == '':
skinname = []
skinlist = []
for folder in glob.glob(os.path.join(ADDONS, 'skin.*/')):
xml = "%s/addon.xml" % folder
if os.path.exists(xml):
f = open(xml,mode='r'); g = f.read().replace('\n','').replace('\r','').replace('\t',''); f.close();
match = wiz.parseDOM(g, 'addon', ret='id')
match2 = wiz.parseDOM(g, 'addon', ret='name')
wiz.log("%s: %s" % (folder, str(match[0])), xbmc.LOGNOTICE)
if len(match) > 0: skinlist.append(str(match[0])); skinname.append(str(match2[0]))
else: wiz.log("ID not found for %s" % folder, xbmc.LOGNOTICE)
else: wiz.log("ID not found for %s" % folder, xbmc.LOGNOTICE)
if len(skinlist) > 0:
if len(skinlist) > 1:
                if DIALOG.yesno(ADDONTITLE, "[COLOR %s]It seems that the skin has been set back to [COLOR %s]%s[/COLOR]" % (COLOR2, COLOR1, SKIN[5:].title()), "Would you like to view a list of available skins?[/COLOR]"):
choice = DIALOG.select("Select skin to switch to!", skinname)
if choice == -1: wiz.log("Skin was not reset", xbmc.LOGNOTICE); wiz.setS('defaultskinignore', 'true')
else:
gotoskin = skinlist[choice]
gotoname = skinname[choice]
else: wiz.log("Skin was not reset", xbmc.LOGNOTICE); wiz.setS('defaultskinignore', 'true')
else:
if DIALOG.yesno(ADDONTITLE, "[COLOR %s]It seems that the skin has been set back to [COLOR %s]%s[/COLOR]" % (COLOR2, COLOR1, SKIN[5:].title()), "Would you like to set the skin back to:[/COLOR]", '[COLOR %s]%s[/COLOR]' % (COLOR1, skinname[0])):
gotoskin = skinlist[0]
gotoname = skinname[0]
else: wiz.log("Skin was not reset", xbmc.LOGNOTICE); wiz.setS('defaultskinignore', 'true')
else: wiz.log("No skins found in addons folder.", xbmc.LOGNOTICE); wiz.setS('defaultskinignore', 'true'); gotoskin = False
if gotoskin:
skinSwitch.swapSkins(gotoskin)
x = 0
xbmc.sleep(1000)
while not xbmc.getCondVisibility("Window.isVisible(yesnodialog)") and x < 150:
x += 1
xbmc.sleep(200)
if xbmc.getCondVisibility("Window.isVisible(yesnodialog)"):
wiz.ebi('SendClick(11)')
wiz.lookandFeelData('restore')
else: wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, ADDONTITLE),'[COLOR %s]Skin Swap Timed Out![/COLOR]' % COLOR2)
wiz.log("[Build Check] Invalid Skin Check End", xbmc.LOGNOTICE)
while xbmc.Player().isPlayingVideo():
xbmc.sleep(1000)
if KODIV >= 17:
NOW = datetime.now()
temp = wiz.getS('kodi17iscrap')
if not temp == '':
if temp > str(NOW - timedelta(minutes=2)):
wiz.log("Killing Start Up Script")
sys.exit()
wiz.log("%s" % (NOW))
wiz.setS('kodi17iscrap', str(NOW))
xbmc.sleep(1000)
if not wiz.getS('kodi17iscrap') == str(NOW):
wiz.log("Killing Start Up Script")
sys.exit()
else:
wiz.log("Continuing Start Up Script")
wiz.log("[Path Check] Started", xbmc.LOGNOTICE)
path = os.path.split(ADDONPATH)
if not ADDONID == path[1]: DIALOG.ok(ADDONTITLE, '[COLOR %s]Please make sure that the plugin folder is the same as the ADDON_ID.[/COLOR]' % COLOR2, '[COLOR %s]Plugin ID:[/COLOR] [COLOR %s]%s[/COLOR]' % (COLOR2, COLOR1, ADDONID), '[COLOR %s]Plugin Folder:[/COLOR] [COLOR %s]%s[/COLOR]' % (COLOR2, COLOR1, path)); wiz.log("[Path Check] ADDON_ID and plugin folder do not match. %s / %s " % (ADDONID, path))
else: wiz.log("[Path Check] Good!", xbmc.LOGNOTICE)
if KODIADDONS in ADDONPATH:
wiz.log("Copying path to addons dir", xbmc.LOGNOTICE)
if not os.path.exists(ADDONS): os.makedirs(ADDONS)
newpath = xbmc.translatePath(os.path.join('special://home/addons/', ADDONID))
if os.path.exists(newpath):
wiz.log("Folder already exists, cleaning House", xbmc.LOGNOTICE)
wiz.cleanHouse(newpath)
wiz.removeFolder(newpath)
try:
wiz.copytree(ADDONPATH, newpath)
except Exception, e:
pass
wiz.forceUpdate(True)
try:
mybuilds = xbmc.translatePath(MYBUILDS)
if not os.path.exists(mybuilds): xbmcvfs.mkdirs(mybuilds)
except:
pass
wiz.log("[Auto Install Repo] Started", xbmc.LOGNOTICE)
if AUTOINSTALL == 'Yes' and not os.path.exists(os.path.join(ADDONS, REPOID)):
workingxml = wiz.workingURL(REPOADDONXML)
if workingxml == True:
ver = wiz.parseDOM(wiz.openURL(REPOADDONXML), 'addon', ret='version', attrs = {'id': REPOID})
if len(ver) > 0:
installzip = '%s-%s.zip' % (REPOID, ver[0])
workingrepo = wiz.workingURL(REPOZIPURL+installzip)
if workingrepo == True:
DP.create(ADDONTITLE,'Downloading Repo...','', 'Please Wait')
if not os.path.exists(PACKAGES): os.makedirs(PACKAGES)
lib=os.path.join(PACKAGES, installzip)
try: os.remove(lib)
except: pass
downloader.download(REPOZIPURL+installzip,lib, DP)
extract.all(lib, ADDONS, DP)
try:
f = open(os.path.join(ADDONS, REPOID, 'addon.xml'), mode='r'); g = f.read(); f.close()
name = wiz.parseDOM(g, 'addon', ret='name', attrs = {'id': REPOID})
wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, name[0]), "[COLOR %s]Add-on updated[/COLOR]" % COLOR2, icon=os.path.join(ADDONS, REPOID, 'icon.png'))
except:
pass
if KODIV >= 17: wiz.addonDatabase(REPOID, 1)
DP.close()
xbmc.sleep(500)
wiz.forceUpdate(True)
wiz.log("[Auto Install Repo] Successfully Installed", xbmc.LOGNOTICE)
else:
wiz.LogNotify("[COLOR %s]Repo Install Error[/COLOR]" % COLOR1, "[COLOR %s]Invalid url for zip![/COLOR]" % COLOR2)
wiz.log("[Auto Install Repo] Was unable to create a working url for repository. %s" % workingrepo, xbmc.LOGERROR)
else:
wiz.log("Invalid URL for Repo Zip", xbmc.LOGERROR)
else:
wiz.LogNotify("[COLOR %s]Repo Install Error[/COLOR]" % COLOR1, "[COLOR %s]Invalid addon.xml file![/COLOR]" % COLOR2)
wiz.log("[Auto Install Repo] Unable to read the addon.xml file.", xbmc.LOGERROR)
elif not AUTOINSTALL == 'Yes': wiz.log("[Auto Install Repo] Not Enabled", xbmc.LOGNOTICE)
elif os.path.exists(os.path.join(ADDONS, REPOID)): wiz.log("[Auto Install Repo] Repository already installed")
wiz.log("[Auto Update Wizard] Started", xbmc.LOGNOTICE)
if AUTOUPDATE == 'Yes':
wiz.wizardUpdate('startup')
else: wiz.log("[Auto Update Wizard] Not Enabled", xbmc.LOGNOTICE)
wiz.log("[Notifications] Started", xbmc.LOGNOTICE)
if ENABLE == 'Yes':
if not NOTIFY == 'true':
url = wiz.workingURL(NOTIFICATION)
if url == True:
id, msg = wiz.splitNotify(NOTIFICATION)
if not id == False:
try:
id = int(id); NOTEID = int(NOTEID)
if id == NOTEID:
if NOTEDISMISS == 'false':
notify.notification(msg)
else: wiz.log("[Notifications] id[%s] Dismissed" % int(id), xbmc.LOGNOTICE)
elif id > NOTEID:
wiz.log("[Notifications] id: %s" % str(id), xbmc.LOGNOTICE)
wiz.setS('noteid', str(id))
wiz.setS('notedismiss', 'false')
notify.notification(msg=msg)
wiz.log("[Notifications] Complete", xbmc.LOGNOTICE)
except Exception, e:
wiz.log("Error on Notifications Window: %s" % str(e), xbmc.LOGERROR)
else: wiz.log("[Notifications] Text File not formated Correctly")
else: wiz.log("[Notifications] URL(%s): %s" % (NOTIFICATION, url), xbmc.LOGNOTICE)
else: wiz.log("[Notifications] Turned Off", xbmc.LOGNOTICE)
else: wiz.log("[Notifications] Not Enabled", xbmc.LOGNOTICE)
wiz.log("[Installed Check] Started", xbmc.LOGNOTICE)
if INSTALLED == 'true':
if KODIV >= 17:
wiz.kodi17Fix()
if SKIN in ['skin.confluence', 'skin.estuary']:
checkSkin()
FAILED = True
elif not EXTRACT == '100' and not BUILDNAME == "":
wiz.log("[Installed Check] Build was extracted %s/100 with [ERRORS: %s]" % (EXTRACT, EXTERROR), xbmc.LOGNOTICE)
yes=DIALOG.yesno(ADDONTITLE, '[COLOR %s]%s[/COLOR] [COLOR %s]was not installed correctly!' % (COLOR1, COLOR2, BUILDNAME), 'Installed: [COLOR %s]%s[/COLOR] / Error Count: [COLOR %s]%s[/COLOR]' % (COLOR1, EXTRACT, COLOR1, EXTERROR), 'Would you like to try again?[/COLOR]', nolabel='[B]No Thanks![/B]', yeslabel='[B]Retry Install[/B]')
wiz.clearS('build')
FAILED = True
if yes:
wiz.ebi("PlayMedia(plugin://%s/?mode=install&name=%s&url=fresh)" % (ADDON_ID, urllib.quote_plus(BUILDNAME)))
wiz.log("[Installed Check] Fresh Install Re-activated", xbmc.LOGNOTICE)
else: wiz.log("[Installed Check] Reinstall Ignored")
elif SKIN in ['skin.confluence', 'skin.estuary']:
wiz.log("[Installed Check] Incorrect skin: %s" % SKIN, xbmc.LOGNOTICE)
defaults = wiz.getS('defaultskin')
if not defaults == '':
if os.path.exists(os.path.join(ADDONS, defaults)):
skinSwitch.swapSkins(defaults)
x = 0
xbmc.sleep(1000)
while not xbmc.getCondVisibility("Window.isVisible(yesnodialog)") and x < 150:
x += 1
xbmc.sleep(200)
if xbmc.getCondVisibility("Window.isVisible(yesnodialog)"):
wiz.ebi('SendClick(11)')
wiz.lookandFeelData('restore')
if not wiz.currSkin() == defaults and not BUILDNAME == "":
gui = wiz.checkBuild(BUILDNAME, 'gui')
FAILED = True
if gui == 'http://':
wiz.log("[Installed Check] Guifix was set to http://", xbmc.LOGNOTICE)
                DIALOG.ok(ADDONTITLE, "[COLOR %s]It looks like the skin settings were not applied to the build." % COLOR2, "Sadly no GUI fix was attached to the build", "You will need to reinstall the build and make sure to do a force close[/COLOR]")
elif wiz.workingURL(gui):
                yes=DIALOG.yesno(ADDONTITLE, '%s was not installed correctly!' % BUILDNAME, 'It looks like the skin settings were not applied to the build.', 'Would you like to apply the GuiFix?', nolabel='[B]No, Cancel[/B]', yeslabel='[B]Apply Fix[/B]')
if yes: wiz.ebi("PlayMedia(plugin://%s/?mode=install&name=%s&url=gui)" % (ADDON_ID, urllib.quote_plus(BUILDNAME))); wiz.log("[Installed Check] Guifix attempting to install")
else: wiz.log('[Installed Check] Guifix url working but cancelled: %s' % gui, xbmc.LOGNOTICE)
else:
                DIALOG.ok(ADDONTITLE, "[COLOR %s]It looks like the skin settings were not applied to the build." % COLOR2, "Sadly no GUI fix was attached to the build", "You will need to reinstall the build and make sure to do a force close[/COLOR]")
wiz.log('[Installed Check] Guifix url not working: %s' % gui, xbmc.LOGNOTICE)
else:
wiz.log('[Installed Check] Install seems to be completed correctly', xbmc.LOGNOTICE)
if not wiz.getS('pvrclient') == "":
wiz.toggleAddon(wiz.getS('pvrclient'), 1)
wiz.ebi('StartPVRManager')
wiz.addonUpdates('reset')
if KEEPTRAKT == 'true': traktit.traktIt('restore', 'all'); wiz.log('[Installed Check] Restoring Trakt Data', xbmc.LOGNOTICE)
if KEEPREAL == 'true': debridit.debridIt('restore', 'all'); wiz.log('[Installed Check] Restoring Real Debrid Data', xbmc.LOGNOTICE)
if KEEPLOGIN == 'true': loginit.loginIt('restore', 'all'); wiz.log('[Installed Check] Restoring Login Data', xbmc.LOGNOTICE)
wiz.clearS('install')
else: wiz.log("[Installed Check] Not Enabled", xbmc.LOGNOTICE)
if FAILED == False:
wiz.log("[Build Check] Started", xbmc.LOGNOTICE)
if not WORKING:
wiz.log("[Build Check] Not a valid URL for Build File: %s" % BUILDFILE, xbmc.LOGNOTICE)
elif BUILDCHECK == '' and BUILDNAME == '':
wiz.log("[Build Check] First Run", xbmc.LOGNOTICE)
notify.firstRunSettings()
xbmc.sleep(500)
notify.firstRun()
xbmc.sleep(500)
wiz.setS('lastbuildcheck', str(NEXTCHECK))
elif not BUILDNAME == '':
wiz.log("[Build Check] Build Installed", xbmc.LOGNOTICE)
if SKIN in ['skin.confluence', 'skin.estuary'] and not DEFAULTIGNORE == 'true':
checkSkin()
wiz.log("[Build Check] Build Installed: Checking Updates", xbmc.LOGNOTICE)
wiz.setS('lastbuildcheck', str(NEXTCHECK))
checkUpdate()
elif BUILDCHECK <= str(TODAY):
wiz.log("[Build Check] Build Installed: Checking Updates", xbmc.LOGNOTICE)
wiz.setS('lastbuildcheck', str(NEXTCHECK))
checkUpdate()
else:
wiz.log("[Build Check] Build Installed: Next check isnt until: %s / TODAY is: %s" % (BUILDCHECK, str(TODAY)), xbmc.LOGNOTICE)
wiz.log("[Trakt Data] Started", xbmc.LOGNOTICE)
if KEEPTRAKT == 'true':
if TRAKTSAVE <= str(TODAY):
wiz.log("[Trakt Data] Saving all Data", xbmc.LOGNOTICE)
traktit.autoUpdate('all')
wiz.setS('traktlastsave', str(THREEDAYS))
else:
wiz.log("[Trakt Data] Next Auto Save isnt until: %s / TODAY is: %s" % (TRAKTSAVE, str(TODAY)), xbmc.LOGNOTICE)
else: wiz.log("[Trakt Data] Not Enabled", xbmc.LOGNOTICE)
wiz.log("[Real Debrid Data] Started", xbmc.LOGNOTICE)
if KEEPREAL == 'true':
if REALSAVE <= str(TODAY):
wiz.log("[Real Debrid Data] Saving all Data", xbmc.LOGNOTICE)
debridit.autoUpdate('all')
wiz.setS('debridlastsave', str(THREEDAYS))
else:
wiz.log("[Real Debrid Data] Next Auto Save isnt until: %s / TODAY is: %s" % (REALSAVE, str(TODAY)), xbmc.LOGNOTICE)
else: wiz.log("[Real Debrid Data] Not Enabled", xbmc.LOGNOTICE)
wiz.log("[Login Data] Started", xbmc.LOGNOTICE)
if KEEPLOGIN == 'true':
if LOGINSAVE <= str(TODAY):
wiz.log("[Login Data] Saving all Data", xbmc.LOGNOTICE)
loginit.autoUpdate('all')
wiz.setS('loginlastsave', str(THREEDAYS))
else:
wiz.log("[Login Data] Next Auto Save isnt until: %s / TODAY is: %s" % (LOGINSAVE, str(TODAY)), xbmc.LOGNOTICE)
else: wiz.log("[Login Data] Not Enabled", xbmc.LOGNOTICE)
wiz.log("[Auto Clean Up] Started", xbmc.LOGNOTICE)
if AUTOCLEANUP == 'true':
service = False
days = [TODAY, TOMORROW, THREEDAYS, ONEWEEK]
feq = int(float(AUTOFEQ))
if AUTONEXTRUN <= str(TODAY) or feq == 0:
service = True
next_run = days[feq]
wiz.setS('nextautocleanup', str(next_run))
else: wiz.log("[Auto Clean Up] Next Clean Up %s" % AUTONEXTRUN, xbmc.LOGNOTICE)
if service == True:
AUTOCACHE = wiz.getS('clearcache')
AUTOPACKAGES = wiz.getS('clearpackages')
AUTOTHUMBS = wiz.getS('clearthumbs')
if AUTOCACHE == 'true': wiz.log('[Auto Clean Up] Cache: On', xbmc.LOGNOTICE); wiz.clearCache(True)
else: wiz.log('[Auto Clean Up] Cache: Off', xbmc.LOGNOTICE)
if AUTOTHUMBS == 'true': wiz.log('[Auto Clean Up] Old Thumbs: On', xbmc.LOGNOTICE); wiz.oldThumbs()
else: wiz.log('[Auto Clean Up] Old Thumbs: Off', xbmc.LOGNOTICE)
if AUTOPACKAGES == 'true': wiz.log('[Auto Clean Up] Packages: On', xbmc.LOGNOTICE); wiz.clearPackagesStartup()
else: wiz.log('[Auto Clean Up] Packages: Off', xbmc.LOGNOTICE)
else: wiz.log('[Auto Clean Up] Turned off', xbmc.LOGNOTICE)
wiz.setS('kodi17iscrap', '') | theonekodi/repo.theone | plugin.program.theone.wizard/startup.py | Python | gpl-2.0 | 21,168 |
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
import os
class Nvme(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""Collect config and system information about NVMe devices"""
plugin_name = "nvme"
packages = ('nvme-cli',)
def get_nvme_devices(self):
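        """Return block device names starting with 'nvme' (e.g. nvme0n1)."""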
sys_block = os.listdir('/sys/block/')
return [dev for dev in sys_block if dev.startswith('nvme')]
def setup(self):
for dev in self.get_nvme_devices():
# runs nvme-cli commands
self.add_cmd_output([
"nvme list",
"nvme list-ns /dev/%s" % dev,
"nvme fw-log /dev/%s" % dev,
"nvme list-ctrl /dev/%s" % dev,
"nvme id-ctrl -H /dev/%s" % dev,
"nvme id-ns -H /dev/%s" % dev,
"nvme smart-log /dev/%s" % dev,
"nvme error-log /dev/%s" % dev,
"nvme show-regs /dev/%s" % dev])
# vim: set et ts=4 sw=4 :
| nijinashok/sos | sos/plugins/nvme.py | Python | gpl-2.0 | 1,479 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Default configuration of the SSE module."""
from __future__ import absolute_import, print_function
SSE_REDIS_URL = 'redis://localhost:6379/0'
"""Redis URL used to push and read the messages.
It should be in the form ``redis://username:password@host:port/db_index``.
"""
| egabancho/invenio-sse | invenio_sse/config.py | Python | gpl-2.0 | 1,248 |
characters = [
    {'x': 13.1015625, 'id': '', 'direction': 'W', 'ondeath': [], 'y': 8.0859375, 'name': '139'},
    {'x': 11.765625, 'id': '', 'direction': 'NW', 'ondeath': ['ADD', 'item', 'Health Pill'], 'y': 3.359375, 'name': '249'},
    {'x': 16.60522421789796, 'id': '', 'random_walk_area': [14.0, 24.0, 5.0, 5.0], 'ondeath': ['REPLACE', 'obstacle', 'boss_gate', '336'], 'y': 26.536870095625797, 'name': '821'},
    {'x': 23.244164052208422, 'id': '', 'direction': 'NE', 'random_walk_area': [22.0, 22.0, 6.0, 6.0], 'ondeath': ['ACTIVATE', 'trigger', 'map3teleport'], 'y': 25.063066733669345, 'name': '999'},
    {'x': 7.7421875, 'id': '', 'direction': 'W', 'ondeath': [], 'y': 1.6953125, 'name': 'GUA'},
    {'name': '615', 'x': 2.984375, 'id': '', 'y': 13.953125, 'ondeath': []},
]
| lumidify/fahrenheit451 | maps/MapBook2/characters.py | Python | gpl-2.0 | 759 |
import random
import time
from NaoModule import NaoModule
class MoodModule(NaoModule):
# -------------------------------------
# Setup Module
# -------------------------------------
def __init__(self, name):
NaoModule.__init__(self, name)
self.handles = dict()
# get Proxies
self.getHandle("ALMemory", True)
self.getHandle("ALMotion")
self.getHandle("leds")
self.getHandle("ALBasicAwareness")
self.getHandle("ALSpeechRecognition")
self.getHandle("ALRobotPosture")
#setup proxy dependant stuff
self.setupMemory()
self.setupASR()
self.setupBasicAwareness()
self.posture = self.handles["ALRobotPosture"]
self.blink_frequency = 500 #ms
self.is_blinking = True
def setupMemory(self):
if self.hasHandle("ALMemory"):
memory = self.handles["ALMemory"]
memory.subscribeToEvent("emoBlink", self.name, "blinkingCallback")
memory.subscribeToEvent("ALSpeechRecognition/Status", self.name,\
"SpeechStatusCallback")
memory.subscribeToEvent("WordRecognized", self.name, "WordRecognizedCallback")
else:
self.logger.debug("Not setting up any callbacks")
def setupBasicAwareness(self):
if self.hasHandle("ALBasicAwareness"):
baware = self.handles["ALBasicAwareness"]
baware.setEngagementMode("FullyEngaged")
baware.setTrackingMode("Head")
baware.setStimulusDetectionEnabled("Sound", True)
baware.setStimulusDetectionEnabled("Movement", True)
baware.setStimulusDetectionEnabled("People", True)
baware.setStimulusDetectionEnabled("Touch", False)
else:
self.logger.debug("Not setting up Basic Awareness")
def setupASR(self):
if self.hasHandle("ALSpeechRecognition"):
asr = self.handles["ALSpeechRecognition"]
asr.setVisualExpression(False)
asr.setAudioExpression(False)
else:
self.logger.debug("Not setting up Speech Recognition")
# -------------------------------------
# Callbacks
# -------------------------------------
def blinkingCallback(self, event_name, blink_frequency):
""" Make Nao Blink in whatever color was set"""
self.blink_frequency = blink_frequency
self.handles["leds"].blink(0.2)
random_delay = random.random() * blink_frequency / 2.0
time.sleep((random_delay + blink_frequency) / 1000.0)
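        # Re-raise the event to schedule the next blink; the random delay
        # keeps the blinking from looking mechanical.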
if self.is_blinking:
self.handles["ALMemory"].raiseEvent("emoBlink", blink_frequency)
def WordRecognizedCallback(self, eventName, value):
""" If a word was recognized either shine green (understood)
or flash red (not understood)"""
self.handles["ALMemory"].unsubscribeToEvent("WordRecognized", self.name)
self.logger.debug("Word Recognized Triggered with confidence %s", value[1])
if float(value[1]) > 0.5:
self.handles["leds"].set_eyes('g')
self.handles["leds"].eyes_on()
self.logger.debug("I have understood.")
elif float(value[1]) > 0.20:
self.handles["leds"].set_eyes('r')
self.handles["leds"].eyes_on()
self.logger.debug("Eyes Have flashed.")
time.sleep(0.5)
self.handles["leds"].set_eyes('w')
self.handles["leds"].eyes_on()
self.handles["ALMemory"].subscribeToEvent("WordRecognized", self.name, "WordRecognizedCallback")
def SpeechStatusCallback(self, eventName, status):
""" Report speech through ears only """
if status == "Idle":
pass
elif status == "ListenOn":
pass
elif status == "SpeechDetected":
self.handles["leds"].ears_on()
elif status == "EndOfProcess":
self.handles["leds"].ears_off()
elif status == "ListenOff":
pass
elif status == "Stop":
pass
# -------------------------------------
# Overwritten from NaoModule
# -------------------------------------
def __enter__(self):
if self.hasAllHandles(["ALBasicAwareness", "ALMotion"]):
self.posture.goToPosture("Stand", 0.5)
self.handles["ALBasicAwareness"].startAwareness()
return self
def __exit__(self, exec_type, exec_value, traceback):
self.is_blinking = False
time.sleep(1)
self.handles["leds"].eyes_on()
if self.hasAllHandles(["ALBasicAwareness", "ALMotion"]):
self.handles["ALBasicAwareness"].stopAwareness()
self.handles["ALMotion"].rest()
| FirefoxMetzger/naoProject | src/MoodModule.py | Python | gpl-2.0 | 4,833 |
__title__ = 'dash.exceptions'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = 'Copyright (c) 2013-2014 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = (
'BaseException', 'InvalidRegistryItemType', 'LayoutDoesNotExist',
    'NoActiveLayoutChosen', 'PluginWidgetOutOfPlaceholderBoundaries',
'ImproperlyConfigured',
)
class BaseException(Exception):
"""
Base django-dash exception.
"""
class InvalidRegistryItemType(ValueError):
"""
Raised when an attempt is made to register an item in the registry which does not have a proper type.
"""
class LayoutDoesNotExist(BaseException):
"""
Raised when layout does not exist.
"""
class NoActiveLayoutChosen(BaseException):
"""
Raised when no active layout is chosen.
"""
class PluginWidgetOutOfPlaceholderBoundaries(BaseException):
"""
Raised when plugin widget is out of placeholder boundaries.
"""
class ImproperlyConfigured(BaseException):
"""
Raised when ``django-dash`` is somehow improperly configured.
"""
| georgistanev/django-dash | src/dash/exceptions.py | Python | gpl-2.0 | 1,080 |
#!/usr/bin/env python3
import sys
import yaml
with open('hardware.yaml') as f:
    hw = yaml.safe_load(f.read())  # safe_load avoids constructing arbitrary Python objects
driver_submenus = \
hw['lirc']['main_menu']['submenus']['driver_select']['submenus']
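# Tag each remote with the driver-select submenu it appears under so the
# menu name is written into the generated .conf file below.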
for menu in driver_submenus:
try:
for item in driver_submenus[menu]['submenus']:
hw['lirc']['remotes'][item]['menu'] = menu
except KeyError:
continue
for remote in hw['lirc']['remotes']:
path = remote + '.conf'
with open(path, 'w') as f:
f.write("# This is a lirc configuration for a capture device.\n")
f.write("# See README.conf for more.\n")
f.write("\n")
f.write("config:\n")
hw['lirc']['remotes'][remote]['id'] = remote
for key in sorted(hw['lirc']['remotes'][remote]):
value = hw['lirc']['remotes'][remote][key]
if key == 'device':
if value.startswith('run_select_usb_tty'):
value = '/dev/ttyUSB*'
elif value.startswith('run_select_tty'):
value = '/dev/tty[0-9]*'
s = " %-16s%s\n" % (key + ':', value)
f.write(s)
| matzrh/lirc | configs/split_hw.py | Python | gpl-2.0 | 1,139 |
from __future__ import unicode_literals
from django.db import models
class Model1(models.Model):
field1 = models.CharField(max_length=42)
field2 = models.TextField()
class Model2(models.Model):
field1 = models.CharField(max_length=42)
| novafloss/django-MacFly | testproject/testproject/apps/testapp/models.py | Python | gpl-2.0 | 251 |
"""
Repositories in cobbler are a way to create a local mirror of a yum repository.
When used in conjunction with a mirrored kickstart tree (see "cobbler import")
outside bandwidth needs can be reduced and/or eliminated.
Copyright 2006, Red Hat, Inc
Michael DeHaan <[email protected]>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import item_repo as repo
import utils
import collection
from cexceptions import *
from utils import _
TESTMODE = False
#--------------------------------------------
class Repos(collection.Collection):
def collection_type(self):
return "repo"
def factory_produce(self,config,seed_data):
"""
Return a system forged from seed_data
"""
return repo.Repo(config).from_datastruct(seed_data)
def remove(self,name,with_delete=True,with_sync=True,with_triggers=True):
"""
Remove element named 'name' from the collection
"""
# NOTE: with_delete isn't currently meaningful for repos
# but is left in for consistancy in the API. Unused.
name = name.lower()
obj = self.find(name=name)
if obj is not None:
if with_delete:
if with_triggers:
self._run_triggers(obj, "/var/lib/cobbler/triggers/delete/repo/pre/*")
del self.listing[name]
self.config.serialize_delete(self, obj)
if with_delete:
self.log_func("deleted repo %s" % name)
if with_triggers:
self._run_triggers(obj, "/var/lib/cobbler/triggers/delete/repo/post/*")
return True
raise CX(_("cannot delete an object that does not exist: %s") % name)
| brenton/cobbler | cobbler/collection_repos.py | Python | gpl-2.0 | 1,942 |
from __future__ import absolute_import, unicode_literals
from oauthlib.oauth1 import RequestTokenEndpoint, AuthorizationEndpoint, ResourceEndpoint, AccessTokenEndpoint
from oauthlib.common import Request
class AuthProvider(RequestTokenEndpoint, AuthorizationEndpoint,
AccessTokenEndpoint, ResourceEndpoint):
def __init__(self, request_validator):
RequestTokenEndpoint.__init__(self, request_validator)
AuthorizationEndpoint.__init__(self, request_validator)
AccessTokenEndpoint.__init__(self, request_validator)
ResourceEndpoint.__init__(self, request_validator)
def verify_authorize_request(self, request_token):
return self.request_validator.verify_authorize(request_token)
def authorize_request_token(self, request_token, user):
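        # Generate a verifier and bind it to the request token; the bare
        # Request object only carries the resource owner for the validator.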
verifier = self.token_generator()
request = Request('')
request.resource_owner_key = user
return self.request_validator.save_verifier(request_token, verifier, request)
def verify_authorize_submission(self, request_token, user_email):
user = self.request_validator.verify_user_email(user_email)
token_and_consumer = self.request_validator.verify_authorize(request_token)
if not user or not token_and_consumer:
return None
token, consumer = token_and_consumer
return user, token, consumer
| stacksync/auth | stacksync_oauth/provider.py | Python | gpl-2.0 | 1,384 |
# Umount.py -- Unmount file system on clients
# Copyright (C) 2007-2015 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""
Shine `umount' command classes.
The umount command aims to stop Lustre filesystem clients.
"""
from __future__ import print_function
# Command base class
from Shine.Commands.Base.FSLiveCommand import FSLiveCommand
from Shine.Commands.Base.CommandRCDefs import RC_OK, \
RC_FAILURE, RC_TARGET_ERROR, \
RC_CLIENT_ERROR, RC_RUNTIME_ERROR
# Lustre events
from Shine.Commands.Base.FSEventHandler import FSGlobalEventHandler, \
FSLocalEventHandler
from Shine.Lustre.FileSystem import MOUNTED, RECOVERING, OFFLINE, \
TARGET_ERROR, CLIENT_ERROR, RUNTIME_ERROR
class Umount(FSLiveCommand):
"""
shine umount
"""
NAME = "umount"
DESCRIPTION = "Unmount file system clients."
GLOBAL_EH = FSGlobalEventHandler
LOCAL_EH = FSLocalEventHandler
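    # Map the final component states to the command's exit code.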
TARGET_STATUS_RC_MAP = { \
MOUNTED : RC_FAILURE,
RECOVERING : RC_FAILURE,
OFFLINE : RC_OK,
TARGET_ERROR : RC_TARGET_ERROR,
CLIENT_ERROR : RC_CLIENT_ERROR,
RUNTIME_ERROR : RC_RUNTIME_ERROR }
def execute_fs(self, fs, fs_conf, eh, vlevel):
# Warn if trying to act on wrong nodes
comps = fs.components.managed(supports='umount')
if not self.check_valid_list(fs.fs_name, comps.servers(), "unmount"):
return RC_FAILURE
        # Call the event handler's pre() method if it defines one.
if hasattr(eh, 'pre'):
eh.pre(fs)
status = fs.umount(addopts=self.options.additional,
dryrun=self.options.dryrun,
fanout=self.options.fanout)
rc = self.fs_status_to_rc(status)
if not self.options.remote:
if rc == RC_OK:
if vlevel > 0:
key = lambda c: c.state == OFFLINE
print("%s was successfully unmounted on %s" %
(fs.fs_name, comps.filter(key=key).servers()))
elif rc == RC_RUNTIME_ERROR:
self.display_proxy_errors(fs)
if hasattr(eh, 'post'):
eh.post(fs)
return rc
| cea-hpc/shine | lib/Shine/Commands/Umount.py | Python | gpl-2.0 | 3,083 |
# FreeSpeak - a GUI frontend to online translator engines
# freespeak/translators/yahoo.py
#
## Copyright (C) 2005, 2006, 2007, 2008, 2009 Luca Bruno <[email protected]>
##
## This file is part of FreeSpeak.
##
## FreeSpeak is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## FreeSpeak is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Library General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Yahoo Babelfish engine
"""
import httplib
import urllib
import lxml.html
from freespeak.translator import BaseLanguage, BaseTranslator
from freespeak.translation import (TextTranslationRequest,
WebTranslationRequest)
from freespeak.status import Status, StatusTextComplete, StatusWebComplete
class Language (BaseLanguage):
"""
Yahoo languages have countrycode and a name
"""
def __init__ (self, cc, name):
BaseLanguage.__init__ (self)
self.cc = cc # Country Code
self.name = name
def __cmp__ (self, other):
if not other or self.name < other.name:
return -1
elif self.name > other.name:
return 1
return 0
def __eq__ (self, other):
return self.name == other.name and self.cc == other.cc
def __hash__ (self):
return hash (self.cc + self.name)
def __str__ (self):
return self.name
class Translator (BaseTranslator):
"""
Yahoo translator
"""
name = 'Yahoo!'
capabilities = [TextTranslationRequest, WebTranslationRequest]
icon = "yahoo"
def __init__ (self):
BaseTranslator.__init__ (self)
self.language_table = {}
def get_language_table (self, capability):
"""
Overridden. Get the language table.
It doesn't depend on the capability as Yahoo has equal languages
for both text and web.
"""
if self.language_table:
return self.language_table
url = 'http://babelfish.yahoo.com/'
tree = lxml.html.parse (urllib.urlopen (url))
elements = tree.xpath ('//form[@name="frmTrText"]//select[@name="lp"]/option[@value!=""]')
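        # Option values look like "en_fr" and their text like
        # "English to French"; split both into source/target pairs.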
for element in elements:
cc_to_cc = element.get ('value')
fromcc, tocc = cc_to_cc.split ('_')
name_to_name = element.text
fromname, toname = name_to_name.split (' to ')
fromlang = Language (fromcc, fromname)
tolang = Language (tocc, toname)
if not fromlang in self.language_table:
self.language_table[fromlang] = []
self.language_table[fromlang].append (tolang)
return self.language_table
def translate_text (self, request):
"""
Issue a POST to /translate_txt at babelfish.yahoo.com
"""
headers = {'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
lp = request.from_lang.cc+'_'+request.to_lang.cc
params = urllib.urlencode ({'ei': 'UTF-8',
'doit': 'done',
'fr': 'bf-home',
'intl': '1',
'tt': 'urltext',
'trtext': request.text,
'lp': lp})
yield Status (_("Connecting to")+" babelfish.yahoo.com")
conn = httplib.HTTPConnection ('babelfish.yahoo.com')
conn.request ('POST', '/translate_txt', params, headers)
result = conn.getresponse().read ()
yield Status (_("Parsing result"))
tree = lxml.html.fromstring (result)
result = tree.get_element_by_id("result").text_content ()
yield StatusTextComplete (result)
def translate_web (self, request):
"""
Returns a straight url without doing any HTTP request
"""
lp = request.from_lang.cc+'_'+request.to_lang.cc
params = urllib.urlencode ({'doit': 'done',
'tt': 'url',
'intl': '1',
'fr': 'bf-res',
'lp': lp,
'trurl': request.url})
url = 'http://babelfish.yahoo.com/translate_url?'+params
yield StatusWebComplete (url)
def suggest_translations (self, request):
"""
Yahoo doesn't support suggestions
"""
raise NotImplementedError ()
| BackupTheBerlios/freespeak | freespeak/translators/yahoo.py | Python | gpl-2.0 | 5,042 |
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.ParentalControlSetup import ProtectedScreen
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.TextBox import TextBox
from Components.config import config
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Ipkg import IpkgComponent
from Components.Sources.StaticText import StaticText
from Components.Slider import Slider
from Tools.BoundFunction import boundFunction
from enigma import eTimer, eDVBDB
from boxbranding import getBoxType
class UpdatePlugin(Screen, ProtectedScreen):
skin = """
<screen name="UpdatePlugin" position="center,center" size="550,300">
<widget name="activityslider" position="0,0" size="550,5" />
<widget name="slider" position="0,150" size="550,30" />
<widget source="package" render="Label" position="10,30" size="540,20" font="Regular;18" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
<widget source="status" render="Label" position="10,180" size="540,100" font="Regular;20" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, *args):
Screen.__init__(self, session)
ProtectedScreen.__init__(self)
self.sliderPackages = { "dreambox-dvb-modules": 1, "enigma2": 2, "tuxbox-image-info": 3 }
self.setTitle(_("Software update"))
self.slider = Slider(0, 4)
self["slider"] = self.slider
self.activityslider = Slider(0, 100)
self["activityslider"] = self.activityslider
self.status = StaticText(_("Please wait..."))
self["status"] = self.status
self.package = StaticText(_("Package list update"))
self["package"] = self.package
self.oktext = _("Press OK on your remote control to continue.")
self.packages = 0
self.error = 0
self.processed_packages = []
self.total_packages = None
self.channellist_only = 0
self.channellist_name = ''
self.updating = False
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.onClose.append(self.__close)
self["actions"] = ActionMap(["WizardActions"],
{
"ok": self.exit,
"back": self.exit
}, -1)
self.activity = 0
self.activityTimer = eTimer()
self.activityTimer.callback.append(self.checkTraficLight)
self.activityTimer.callback.append(self.doActivityTimer)
self.activityTimer.start(100, True)
def isProtected(self):
return config.ParentalControl.setuppinactive.value and\
(not config.ParentalControl.config_sections.main_menu.value and not config.ParentalControl.config_sections.configuration.value or hasattr(self.session, 'infobar') and self.session.infobar is None) and\
config.ParentalControl.config_sections.software_update.value
def checkTraficLight(self):
self.activityTimer.callback.remove(self.checkTraficLight)
self.activityTimer.start(100, False)
from urllib import urlopen
import socket
import os
currentTimeoutDefault = socket.getdefaulttimeout()
socket.setdefaulttimeout(3)
message = ""
picon = None
default = True
# try:
# TODO: Use Twisted's URL fetcher, urlopen is evil. And it can
# run in parallel to the package update.
# if getBoxType() in urlopen("http://openpli.org/status").read().split(','):
# message = _("The current beta image might not be stable.\nFor more information see %s.") % ("www.openpli.org")
# picon = MessageBox.TYPE_ERROR
# default = False
# except:
# message = _("The status of the current beta image could not be checked because %s can not be reached.") % ("www.openpli.org")
# picon = MessageBox.TYPE_ERROR
# default = False
socket.setdefaulttimeout(currentTimeoutDefault)
if default:
self.showDisclaimer()
else:
message += "\n" + _("Do you want to update your receiver?")
self.session.openWithCallback(self.startActualUpdate, MessageBox, message, default = default, picon = picon)
def showDisclaimer(self, justShow=False):
if config.usage.show_update_disclaimer.value or justShow:
message = _("The EuroSat team would like to point out that upgrading to the latest nightly build comes not only with the latest features, but also with some risks. After the update, it is possible that your device no longer works as expected. We recommend you create backups with Autobackup or Backupsuite. This allows you to quickly and easily restore your device to its previous state, should you experience any problems. If you encounter a 'bug', please report the issue on www.euro-sat-image.com.\n\nDo you understand this?")
			choices = [] if justShow else [(_("no"), False), (_("yes"), True), (_("yes") + " " + _("and never show this message again"), "never")]
			self.session.openWithCallback(boundFunction(self.disclaimerCallback, justShow), MessageBox, message, list=choices)
else:
self.startActualUpdate(True)
def disclaimerCallback(self, justShow, answer):
if answer == "never":
config.usage.show_update_disclaimer.value = False
config.usage.show_update_disclaimer.save()
if justShow and answer:
self.ipkgCallback(IpkgComponent.EVENT_DONE, None)
else:
self.startActualUpdate(answer)
def startActualUpdate(self,answer):
if answer:
self.updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
else:
self.close()
def doActivityTimer(self):
self.activity += 1
if self.activity == 100:
self.activity = 0
self.activityslider.setValue(self.activity)
def showUpdateCompletedMessage(self):
self.setEndMessage(ngettext("Update completed, %d package was installed.", "Update completed, %d packages were installed.", self.packages) % self.packages)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_DOWNLOAD:
self.status.setText(_("Downloading"))
elif event == IpkgComponent.EVENT_UPGRADE:
			if param in self.sliderPackages:
self.slider.setValue(self.sliderPackages[param])
self.package.setText(param)
self.status.setText(_("Upgrading") + ": %s/%s" % (self.packages, self.total_packages))
			if param not in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_INSTALL:
self.package.setText(param)
self.status.setText(_("Installing"))
			if param not in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_REMOVE:
self.package.setText(param)
self.status.setText(_("Removing"))
			if param not in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_CONFIGURING:
self.package.setText(param)
self.status.setText(_("Configuring"))
elif event == IpkgComponent.EVENT_MODIFIED:
if config.plugins.softwaremanager.overwriteConfigFiles.value in ("N", "Y"):
				self.ipkg.write(config.plugins.softwaremanager.overwriteConfigFiles.value)
else:
self.session.openWithCallback(
self.modificationCallback,
MessageBox,
_("A configuration file (%s) has been modified since it was installed.\nDo you want to keep your modifications?") % (param)
)
elif event == IpkgComponent.EVENT_ERROR:
self.error += 1
elif event == IpkgComponent.EVENT_DONE:
if self.updating:
self.updating = False
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE_LIST)
elif self.ipkg.currentCommand == IpkgComponent.CMD_UPGRADE_LIST:
self.total_packages = len(self.ipkg.getFetchedList())
if self.total_packages:
message = _("Do you want to update your receiver?") + "\n(" + (ngettext("%s updated package available", "%s updated packages available", self.total_packages) % self.total_packages) + ")"
choices = [(_("Update and reboot (recommended)"), "cold"),
(_("Update and ask to reboot"), "hot"),
(_("Update channel list only"), "channels"),
(_("Show updated packages"), "showlist")]
if not config.usage.show_update_disclaimer.value:
choices.append((_("Show disclaimer"), "disclaimer"))
choices.append((_("Cancel"), ""))
self.session.openWithCallback(self.startActualUpgrade, ChoiceBox, title=message, list=choices)
else:
self.session.openWithCallback(self.close, MessageBox, _("No updates available"), type=MessageBox.TYPE_INFO, timeout=3, close_on_any_key=True)
elif self.channellist_only > 0:
if self.channellist_only == 1:
self.setEndMessage(_("Could not find installed channel list."))
elif self.channellist_only == 2:
self.slider.setValue(2)
self.ipkg.startCmd(IpkgComponent.CMD_REMOVE, {'package': self.channellist_name})
self.channellist_only += 1
elif self.channellist_only == 3:
self.slider.setValue(3)
self.ipkg.startCmd(IpkgComponent.CMD_INSTALL, {'package': self.channellist_name})
self.channellist_only += 1
elif self.channellist_only == 4:
self.showUpdateCompletedMessage()
eDVBDB.getInstance().reloadBouquets()
eDVBDB.getInstance().reloadServicelist()
elif self.error == 0:
self.showUpdateCompletedMessage()
else:
self.activityTimer.stop()
self.activityslider.setValue(0)
error = _("Your receiver might be unusable now. Please consult the manual for further assistance before rebooting your receiver.")
if self.packages == 0:
error = _("No updates available. Please try again later.")
if self.updating:
error = _("Update failed. Your receiver does not have a working internet connection.")
self.status.setText(_("Error") + " - " + error)
elif event == IpkgComponent.EVENT_LISTITEM:
if 'enigma2-plugin-settings-' in param[0] and self.channellist_only > 0:
self.channellist_name = param[0]
self.channellist_only = 2
#print event, "-", param
pass
def setEndMessage(self, txt):
self.slider.setValue(4)
self.activityTimer.stop()
self.activityslider.setValue(0)
self.package.setText(txt)
self.status.setText(self.oktext)
def startActualUpgrade(self, answer):
if not answer or not answer[1]:
self.close()
return
if answer[1] == "cold":
self.session.open(TryQuitMainloop,retvalue=42)
self.close()
elif answer[1] == "channels":
self.channellist_only = 1
self.slider.setValue(1)
self.ipkg.startCmd(IpkgComponent.CMD_LIST, args = {'installed_only': True})
elif answer[1] == "disclaimer":
self.showDisclaimer(justShow=True)
elif answer[1] == "showlist":
text = ""
for i in [x[0] for x in sorted(self.ipkg.getFetchedList(), key=lambda d: d[0])]:
text = text and text + "\n" + i or i
self.session.openWithCallback(boundFunction(self.ipkgCallback, IpkgComponent.EVENT_DONE, None), TextBox, text, _("Packages to update"))
else:
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE, args = {'test_only': False})
def modificationCallback(self, res):
		self.ipkg.write("N" if res else "Y")
def exit(self):
if not self.ipkg.isRunning():
if self.packages != 0 and self.error == 0 and self.channellist_only == 0:
if fileExists("/etc/enigma2/.removelang"):
from Components.Language import language
language.delLanguage()
self.session.openWithCallback(self.exitAnswer, MessageBox, _("Update completed. Do you want to reboot your receiver?"))
else:
self.close()
else:
if not self.updating:
self.close()
def exitAnswer(self, result):
if result is not None and result:
self.session.open(TryQuitMainloop,retvalue=2)
self.close()
def __close(self):
self.ipkg.removeCallback(self.ipkgCallback)
| eurosata1/e2 | lib/python/Screens/SoftwareUpdate.py | Python | gpl-2.0 | 11,487 |
#!/usr/bin/env python
from __future__ import print_function
import sys
from collections import defaultdict
from KafNafParserPy import KafNafParser
class Centity:
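    """
    Container for an entity annotation: its type, its surface words, the
    source filename and the ids of its tokens.
    """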
def __init__(self, line=None):
        self.id = ''
        self.type = ''
        self.filename = ''
        self.word_list = []
        self.token_id_list = []
if line is not None:
self.load_from_line(line)
def create(self,this_id, this_type, this_filename, id_list, word_list):
self.id = this_id
self.type = this_type
self.filename = this_filename
self.word_list = word_list[:]
self.token_id_list = id_list[:]
def load_from_line(self,line):
fields = line.strip().split('\t')
self.type = fields[0]
self.word_list = fields[1].split(' ')
ids_with_filename = fields[2].split(' ')
for id_with_filename in ids_with_filename:
p = id_with_filename.rfind('#')
self.filename = id_with_filename[:p]
#self.token_id_list.append(id_with_filename)
self.token_id_list.append(id_with_filename[p+1:])
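    # Expected tab-separated line format (illustrative values):
    #   expression<TAB>not good<TAB>doc1.naf#w12 doc1.naf#w13
    # i.e. type, space-separated words, and token ids prefixed by filename + '#'.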
def to_line(self):
        # in the dataset we also need to include the filename
tokens_with_filename = [self.filename+'#'+token_id for token_id in self.token_id_list]
line = '%s\t%s\t%s' % (self.type,' '.join(self.word_list),' '.join(tokens_with_filename))
return line
def __str__(self):
s = ''
s += 'Type: %s\n' % self.type
s += 'Words: %s\n' % str(self.word_list)
s += 'Filename: %s\n' % self.filename
s += 'Ids: %s\n' % str(self.token_id_list)
return s
def get_avg_position(self, naf_obj):
offset_total = 0
for token_id in self.token_id_list:
token_obj = naf_obj.get_token(token_id)
offset_total += int(token_obj.get_offset())
avg_position = 1.0*offset_total/len(self.token_id_list)
return avg_position
def get_avg_position_num_tokens(self, naf_obj):
list_ids_offset = []
for token in naf_obj.get_tokens():
list_ids_offset.append((token.get_id(),int(token.get_offset())))
        if not hasattr(naf_obj, 'position_for_token'):
            naf_obj.position_for_token = {}
numT = 0
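            # tokens are ranked by descending offset, so position 0 is the last
            # token in the document; relative distances are unaffected because
            # both expressions and targets use the same ranking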
for token_id, token_offset in sorted(list_ids_offset, key=lambda t: -t[1]):
naf_obj.position_for_token[token_id] = numT
numT += 1
position_total = 0
for token_id in self.token_id_list:
position = naf_obj.position_for_token[token_id]
position_total += position
avg_position = 1.0*position_total/len(self.token_id_list)
return avg_position
def get_sentence(self, naf_obj):
first_token = self.token_id_list[0]
token_obj = naf_obj.get_token(first_token)
sentence = token_obj.get_sent()
return sentence
def load_entities(filename):
list_entities = []
    with open(filename, 'r') as fd:
        for line in fd:
            entity = Centity(line)
            list_entities.append(entity)
return list_entities
def match_entities(expression_entities, target_entities, knaf_obj):
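    """
    For each target entity, select the expression entity that appears in the
    same sentence and whose average token position is closest, and return the
    matched (expression, target) pairs.
    """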
matched_pairs = []
if len(expression_entities) > 0:
for target in target_entities:
target_sentence = target.get_sentence(knaf_obj)
#position_for_target = target.get_avg_position(knaf_obj)
position_for_target = target.get_avg_position_num_tokens(knaf_obj)
expressions_with_distance = []
#print 'Entity: ',expression.word_list, position_for_expression
for expression in expression_entities:
expression_sentence = expression.get_sentence(knaf_obj)
if target_sentence == expression_sentence:
#position_for_expression = expression.get_avg_position(knaf_obj)
position_for_expression = expression.get_avg_position_num_tokens(knaf_obj)
distance = abs(position_for_expression-position_for_target)
expressions_with_distance.append((expression,distance))
if len(expressions_with_distance) != 0:
expressions_with_distance.sort(key=lambda t: t[1])
#for target, d in expressions_with_distance:
# print '\t', target.word_list, d
#We select the first one
selected_expression = expressions_with_distance[0][0]
#print 'FIXED:', target
#for a,b in expressions_with_distance:
# print 'CANDIDATE', a.to_line(), b
#print
matched_pairs.append((selected_expression, target))
return matched_pairs
if __name__ == '__main__':
expression_filename = sys.argv[1] #test.mpqa.exp.csv
target_filename = sys.argv[2] #test.mpqa.tar.csv
expression_entities = load_entities(expression_filename)
target_entities = load_entities(target_filename)
target_entities_per_filename = defaultdict(list)
for t in target_entities:
target_entities_per_filename[t.filename].append(t)
for filename, list_targets in list(target_entities_per_filename.items()):
knaf_obj = KafNafParser(filename)
expression_candidates = []
for expression in expression_entities:
if expression.filename == filename:
expression_candidates.append(expression)
matched_pairs = match_entities(expression_candidates, list_targets, knaf_obj)
for exp, tar in matched_pairs:
print(exp.to_line())
print(tar.to_line())
print()
| rubenIzquierdo/opinion_miner_deluxePP | match_entities_by_distance.py | Python | gpl-2.0 | 5,954 |
#!/usr/bin/env python
# encoding: utf-8
"""Module for utilities concerning Wikipedia
"""
from __future__ import unicode_literals
import re
def create_article_url(prefix, title):
"""Creates url from prefix and title of the article
"""
title = term2wuri(title)
return "%s/%s" % (prefix, title)
def term2wuri(term):
"""Creates last part of wikipedia URI ("wuri") from a term
Examples:
'duke' -> 'Duke'
'Channel Islands' -> 'Channel_Islands'
'early modern period' -> 'Early_modern_period'
Args:
term (unicode)
any word, name, phrase
Returns:
unicode
"""
# TODO: handle namespaces (see wikiextractor.normalizedTitle())
# strip leading whitespace and underscores
# and replace spaces with underscores
wuri = term.strip(' _').replace(' ', '_')
# replace sequences of underscores with a single underscore
wuri = re.compile(r'_+').sub('_', wuri)
# ideally term shouldn't be empty, but it's Wikipedia
if len(wuri) > 0:
# first letter always capital
wuri = wuri[0].upper() + wuri[1:]
return wuri
| effa/wikicorpora | utils/wiki_utils.py | Python | gpl-2.0 | 1,136 |
"""
2161 : 카드1
URL : https://www.acmicpc.net/problem/2161
Input :
7
Output :
1 3 5 7 4 2 6
"""
N = int(input())
array = list(range(1, N + 1))
trash = []
while len(array) > 1:
    # throw away the top card
    trash.append(array[0])
    # move the next card to the bottom of the pile
    array = array[2:] + [array[1]]
# the last remaining card
trash.append(array[0])
print(' '.join(str(x) for x in trash))
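# A faster alternative sketch (not part of the original solution) using
# collections.deque, which avoids the O(N) list slicing per round:
# from collections import deque
# cards = deque(range(1, N + 1))
# out = []
# while len(cards) > 1:
#     out.append(cards.popleft())    # throw away the top card
#     cards.append(cards.popleft())  # move the next card to the bottom
# out.append(cards[0])
# print(' '.join(str(x) for x in out))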
| 0x1306e6d/Baekjoon | baekjoon/2161.py | Python | gpl-2.0 | 352 |
#!/bin/python3
# Complete the 'solve' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY arr as parameter.
# OPTIMAL (based on Discussions)
#
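# Approach: for each index i, l[i] is the 1-based index of the nearest element
# to the LEFT that is strictly greater than a[i] (0 if none), and r[i] is the
# same for the RIGHT side; both are found with a monotonic stack, and the
# answer is max(l[i] * r[i]) over all i.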
def solve(a: [int]) -> int:
s = stack()
l: list[int] = [0] * len(a)
for i in range(len(a)):
while not s.isempty() and a[s.top()] <= a[i]:
s.pop()
if not s.isempty():
l[i] = s.top() + 1
s.push(i)
s.clear()
r: list[int] = [0] * len(a)
for i in range(len(a) - 1, -1, -1):
while not s.isempty() and a[s.top()] <= a[i]:
s.pop()
if not s.isempty():
r[i] = s.top() + 1
s.push(i)
m = 0
for i in range(len(a)):
m = max(m, l[i] * r[i])
return m
class stack:
def __init__(self):
self.s = []
def top(self) -> int:
return self.s[-1]
def pop(self) -> int:
return self.s.pop()
def push(self, e: int) -> None:
self.s.append(e)
def isempty(self) -> bool:
return not self.s
def clear(self) -> None:
self.s.clear()
# ORIGINAL
# https://www.hackerrank.com/challenges/find-maximum-index-product/submissions/code/256530656
def solve(a: [int]) -> int:
m = []
for i in range(len(a)):
l = left(i, a)
r = right(i, a)
m.append(l * r)
return max(m)
def left(i: int, a: [int]) -> int:
j = i - 1
while j >= 0:
if a[j] > a[i]:
return j + 1
j -= 1
return 0
def right(i: int, a: [int]) -> int:
k = i + 1
while k < len(a):
if a[k] > a[i]:
return k + 1
k += 1
return 0
# Complete the 'arrayManipulation' function below.
#
# The function is expected to return a LONG_INTEGER.
# The function accepts following parameters:
# 1. INTEGER n
# 2. 2D_INTEGER_ARRAY queries
# OPTIMAL (based on Discussions)
# https://www.hackerrank.com/challenges/crush/submissions/code/255502595
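# Approach: a difference array - each query (a, b, k) adds +k at index a-1 and
# -k at index b instead of touching every cell in [a, b]; a single prefix-sum
# pass then yields each cell's final value while tracking the running maximum.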
def arrayManipulation(n, queries):
arr = [0] * n
for q in queries:
a, b, k = q[0] - 1, q[1], q[2]
arr[a] += k
if b < n:
arr[b] -= k
m = c = 0
for i in arr:
c += i
if c > m:
m = c
return m
# ORIGINAL
# https://www.hackerrank.com/challenges/crush/submissions/code/255445013
def arrayManipulation(n, queries):
a = [0] * n
for q in queries:
for i in range(q[0] - 1, q[1]):
a[i] += q[2]
return max(a)
| vargax/ejemplos | python/hackerrank/hard.py | Python | gpl-2.0 | 2,507 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
try:
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.bibauthorid_webapi import add_cname_to_hepname_record
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.bibauthorid_config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS
from invenio.config import CFG_SITE_LANG, CFG_SITE_NAME, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.bibauthorid_name_utils import most_relevant_name
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language # , wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url, get_canonical_and_alternates_urls
from invenio.webuser import (getUid,
page_not_authorized,
collect_user_info,
set_user_preferences,
get_user_preferences,
email_valid_p,
emailUnique,
get_email_from_username,
get_uid_from_email,
isGuestUser)
from invenio.access_control_admin import acc_get_user_roles, acc_find_user_role_actions
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import CREATE_NEW_PERSON
import invenio.webinterface_handler_config as apache
import invenio.webauthorprofile_interface as webauthorapi
import invenio.bibauthorid_webapi as webapi
from invenio.bibauthorid_general_utils import get_title_of_doi, get_title_of_arxiv_pubid, is_valid_orcid
from invenio.bibauthorid_backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author
from invenio.bibauthorid_dbinterface import defaultdict, remove_arxiv_papers_of_author
from invenio.webauthorprofile_orcidutils import get_dois_from_orcid
from invenio.bibauthorid_webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.bibauthorid_templates import WebProfileMenu, WebProfilePage
# Imports related to hepnames update form
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
'''
Handles /author/claim pages and AJAX requests.
Supplies the methods:
/author/claim/<string>
/author/claim/action
/author/claim/claimstub
/author/claim/export
/author/claim/generate_autoclaim_data
/author/claim/merge_profiles_ajax
/author/claim/search_box_ajax
/author/claim/tickets_admin
/author/claim/search
'''
_exports = ['',
'action',
'claimstub',
'export',
'generate_autoclaim_data',
'merge_profiles_ajax',
'search_box_ajax',
'tickets_admin'
]
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
        - /author/claim/1332 shows the page of the author with id: 1332
        - /author/claim/100:5522,1431 shows the page of the author
          identified by the bibrefrec: '100:5522,1431'
'''
        if component not in self._exports:
return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
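        # e.g. WebInterfaceBibAuthorIDClaimPages('J.R.Ellis.1') resolves the
        # canonical id below, while '14' and '100:1442,155' take the author-id
        # and bibrefrec paths (values are illustrative)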
self.person_id = -1 # -1 is a non valid author identifier
if identifier is None or not isinstance(identifier, str):
return
# check if it's a canonical id: e.g. "J.R.Ellis.1"
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def __call__(self, req, form):
'''
Serve the main person page.
Will use the object's person id to get a person's information.
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: a full page formatted in HTML
@rtype: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'open_claim': (str, None),
'ticketid': (int, -1),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
req.argd = argd # needed for perform_req_search
if self.person_id < 0:
return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))
no_access = self._page_access_permission_wall(req, [self.person_id])
if no_access:
return no_access
pinfo['claim_in_process'] = True
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = pinfo['claim_in_process']
session.dirty = True
if self.person_id != -1:
pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id
rt_ticket_id = argd['ticketid']
if rt_ticket_id != -1:
pinfo["admin_requested_ticket_id"] = rt_ticket_id
session.dirty = True
## Create menu and page using templates
cname = webapi.get_canonical_id_from_person_id(self.person_id)
menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
profile_page.add_profile_menu(menu)
gboxstatus = self.person_id
gpid = self.person_id
        gNumOfWorkers = 3 # TODO: read this from a configuration file
gReqTimeout = 3000
gPageTimeout = 12000
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s", guestPrompt: true});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
# content += self._generate_person_info_box(ulevel, ln) #### Name variants
# metaheaderadd = self._scripts() + '\n <meta name="robots" content="nofollow" />'
# body = self._generate_optional_menu(ulevel, req, form)
content = self._generate_tabs(ulevel, req)
content += self._generate_footer(ulevel)
content = content.decode('utf-8', 'strict')
webapi.history_log_visit(req, 'claim', pid=self.person_id)
return page(title=self._generate_title(ulevel),
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=profile_page.get_wrapped_body(content).encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
'''
Display an error page if user not authorized to use the interface.
@param req: Apache Request Object for session management
@type req: Apache Request Object
@param req_pid: Requested person id
@type req_pid: int
@param req_level: Request level required for the page
@type req_level: string
'''
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
uinfo = collect_user_info(req)
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
is_authorized = True
pids_to_check = []
if not AID_ENABLED:
return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))
if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
if req_pid and not isinstance(req_pid, list):
pids_to_check = [req_pid]
elif req_pid and isinstance(req_pid, list):
pids_to_check = req_pid
if (not (uinfo['precached_usepaperclaim']
or uinfo['precached_usepaperattribution'])
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
if is_authorized and not webapi.user_can_view_CMP(uid):
is_authorized = False
if is_authorized and 'ticket' in pinfo:
for tic in pinfo["ticket"]:
if 'pid' in tic:
pids_to_check.append(tic['pid'])
if pids_to_check and is_authorized:
user_pid = webapi.get_pid_from_uid(uid)
if not uinfo['precached_usepaperattribution']:
                if (user_pid not in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
elif (user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
for tic in list(pinfo["ticket"]):
if not tic["pid"] == user_pid:
pinfo['ticket'].remove(tic)
if not is_authorized:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
else:
return ""
def _generate_title(self, ulevel):
'''
Generates the title for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: title
@rtype: str
'''
def generate_title_guest():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_user():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_admin():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (administrator interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
generate_title = {'guest': generate_title_guest,
'user': generate_title_user,
'admin': generate_title_admin}
return generate_title[ulevel]()
def _generate_optional_menu(self, ulevel, req, form):
'''
Generates the menu for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: menu
@rtype: str
'''
def generate_optional_menu_guest(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def generate_optional_menu_user(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def generate_optional_menu_admin(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu_admin(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
generate_optional_menu = {'guest': generate_optional_menu_guest,
'user': generate_optional_menu_user,
'admin': generate_optional_menu_admin}
return "<div class=\"clearfix\">" + generate_optional_menu[ulevel](req, form) + "</div>"
def _generate_ticket_box(self, ulevel, req):
'''
Generates the semi-permanent info box for the specified user permission
level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: info box
@rtype: str
'''
def generate_ticket_box_guest(req):
session = get_session(req)
pinfo = session['personinfo']
ticket = pinfo['ticket']
results = list()
pendingt = list()
for t in ticket:
if 'execution_result' in t:
for res in t['execution_result']:
results.append(res)
else:
pendingt.append(t)
box = ""
if pendingt:
box += TEMPLATE.tmpl_ticket_box('in_process', 'transaction', len(pendingt))
if results:
failed = [messages for status, messages in results if not status]
if failed:
box += TEMPLATE.tmpl_transaction_box('failure', failed)
successfull = [messages for status, messages in results if status]
if successfull:
box += TEMPLATE.tmpl_transaction_box('success', successfull)
return box
def generate_ticket_box_user(req):
return generate_ticket_box_guest(req)
def generate_ticket_box_admin(req):
return generate_ticket_box_guest(req)
generate_ticket_box = {'guest': generate_ticket_box_guest,
'user': generate_ticket_box_user,
'admin': generate_ticket_box_admin}
return generate_ticket_box[ulevel](req)
def _generate_person_info_box(self, ulevel, ln):
'''
Generates the name info box for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param ln: page display language
@type ln: str
@return: name info box
@rtype: str
'''
def generate_person_info_box_guest(ln):
names = webapi.get_person_names_from_id(self.person_id)
box = TEMPLATE.tmpl_admin_person_info_box(ln, person_id=self.person_id,
names=names)
return box
def generate_person_info_box_user(ln):
return generate_person_info_box_guest(ln)
def generate_person_info_box_admin(ln):
return generate_person_info_box_guest(ln)
generate_person_info_box = {'guest': generate_person_info_box_guest,
'user': generate_person_info_box_user,
'admin': generate_person_info_box_admin}
return generate_person_info_box[ulevel](ln)
def _generate_tabs(self, ulevel, req):
'''
Generates the tabs content for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: tabs content
@rtype: str
'''
from invenio.bibauthorid_templates import verbiage_dict as tmpl_verbiage_dict
from invenio.bibauthorid_templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict
def generate_tabs_guest(req):
links = list() # ['delete', 'commit','del_entry','commit_entry']
tabs = ['records', 'repealed', 'review']
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=list(),
verbiage_dict=tmpl_verbiage_dict['guest'],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
show_reset_button=False)
def generate_tabs_user(req):
links = ['delete', 'del_entry']
tabs = ['records', 'repealed', 'review', 'tickets']
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
user_is_owner = 'not_owner'
if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
user_is_owner = 'owner'
open_tickets = webapi.get_person_request_ticket(self.person_id)
tickets = list()
for t in open_tickets:
owns = False
for row in t[0]:
if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
owns = True
if owns:
tickets.append(t)
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=tickets,
verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])
def generate_tabs_admin(req, show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'], open_tickets=None,
verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
session = get_session(req)
personinfo = dict()
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
records = [{'recid': paper[0],
'bibref': paper[1],
'flag': paper[2],
'authorname': paper[3],
'authoraffiliation': paper[4],
'paperdate': paper[5],
'rt_status': paper[6],
'paperexperiment': paper[7]} for paper in all_papers]
rejected_papers = [row for row in records if row['flag'] < -1]
rest_of_papers = [row for row in records if row['flag'] >= -1]
review_needed = webapi.get_review_needing_records(self.person_id)
if len(review_needed) < 1:
if 'review' in show_tabs:
show_tabs.remove('review')
            if open_tickets is None:
open_tickets = webapi.get_person_request_ticket(self.person_id)
else:
if len(open_tickets) < 1 and 'tickets' in show_tabs:
show_tabs.remove('tickets')
rt_tickets = None
if "admin_requested_ticket_id" in personinfo:
rt_tickets = personinfo["admin_requested_ticket_id"]
if verbiage_dict is None:
verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
if buttons_verbiage_dict is None:
buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)
# send data to the template function
tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
rejected_papers=rejected_papers,
rest_of_papers=rest_of_papers,
review_needed=review_needed,
rt_tickets=rt_tickets,
open_rt_tickets=open_tickets,
show_tabs=show_tabs,
ticket_links=ticket_links,
verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button)
return tabs
def translate_dict_values(dictionary, ln):
def translate_str_values(dictionary, f=lambda x: x):
translated_dict = dict()
for key, value in dictionary.iteritems():
if isinstance(value, str):
translated_dict[key] = f(value)
elif isinstance(value, dict):
translated_dict[key] = translate_str_values(value, f)
else:
raise TypeError("Value should be either string or dictionary.")
return translated_dict
return translate_str_values(dictionary, f=gettext_set_language(ln))
generate_tabs = {'guest': generate_tabs_guest,
'user': generate_tabs_user,
'admin': generate_tabs_admin}
return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
'''
Generates the footer for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: footer
@rtype: str
'''
def generate_footer_guest():
return TEMPLATE.tmpl_invenio_search_box()
def generate_footer_user():
return generate_footer_guest()
def generate_footer_admin():
return generate_footer_guest()
generate_footer = {'guest': generate_footer_guest,
'user': generate_footer_user,
'admin': generate_footer_admin}
return generate_footer[ulevel]()
def _ticket_dispatch_end(self, req):
'''
        The ticket dispatch is finished: redirect to the referer page, to the
        last viewed person page, or return to the caller so that the
        autoclaim box can populate its data
'''
session = get_session(req)
pinfo = session["personinfo"]
webapi.session_bareinit(req)
if 'claim_in_process' in pinfo:
pinfo['claim_in_process'] = False
if "merge_ticket" in pinfo and pinfo['merge_ticket']:
pinfo['merge_ticket'] = []
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = True
session.dirty = True
if "referer" in pinfo and pinfo["referer"]:
referer = pinfo["referer"]
del(pinfo["referer"])
session.dirty = True
return redirect_to_url(req, referer)
        # if we are coming from the autoclaim box we should not redirect and just return to the caller function
        if 'autoclaim' in pinfo and not pinfo['autoclaim']['review_failed'] and pinfo['autoclaim']['begin_autoclaim']:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
session.dirty = True
else:
redirect_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
if not redirect_page:
redirect_page = webapi.get_fallback_redirect_link(req)
            if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] and pinfo['autoclaim']['checkout']:
redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
pinfo['autoclaim']['checkout'] = False
session.dirty = True
            elif 'manage_profile' not in redirect_page:
                pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
redirect_page = '%s/author/claim/%s?open_claim=True' % (CFG_SITE_URL, webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
else:
pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
return redirect_to_url(req, redirect_page)
# redirect_link = diary('get_redirect_link', caller='_ticket_dispatch_end', parameters=[('open_claim','True')])
# return redirect_to_url(req, redirect_link)
    # TODO: review whether this method should be deleted
def __user_is_authorized(self, req, action):
'''
Determines if a given user is authorized to perform a specified action
@param req: Apache Request Object
@type req: Apache Request Object
@param action: the action the user wants to perform
@type action: string
@return: True if user is allowed to perform the action, False if not
@rtype: boolean
'''
if not req:
return False
if not action:
return False
else:
action = escape(action)
uid = getUid(req)
if not isinstance(uid, int):
return False
if uid == 0:
return False
allowance = [i[1] for i in acc_find_user_role_actions({'uid': uid})
if i[1] == action]
if allowance:
return True
return False
@staticmethod
def _scripts(kill_browser_cache=False):
'''
Returns html code to be included in the meta header of the html page.
The actual code is stored in the template.
@return: html formatted Javascript and CSS inclusions for the <head>
@rtype: string
'''
return TEMPLATE.tmpl_meta_includes(kill_browser_cache)
def _check_user_fields(self, req, form):
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'user_first_name': (str, None),
'user_last_name': (str, None),
'user_email': (str, None),
'user_comments': (str, None)})
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
skip_checkout_faulty_fields = False
if ulevel in ['user', 'admin']:
skip_checkout_faulty_fields = True
if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
if "user_first_name" in argd and argd['user_first_name']:
if not argd["user_first_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_first_name")
else:
pinfo["user_first_name"] = escape(argd["user_first_name"])
if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
if "user_last_name" in argd and argd['user_last_name']:
if not argd["user_last_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_last_name")
else:
pinfo["user_last_name"] = escape(argd["user_last_name"])
if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
if "user_email" in argd and argd['user_email']:
if not email_valid_p(argd["user_email"]):
pinfo["checkout_faulty_fields"].append("user_email")
else:
pinfo["user_email"] = escape(argd["user_email"])
if (ulevel == "guest"
and emailUnique(argd["user_email"]) > 0):
pinfo["checkout_faulty_fields"].append("user_email_taken")
else:
pinfo["checkout_faulty_fields"].append("user_email")
if "user_comments" in argd:
if argd["user_comments"]:
pinfo["user_ticket_comments"] = escape(argd["user_comments"])
else:
pinfo["user_ticket_comments"] = ""
session.dirty = True
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as action dispatcher for interface mass action requests.
Valid mass actions are:
- add_external_id: add an external identifier to an author
- add_missing_external_ids: add missing external identifiers of an author
            - bibref_check_submit: submit the reviewed bibrefs of an author
            - cancel: clean the session (erase tickets and so on)
            - cancel_rt_ticket: cancel a request ticket (or one of its transactions)
            - cancel_search_ticket: cancel an admin search ticket
            - cancel_stage: cancel the current claiming stage
            - checkout: proceed to the final review of the ticket
            - checkout_continue_claiming: save the user fields and continue claiming
            - checkout_remove_transaction: remove a transaction from the ticket
            - checkout_submit: check the user fields and submit the ticket
            - claim: claim papers for an author
            - commit_rt_ticket: commit the modifications of a request ticket
- confirm: confirm assignments to an author
- delete_external_ids: delete external identifiers of an author
- repeal: repeal assignments from an author
- reset: reset assignments of an author
- set_canonical_name: set/swap the canonical name of an author
- to_other_person: assign a document from an author to another author
@param req: apache request object
@type req: apache request object
@param form: parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
        @rtype: str
'''
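        # e.g. a POST to /author/claim/action with pid=14, selection=
        # ['100:1442,155'] and confirm set would trigger confirm_repeal_reset()
        # below (values are illustrative)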
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session["personinfo"]
argd = wash_urlargd(form,
{'autoclaim_show_review':(str, None),
'canonical_name': (str, None),
'existing_ext_ids': (list, None),
'ext_id': (str, None),
'uid': (int, None),
'ext_system': (str, None),
'ln': (str, CFG_SITE_LANG),
'pid': (int, -1),
'primary_profile':(str, None),
'search_param': (str, None),
'rt_action': (str, None),
'rt_id': (int, None),
'selection': (list, None),
# permitted actions
'add_external_id': (str, None),
'set_uid': (str, None),
'add_missing_external_ids': (str, None),
'associate_profile': (str, None),
'bibref_check_submit': (str, None),
'cancel': (str, None),
'cancel_merging': (str, None),
'cancel_rt_ticket': (str, None),
'cancel_search_ticket': (str, None),
'cancel_stage': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_remove_transaction': (str, None),
'checkout_submit': (str, None),
'assign': (str, None),
'commit_rt_ticket': (str, None),
'confirm': (str, None),
'delete_external_ids': (str, None),
'merge': (str, None),
'reject': (str, None),
'repeal': (str, None),
'reset': (str, None),
'send_message': (str, None),
'set_canonical_name': (str, None),
'to_other_person': (str, None)})
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
uid = getUid(req)
ln = argd['ln']
action = None
permitted_actions = ['add_external_id',
'set_uid',
'add_missing_external_ids',
'associate_profile',
'bibref_check_submit',
'cancel',
'cancel_merging',
'cancel_rt_ticket',
'cancel_search_ticket',
'cancel_stage',
'checkout',
'checkout_continue_claiming',
'checkout_remove_transaction',
'checkout_submit',
'assign',
'commit_rt_ticket',
'confirm',
'delete_external_ids',
'merge',
'reject',
'repeal',
'reset',
'send_message',
'set_canonical_name',
'to_other_person']
for act in permitted_actions:
            # at most one action is enabled in the form
if argd[act] is not None:
action = act
no_access = self._page_access_permission_wall(req, None)
if no_access and action not in ["assign"]:
return no_access
        # papers with incomplete info or other problems trigger the action function without the user's interference,
        # in order to fix those problems and then claim the papers or remove them from the ticket
if (action is None
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"]):
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.dirty = True
def add_external_id():
'''
            associates the external id ext_id with the author identified by pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot add external id to unknown person")
if argd['ext_system'] is not None:
ext_sys = argd['ext_system']
else:
return self._error_page(req, ln,
"Fatal: cannot add an external id without specifying the system")
if argd['ext_id'] is not None:
ext_id = argd['ext_id']
else:
return self._error_page(req, ln,
"Fatal: cannot add a custom external id without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def set_uid():
'''
            associates the web user with id uid to the author profile with pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: current user is unknown")
if argd['uid'] is not None:
dest_uid = int(argd['uid'])
else:
return self._error_page(req, ln,
"Fatal: user id is not valid")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.set_person_uid(pid, dest_uid, userinfo)
# remove arxiv pubs of current pid
remove_arxiv_papers_of_author(pid)
dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
if dest_uid_pid > -1:
# move the arxiv pubs of the dest_uid to the current pid
dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def add_missing_external_ids():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot recompute external ids for an unknown person")
update_external_ids_of_authors([pid], overwrite=False)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def associate_profile():
'''
            associates the logged-in user with the person profile identified by pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot associate profile without a person id.")
uid = getUid(req)
pid, profile_claimed = webapi.claim_profile(uid, pid)
redirect_pid = pid
if profile_claimed:
pinfo['pid'] = pid
pinfo['should_check_to_autoclaim'] = True
pinfo["login_info_message"] = "confirm_success"
session.dirty = True
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, redirect_pid))
            # if someone has already claimed this profile it redirects to choose_profile with an error message
else:
                param = ''
if 'search_param' in argd and argd['search_param']:
param = '&search_param=' + argd['search_param']
redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))
def bibref_check_submit():
pinfo["bibref_check_reviewed_bibrefs"] = list()
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = list()
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = list()
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
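                                # each element is expected to look like "<pid>||<bibref>",
                                # e.g. "14||100:1442" (illustrative values); the bibrec is
                                # appended below to form a full bibrefrec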
for element in elements:
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref)
and tpid > -1):
add_rev(element + "," + str(bibrec))
session.dirty = True
def cancel():
self.__session_cleanup(req)
return self._ticket_dispatch_end(req)
def cancel_merging():
'''
empties the session out of merge content and redirects to the manage profile page
that the user was viewing before the merge
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: Couldn't redirect to the previous page")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if pinfo['merge_profiles']:
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def cancel_rt_ticket():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")
if argd['rt_id'] is not None and argd['rt_action'] is not None:
rt_id = int(argd['rt_id'])
rt_action = argd['rt_action']
for bibrefrec in bibrefrecs:
webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
else:
rt_id = int(bibrefrecs[0])
webapi.delete_request_ticket(pid, rt_id)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def cancel_search_ticket(without_return=False):
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.dirty = True
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
if not without_return:
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
if not without_return:
return self.search(req, form)
def cancel_stage():
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.dirty = True
return self._ticket_dispatch_end(req)
def checkout():
pass
# return self._ticket_final_review(req)
def checkout_continue_claiming():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
def checkout_remove_transaction():
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def checkout_submit():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
pinfo["checkout_confirmed"] = True
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def claim():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot claim papers to an unknown person")
if action == 'assign':
claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
for bibrefrec in list(bibrefrecs):
_, rec = webapi.split_bibrefrec(bibrefrec)
if rec in claimed_recs:
bibrefrecs.remove(bibrefrec)
for bibrefrec in bibrefrecs:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
continue
ticket = pinfo['ticket']
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def claim_to_other_person():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)
def commit_rt_ticket():
if argd['selection'] is not None:
tid = argd['selection'][0]
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
return self._commit_rt_ticket(req, tid, pid)
def confirm_repeal_reset():
if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
pid = argd['pid']
                cancel_search_ticket(without_return=True)
else:
return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
#return self._error_page(req, ln, "Fatal: cannot create ticket without a person id! (crr %s)" %repr(argd))
bibrefrecs = argd['selection']
if argd['confirm']:
action = 'assign'
elif argd['repeal']:
action = 'reject'
elif argd['reset']:
action = 'reset'
else:
return self._error_page(req, ln, "Fatal: not existent action!")
for bibrefrec in bibrefrecs:
form['jsondata'] = json.dumps({'pid': str(pid),
'action': action,
'bibrefrec': bibrefrec,
'on': 'user'})
t = WebInterfaceAuthorTicketHandling()
t.add_operation(req, form)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def delete_external_ids():
'''
            deletes the association between the author identified by pid and the selected external ids
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot delete external ids from an unknown person")
if argd['existing_ext_ids'] is not None:
existing_ext_ids = argd['existing_ext_ids']
else:
return self._error_page(req, ln,
"Fatal: you must select at least one external id in order to delete it")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def none_action():
return self._error_page(req, ln,
"Fatal: cannot create ticket if no action selected.")
def merge():
'''
performs a merge if allowed on the profiles that the user chose
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without a primary profile!")
if argd['selection']:
profiles_to_merge = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without any profiles selected!")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]
is_admin = False
if pinfo['ulevel'] == 'admin':
is_admin = True
# checking if there are restrictions regarding this merge
can_perform_merge, preventing_pid = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)
if not can_perform_merge:
# when redirected back to the merge profiles page display an error message about the currently attempted merge
pinfo['merge_info_message'] = ("failure", "confirm_failure")
session.dirty = True
redirect_url = "%s/author/merge_profiles?primary_profile=%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
if is_admin:
webapi.merge_profiles(primary_pid, pids_to_merge)
# when redirected back to the manage profile page display a message about the currently attempted merge
pinfo['merge_info_message'] = ("success", "confirm_success")
else:
name = ''
if 'user_last_name' in pinfo:
name = pinfo['user_last_name']
if 'user_first_name' in pinfo:
name += pinfo['user_first_name']
email = ''
if 'user_email' in pinfo:
email = pinfo['user_email']
selection_str = "&selection=".join(profiles_to_merge)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str)}
# a message is sent to the admin with info regarding the currently attempted merge
webapi.create_request_message(userinfo, subj='Merge profiles request')
# when redirected back to the manage profile page display a message about the merge
pinfo['merge_info_message'] = ("success", "confirm_operation")
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def send_message():
'''
sends a message from the user to the admin
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
#pp = pprint.PrettyPrinter(indent=4)
#session_dump = pp.pprint(pinfo)
session_dump = str(pinfo)
name = ''
name_changed = False
name_given = ''
email = ''
email_changed = False
email_given = ''
comment = ''
last_page_visited = ''
if "user_last_name" in pinfo:
name = pinfo["user_last_name"]
if "user_first_name" in pinfo:
name += pinfo["user_first_name"]
name = name.rstrip()
if "user_email" in pinfo:
email = pinfo["user_email"]
email = email.rstrip()
if 'Name' in form:
if not name:
name = form['Name']
elif name != form['Name']:
name_given = form['Name']
name_changed = True
name = name.rstrip()
            if 'E-mail' in form:
                if not email:
                    email = form['E-mail']
                elif email != form['E-mail']:
email_given = form['E-mail']
email_changed = True
email = email.rstrip()
if 'Comment' in form:
comment = form['Comment']
comment = comment.rstrip()
if not name or not comment or not email:
redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))
if 'last_page_visited' in form:
last_page_visited = form['last_page_visited']
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'comment': comment,
'last_page_visited': last_page_visited,
'session_dump': session_dump,
'name_given': name_given,
'email_given': email_given,
'name_changed': name_changed,
'email_changed': email_changed}
webapi.create_request_message(userinfo)
def set_canonical_name():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot set canonical name to unknown person")
if argd['canonical_name'] is not None:
cname = argd['canonical_name']
else:
return self._error_page(req, ln,
"Fatal: cannot set a custom canonical name without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
if webapi.is_valid_canonical_id(cname):
webapi.swap_person_canonical_name(pid, cname, userinfo)
else:
webapi.update_person_canonical_name(pid, cname, userinfo)
return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid), '#tabData'))
action_functions = {'add_external_id': add_external_id,
'set_uid': set_uid,
'add_missing_external_ids': add_missing_external_ids,
'associate_profile': associate_profile,
'bibref_check_submit': bibref_check_submit,
'cancel': cancel,
'cancel_merging': cancel_merging,
'cancel_rt_ticket': cancel_rt_ticket,
'cancel_search_ticket': cancel_search_ticket,
'cancel_stage': cancel_stage,
'checkout': checkout,
'checkout_continue_claiming': checkout_continue_claiming,
'checkout_remove_transaction': checkout_remove_transaction,
'checkout_submit': checkout_submit,
'assign': claim,
'commit_rt_ticket': commit_rt_ticket,
'confirm': confirm_repeal_reset,
'delete_external_ids': delete_external_ids,
'merge': merge,
'reject': claim,
'repeal': confirm_repeal_reset,
'reset': confirm_repeal_reset,
'send_message': send_message,
'set_canonical_name': set_canonical_name,
'to_other_person': claim_to_other_person,
None: none_action}
return action_functions[action]()
def _ticket_open_claim(self, req, bibrefs, ln):
'''
Generate page to let user choose how to proceed
@param req: Apache Request Object
@type req: Apache Request Object
@param bibrefs: list of record IDs to perform an action on
@type bibrefs: list of int
@param ln: language to display the page in
@type ln: string
'''
session = get_session(req)
uid = getUid(req)
uinfo = collect_user_info(req)
pinfo = session["personinfo"]
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
no_access = self._page_access_permission_wall(req)
session.dirty = True
pid = -1
search_enabled = True
if not no_access and uinfo["precached_usepaperclaim"]:
tpid = webapi.get_pid_from_uid(uid)
if tpid > -1:
pid = tpid
last_viewed_pid = False
if (not no_access
and "claimpaper_admin_last_viewed_pid" in pinfo
and pinfo["claimpaper_admin_last_viewed_pid"]):
names = webapi.get_person_names_from_id(pinfo["claimpaper_admin_last_viewed_pid"])
names = sorted([i for i in names], key=lambda k: k[1], reverse=True)
if len(names) > 0:
if len(names[0]) > 0:
last_viewed_pid = [pinfo["claimpaper_admin_last_viewed_pid"], names[0][0]]
if no_access:
search_enabled = False
pinfo["referer"] = uinfo["referer"]
session.dirty = True
body = TEMPLATE.tmpl_open_claim(bibrefs, pid, last_viewed_pid,
search_enabled=search_enabled)
body = TEMPLATE.tmpl_person_detail_layout(body)
title = _('Claim this paper')
metaheaderadd = WebInterfaceBibAuthorIDClaimPages._scripts(kill_browser_cache=True)
return page(title=title,
metaheaderadd=metaheaderadd,
body=body,
req=req,
language=ln)
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'assign'
search_ticket['bibrefs'] = bibrefs
session.dirty = True
return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "%s/author/claim/%s" %
(CFG_SITE_URL, webapi.get_person_redirect_link(str(pid))))
def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
'''
deletes a transaction from an rt ticket
'''
webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, tid, pid):
'''
Commit of an rt ticket: creates a real ticket and commits.
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
uid = getUid(req)
tid = int(tid)
rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]
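        # replay every operation stored in the RT ticket onto the user's session
        # ticket, then commit and dispose of the RT ticket itself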
for action, bibrefrec in rt_ticket['operations']:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
webapi.delete_request_ticket(pid, tid)
redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# clear up bibref checker if it's done.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
# pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.dirty = True
def _generate_search_ticket_box(self, req):
'''
Generate the search ticket to remember a pending search for Person
entities in an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
return TEMPLATE.tmpl_search_ticket_box('person_search', 'assign_papers', search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
        '''
        Collects the data of the persons returned by the search function.
        @param query: the query string
        @type query: string
        @param shown_element_functions: contains the functions that tell the template which columns to show and which buttons to print
        @type shown_element_functions: dict
        @return: html body
        @rtype: string
        '''
pid_list = self._perform_search(query)
search_results = []
for pid in pid_list:
result = defaultdict(list)
result['pid'] = pid
result['canonical_id'] = webapi.get_canonical_id_from_person_id(pid)
result['name_variants'] = webapi.get_person_names_from_id(pid)
result['external_ids'] = webapi.get_external_ids_from_person_id(pid)
# this variable shows if we want to use the following data in the search template
if 'pass_status' in shown_element_functions and shown_element_functions['pass_status']:
result['status'] = webapi.is_profile_available(pid)
search_results.append(result)
body = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
body = TEMPLATE.tmpl_person_detail_layout(body)
return body
def search(self, req, form):
        '''
        Function used for searching a person based on a name with which the
        function is queried.
        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: GET/POST request params
        @type form: dict
        @return: a full page formatted in HTML
        @rtype: string
        '''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))
title = "Person search"
# Create Wrapper Page Markup
profile_page = WebProfilePage("search", title, no_cache=True)
profile_page.add_profile_menu(menu)
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '10';var gPID = '10'; var gNumOfWorkers= '10'; var gReqTimeout= '10'; var gPageTimeout= '10';",
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
no_access = self._page_access_permission_wall(req)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
if no_access:
return no_access
search_ticket = None
bibrefs = []
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
for r in search_ticket['bibrefs']:
bibrefs.append(r)
if search_ticket and "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)
content = ""
if search_ticket:
shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
content = content + self._generate_search_ticket_box(req)
query = None
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
content += self.search_box(query, shown_element_functions)
body = profile_page.get_wrapped_body(content)
        parameter = None
        if query:
            parameter = '?search_param=%s' % query
webapi.history_log_visit(req, 'search', params = parameter)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def merge_profiles(self, req, form):
        '''
        Beginning of the process that performs the merge over multiple person profiles.
        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: GET/POST request params
        @type form: dict
        @return: a full page formatted in HTML
        @rtype: string
        '''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'primary_profile': (str, None),
'search_param': (str, ''),
'selection': (list, None),
'verbose': (int, 0)})
ln = argd['ln']
primary_cname = argd['primary_profile']
search_param = argd['search_param']
selection = argd['selection']
debug = 'verbose' in argd and argd['verbose'] > 0
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
profiles_to_merge = pinfo['merge_profiles']
_ = gettext_set_language(ln)
if not primary_cname:
return page_not_authorized(req, text=_('This page is not accessible directly.'))
no_access = self._page_access_permission_wall(req)
if no_access:
return no_access
if selection is not None:
profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]
for profile in selection:
if profile not in profiles_to_merge_session:
pid = webapi.get_person_id_from_canonical_id(profile)
is_available = webapi.is_profile_available(pid)
pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])
session.dirty = True
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
is_available = webapi.is_profile_available(primary_pid)
if not session['personinfo']['merge_primary_profile']:
session['personinfo']['merge_primary_profile'] = [primary_cname, '1' if is_available else '0']
session.dirty = True
body = ''
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = 'Merge Profiles'
menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
merge_page = WebProfilePage("merge_profile", title, no_cache=True)
merge_page.add_profile_menu(menu)
if debug:
merge_page.add_debug_info(pinfo)
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
        shown_element_functions['pass_status'] = True
merge_page.add_bootstrapped_data(json.dumps({
"other": "var gMergeProfile = %s; var gMergeList = %s;" % ([primary_cname, '1' if is_available else '0'], profiles_to_merge)
}))
body += self.search_box(search_param, shown_element_functions)
body = merge_page.get_wrapped_body(body)
return page(title=title,
metaheaderadd=merge_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _perform_search(self, search_param):
        '''
        Calls the search function on the search_param and returns the results.
        @param search_param: query string
        @type search_param: string
        @return: list of pids that match the search query
        @rtype: list
        '''
        pid_candidates_list = []
if search_param:
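            # queries of the form "field:value" are split on the colon and the
            # right-hand side is used as the name to search for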
if search_param.count(":"):
try:
left, right = search_param.split(":")
try:
nsearch_param = str(right)
except (ValueError, TypeError):
try:
nsearch_param = str(left)
except (ValueError, TypeError):
nsearch_param = search_param
except ValueError:
nsearch_param = search_param
else:
nsearch_param = search_param
sorted_results = webapi.search_person_ids_by_name(nsearch_param)
            for result in sorted_results:
                pid_candidates_list.append(result[0])
        return pid_candidates_list
def merge_profiles_ajax(self, req, form):
'''
Function used for handling Ajax requests used in order to add/remove profiles
in/from the merging profiles list, which is saved in the session.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
        # Recent papers request
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
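            # supported request types: addProfile, removeProfile, setPrimaryProfile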
req_type = json_data['requestType']
if req_type == 'addProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
person_id = webapi.get_person_id_from_canonical_id(profile)
if person_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
profile_availability = webapi.is_profile_available(person_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
if profile not in [el[0] for el in profiles_to_merge]:
profiles_to_merge.append([profile, profile_availability])
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'addedPofile': profile})
json_response.update({'addedPofileAvailability': profile_availability})
                        else:
                            json_response.update({'result': 'Error: Profile was already in the list'})
                    else:
                        json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'removeProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
if webapi.get_person_id_from_canonical_id(profile) != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'removedProfile': profile})
else:
                            json_response.update({'result': 'Error: Profile was already missing from the list'})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'setPrimaryProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
profile_id = webapi.get_person_id_from_canonical_id(profile)
if profile_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profile_availability = webapi.is_profile_available(profile_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge if el and el[0]]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
primary_profile = session["personinfo"]["merge_primary_profile"]
if primary_profile and primary_profile not in profiles_to_merge:
profiles_to_merge.append(primary_profile)
session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
session.dirty = True
json_response.update({'resultCode': 1})
json_response.update({'primaryProfile': profile})
json_response.update({'primaryPofileAvailability': profile_availability})
else:
                    json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
def search_box_ajax(self, req, form):
'''
Function used for handling Ajax requests used in the search box.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
        # Recent papers request
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
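            # supported request types: getPapers, getNames, getIDs, isProfileClaimed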
if req_type == 'getPapers':
if json_data.has_key('personId'):
pId = json_data['personId']
papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
key=itemgetter(0))
papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
json_response.update({'result': "\n".join(papers_html)})
json_response.update({'totalPapers': len(papers)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Missing person id'})
elif req_type == 'getNames':
if json_data.has_key('personId'):
pId = json_data['personId']
names = webapi.get_person_names_from_id(int(pId))
names_html = TEMPLATE.tmpl_gen_names(names)
json_response.update({'result': "\n".join(names_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'getIDs':
if json_data.has_key('personId'):
pId = json_data['personId']
ids = webapi.get_external_ids_from_person_id(int(pId))
ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
json_response.update({'result': "\n".join(ids_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'isProfileClaimed':
if json_data.has_key('personId'):
pId = json_data['personId']
isClaimed = webapi.get_uid_from_personid(pId)
if isClaimed != -1:
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
def choose_profile(self, req, form):
'''
Generate SSO landing/choose_profile page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'search_param': (str, None),
'failed': (str, None),
'verbose': (int, 0)})
ln = argd['ln']
debug = "verbose" in argd and argd["verbose"] > 0
req.argd = argd # needed for perform_req_search
search_param = argd['search_param']
webapi.session_bareinit(req)
session = get_session(req)
uid = getUid(req)
pinfo = session['personinfo']
failed = True
if not argd['failed']:
failed = False
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
if 'arXiv' not in login_info['logged_in_to_remote_systems']:
return page_not_authorized(req, text=_("This page is not accessible directly."))
pid = webapi.get_user_pid(login_info['uid'])
# Create Wrapper Page Markup
is_owner = False
menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
choose_page.add_profile_menu(menu)
if debug:
choose_page.add_debug_info(pinfo)
content = TEMPLATE.tmpl_choose_profile(failed)
body = choose_page.get_wrapped_body(content)
        # In any case, when we pass through here, an autoclaim should be performed right after.
pinfo = session["personinfo"]
pinfo['should_check_to_autoclaim'] = True
session.dirty = True
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
# if already logged in then redirect the user to the page he was viewing
if pid != -1:
redirect_pid = pid
if last_visited_pid:
redirect_pid = last_visited_pid
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, str(redirect_pid)))
else:
# get name strings and email addresses from SSO/Oauth logins: {'system':{'name':[variant1,...,variantn], 'email':'[email protected]', 'pants_size':20}}
remote_login_systems_info = webapi.get_remote_login_systems_info(req, login_info['logged_in_to_remote_systems'])
# get union of recids that are associated to the ids from all the external systems: set(inspire_recids_list)
recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
            # the profile with the biggest intersection of papers, so it is the most probable profile the user seeks
probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)
# if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
# # try to assign the user to the profile he chose. If for some reason the profile is not available we assign him to an empty profile
# redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
# if profile_claimed:
# redirect_to_url(req, '%s/author/claim/action?associate_profile=True&redirect_pid=%s' % (CFG_SITE_URL, str(redirect_pid)))
probable_profile_suggestion_info = None
last_viewed_profile_suggestion_info = None
            if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
                # get information about the last viewed profile and show it to the user
                last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)
            if probable_pid > -1 and webapi.is_profile_available(probable_pid):
                # get information about the most probable profile and show it to the user
                probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids)
if not search_param:
                # we prefill the search with the most relevant of the names we got from the external systems
name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
search_param = most_relevant_name(name_variants)
body = body + TEMPLATE.tmpl_probable_profile_suggestion(probable_profile_suggestion_info, last_viewed_profile_suggestion_info, search_param)
shown_element_functions = dict()
shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
            # show in the templates the status column (whether a profile is bound to a user or not)
            shown_element_functions['show_status'] = True
            # also pass the status data to the templates; we might need it without
            # showing the column (e.g. in merge_profiles)
            shown_element_functions['pass_status'] = True
# show search results to the user
body = body + self.search_box(search_param, shown_element_functions)
body = body + TEMPLATE.tmpl_choose_profile_footer()
title = _(' ')
return page(title=title,
metaheaderadd=choose_page.get_head().encode('utf-8'),
body=body,
req=req,
language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
        '''
        Process and collect data for the arXiv box.
        @param req: Apache request object
        @type req: Apache request object
        @param login_info: status of login in the following format: {'logged_in': True, 'uid': 2, 'logged_in_to_remote_systems':['Arxiv', ...]}
        @type login_info: dict
        @param person_id: person id of the current page's profile
        @type person_id: int
        @param user_pid: person id of the user
        @type user_pid: int
        @return: data required to build the arXiv box
        @rtype: dict
        '''
session = get_session(req)
pinfo = session["personinfo"]
        arxiv_data = dict()
        # True if the user is not a guest and is connected through arXiv
        arxiv_data['login'] = login_info['logged_in']
        arxiv_data['user_pid'] = user_pid
        arxiv_data['user_has_pid'] = user_pid != -1
        # True if the profile the user is logged in with is the same as the profile of the page being viewed
        arxiv_data['view_own_profile'] = user_pid == person_id
        return arxiv_data
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
        '''
        Process and collect data for the orcid box.
        @param arxiv_logged_in: shows if the user is logged in through arXiv or not
        @type arxiv_logged_in: boolean
        @param person_id: person id of the current page's profile
        @type person_id: int
        @param user_pid: person id of the user
        @type user_pid: int
        @param ulevel: user's level
        @type ulevel: string
        @return: data required to build the orcid box
        @rtype: dict
        '''
orcid_data = dict()
orcid_data['arxiv_login'] = arxiv_logged_in
orcid_data['orcids'] = None
orcid_data['add_power'] = False
orcid_data['own_profile'] = False
orcid_data['pid'] = person_id
        # True if the profile the user is logged in with is the same as the profile of the page being viewed
if person_id == user_pid:
orcid_data['own_profile'] = True
# if the user is an admin then he can add an existing orcid to the profile
if ulevel == "admin":
orcid_data['add_power'] = True
orcids = webapi.get_orcids_by_pid(person_id)
if orcids:
orcid_data['orcids'] = orcids
return orcid_data
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
        '''
        Process and collect data for the autoclaim box.
        @param req: Apache request object
        @type req: Apache request object
        @param person_id: person id of the current page's profile
        @type person_id: int
        @param user_pid: person id of the user
        @type user_pid: int
        @param remote_logged_in_systems: the remote logged in systems
        @type remote_logged_in_systems: list
        @return: data required to build the autoclaim box
        @rtype: dict
        '''
autoclaim_data = dict()
        # if no autoclaim should occur, or one has already occurred and its results should be shown, the box should remain hidden
autoclaim_data['hidden'] = True
autoclaim_data['person_id'] = person_id
        # True if the profile the user is logged in with is the same as the profile of the page being viewed
if person_id == user_pid:
recids_to_autoclaim = webapi.get_remote_login_systems_recids(req, remote_logged_in_systems)
autoclaim_data['hidden'] = False
autoclaim_data['num_of_claims'] = len(recids_to_autoclaim)
return autoclaim_data
############################################
# New autoclaim functions #
############################################
def generate_autoclaim_data(self, req, form):
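        '''
        Ajax handler that autoclaims, for the author given in the request, the
        papers found in the remote systems the user is logged in to, and returns
        the rendered autoclaim box as json data.
        '''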
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
        try:
            pid = int(json_data['personId'])
        except (KeyError, ValueError, TypeError):
            raise NotImplementedError("Some error with the parameters of the Ajax request occurred.")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
        # if autoclaim was already done and no new remote systems exist
        # from which to autoclaim new papers, send the cached result
if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
autoclaim_data = pinfo['autoclaim']['res']
json_response = {'resultCode': 1, 'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data, CFG_SITE_LANG, add_box=False, loading=False)}
return json.dumps(json_response)
external_pubs_association = pinfo['autoclaim']['external_pubs_association']
autoclaim_ticket = pinfo['autoclaim']['ticket']
ulevel = pinfo['ulevel']
uid = getUid(req)
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_status = webapi.get_login_info(uid, params)
remote_systems = login_status['logged_in_to_remote_systems']
papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems, params, external_pubs_association))
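        # split the remote systems' records into those already claimed to the
        # author (counted as successes) and those still left to autoclaim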
        claimed_recids = set(rec for _, _, rec in get_claimed_papers_of_author(pid))
        already_claimed_recids = claimed_recids & papers_to_autoclaim
        papers_to_autoclaim = papers_to_autoclaim - claimed_recids
for paper in papers_to_autoclaim:
operation_parts = {'pid': pid,
'action': 'assign',
'bibrefrec': str(paper)}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
# In case the operation could not be created (because of an
# erroneous bibrefrec) ignore it and continue with the rest
continue
webapi.add_operation_to_ticket(operation_to_be_added, autoclaim_ticket)
additional_info = {'first_name': '', 'last_name': '', 'email': '',
'comments': 'Assigned automatically when autoclaim was triggered.'}
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=False)
webapi.commit_operations_from_ticket(autoclaim_ticket, userinfo, uid, ulevel)
autoclaim_data = dict()
autoclaim_data['hidden'] = False
autoclaim_data['person_id'] = pid
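        # records claimed by the executed operations, plus those already claimed before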
autoclaim_data['successfull_recids'] = set([op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket) if 'execution_result' in op]) | already_claimed_recids
webapi.clean_ticket(autoclaim_ticket)
autoclaim_data['unsuccessfull_recids'] = [op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket)]
autoclaim_data['num_of_unsuccessfull_recids'] = len(autoclaim_data['unsuccessfull_recids'])
autoclaim_data['recids_to_external_ids'] = dict()
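        # map each record coming from a remote system to its paper title for display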
for key, value in external_pubs_association.iteritems():
ext_system, ext_id = key
rec = value
title = get_title_of_paper(rec)
autoclaim_data['recids_to_external_ids'][rec] = title
# cache the result in the session
pinfo['autoclaim']['res'] = autoclaim_data
if pinfo['orcid']['import_pubs']:
pinfo['orcid']['import_pubs'] = False
session.dirty = True
json_response = {'resultCode': 1, 'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data, CFG_SITE_LANG, add_box=False, loading=False)}
req.write(json.dumps(json_response))
@staticmethod
def get_params_to_check_login_info(session):
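        '''
        Collect, per remote system (arXiv, orcid), the session parameters needed
        to check the user's login status on that system.
        '''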
def get_params_to_check_login_info_of_arxiv(session):
try:
return session['user_info']
except KeyError:
return None
def get_params_to_check_login_info_of_orcid(session):
pinfo = session['personinfo']
try:
pinfo['orcid']['has_orcid_id'] = bool(get_orcid_id_of_author(pinfo['pid'])[0][0] and pinfo['orcid']['import_pubs'])
except:
pinfo['orcid']['has_orcid_id'] = False
session.dirty = True
return pinfo['orcid']
get_params_for_remote_system = {'arXiv': get_params_to_check_login_info_of_arxiv,
'orcid': get_params_to_check_login_info_of_orcid}
params = dict()
for system, get_params in get_params_for_remote_system.iteritems():
params[system] = get_params(session)
return params
@staticmethod
def _claim_paper_box(person_id):
'''
        Process and collect data for the claim paper box.
        @param person_id: person id of the current page's profile
        @type person_id: int
        @return: data required to build the claim paper box
@rtype: dict
'''
claim_paper_data = dict()
claim_paper_data['canonical_id'] = str(webapi.get_canonical_id_from_person_id(person_id))
return claim_paper_data
@staticmethod
def _support_box():
'''
        Process and collect data for the support box.
        @return: data required to build the support box
@rtype: dict
'''
support_data = dict()
return support_data
@staticmethod
def _merge_box(person_id):
'''
        Process and collect data for the merge box.
        @param person_id: person id of the current page's profile
        @type person_id: int
        @return: data required to build the merge box
@rtype: dict
'''
merge_data = dict()
search_param = webapi.get_canonical_id_from_person_id(person_id)
name_variants = [element[0] for element in webapi.get_person_names_from_id(person_id)]
relevant_name = most_relevant_name(name_variants)
if relevant_name:
search_param = relevant_name.split(",")[0]
merge_data['search_param'] = search_param
merge_data['canonical_id'] = webapi.get_canonical_id_from_person_id(person_id)
return merge_data
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
        '''
        Process and collect data for the internal ids box.
        @param person_id: person id of the current page's profile
        @type person_id: int
        @param user_pid: person id of the user
        @type user_pid: int
        @param ulevel: user's level
        @type ulevel: string
        @return: data required to build the internal ids box
        @rtype: dict
        '''
        internal_ids_data = dict()
        internal_ids_data['uid'], internal_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
        internal_ids_data['person_id'] = person_id
        internal_ids_data['user_pid'] = user_pid
        internal_ids_data['ulevel'] = ulevel
        return internal_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
        '''
        Process and collect data for the external ids box.
        @param person_id: person id of the current page's profile
        @type person_id: int
        @param user_pid: person id of the user
        @type user_pid: int
        @param ulevel: user's level
        @type ulevel: string
        @return: data required to build the external ids box
        @rtype: dict
        '''
        external_ids_data = dict()
        external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
        external_ids_data['person_id'] = person_id
        external_ids_data['user_pid'] = user_pid
        external_ids_data['ulevel'] = ulevel
        return external_ids_data
@staticmethod
def _hepnames_box(person_id):
return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
        '''
        Generate the overview page of open RT tickets for admins.
        @param req: Apache request object
        @type req: Apache request object
        @param form: GET/POST request params
        @type form: dict
        '''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
webapi.session_bareinit(req)
no_access = self._page_access_permission_wall(req, req_level='admin')
if no_access:
return no_access
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))
title = "Open RT tickets"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
tickets = webapi.get_persons_with_open_tickets_list()
        tickets = [[webapi.get_most_frequent_name_from_pid(int(t[0])),
                    webapi.get_person_redirect_link(t[0]), t[0], t[1]]
                   for t in tickets]
content = TEMPLATE.tmpl_tickets_admin(tickets)
content = TEMPLATE.tmpl_person_detail_layout(content)
body = profile_page.get_wrapped_body(content)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def help(self, req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "help", ln, is_owner, self._is_admin(pinfo))
title = "Help page"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
content = TEMPLATE.tmpl_help_page()
body = profile_page.get_wrapped_body(content)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def export(self, req, form):
'''
Generate JSONized export of Person data
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'request': (str, None),
'userid': (str, None)})
if not CFG_JSON_AVAILABLE:
return "500_json_not_found__install_package"
request = None
userid = None
if "userid" in argd and argd['userid']:
userid = argd['userid']
else:
return "404_user_not_found"
if "request" in argd and argd['request']:
request = argd["request"]
# find user from ID
user_email = get_email_from_username(userid)
if user_email == userid:
return "404_user_not_found"
uid = get_uid_from_email(user_email)
uinfo = collect_user_info(uid)
# find person by uid
pid = webapi.get_pid_from_uid(uid)
        # find papers by pid that are confirmed by a human.
papers = webapi.get_papers_by_person_id(pid, 2)
# filter by request param, e.g. arxiv
if not request:
return "404__no_filter_selected"
        if request not in VALID_EXPORT_FILTERS:
return "500_filter_invalid"
if request == "arxiv":
query = "(recid:"
query += " OR recid:".join(papers)
query += ") AND 037:arxiv"
db_docs = perform_request_search(p=query, rg=0)
nickmail = ""
nickname = ""
db_arxiv_ids = []
try:
nickname = uinfo["nickname"]
except KeyError:
pass
if not nickname:
try:
nickmail = uinfo["email"]
except KeyError:
nickmail = user_email
nickname = nickmail
db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
construct = {"nickname": nickname,
"claims": ";".join(db_arxiv_ids)}
jsondmp = json.dumps(construct)
signature = webapi.sign_assertion("arXiv", jsondmp)
construct["digest"] = signature
return json.dumps(construct)
index = __call__
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):
_exports = ['',
'import_orcid_pubs',
'connect_author_with_hepname',
'connect_author_with_hepname_ajax',
'suggest_orcid',
'suggest_orcid_ajax']
def _lookup(self, component, path):
        '''
        This handler parses dynamic URLs:
        - /author/manage_profile/1332 shows the page of the author with id: 1332
        - /author/manage_profile/100:5522,1431 shows the page of the author
          identified by the bibrefrec: '100:5522,1431'
        '''
        if component not in self._exports:
            return WebInterfaceBibAuthorIDManageProfilePages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
        self.person_id = -1  # -1 is not a valid author identifier
if identifier is None or not isinstance(identifier, str):
self.original_identifier = " "
return
self.original_identifier = identifier
        # try to interpret the identifier as a numeric person id, falling back to a canonical id: e.g. "J.R.Ellis.1"
try:
pid = int(identifier)
except ValueError:
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def __call__(self, req, form):
'''
Generate SSO landing/author management page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
pinfo['claim_in_process'] = True
argd = wash_urlargd(form, {
'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE or self.person_id is None:
return page_not_authorized(req, text=_("This page is not accessible directly."))
if person_id < 0:
return self._error_page(req, message=("Identifier %s is not a valid person identifier or does not exist anymore!" % self.original_identifier))
# log the visit
webapi.history_log_visit(req, 'manage_profile', pid=person_id)
# store the arxiv papers the user owns
if uid > 0 and not pinfo['arxiv_status']:
uinfo = collect_user_info(req)
arxiv_papers = list()
if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
arxiv_papers = uinfo['external_arxivids'].split(';')
if arxiv_papers:
webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)
pinfo['arxiv_status'] = True
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
title_message = _('Profile management')
ssl_param = 0
if req.is_https():
ssl_param = 1
# Create Wrapper Page Markup
cname = webapi.get_canonical_id_from_person_id(self.person_id)
if cname == self.person_id:
return page_not_authorized(req, text=_("This page is not accessible directly."))
menu = WebProfileMenu(cname, "manage_profile", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("manage_profile", webapi.get_longest_name_from_pid(self.person_id), no_cache=True)
profile_page.add_profile_menu(menu)
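        # JS globals handed to the client-side ticketbox code via the bootstrapped data below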
gboxstatus = self.person_id
gpid = self.person_id
        gNumOfWorkers = 3  # TODO: read it from the config file
gReqTimeout = 3000
gPageTimeout = 12000
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
user_pid = webapi.get_user_pid(login_info['uid'])
person_data = webapi.get_person_info_by_pid(person_id)
        # process and collect data for every box [LEGACY]
arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)
orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)
support_data = WebInterfaceBibAuthorIDClaimPages._support_box()
ext_ids_data = None
int_ids_data = None
if ulevel == 'admin':
ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)
autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(req, person_id, user_pid, login_info['logged_in_to_remote_systems'])
merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)
hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)
content = ''
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
content += TEMPLATE.tmpl_profile_management(ln, person_data, arxiv_data,
orcid_data, claim_paper_data,
int_ids_data, ext_ids_data,
autoclaim_data, support_data,
merge_data, hepnames_data)
body = profile_page.get_wrapped_body(content)
return page(title=title_message,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def import_orcid_pubs(self, req, form):
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
orcid_info = pinfo['orcid']
        # the author should already have an orcid if this method was triggered
        try:
            orcid_id = get_orcid_id_of_author(pinfo['pid'])[0][0]
        except IndexError:
            # weird, no orcid id in the database? Let's not do anything...
            orcid_id = None
orcid_dois = get_dois_from_orcid(orcid_id)
# TODO: what to do in case some ORCID server error occurs?
if orcid_id is None or orcid_dois is None:
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
        # TODO: it would be smarter if:
        # 1. we saved the orcid_dois in the db
        # 2. we expired only the external pubs box in the profile page
webauthorapi.expire_all_cache_for_personid(pinfo['pid'])
orcid_info['imported_pubs'] = orcid_dois
orcid_info['import_pubs'] = True
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
def connect_author_with_hepname(self, req, form):
argd = wash_urlargd(form, {'cname':(str, None),
'hepname': (str, None),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['cname'] is not None:
cname = argd['cname']
else:
return self._error_page(req, ln, "Fatal: cannot associate a hepname without a person id.")
if argd['hepname'] is not None:
hepname = argd['hepname']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid hepname.")
webapi.connect_author_with_hepname(cname, hepname)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
last_visited_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], just_page=True)
redirect_to_url(req, "%s/author/%s/%s" % (CFG_SITE_URL, last_visited_page, cname))
def connect_author_with_hepname_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
cname = json_data['cname']
hepname = json_data['hepname']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if not self._is_admin(pinfo):
webapi.connect_author_with_hepname(cname, hepname)
else:
uid = getUid(req)
add_cname_to_hepname_record(cname, hepname, uid)
def suggest_orcid(self, req, form):
argd = wash_urlargd(form, {'orcid':(str, None),
'pid': (int, -1),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an orcid without a person id.")
if argd['orcid'] is not None and is_valid_orcid(argd['orcid']):
orcid = argd['orcid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid ORCiD.")
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, pid))
def suggest_orcid_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
orcid = json_data['orcid']
pid = json_data['pid']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
if not is_valid_orcid(orcid):
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)
def _fail(self, req, code):
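        '''
        Set the given HTTP status code on the response and return nothing.
        '''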
req.status = code
return
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
index = __call__
class WebInterfaceAuthorTicketHandling(WebInterfaceDirectory):
_exports = ['get_status',
'update_status',
'add_operation',
'modify_operation',
'remove_operation',
'commit',
'abort']
@staticmethod
def bootstrap_status(pinfo, on_ticket):
'''
Function used for generating get_status json bootstrapping.
@param pinfo: person_info
        @type pinfo: dict
@param on_ticket: ticket target
@type on_ticket: str
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
author_ticketing = WebInterfaceAuthorTicketHandling()
ticket = author_ticketing._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return "{}"
ticket_status = webapi.get_ticket_status(ticket)
return json.dumps(ticket_status)
def get_status(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket_status = webapi.get_ticket_status(ticket)
session.dirty = True
req.content_type = 'application/json'
req.write(json.dumps(ticket_status))
def update_status(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.update_ticket_status(ticket)
session.dirty = True
def add_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
def modify_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_modified = webapi.construct_operation(operation_parts, pinfo, uid, should_have_bibref=False)
if operation_to_be_modified is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
operation_is_modified = webapi.modify_operation_from_ticket(operation_to_be_modified, ticket)
if not operation_is_modified:
# Operation couldn't be modified because it doesn't exist in the
# ticket. Wrong parameters were given hence we should fail!
return self._fail(req, apache.HTTP_NOT_FOUND)
session.dirty = True
def remove_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
        if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
        except (KeyError, ValueError, TypeError):
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_removed = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_removed is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
operation_is_removed = webapi.remove_operation_from_ticket(operation_to_be_removed, ticket)
if not operation_is_removed:
# Operation couldn't be removed because it doesn't exist in the
# ticket. Wrong parameters were given hence we should fail!
return self._fail(req, apache.HTTP_NOT_FOUND)
session.dirty = True
def commit(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
        if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
additional_info = {'first_name': json_data.get('first_name',"Default"),
'last_name': json_data.get('last_name',"Default"),
'email': json_data.get('email',"Default"),
'comments': json_data['comments']}
on_ticket = json_data['on']
        except (KeyError, TypeError):
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
uid = getUid(req)
user_is_guest = isGuestUser(uid)
if not user_is_guest:
try:
additional_info['first_name'] = session['user_info']['external_firstname']
additional_info['last_name'] = session['user_info']['external_familyname']
additional_info['email'] = session['user_info']['email']
except KeyError:
additional_info['first_name'] = additional_info['last_name'] = additional_info['email'] = str(uid)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
        # A guest user must provide his full personal information
        # before the claim may be committed
strict_check = user_is_guest
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=strict_check)
if userinfo is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.commit_operations_from_ticket(ticket, userinfo, uid, ulevel)
session.dirty = True
def abort(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
        if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
        except (KeyError, TypeError):
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
        # If the user aborts the claiming procedure, his ticket
        # is deleted entirely
delete_ticket = (on_ticket == 'user')
webapi.abort_ticket(ticket, delete_ticket=delete_ticket)
session.dirty = True
def _get_according_ticket(self, on_ticket, pinfo):
ticket = None
if on_ticket == 'user':
ticket = pinfo['ticket']
elif on_ticket == 'autoclaim':
ticket = pinfo['autoclaim']['ticket']
return ticket
def _fail(self, req, code):
req.status = code
return
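# Illustrative payloads for the Ajax ticket handlers above, reconstructed
# from the keys each handler reads (the concrete values are hypothetical,
# not taken from the original sources):
#
#   get_ticket_status / update_status / abort:
#       {"on": "user"}                      # or {"on": "autoclaim"}
#   add_operation / modify_operation / remove_operation:
#       {"on": "user", "pid": 7, "action": "assign",
#        "bibrefrec": "100:1234,5678"}
#   commit:
#       {"on": "user", "comments": "please merge these papers",
#        "first_name": "Jane", "last_name": "Doe", "email": "[email protected]"}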
class WebAuthorSearch(WebInterfaceDirectory):
"""
Provides an interface to profile search using AJAX queries.
"""
_exports = ['list',
'details']
# This class requires JSON libraries
assert CFG_JSON_AVAILABLE, "[WebAuthorSearch] JSON must be enabled."
class QueryPerson(WebInterfaceDirectory):
_exports = ['']
MIN_QUERY_LENGTH = 2
QUERY_REGEX = re.compile(r"[\w\s\.\-,@]+$", re.UNICODE)
def __init__(self, query=None):
self.query = query
def _lookup(self, component, path):
if component not in self._exports:
return WebAuthorSearch.QueryPerson(component), path
def __call__(self, req, form):
if self.query is None or len(self.query) < self.MIN_QUERY_LENGTH:
req.status = apache.HTTP_BAD_REQUEST
return "Query too short"
if not self.QUERY_REGEX.match(self.query):
req.status = apache.HTTP_BAD_REQUEST
return "Invalid query."
pid_results = [{"pid": pid[0]} for pid in webapi.search_person_ids_by_name(self.query)]
req.content_type = 'application/json'
return json.dumps(pid_results)
# Request for index handled by __call__
index = __call__
def _JSON_received(self, form):
try:
return "jsondata" in form
except TypeError:
return False
def _extract_JSON(self, form):
try:
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
return json_data
except ValueError:
return None
def _get_pid_details(self, pid):
details = webapi.get_person_info_by_pid(pid)
details.update({
"names": [{"name": x, "paperCount": y} for x, y in webapi.get_person_names_from_id(pid)],
"externalIds": [{x: y} for x, y in webapi.get_external_ids_from_person_id(pid).items()]
})
details['cname'] = details.pop("canonical_name", None)
return details
def details(self, req, form):
if self._JSON_received(form):
try:
json_data = self._extract_JSON(form)
pids = json_data['pids']
req.content_type = 'application/json'
details = [self._get_pid_details(pid) for pid in pids]
return json.dumps(details)
except (TypeError, KeyError):
req.status = apache.HTTP_BAD_REQUEST
return "Invalid query."
else:
req.status = apache.HTTP_BAD_REQUEST
return "Incorrect query format."
list = QueryPerson()
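# Sketch of a round trip against WebAuthorSearch (URL layout inferred from
# _exports; all concrete values are illustrative): GET
# /author/search_ajax/list/ellis answers '[{"pid": 12}, ...]', and a request
# to /author/search_ajax/details carrying jsondata='{"pids": [12]}' answers
# the detail dicts built by _get_pid_details() (names, externalIds, cname).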
class WebInterfaceAuthor(WebInterfaceDirectory):
'''
Handles /author/* pages.
Supplies the methods:
/author/choose_profile
/author/claim/
/author/help
/author/manage_profile
/author/merge_profiles
/author/profile/
/author/search
/author/ticket/
'''
_exports = ['',
'choose_profile',
'claim',
'help',
'manage_profile',
'merge_profiles',
'profile',
'search',
'search_ajax',
'ticket']
from invenio.webauthorprofile_webinterface import WebAuthorPages
claim = WebInterfaceBibAuthorIDClaimPages()
profile = WebAuthorPages()
choose_profile = claim.choose_profile
help = claim.help
manage_profile = WebInterfaceBibAuthorIDManageProfilePages()
merge_profiles = claim.merge_profiles
search = claim.search
search_ajax = WebAuthorSearch()
ticket = WebInterfaceAuthorTicketHandling()
def _lookup(self, component, path):
if component not in self._exports:
return WebInterfaceAuthor(component), path
def __init__(self, component=None):
self.path = component
def __call__(self, req, form):
if self.path is None or len(self.path) < 1:
redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
# Check if canonical id: e.g. "J.R.Ellis.1"
pid = get_person_id_from_canonical_id(self.path)
if pid >= 0:
url = "%s/author/profile/%s" % (CFG_BASE_URL, get_person_redirect_link(pid))
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
else:
try:
pid = int(self.path)
except ValueError:
redirect_to_url(req, "%s/author/search?q=%s" % (CFG_BASE_URL, self.path))
return
else:
if author_has_papers(pid):
cid = get_person_redirect_link(pid)
if is_valid_canonical_id(cid):
redirect_id = cid
else:
redirect_id = pid
url = "%s/author/profile/%s" % (CFG_BASE_URL, redirect_id)
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
return
index = __call__
class WebInterfacePerson(WebInterfaceDirectory):
'''
Handles /person/* pages.
Supplies the methods:
/person/welcome
'''
    _exports = ['welcome', 'update', 'you']
def welcome(self, req, form):
redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)
def you(self, req, form):
redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)
def update(self, req, form):
"""
Generate hepnames update form
"""
argd = wash_urlargd(form,
{'ln': (str, CFG_SITE_LANG),
'email': (str, ''),
'IRN': (str, ''),
})
# Retrieve info for HEP name based on email or IRN
recids = []
if argd['email']:
recids = perform_request_search(p="371__m:%s" % argd['email'], cc="HepNames")
elif argd['IRN']:
recids = perform_request_search(p="001:%s" % argd['IRN'], cc="HepNames")
else:
redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
if not recids:
redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
else:
hepname_bibrec = get_bibrecord(recids[0])
# Extract all info from recid that should be included in the form
full_name = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="a")
display_name = record_get_field_value(hepname_bibrec, tag="880", ind1="", ind2="", code="a")
email = record_get_field_value(hepname_bibrec, tag="371", ind1="", ind2="", code="m")
status = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="g")
keynumber = record_get_field_value(hepname_bibrec, tag="970", ind1="", ind2="", code="a")
try:
keynumber = keynumber.split('-')[1]
except IndexError:
pass
research_field_list = record_get_field_values(hepname_bibrec, tag="650", ind1="1", ind2="7", code="a")
institution_list = []
for instance in record_get_field_instances(hepname_bibrec, tag="371", ind1="", ind2=""):
if not instance or field_get_subfield_values(instance, "m"):
continue
institution_info = ["", "", "", "", ""]
if field_get_subfield_values(instance, "a"):
institution_info[0] = field_get_subfield_values(instance, "a")[0]
if field_get_subfield_values(instance, "r"):
institution_info[1] = field_get_subfield_values(instance, "r")[0]
if field_get_subfield_values(instance, "s"):
institution_info[2] = field_get_subfield_values(instance, "s")[0]
if field_get_subfield_values(instance, "t"):
institution_info[3] = field_get_subfield_values(instance, "t")[0]
if field_get_subfield_values(instance, "z"):
institution_info[4] = field_get_subfield_values(instance, "z")[0]
institution_list.append(institution_info)
phd_advisor_list = record_get_field_values(hepname_bibrec, tag="701", ind1="", ind2="", code="a")
experiment_list = record_get_field_values(hepname_bibrec, tag="693", ind1="", ind2="", code="e")
web_page = record_get_field_value(hepname_bibrec, tag="856", ind1="1", ind2="", code="u")
# Create form and pass as parameters all the content from the record
body = TEMPLATE.tmpl_update_hep_name(full_name, display_name, email,
status, research_field_list,
institution_list, phd_advisor_list,
experiment_list, web_page)
title = "HEPNames"
return page(title=title,
metaheaderadd = TEMPLATE.tmpl_update_hep_name_headers(),
body=body,
req=req,
)
# pylint: enable=C0301
# pylint: enable=W0613
| kaplun/ops | modules/bibauthorid/lib/bibauthorid_webinterface.py | Python | gpl-2.0 | 148,959 |
from duinobot import *
import numpy as np
import time
import cPickle as pickle
class RobotC(object):
def __init__(self,nombre,ide,control):
self.nombre=nombre
self.id=ide
self.control=control
self.frente=0
self.posX=0
self.posY=0
self.angulo=0
self.shotPos=(0,0)
self.status="Muerto"
#if self.status == "Lock" or "Detenido" or "Muerto" or "Avansando":
# self.velocidad=(30,30)
self.comandos={"avansar":self.avansar,"girar_der":self.girar_der,"girar_isq":self.girar_isq,"disparar":self.disparar,"Destruido":self.morir}
self.list_com=[self.avansar,self.girar_der,self.girar_isq,self.disparar]
def avansar(self):
der,isq=30,30
if self.status == "Avansado":
der,isq = (30,30)
elif self.status == "Avansado y girando (der)":
der,isq = (30,40)
elif self.status == "Avansado y girando (isq)":
der,isq = (40,30)
self.control.motors(der,isq)
def girar_der(self):
der,isq=30,30
if self.status == "girando (der)":
der,isq = (-30,30)
if self.status == "Avansado y girando (der)":
der,isq = (30,40)
self.control.motors(der,isq)
def girar_isq(self):
der,isq=30,30
if self.status == "girando (isq)":
der,isq = (30,-30)
if self.status == "Avansado y girando (isq)":
der,isq = (40,30)
self.control.motors(der,isq)
def reversa(self):
der,isq=30,30
if self.status == "Reversa":
der,isq=-30,-30
self.control.motors(der,isq)
    def disparar(self,Server):
        # Euclidean distance from the robot to the shot target
        difx=self.posX-self.shotPos[0]
        dify=self.posY-self.shotPos[1]
        if difx < 0: difx=-difx
        if dify < 0: dify=-dify
        diferencia=int(np.sqrt(difx**2+dify**2))
        # Projectile flight time: 0.1 s for every 10 distance units
        Tf=(diferencia/10)*0.1
        #Tf=Ti+Tf
        #print self.posX,self.shotPos[0]
        #print "Diferencia:",diferencia,"Ti:",Ti,"Tf:",Tf
        # Length-prefixed protocol: first the digit count of the payload
        # length, then the length itself, then the pickled payload
        data=["Fuego",Tf,self.shotPos]
        data=pickle.dumps(data)
        longitud=len(data)
        cantidad=len(str(longitud))
        Server.sock.send(str(cantidad))
        Server.sock.send(str(longitud))
        Server.sock.send(data)
#self.status="Analisis"
def morir(self):
self.status = "Muerto"
self.control.stop()
def parar(self):
self.control.stop()
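# Minimal usage sketch (assumes duinobot's Board/Robot API from the
# star-import above; the robot id and the server object are hypothetical):
#
#     b = Board()
#     r = RobotC("atacante", 1, Robot(b, 1))
#     r.status = "Avansado"
#     r.avansar()          # both motors at 30
#     r.shotPos = (100, 50)
#     r.disparar(server)   # server must expose a connected .sock
#     r.morir()            # marks the robot dead and stops the motors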
| Robots-Linti/guerra-de-robots | Cliente/Robot_cliente.py | Python | gpl-2.0 | 2,560 |
#
# Copyright IBM Corp. 2012
# Copyright 2013-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from contextlib import contextmanager
from itertools import product
import re
import shutil
import tempfile
import xml.dom.minidom
import xml.etree.ElementTree as ET
import libvirt
from virt import vm
from virt import vmexitreason
from vdsm import constants
from vdsm import define
from testrunner import VdsmTestCase as TestCaseBase
from testrunner import permutations, expandPermutations, namedTemporaryDir
import caps
from vdsm import utils
from vdsm import libvirtconnection
from monkeypatch import MonkeyPatch, MonkeyPatchScope
from vmTestsData import CONF_TO_DOMXML_X86_64
from vmTestsData import CONF_TO_DOMXML_PPC64
class ConnectionMock:
def __init__(self, *args):
pass
def domainEventRegisterAny(self, *arg):
pass
def listAllNetworks(self, *args):
return []
class FakeDomain:
def __init__(self, xml='', virtError=libvirt.VIR_ERR_OK):
self._xml = xml
self.devXml = ''
self._virtError = virtError
def _failIfRequested(self):
if self._virtError != libvirt.VIR_ERR_OK:
err = libvirt.libvirtError(defmsg='')
err.err = [self._virtError]
raise err
def info(self):
raise libvirt.libvirtError(defmsg='')
def XMLDesc(self, unused):
return self._xml
def updateDeviceFlags(self, devXml, unused):
self.devXml = devXml
def vcpusFlags(self, flags):
return -1
def metadata(self, type, uri, flags):
self._failIfRequested()
return '<qos></qos>'
def schedulerParameters(self):
return {'vcpu_quota': vm._NO_CPU_QUOTA,
'vcpu_period': vm._NO_CPU_PERIOD}
class TestVm(TestCaseBase):
PCI_ADDR = \
'bus="0x00" domain="0x0000" function="0x0" slot="0x03" type="pci"'
PCI_ADDR_DICT = {'slot': '0x03', 'bus': '0x00', 'domain': '0x0000',
'function': '0x0', 'type': 'pci'}
GRAPHICS_XMLS = [
"""
<graphics autoport="yes" keymap="en-us" passwd="*****"
passwdValidTo="1970-01-01T00:00:01" port="-1" type="vnc">
<listen network="vdsm-vmDisplay" type="network"/>
</graphics>""",
"""
<graphics autoport="yes" listen="0" passwd="*****"
passwdValidTo="1970-01-01T00:00:01" port="-1"
tlsPort="-1" type="spice">
<channel mode="secure" name="main"/>
<channel mode="secure" name="inputs"/>
<channel mode="secure" name="cursor"/>
<channel mode="secure" name="playback"/>
<channel mode="secure" name="record"/>
<channel mode="secure" name="display"/>
</graphics>""",
"""
<graphics autoport="yes" listen="0" passwd="*****"
passwdValidTo="1970-01-01T00:00:01" port="-1"
tlsPort="-1" type="spice">
<channel mode="secure" name="main"/>
</graphics>""",
"""
<graphics autoport="yes" listen="0" passwd="*****"
passwdValidTo="1970-01-01T00:00:01" port="-1"
tlsPort="-1" type="spice">
<clipboard copypaste="no"/>
</graphics>"""]
def __init__(self, *args, **kwargs):
TestCaseBase.__init__(self, *args, **kwargs)
self.conf = {'vmName': 'testVm',
'vmId': '9ffe28b6-6134-4b1e-8804-1185f49c436f',
'smp': '8', 'maxVCpus': '160',
'memSize': '1024', 'memGuaranteedSize': '512'}
def assertXML(self, element, expectedXML, path=None):
if path is None:
converted = element.toprettyxml()
else:
elem = ET.fromstring(element.toprettyxml())
converted = re.sub(' />', '/>',
ET.tostring(elem.find("./%s" % path)))
        # Collapse inter-tag whitespace so pretty-printed XML can be
        # compared against the indented expected strings
        self.assertEqual(re.sub(r'\n\s*', ' ', converted).strip(' '),
                         re.sub(r'\n\s*', ' ', expectedXML).strip(' '))
def assertXMLNone(self, element, path):
elem = ET.fromstring(element.toprettyxml())
converted = elem.find("./%s" % path)
self.assertEqual(converted, None)
def assertBuildCmdLine(self, confToDom):
oldVdsmRun = constants.P_VDSM_RUN
constants.P_VDSM_RUN = tempfile.mkdtemp()
try:
for conf, expectedXML in confToDom:
expectedXML = expectedXML % conf
testVm = vm.Vm(self, conf)
output = testVm._buildCmdLine()
                self.assertEqual(re.sub(r'\n\s*', ' ', output.strip(' ')),
                                 re.sub(r'\n\s*', ' ', expectedXML.strip(' ')))
finally:
shutil.rmtree(constants.P_VDSM_RUN)
constants.P_VDSM_RUN = oldVdsmRun
def testDomXML(self):
expectedXML = """
<domain type="kvm">
<name>testVm</name>
<uuid>9ffe28b6-6134-4b1e-8804-1185f49c436f</uuid>
<memory>1048576</memory>
<currentMemory>1048576</currentMemory>
<vcpu current="8">160</vcpu>
<memtune>
<min_guarantee>524288</min_guarantee>
</memtune>
<devices/>
</domain>"""
domxml = vm._DomXML(self.conf, self.log,
caps.Architecture.X86_64)
self.assertXML(domxml.dom, expectedXML)
def testOSXMLBootMenu(self):
vmConfs = (
# trivial cases first
{},
{'bootMenuEnable': 'true'},
{'bootMenuEnable': 'false'},
{'bootMenuEnable': True},
{'bootMenuEnable': False},
# next with more fields
{'bootMenuEnable': True,
'kernelArgs': 'console=ttyS0 1'},
{'bootMenuEnable': False,
'kernelArgs': 'console=ttyS0 1'})
expectedXMLs = ("""
<os>
<type arch="x86_64" machine="pc">hvm</type>
<smbios mode="sysinfo"/>
</os>""", """
<os>
<type arch="x86_64" machine="pc">hvm</type>
<smbios mode="sysinfo"/>
<bootmenu enable="yes"/>
</os>""", """
<os>
<type arch="x86_64" machine="pc">hvm</type>
<smbios mode="sysinfo"/>
</os>""", """
<os>
<type arch="x86_64" machine="pc">hvm</type>
<smbios mode="sysinfo"/>
<bootmenu enable="yes"/>
</os>""", """
<os>
<type arch="x86_64" machine="pc">hvm</type>
<smbios mode="sysinfo"/>
</os>""", """
<os>
<type arch="x86_64" machine="pc">hvm</type>
<cmdline>console=ttyS0 1</cmdline>
<smbios mode="sysinfo"/>
<bootmenu enable="yes"/>
</os>""", """
<os>
<type arch="x86_64" machine="pc">hvm</type>
<cmdline>console=ttyS0 1</cmdline>
<smbios mode="sysinfo"/>
</os>""")
for conf, xmlout in zip(vmConfs, expectedXMLs):
conf.update(self.conf)
domxml = vm._DomXML(conf, self.log,
caps.Architecture.X86_64)
domxml.appendOs()
self.assertXML(domxml.dom, xmlout, 'os')
def testOSXMLX86_64(self):
expectedXMLs = ["""
<os>
<type arch="x86_64" machine="pc">hvm</type>
<initrd>/tmp/initrd-2.6.18.img</initrd>
<kernel>/tmp/vmlinuz-2.6.18</kernel>
<cmdline>console=ttyS0 1</cmdline>
<smbios mode="sysinfo"/>
</os>"""]
vmConfs = [{'kernel': '/tmp/vmlinuz-2.6.18', 'initrd':
'/tmp/initrd-2.6.18.img', 'kernelArgs': 'console=ttyS0 1'}]
OSXML = """
<os>
<type arch="x86_64" machine="pc">hvm</type>
<boot dev="%s"/>
<smbios mode="sysinfo"/>
</os>"""
qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
for k, v in qemu2libvirtBoot.iteritems():
vmConfs.append({'boot': k})
expectedXMLs.append(OSXML % v)
for vmConf, xml in zip(vmConfs, expectedXMLs):
vmConf.update(self.conf)
domxml = vm._DomXML(vmConf, self.log,
caps.Architecture.X86_64)
domxml.appendOs()
self.assertXML(domxml.dom, xml, 'os')
def testOSPPCXML(self):
expectedXMLs = ["""
<os>
<type arch="ppc64" machine="pseries">hvm</type>
<initrd>/tmp/initrd-2.6.18.img</initrd>
<kernel>/tmp/vmlinuz-2.6.18</kernel>
<cmdline>console=ttyS0 1</cmdline>
</os>"""]
vmConfs = [{'kernel': '/tmp/vmlinuz-2.6.18', 'initrd':
'/tmp/initrd-2.6.18.img', 'kernelArgs': 'console=ttyS0 1'}]
OSXML = """
<os>
<type arch="ppc64" machine="pseries">hvm</type>
<boot dev="%s"/>
</os>"""
qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
for k, v in qemu2libvirtBoot.iteritems():
vmConfs.append({'boot': k})
expectedXMLs.append(OSXML % v)
for vmConf, xml in zip(vmConfs, expectedXMLs):
vmConf.update(self.conf)
domxml = vm._DomXML(vmConf, self.log,
caps.Architecture.PPC64)
domxml.appendOs()
self.assertXML(domxml.dom, xml, 'os')
def testSmartcardXML(self):
smartcardXML = '<smartcard mode="passthrough" type="spicevmc"/>'
dev = {'device': 'smartcard',
'specParams': {'mode': 'passthrough', 'type': 'spicevmc'}}
smartcard = vm.SmartCardDevice(self.conf, self.log, **dev)
self.assertXML(smartcard.getXML(), smartcardXML)
def testTpmXML(self):
tpmXML = """
<tpm model="tpm-tis">
<backend type="passthrough">
<device path="/dev/tpm0"/>
</backend>
</tpm>
"""
dev = {'device': 'tpm',
'specParams': {'mode': 'passthrough',
'path': '/dev/tpm0', 'model': 'tpm-tis'}}
tpm = vm.TpmDevice(self.conf, self.log, **dev)
self.assertXML(tpm.getXML(), tpmXML)
def testFeaturesXML(self):
featuresXML = """
<features>
<acpi/>
</features>"""
domxml = vm._DomXML(self.conf, self.log,
caps.Architecture.X86_64)
domxml.appendFeatures()
self.assertXML(domxml.dom, featuresXML, 'features')
def testFeaturesHyperVXML(self):
featuresXML = """
<features>
<acpi/>
<hyperv>
<relaxed state="on"/>
</hyperv>
</features>"""
conf = {'hypervEnable': 'true'}
conf.update(self.conf)
domxml = vm._DomXML(conf, self.log,
caps.Architecture.X86_64)
domxml.appendFeatures()
self.assertXML(domxml.dom, featuresXML, 'features')
def testSysinfoXML(self):
sysinfoXML = """
<sysinfo type="smbios">
<system>
<entry name="manufacturer">%s</entry>
<entry name="product">%s</entry>
<entry name="version">%s</entry>
<entry name="serial">%s</entry>
<entry name="uuid">%s</entry>
</system>
</sysinfo>"""
product = 'oVirt Node'
version = '17-1'
serial = 'A5955881-519B-11CB-8352-E78A528C28D8_00:21:cc:68:d7:38'
sysinfoXML = sysinfoXML % (constants.SMBIOS_MANUFACTURER,
product, version, serial, self.conf['vmId'])
domxml = vm._DomXML(self.conf, self.log,
caps.Architecture.X86_64)
domxml.appendSysinfo(product, version, serial)
self.assertXML(domxml.dom, sysinfoXML, 'sysinfo')
def testConsoleXML(self):
consoleXML = """
<console type="pty">
<target port="0" type="virtio"/>
</console>"""
dev = {'device': 'console'}
console = vm.ConsoleDevice(self.conf, self.log, **dev)
self.assertXML(console.getXML(), consoleXML)
def testClockXML(self):
clockXML = """
<clock adjustment="-3600" offset="variable">
<timer name="rtc" tickpolicy="catchup"/>
<timer name="pit" tickpolicy="delay"/>
<timer name="hpet" present="no"/>
</clock>"""
self.conf['timeOffset'] = '-3600'
domxml = vm._DomXML(self.conf, self.log,
caps.Architecture.X86_64)
domxml.appendClock()
self.assertXML(domxml.dom, clockXML, 'clock')
def testHyperVClockXML(self):
clockXML = """
<clock adjustment="-3600" offset="variable">
<timer name="rtc" tickpolicy="catchup" track="guest"/>
<timer name="pit" tickpolicy="delay"/>
<timer name="hpet" present="no"/>
</clock>"""
conf = {'timeOffset': '-3600', 'hypervEnable': 'true'}
conf.update(self.conf)
domxml = vm._DomXML(conf, self.log,
caps.Architecture.X86_64)
domxml.appendClock()
self.assertXML(domxml.dom, clockXML, 'clock')
def testCpuXML(self):
cpuXML = """
<cpu match="exact">
<model>Opteron_G4</model>
<feature name="sse4.1" policy="require"/>
<feature name="sse4.2" policy="require"/>
<feature name="svm" policy="disable"/>
<topology cores="2" sockets="40" threads="2"/>
<numa>
<cell cpus="0-1" memory="5242880"/>
<cell cpus="2,3" memory="5242880"/>
</numa>
</cpu> """
cputuneXML = """
<cputune>
<vcpupin cpuset="2-3" vcpu="1"/>
<vcpupin cpuset="0-1" vcpu="0"/>
</cputune> """
numatuneXML = """
<numatune>
<memory mode="strict" nodeset="0-1"/>
</numatune> """
vmConf = {'cpuType': "Opteron_G4,+sse4_1,+sse4_2,-svm",
'smpCoresPerSocket': 2, 'smpThreadsPerCore': 2,
'cpuPinning': {'0': '0-1', '1': '2-3'},
'numaTune': {'mode': 'strict', 'nodeset': '0-1'},
'guestNumaNodes': [{'cpus': '0-1', 'memory': '5120',
'nodeIndex': 0},
{'cpus': '2,3', 'memory': '5120',
'nodeIndex': 1}]}
vmConf.update(self.conf)
domxml = vm._DomXML(vmConf, self.log,
caps.Architecture.X86_64)
domxml.appendCpu()
self.assertXML(domxml.dom, cpuXML, 'cpu')
self.assertXML(domxml.dom, cputuneXML, 'cputune')
domxml.appendNumaTune()
self.assertXML(domxml.dom, numatuneXML, 'numatune')
def testChannelXML(self):
channelXML = """
<channel type="unix">
<target name="%s" type="virtio"/>
<source mode="bind" path="%s"/>
</channel>"""
path = '/tmp/channel-socket'
name = 'org.linux-kvm.port.0'
channelXML = channelXML % (name, path)
domxml = vm._DomXML(self.conf, self.log,
caps.Architecture.X86_64)
domxml._appendAgentDevice(path, name)
self.assertXML(domxml.dom, channelXML, 'devices/channel')
def testInputXMLX86_64(self):
expectedXMLs = [
"""<input bus="ps2" type="mouse"/>""",
"""<input bus="usb" type="tablet"/>"""]
vmConfs = [{}, {'tabletEnable': 'true'}]
for vmConf, xml in zip(vmConfs, expectedXMLs):
vmConf.update(self.conf)
domxml = vm._DomXML(vmConf, self.log,
caps.Architecture.X86_64)
domxml.appendInput()
self.assertXML(domxml.dom, xml, 'devices/input')
def testInputXMLPPC64(self):
expectedXMLs = [
"""<input bus="usb" type="mouse"/>""",
"""<input bus="usb" type="tablet"/>"""]
vmConfs = [{}, {'tabletEnable': 'true'}]
for vmConf, xml in zip(vmConfs, expectedXMLs):
vmConf.update(self.conf)
domxml = vm._DomXML(vmConf, self.log,
caps.Architecture.PPC64)
domxml.appendInput()
self.assertXML(domxml.dom, xml, 'devices/input')
def testLegacyGraphicsXML(self):
vmConfs = [
{'display': 'vnc', 'displayPort': '-1', 'displayNetwork':
'vmDisplay', 'keyboardLayout': 'en-us'},
{'display': 'qxl', 'displayPort': '-1', 'displaySecurePort': '-1',
'spiceSecureChannels':
"smain,sinputs,scursor,splayback,srecord,sdisplay"},
{'display': 'qxl', 'displayPort': '-1', 'displaySecurePort': '-1',
'spiceSecureChannels': "smain"},
{'display': 'qxl', 'displayPort': '-1', 'displaySecurePort': '-1',
'copyPasteEnable': 'false'}]
for vmConf, xml in zip(vmConfs, self.GRAPHICS_XMLS):
self._verifyGraphicsXML(vmConf, xml, isLegacy=True)
def testGraphicsDeviceXML(self):
vmConfs = [
{'devices': [{
'type': 'graphics', 'device': 'vnc', 'port': '-1',
'specParams': {
'displayNetwork': 'vmDisplay',
'keyMap': 'en-us'}}]},
{'devices': [{
'type': 'graphics', 'device': 'spice', 'port': '-1',
'tlsPort': '-1', 'specParams': {
'spiceSecureChannels':
'smain,sinputs,scursor,splayback,srecord,sdisplay'}}]},
{'devices': [{
'type': 'graphics', 'device': 'spice', 'port': '-1',
'tlsPort': '-1', 'specParams': {
'spiceSecureChannels': 'smain'}}]},
{'devices': [{
'type': 'graphics', 'device': 'spice', 'port': '-1',
'tlsPort': '-1', 'specParams': {
'copyPasteEnable': 'false'}}]}]
for vmConf, xml in zip(vmConfs, self.GRAPHICS_XMLS):
self._verifyGraphicsXML(vmConf, xml, isLegacy=False)
def _verifyGraphicsXML(self, vmConf, xml, isLegacy):
spiceChannelXML = """
<channel type="spicevmc">
<target name="com.redhat.spice.0" type="virtio"/>
</channel>"""
vmConf.update(self.conf)
with FakeVM(vmConf) as fake:
dev = (fake.getConfGraphics() if isLegacy
else vmConf['devices'])[0]
graph = vm.GraphicsDevice(vmConf, self.log, **dev)
self.assertXML(graph.getXML(), xml)
if graph.device == 'spice':
self.assertXML(graph.getSpiceVmcChannelsXML(),
spiceChannelXML)
def testBalloonXML(self):
balloonXML = '<memballoon model="virtio"/>'
dev = {'device': 'memballoon', 'type': 'balloon',
'specParams': {'model': 'virtio'}}
balloon = vm.BalloonDevice(self.conf, self.log, **dev)
self.assertXML(balloon.getXML(), balloonXML)
def testRngXML(self):
rngXML = """
<rng model="virtio">
<rate bytes="1234" period="2000"/>
<backend model="random">/dev/random</backend>
</rng>"""
dev = {'type': 'rng', 'model': 'virtio', 'specParams':
{'period': '2000', 'bytes': '1234', 'source': 'random'}}
rng = vm.RngDevice(self.conf, self.log, **dev)
self.assertXML(rng.getXML(), rngXML)
def testWatchdogXML(self):
watchdogXML = '<watchdog action="none" model="i6300esb"/>'
dev = {'device': 'watchdog', 'type': 'watchdog',
'specParams': {'model': 'i6300esb', 'action': 'none'}}
watchdog = vm.WatchdogDevice(self.conf, self.log, **dev)
self.assertXML(watchdog.getXML(), watchdogXML)
def testSoundXML(self):
soundXML = '<sound model="ac97"/>'
dev = {'device': 'ac97'}
sound = vm.SoundDevice(self.conf, self.log, **dev)
self.assertXML(sound.getXML(), soundXML)
def testVideoXML(self):
videoXML = """
<video>
<model heads="2" type="vga" vram="32768"/>
</video>"""
dev = {'device': 'vga', 'specParams': {'vram': '32768',
'heads': '2'}}
video = vm.VideoDevice(self.conf, self.log, **dev)
self.assertXML(video.getXML(), videoXML)
def testInterfaceXML(self):
interfaceXML = """
<interface type="bridge"> <address %s/>
<mac address="52:54:00:59:F5:3F"/>
<model type="virtio"/>
<source bridge="ovirtmgmt"/>
<filterref filter="no-mac-spoofing"/>
<boot order="1"/>
<driver name="vhost" queues="7"/>
<tune>
<sndbuf>0</sndbuf>
</tune>
<bandwidth>
<inbound average="1000" burst="1024" peak="5000"/>
<outbound average="128" burst="256"/>
</bandwidth>
</interface>""" % self.PCI_ADDR
dev = {'nicModel': 'virtio', 'macAddr': '52:54:00:59:F5:3F',
'network': 'ovirtmgmt', 'address': self.PCI_ADDR_DICT,
'device': 'bridge', 'type': 'interface',
'bootOrder': '1', 'filter': 'no-mac-spoofing',
'specParams': {'inbound': {'average': 1000, 'peak': 5000,
'burst': 1024},
'outbound': {'average': 128, 'burst': 256}},
'custom': {'queues': '7'}}
self.conf['custom'] = {'vhost': 'ovirtmgmt:true', 'sndbuf': '0'}
iface = vm.NetworkInterfaceDevice(self.conf, self.log, **dev)
self.assertXML(iface.getXML(), interfaceXML)
def testInterfaceXMLBandwidthUpdate(self):
originalBwidthXML = """
<bandwidth>
<inbound average="1000" burst="1024" peak="5000"/>
<outbound average="128" burst="256"/>
</bandwidth>"""
NEW_OUT = {'outbound': {'average': 1042, 'burst': 128, 'peak': 500}}
updatedBwidthXML = """
<bandwidth>
<inbound average="1000" burst="1024" peak="5000"/>
<outbound average="%(average)s" burst="%(burst)s"
peak="%(peak)s"/>
</bandwidth>""" % NEW_OUT['outbound']
dev = {'nicModel': 'virtio', 'macAddr': '52:54:00:59:F5:3F',
'network': 'ovirtmgmt', 'address': self.PCI_ADDR_DICT,
'device': 'bridge', 'type': 'interface',
'bootOrder': '1', 'filter': 'no-mac-spoofing',
'specParams': {'inbound': {'average': 1000, 'peak': 5000,
'burst': 1024},
'outbound': {'average': 128, 'burst': 256}},
'custom': {'queues': '7'}}
self.conf['custom'] = {'vhost': 'ovirtmgmt:true', 'sndbuf': '0'}
iface = vm.NetworkInterfaceDevice(self.conf, self.log, **dev)
originalBandwidth = iface.getXML().getElementsByTagName('bandwidth')[0]
self.assertXML(originalBandwidth, originalBwidthXML)
self.assertXML(iface.paramsToBandwidthXML(NEW_OUT, originalBandwidth),
updatedBwidthXML)
def testControllerXML(self):
devConfs = [
{'device': 'ide', 'index': '0', 'address': self.PCI_ADDR_DICT},
{'device': 'scsi', 'index': '0', 'model': 'virtio-scsi',
'address': self.PCI_ADDR_DICT},
{'device': 'virtio-serial', 'address': self.PCI_ADDR_DICT},
{'device': 'usb', 'model': 'ich9-ehci1', 'index': '0',
'master': {'startport': '0'}, 'address': self.PCI_ADDR_DICT}]
expectedXMLs = [
"""
<controller index="0" type="ide">
<address %s/>
</controller>""",
"""
<controller index="0" model="virtio-scsi" type="scsi">
<address %s/>
</controller>""",
"""
<controller index="0" ports="16" type="virtio-serial">
<address %s/>
</controller>""",
"""
<controller index="0" model="ich9-ehci1" type="usb">
<master startport="0"/>
<address %s/>
</controller>"""]
for devConf, xml in zip(devConfs, expectedXMLs):
dev = vm.ControllerDevice(self.conf, self.log, **devConf)
self.assertXML(dev.getXML(), xml % self.PCI_ADDR)
def testRedirXML(self):
redirXML = """
<redirdev type="spicevmc">
<address %s/>
</redirdev>""" % self.PCI_ADDR
dev = {'device': 'spicevmc', 'address': self.PCI_ADDR_DICT}
redir = vm.RedirDevice(self.conf, self.log, **dev)
self.assertXML(redir.getXML(), redirXML)
def testDriveSharedStatus(self):
sharedConfigs = [
# Backward compatibility
{'shared': True}, {'shared': 'True'}, {'shared': 'true'},
{'shared': False}, {'shared': 'False'}, {'shared': 'false'},
# Missing shared definition
{},
# New extended values
{'shared': 'exclusive'}, {'shared': 'shared'}, {'shared': 'none'},
{'shared': 'transient'},
]
expectedStates = [
# Backward compatibility
'shared', 'shared', 'shared', 'none', 'none', 'none',
# Missing shared definition
'none',
# New extended values
'exclusive', 'shared', 'none', 'transient',
]
driveConfig = {'index': '0', 'iface': 'virtio', 'device': 'disk'}
for driveInput, driveOutput in zip(sharedConfigs, expectedStates):
driveInput.update(driveConfig)
drive = vm.Drive({}, self.log, **driveInput)
self.assertEqual(drive.extSharedState, driveOutput)
# Negative flow, unsupported value
driveInput.update({'shared': 'UNKNOWN-VALUE'})
with self.assertRaises(ValueError):
drive = vm.Drive({}, self.log, **driveInput)
def testDriveXML(self):
SERIAL = '54-a672-23e5b495a9ea'
devConfs = [
{'index': '2', 'propagateErrors': 'off', 'iface': 'ide',
'name': 'hdc', 'format': 'raw', 'device': 'cdrom',
'path': '/tmp/fedora.iso', 'type': 'disk', 'readonly': 'True',
'shared': 'none', 'serial': SERIAL},
{'index': '0', 'propagateErrors': 'on', 'iface': 'virtio',
'name': 'vda', 'format': 'cow', 'device': 'disk',
'path': '/tmp/disk1.img', 'type': 'disk', 'readonly': 'False',
'shared': 'shared', 'serial': SERIAL,
'specParams': {'ioTune': {'read_bytes_sec': 6120000,
'total_iops_sec': 800}}},
{'index': '0', 'propagateErrors': 'off', 'iface': 'virtio',
'name': 'vda', 'format': 'raw', 'device': 'disk',
'path': '/dev/mapper/lun1', 'type': 'disk', 'readonly': 'False',
'shared': 'none', 'serial': SERIAL},
{'index': '0', 'propagateErrors': 'off', 'iface': 'scsi',
'name': 'sda', 'format': 'raw', 'device': 'disk',
'path': '/tmp/disk1.img', 'type': 'disk', 'readonly': 'False',
'shared': 'exclusive', 'serial': SERIAL},
{'index': '0', 'propagateErrors': 'off', 'iface': 'scsi',
'name': 'sda', 'format': 'raw', 'device': 'lun',
'path': '/dev/mapper/lun1', 'type': 'disk', 'readonly': 'False',
'shared': 'none', 'serial': SERIAL, 'sgio': 'unfiltered'}]
expectedXMLs = [
"""
<disk device="cdrom" snapshot="no" type="file">
<source file="/tmp/fedora.iso" startupPolicy="optional"/>
<target bus="ide" dev="hdc"/>
<readonly/>
<serial>%s</serial>
</disk>""",
"""
<disk device="disk" snapshot="no" type="file">
<source file="/tmp/disk1.img"/>
<target bus="virtio" dev="vda"/>
<shareable/>
<serial>%s</serial>
<driver cache="writethrough" error_policy="enospace"
io="threads" name="qemu" type="qcow2"/>
<iotune>
<read_bytes_sec>6120000</read_bytes_sec>
<total_iops_sec>800</total_iops_sec>
</iotune>
</disk>""",
"""
<disk device="disk" snapshot="no" type="block">
<source dev="/dev/mapper/lun1"/>
<target bus="virtio" dev="vda"/>
<serial>%s</serial>
<driver cache="none" error_policy="stop"
io="native" name="qemu" type="raw"/>
</disk>""",
"""
<disk device="disk" snapshot="no" type="file">
<source file="/tmp/disk1.img"/>
<target bus="scsi" dev="sda"/>
<serial>%s</serial>
<driver cache="none" error_policy="stop"
io="threads" name="qemu" type="raw"/>
</disk>""",
"""
<disk device="lun" sgio="unfiltered" snapshot="no" type="block">
<source dev="/dev/mapper/lun1"/>
<target bus="scsi" dev="sda"/>
<serial>%s</serial>
<driver cache="none" error_policy="stop"
io="native" name="qemu" type="raw"/>
</disk>"""]
blockDevs = [False, False, True, False, True]
vmConfs = [{}, {'custom': {'viodiskcache': 'writethrough'}},
{}, {}, {}]
for (devConf, xml, blockDev, vmConf) in \
zip(devConfs, expectedXMLs, blockDevs, vmConfs):
drive = vm.Drive(vmConf, self.log, **devConf)
# Patch Drive.blockDev to skip the block device checking.
drive._blockDev = blockDev
self.assertXML(drive.getXML(), xml % SERIAL)
def testIoTuneException(self):
SERIAL = '54-a672-23e5b495a9ea'
basicConf = {'index': '0', 'propagateErrors': 'on', 'iface': 'virtio',
'name': 'vda', 'format': 'cow', 'device': 'disk',
'path': '/tmp/disk1.img', 'type': 'disk',
'readonly': 'False', 'shared': 'True', 'serial': SERIAL}
tuneConfs = [
{'read_iops_sec': 1000, 'total_iops_sec': 2000},
{'read_bytes_sec': -5},
{'aaa': 100},
{'read_iops_sec': 'aaa'}]
devConfs = [dict(specParams=dict(ioTune=tuneConf), **basicConf)
for tuneConf in tuneConfs]
expectedExceptMsgs = [
'A non-zero total value and non-zero read/write value for'
' iops_sec can not be set at the same time',
'parameter read_bytes_sec value should be equal or greater'
' than zero',
'parameter aaa name is invalid',
'an integer is required for ioTune parameter read_iops_sec']
vmConf = {'custom': {'viodiskcache': 'writethrough'}}
for (devConf, exceptionMsg) in \
zip(devConfs, expectedExceptMsgs):
drive = vm.Drive(vmConf, self.log, **devConf)
# Patch Drive.blockDev to skip the block device checking.
drive._blockDev = False
with self.assertRaises(Exception) as cm:
drive.getXML()
            self.assertEqual(cm.exception.args[0], exceptionMsg)
@MonkeyPatch(caps, 'getTargetArch', lambda: caps.Architecture.X86_64)
@MonkeyPatch(caps, 'osversion', lambda: {
'release': '1', 'version': '18', 'name': 'Fedora'})
@MonkeyPatch(constants, 'SMBIOS_MANUFACTURER', 'oVirt')
@MonkeyPatch(constants, 'SMBIOS_OSNAME', 'oVirt Node')
@MonkeyPatch(libvirtconnection, 'get', ConnectionMock)
@MonkeyPatch(utils, 'getHostUUID',
lambda: "fc25cbbe-5520-4f83-b82e-1541914753d9")
def testBuildCmdLineX86_64(self):
self.assertBuildCmdLine(CONF_TO_DOMXML_X86_64)
@MonkeyPatch(caps, 'getTargetArch', lambda: caps.Architecture.PPC64)
@MonkeyPatch(caps, 'osversion', lambda: {
'release': '1', 'version': '18', 'name': 'Fedora'})
@MonkeyPatch(libvirtconnection, 'get', ConnectionMock)
@MonkeyPatch(utils, 'getHostUUID',
lambda: "fc25cbbe-5520-4f83-b82e-1541914753d9")
def testBuildCmdLinePPC64(self):
self.assertBuildCmdLine(CONF_TO_DOMXML_PPC64)
    def testGetVmPolicySucceeded(self):
with FakeVM() as fake:
fake._dom = FakeDomain()
self.assertTrue(isinstance(fake._getVmPolicy(),
xml.dom.minidom.Element))
def testGetVmPolicyEmptyOnNoMetadata(self):
with FakeVM() as fake:
fake._dom = FakeDomain(
virtError=libvirt.VIR_ERR_NO_DOMAIN_METADATA)
self.assertTrue(isinstance(fake._getVmPolicy(),
xml.dom.minidom.Element))
def testGetVmPolicyFailOnNoDomain(self):
with FakeVM() as fake:
fake._dom = FakeDomain(virtError=libvirt.VIR_ERR_NO_DOMAIN)
self.assertEqual(fake._getVmPolicy(), None)
class FakeGuestAgent(object):
def getGuestInfo(self):
return {
'username': 'Unknown',
'session': 'Unknown',
'memUsage': 0,
'appsList': [],
'guestIPs': '',
'guestFQDN': '',
'disksUsage': [],
'netIfaces': [],
'memoryStats': {},
'guestCPUCount': -1}
@contextmanager
def FakeVM(params=None, devices=None, runCpu=False):
with namedTemporaryDir() as tmpDir:
with MonkeyPatchScope([(constants, 'P_VDSM_RUN', tmpDir + '/'),
(libvirtconnection, 'get', ConnectionMock)]):
vmParams = {'vmId': 'TESTING'}
vmParams.update({} if params is None else params)
fake = vm.Vm(None, vmParams)
fake.guestAgent = FakeGuestAgent()
fake.conf['devices'] = [] if devices is None else devices
fake._guestCpuRunning = runCpu
yield fake
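# Typical use of the FakeVM fixture, mirroring the tests below:
#
#     with FakeVM({'timeOffset': 42}) as fake:
#         fake._dom = FakeDomain()
#         stats = fake.getStats()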
@expandPermutations
class TestVmOperations(TestCaseBase):
# just numbers, no particular meaning
UPDATE_OFFSETS = [-3200, 3502, -2700, 3601]
BASE_OFFSET = 42
GRAPHIC_DEVICES = [{'type': 'graphics', 'device': 'spice', 'port': '-1'},
{'type': 'graphics', 'device': 'vnc', 'port': '-1'}]
@MonkeyPatch(libvirtconnection, 'get', lambda x: ConnectionMock())
@permutations([[define.NORMAL], [define.ERROR]])
def testTimeOffsetNotPresentByDefault(self, exitCode):
with FakeVM() as fake:
fake.setDownStatus(exitCode, vmexitreason.GENERIC_ERROR)
self.assertFalse('timeOffset' in fake.getStats())
@MonkeyPatch(libvirtconnection, 'get', lambda x: ConnectionMock())
@permutations([[define.NORMAL], [define.ERROR]])
def testTimeOffsetRoundtrip(self, exitCode):
with FakeVM({'timeOffset': self.BASE_OFFSET}) as fake:
fake.setDownStatus(exitCode, vmexitreason.GENERIC_ERROR)
self.assertEqual(fake.getStats()['timeOffset'],
self.BASE_OFFSET)
@MonkeyPatch(libvirtconnection, 'get', lambda x: ConnectionMock())
@permutations([[define.NORMAL], [define.ERROR]])
    def testTimeOffsetRoundtripAcrossInstances(self, exitCode):
# bz956741
lastOffset = 0
for offset in self.UPDATE_OFFSETS:
with FakeVM({'timeOffset': lastOffset}) as fake:
fake._rtcUpdate(offset)
fake.setDownStatus(exitCode, vmexitreason.GENERIC_ERROR)
vmOffset = fake.getStats()['timeOffset']
self.assertEqual(vmOffset, str(lastOffset + offset))
# the field in getStats is str, not int
lastOffset = int(vmOffset)
@MonkeyPatch(libvirtconnection, 'get', lambda x: ConnectionMock())
@permutations([[define.NORMAL], [define.ERROR]])
def testTimeOffsetUpdateIfAbsent(self, exitCode):
# bz956741 (-like, simpler case)
with FakeVM() as fake:
for offset in self.UPDATE_OFFSETS:
fake._rtcUpdate(offset)
# beware of type change!
fake.setDownStatus(exitCode, vmexitreason.GENERIC_ERROR)
self.assertEqual(fake.getStats()['timeOffset'],
str(self.UPDATE_OFFSETS[-1]))
@MonkeyPatch(libvirtconnection, 'get', lambda x: ConnectionMock())
@permutations([[define.NORMAL], [define.ERROR]])
def testTimeOffsetUpdateIfPresent(self, exitCode):
with FakeVM({'timeOffset': self.BASE_OFFSET}) as fake:
for offset in self.UPDATE_OFFSETS:
fake._rtcUpdate(offset)
# beware of type change!
fake.setDownStatus(exitCode, vmexitreason.GENERIC_ERROR)
self.assertEqual(fake.getStats()['timeOffset'],
str(self.BASE_OFFSET + self.UPDATE_OFFSETS[-1]))
def testUpdateSingleDeviceGraphics(self):
devXmls = (
'<graphics connected="disconnect" passwd="***"'
' port="5900" type="spice"/>',
'<graphics passwd="***" port="5900" type="vnc"/>')
for device, devXml in zip(self.GRAPHIC_DEVICES, devXmls):
domXml = '''
<devices>
<graphics type="%s" port="5900" />
</devices>''' % device['device']
self._verifyDeviceUpdate(device, device, domXml, devXml)
def testUpdateMultipleDeviceGraphics(self):
devXmls = (
'<graphics connected="disconnect" passwd="***"'
' port="5900" type="spice"/>',
'<graphics passwd="***" port="5901" type="vnc"/>')
domXml = '''
<devices>
<graphics type="spice" port="5900" />
<graphics type="vnc" port="5901" />
</devices>'''
for device, devXml in zip(self.GRAPHIC_DEVICES, devXmls):
self._verifyDeviceUpdate(
device, self.GRAPHIC_DEVICES, domXml, devXml)
def _verifyDeviceUpdate(self, device, allDevices, domXml, devXml):
with FakeVM(devices=allDevices) as fake:
fake._dom = FakeDomain(domXml)
fake.updateDevice({
'deviceType': 'graphics',
'graphicsType': device['device'],
'password': '***',
'ttl': 0,
'existingConnAction': 'disconnect'})
            self.assertEqual(fake._dom.devXml, devXml)
VM_EXITS = tuple(product((define.NORMAL, define.ERROR),
vmexitreason.exitReasons.keys()))
@expandPermutations
class TestVmExit(TestCaseBase):
@permutations(VM_EXITS)
def testExitReason(self, exitCode, exitReason):
"""
test of:
exitReason round trip;
error message is constructed correctly automatically
"""
with FakeVM() as fake:
fake.setDownStatus(exitCode, exitReason)
stats = fake.getStats()
self.assertEqual(stats['exitReason'], exitReason)
self.assertEqual(stats['exitMessage'],
vmexitreason.exitReasons.get(exitReason))
@permutations(VM_EXITS)
def testExitReasonExplicitMessage(self, exitCode, exitReason):
"""
test of:
exitReason round trip;
error message can be overridden explicitely
"""
with FakeVM() as fake:
msg = "test custom error message"
fake.setDownStatus(exitCode, exitReason, msg)
stats = fake.getStats()
self.assertEqual(stats['exitReason'], exitReason)
self.assertEqual(stats['exitMessage'], msg)
class TestVmStatsThread(TestCaseBase):
VM_PARAMS = {'displayPort': -1, 'displaySecurePort': -1,
'display': 'qxl', 'displayIp': '127.0.0.1',
'vmType': 'kvm', 'memSize': 1024}
DEV_BALLOON = [{'type': 'balloon', 'specParams': {'model': 'virtio'}}]
def testGetNicStats(self):
GBPS = 10 ** 9 / 8
MAC = '52:54:00:59:F5:3F'
with FakeVM() as fake:
mock_stats_thread = vm.VmStatsThread(fake)
res = mock_stats_thread._getNicStats(
name='vnettest', model='virtio', mac=MAC,
start_sample=(2 ** 64 - 15 * GBPS, 1, 2, 3, 0, 4, 5, 6),
end_sample=(0, 7, 8, 9, 5 * GBPS, 10, 11, 12),
interval=15.0)
self.assertEqual(res, {
'rxErrors': '8', 'rxDropped': '9',
'txErrors': '11', 'txDropped': '12',
'macAddr': MAC, 'name': 'vnettest',
'speed': '1000', 'state': 'unknown',
'rxRate': '100.0', 'txRate': '33.3'})
def testGetStatsNoDom(self):
# bz1073478 - main case
with FakeVM(self.VM_PARAMS, self.DEV_BALLOON) as fake:
self.assertEqual(fake._dom, None)
mock_stats_thread = vm.VmStatsThread(fake)
res = {}
mock_stats_thread._getBalloonStats(res)
self.assertIn('balloonInfo', res)
self.assertIn('balloon_cur', res['balloonInfo'])
def testGetStatsDomInfoFail(self):
# bz1073478 - extra case
with FakeVM(self.VM_PARAMS, self.DEV_BALLOON) as fake:
fake._dom = FakeDomain()
mock_stats_thread = vm.VmStatsThread(fake)
res = {}
mock_stats_thread._getBalloonStats(res)
self.assertIn('balloonInfo', res)
self.assertIn('balloon_cur', res['balloonInfo'])
def testMultipleGraphicDeviceStats(self):
devices = [{'type': 'graphics', 'device': 'spice', 'port': '-1'},
{'type': 'graphics', 'device': 'vnc', 'port': '-1'}]
with FakeVM(self.VM_PARAMS, devices) as fake:
fake._updateDevices(fake.buildConfDevices())
res = fake.getStats()
self.assertIn('displayPort', res)
self.assertEqual(res['displayType'],
'qxl' if devices[0]['device'] == 'spice' else
'vnc')
for statsDev, confDev in zip(res['displayInfo'], devices):
self.assertIn(statsDev['type'], confDev['device'])
self.assertIn('port', statsDev)
class TestLibVirtCallbacks(TestCaseBase):
FAKE_ERROR = 'EFAKERROR'
def test_onIOErrorPause(self):
with FakeVM(runCpu=True) as fake:
self.assertTrue(fake._guestCpuRunning)
fake._onIOError('fakedev', self.FAKE_ERROR,
libvirt.VIR_DOMAIN_EVENT_IO_ERROR_PAUSE)
self.assertFalse(fake._guestCpuRunning)
self.assertEqual(fake.conf.get('pauseCode'), self.FAKE_ERROR)
def test_onIOErrorReport(self):
with FakeVM(runCpu=True) as fake:
self.assertTrue(fake._guestCpuRunning)
fake._onIOError('fakedev', self.FAKE_ERROR,
libvirt.VIR_DOMAIN_EVENT_IO_ERROR_REPORT)
self.assertTrue(fake._guestCpuRunning)
            self.assertNotEqual(fake.conf.get('pauseCode'), self.FAKE_ERROR)
def test_onIOErrorNotSupported(self):
"""action not explicitely handled, must be skipped"""
with FakeVM(runCpu=True) as fake:
self.assertTrue(fake._guestCpuRunning)
fake._onIOError('fakedev', self.FAKE_ERROR,
libvirt.VIR_DOMAIN_EVENT_IO_ERROR_NONE)
self.assertTrue(fake._guestCpuRunning)
self.assertNotIn('pauseCode', fake.conf) # no error recorded
@expandPermutations
class TestVmDevices(TestCaseBase):
def setUp(self):
self.conf = {
'vmName': 'testVm',
'vmId': '9ffe28b6-6134-4b1e-8804-1185f49c436f',
'smp': '8', 'maxVCpus': '160',
'memSize': '1024', 'memGuaranteedSize': '512'}
self.confDisplayVnc = (
{'display': 'vnc', 'displayNetwork': 'vmDisplay'},
{'display': 'vnc', 'displayPort': '-1', 'displayNetwork':
'vmDisplay', 'keyboardLayout': 'en-us'})
self.confDisplaySpice = (
{'display': 'qxl', 'displayNetwork': 'vmDisplay'},
{'display': 'qxl', 'displayPort': '-1',
'displaySecurePort': '-1'})
self.confDeviceGraphicsVnc = (
({'type': 'graphics', 'device': 'vnc'},),
({'type': 'graphics', 'device': 'vnc', 'port': '-1',
'specParams': {
'displayNetwork': 'vmDisplay',
'keyMap': 'en-us'}},))
self.confDeviceGraphicsSpice = (
({'type': 'graphics', 'device': 'spice'},),
({'type': 'graphics', 'device': 'spice', 'port': '-1',
'tlsPort': '-1', 'specParams': {
'spiceSecureChannels':
'smain,sinputs,scursor,splayback,srecord,sdisplay'}},))
self.confDisplay = self.confDisplayVnc + self.confDisplaySpice
self.confDeviceGraphics = (self.confDeviceGraphicsVnc +
self.confDeviceGraphicsSpice)
def testGraphicsDeviceLegacy(self):
for conf in self.confDisplay:
conf.update(self.conf)
with FakeVM(conf) as fake:
devs = fake.buildConfDevices()
self.assertTrue(devs['graphics'])
def testGraphicsDevice(self):
for dev in self.confDeviceGraphics:
with FakeVM(self.conf, dev) as fake:
devs = fake.buildConfDevices()
self.assertTrue(devs['graphics'])
def testGraphicsDeviceMixed(self):
"""
if proper Graphics Devices are supplied, display* params must be
ignored.
"""
for conf in self.confDisplay:
conf.update(self.conf)
for dev in self.confDeviceGraphics:
with FakeVM(self.conf, dev) as fake:
devs = fake.buildConfDevices()
self.assertEqual(len(devs['graphics']), 1)
self.assertEqual(devs['graphics'][0]['device'],
dev[0]['device'])
def testGraphicsDeviceSanityLegacy(self):
for conf in self.confDisplay:
conf.update(self.conf)
self.assertTrue(vm.GraphicsDevice.isSupportedDisplayType(conf))
def testGraphicsDeviceSanity(self):
for dev in self.confDeviceGraphics:
conf = {'display': 'qxl', 'devices': list(dev)}
conf.update(self.conf)
self.assertTrue(vm.GraphicsDevice.isSupportedDisplayType(conf))
def testGraphicDeviceUnsupported(self):
conf = {'display': 'rdp'}
conf.update(self.conf)
self.assertFalse(vm.GraphicsDevice.isSupportedDisplayType(conf))
def testHasSpiceLegacy(self):
for conf in self.confDisplaySpice:
conf.update(self.conf)
with FakeVM(conf) as fake:
self.assertTrue(fake.hasSpice)
for conf in self.confDisplayVnc:
conf.update(self.conf)
with FakeVM(conf) as fake:
self.assertFalse(fake.hasSpice)
def testHasSpice(self):
for dev in self.confDeviceGraphicsSpice:
with FakeVM(self.conf, dev) as fake:
self.assertTrue(fake.hasSpice)
for dev in self.confDeviceGraphicsVnc:
with FakeVM(self.conf, dev) as fake:
self.assertFalse(fake.hasSpice)
@permutations([['vnc', 'spice'], ['spice', 'vnc']])
def testGraphicsDeviceMultiple(self, primary, secondary):
devices = [{'type': 'graphics', 'device': primary},
{'type': 'graphics', 'device': secondary}]
with FakeVM(self.conf, devices) as fake:
devs = fake.buildConfDevices()
self.assertTrue(len(devs['graphics']) == 2)
@permutations([['vnc'], ['spice']])
def testGraphicsDeviceDuplicated(self, devType):
devices = [{'type': 'graphics', 'device': devType},
{'type': 'graphics', 'device': devType}]
with FakeVM(self.conf, devices) as fake:
self.assertRaises(ValueError, fake.buildConfDevices)
| futurice/vdsm | tests/vmTests.py | Python | gpl-2.0 | 50,090 |
#! /usr/bin/env python
'''
p_sig.py: Builtin constant signal
Copyright (c) 2010 Bill Gribble <[email protected]>
'''
from ..processor import Processor
from ..mfp_app import MFPApp
class Sig(Processor):
doc_tooltip_obj = "Emit a constant signal"
doc_tooltip_inlet = ["Value to emit (default: initarg 0)"]
doc_tooltip_outlet = ["Signal output"]
def __init__(self, init_type, init_args, patch, scope, name):
Processor.__init__(self, 1, 1, init_type, init_args, patch, scope, name)
initargs, kwargs = self.parse_args(init_args)
if len(initargs):
value = initargs[0]
else:
value = 0
self.dsp_outlets = [0]
self.dsp_init("sig~")
self.dsp_obj.setparam("value", value)
def trigger(self):
val = float(self.inlets[0])
self.dsp_obj.setparam("value", val)
def register():
MFPApp().register("sig~", Sig)
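# Usage sketch inside an MFP patch (illustrative): [sig~ 0.5] emits a
# constant 0.5 signal on its DSP outlet; sending a number to inlet 0 is
# handled by trigger() and updates the emitted value at runtime.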
| bgribble/mfp | mfp/builtins/sig.py | Python | gpl-2.0 | 925 |
from Components.ActionMap import ActionMap
from Components.Sensors import sensors
from Components.Sources.Sensor import SensorSource
from Components.Sources.StaticText import StaticText
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry
from Screens.Screen import Screen
from Plugins.Plugin import PluginDescriptor
from Components.FanControl import fancontrol
class TempFanControl(Screen, ConfigListScreen):
skin = """
<screen position="center,center" size="570,420" title="Fan Control" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="config" position="10,50" size="550,120" scrollbarMode="showOnDemand" />
<widget source="SensorTempText0" render="Label" position="10,150" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp0" render="Label" position="100,150" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText1" render="Label" position="10,170" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp1" render="Label" position="100,170" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText2" render="Label" position="10,190" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp2" render="Label" position="100,190" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText3" render="Label" position="10,210" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp3" render="Label" position="100,210" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText4" render="Label" position="10,230" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp4" render="Label" position="100,230" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText5" render="Label" position="10,250" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp5" render="Label" position="100,250" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText6" render="Label" position="10,270" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp6" render="Label" position="100,270" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText7" render="Label" position="10,290" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp7" render="Label" position="100,290" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText0" render="Label" position="290,150" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan0" render="Label" position="380,150" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText1" render="Label" position="290,170" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan1" render="Label" position="380,170" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText2" render="Label" position="290,190" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan2" render="Label" position="380,190" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText3" render="Label" position="290,210" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan3" render="Label" position="380,210" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText4" render="Label" position="290,230" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan4" render="Label" position="380,230" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText5" render="Label" position="290,250" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan5" render="Label" position="380,250" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText6" render="Label" position="290,270" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan6" render="Label" position="380,270" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText7" render="Label" position="290,290" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan7" render="Label" position="380,290" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
</screen>"""
def __init__(self, session, args = None):
Screen.__init__(self, session)
Screen.setTitle(self, _("Fan Control"))
templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE)
tempcount = len(templist)
fanlist = sensors.getSensorsList(sensors.TYPE_FAN_RPM)
fancount = len(fanlist)
self["red"] = StaticText(_("Cancel"))
self["green"] = StaticText(_("OK"))
self["yellow"] = StaticText("")
self["blue"] = StaticText("")
for count in range(8):
if count < tempcount:
id = templist[count]
self["SensorTempText%d" % count] = StaticText(sensors.getSensorName(id))
self["SensorTemp%d" % count] = SensorSource(sensorid = id)
else:
self["SensorTempText%d" % count] = StaticText("")
self["SensorTemp%d" % count] = SensorSource()
if count < fancount:
id = fanlist[count]
self["SensorFanText%d" % count] = StaticText(sensors.getSensorName(id))
self["SensorFan%d" % count] = SensorSource(sensorid = id)
else:
self["SensorFanText%d" % count] = StaticText("")
self["SensorFan%d" % count] = SensorSource()
self.list = []
for count in range(fancontrol.getFanCount()):
# self.list.append(getConfigListEntry(_("Fan %d Voltage") % (count + 1), fancontrol.getConfig(count).vlt)) # [iq]
self.list.append(getConfigListEntry(_("Fan %d PWM") % (count + 1), fancontrol.getConfig(count).pwm))
# self.list.append(getConfigListEntry(_("Standby Fan %d Voltage") % (count + 1), fancontrol.getConfig(count).vlt_standby)) # [iq]
self.list.append(getConfigListEntry(_("Standby Fan %d PWM") % (count + 1), fancontrol.getConfig(count).pwm_standby))
ConfigListScreen.__init__(self, self.list, session = self.session)
#self["config"].list = self.list
#self["config"].setList(self.list)
self["config"].l.setSeperation(300)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "MenuActions"],
{
"ok": self.save,
"cancel": self.revert,
"red": self.revert,
"green": self.save,
"menu": self.closeRecursive,
}, -1)
def save(self):
for count in range(fancontrol.getFanCount()):
# fancontrol.getConfig(count).vlt.save() # [iq]
fancontrol.getConfig(count).pwm.save()
# fancontrol.getConfig(count).vlt_standby.save() # [iq]
fancontrol.getConfig(count).pwm_standby.save()
self.close()
def revert(self):
for count in range(fancontrol.getFanCount()):
# fancontrol.getConfig(count).vlt.load() # [iq]
fancontrol.getConfig(count).pwm.load()
# fancontrol.getConfig(count).vlt_standby.load() # [iq]
fancontrol.getConfig(count).pwm_standby.load()
self.close()
def main(session, **kwargs):
session.open(TempFanControl)
def startMenu(menuid):
if menuid != "system":
return []
return [(_("Fan control"), main, "tempfancontrol", 80)]
def Plugins(**kwargs):
from Tools.HardwareInfo import HardwareInfo
HAVE_NOT_FAN = { "mediabox", "tmnanose", "force2", "tmnanoeco", "tmnanocombo", "force2solid", "force2plus" }
if HardwareInfo().get_device_name() in HAVE_NOT_FAN:
return []
else:
return PluginDescriptor(name = "Fan control", description = _("Fan control"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = startMenu)
| martinxp/openpli-oe-core | meta-dags/recipes-bsp/enigma2/enigma2-plugin-systemplugins-tempfancontrol/plugin.py | Python | gpl-2.0 | 10,871 |
# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""fast-import command classes.
These objects are used by the parser to represent the content of
a fast-import stream.
"""
from __future__ import division
import re
import stat
from .helpers import (
newobject as object,
utf8_bytes_string,
)
# There is a bug in git 1.5.4.3 and older by which unquoting a string consumes
# one extra character. Set this variable to True to work-around it. It only
# happens when renaming a file whose name contains spaces and/or quotes, and
# the symptom is:
# % git-fast-import
# fatal: Missing space after source: R "file 1.txt" file 2.txt
# http://git.kernel.org/?p=git/git.git;a=commit;h=c8744d6a8b27115503565041566d97c21e722584
GIT_FAST_IMPORT_NEEDS_EXTRA_SPACE_AFTER_QUOTE = False
# Lists of command names
COMMAND_NAMES = [b'blob', b'checkpoint', b'commit', b'feature', b'progress',
b'reset', b'tag']
FILE_COMMAND_NAMES = [b'filemodify', b'filedelete', b'filecopy', b'filerename',
b'filedeleteall']
# Feature names
MULTIPLE_AUTHORS_FEATURE = b'multiple-authors'
COMMIT_PROPERTIES_FEATURE = b'commit-properties'
EMPTY_DIRS_FEATURE = b'empty-directories'
FEATURE_NAMES = [
MULTIPLE_AUTHORS_FEATURE,
COMMIT_PROPERTIES_FEATURE,
EMPTY_DIRS_FEATURE,
]
class ImportCommand(object):
"""Base class for import commands."""
def __init__(self, name):
self.name = name
# List of field names not to display
self._binary = []
def __str__(self):
return repr(self)
def __repr__(self):
return bytes(self).decode('utf8')
def __bytes__(self):
raise NotImplementedError(
'An implementation of __bytes__ is required'
)
def dump_str(self, names=None, child_lists=None, verbose=False):
"""Dump fields as a string.
For debugging.
:param names: the list of fields to include or
None for all public fields
:param child_lists: dictionary of child command names to
fields for that child command to include
:param verbose: if True, prefix each line with the command class and
display fields as a dictionary; if False, dump just the field
values with tabs between them
"""
interesting = {}
if names is None:
fields = [
k for k in list(self.__dict__.keys())
                if not k.startswith('_')
]
else:
fields = names
for field in fields:
value = self.__dict__.get(field)
if field in self._binary and value is not None:
value = b'(...)'
interesting[field] = value
if verbose:
return "%s: %s" % (self.__class__.__name__, interesting)
else:
return "\t".join([repr(interesting[k]) for k in fields])
class BlobCommand(ImportCommand):
def __init__(self, mark, data, lineno=0):
ImportCommand.__init__(self, b'blob')
self.mark = mark
self.data = data
self.lineno = lineno
# Provide a unique id in case the mark is missing
if mark is None:
self.id = b'@' + ("%d" % lineno).encode('utf-8')
else:
self.id = b':' + mark
        self._binary = ['data']  # attribute names are str keys in __dict__
def __bytes__(self):
if self.mark is None:
mark_line = b''
else:
mark_line = b"\nmark :" + self.mark
return (b'blob' + mark_line + b'\n' +
('data %d\n' % len(self.data)).encode('utf-8') + self.data)
class CheckpointCommand(ImportCommand):
def __init__(self):
ImportCommand.__init__(self, b'checkpoint')
def __bytes__(self):
return b'checkpoint'
class CommitCommand(ImportCommand):
def __init__(self, ref, mark, author, committer, message, from_,
merges, file_iter, lineno=0, more_authors=None,
properties=None):
ImportCommand.__init__(self, b'commit')
self.ref = ref
self.mark = mark
self.author = author
self.committer = committer
self.message = message
self.from_ = from_
self.merges = merges
self.file_iter = file_iter
self.more_authors = more_authors
self.properties = properties
self.lineno = lineno
        self._binary = ['file_iter']  # attribute names are str keys in __dict__
# Provide a unique id in case the mark is missing
if self.mark is None:
self.id = b'@' + ('%d' % lineno).encode('utf-8')
else:
            if isinstance(self.mark, int):
self.id = b':' + str(self.mark).encode('utf-8')
else:
self.id = b':' + self.mark
def copy(self, **kwargs):
if not isinstance(self.file_iter, list):
self.file_iter = list(self.file_iter)
fields = dict(
(key, value)
for key, value in self.__dict__.items()
if key not in ('id', 'name')
if not key.startswith('_')
)
fields.update(kwargs)
return CommitCommand(**fields)
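    # Usage note (illustrative, not from the upstream docs): cmd.copy(mark=b'2')
    # rebuilds the commit with a new mark while keeping every other public
    # field; file_iter is materialised into a list first so the clone can
    # re-iterate it safely.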
def __bytes__(self):
return self.to_string(include_file_contents=True)
def to_string(self, use_features=True, include_file_contents=False):
"""
@todo the name to_string is ambiguous since the method actually
returns bytes.
"""
if self.mark is None:
mark_line = b''
else:
            if isinstance(self.mark, int):
mark_line = b'\nmark :' + str(self.mark).encode('utf-8')
else:
mark_line = b'\nmark :' + self.mark
if self.author is None:
author_section = b''
else:
author_section = b'\nauthor ' + format_who_when(self.author)
if use_features and self.more_authors:
for author in self.more_authors:
author_section += b'\nauthor ' + format_who_when(author)
committer = b'committer ' + format_who_when(self.committer)
if self.message is None:
msg_section = b''
else:
msg = self.message
msg_section = ('\ndata %d\n' % len(msg)).encode('ascii') + msg
if self.from_ is None:
from_line = b''
else:
from_line = b'\nfrom ' + self.from_
if self.merges is None:
merge_lines = b''
else:
merge_lines = b''.join(
[b'\nmerge ' + m for m in self.merges])
if use_features and self.properties:
property_lines = []
for name in sorted(self.properties):
value = self.properties[name]
property_lines.append(b'\n' + format_property(name, value))
properties_section = b''.join(property_lines)
else:
properties_section = b''
if self.file_iter is None:
filecommands = b''
else:
if include_file_contents:
filecommands = b''.join(
[b'\n' + bytes(c) for c in self.iter_files()])
else:
                filecommands = b''.join(
                    [b'\n' + str(c).encode('utf-8') for c in self.iter_files()])
return b''.join([
b'commit ',
self.ref,
mark_line,
author_section + b'\n',
committer,
msg_section,
from_line,
merge_lines,
properties_section,
filecommands])
def dump_str(self, names=None, child_lists=None, verbose=False):
result = [ImportCommand.dump_str(self, names, verbose=verbose)]
for f in self.iter_files():
if child_lists is None:
continue
try:
child_names = child_lists[f.name]
except KeyError:
continue
result.append('\t%s' % f.dump_str(child_names, verbose=verbose))
return '\n'.join(result)
def iter_files(self):
"""Iterate over files."""
# file_iter may be a callable or an iterator
if callable(self.file_iter):
return self.file_iter()
return iter(self.file_iter)
class FeatureCommand(ImportCommand):
def __init__(self, feature_name, value=None, lineno=0):
ImportCommand.__init__(self, b'feature')
self.feature_name = feature_name
self.value = value
self.lineno = lineno
def __bytes__(self):
if self.value is None:
value_text = b''
else:
value_text = b'=' + self.value
return b'feature ' + self.feature_name + value_text
class ProgressCommand(ImportCommand):
def __init__(self, message):
ImportCommand.__init__(self, b'progress')
self.message = message
def __bytes__(self):
return b'progress ' + self.message
class ResetCommand(ImportCommand):
def __init__(self, ref, from_):
ImportCommand.__init__(self, b'reset')
self.ref = ref
self.from_ = from_
def __bytes__(self):
if self.from_ is None:
from_line = b''
else:
# According to git-fast-import(1), the extra LF is optional here;
# however, versions of git up to 1.5.4.3 had a bug by which the LF
# was needed. Always emit it, since it doesn't hurt and maintains
# compatibility with older versions.
# http://git.kernel.org/?p=git/git.git;a=commit;h=655e8515f279c01f525745d443f509f97cd805ab
from_line = b'\nfrom ' + self.from_ + b'\n'
return b'reset ' + self.ref + from_line
class TagCommand(ImportCommand):
def __init__(self, id, from_, tagger, message):
ImportCommand.__init__(self, b'tag')
self.id = id
self.from_ = from_
self.tagger = tagger
self.message = message
def __bytes__(self):
if self.from_ is None:
from_line = b''
else:
from_line = b'\nfrom ' + self.from_
if self.tagger is None:
tagger_line = b''
else:
tagger_line = b'\ntagger ' + format_who_when(self.tagger)
if self.message is None:
msg_section = b''
else:
msg = self.message
msg_section = ('\ndata %d\n' % len(msg)).encode('ascii') + msg
return b'tag ' + self.id + from_line + tagger_line + msg_section
class FileCommand(ImportCommand):
"""Base class for file commands."""
pass
class FileModifyCommand(FileCommand):
def __init__(self, path, mode, dataref, data):
# Either dataref or data should be null
FileCommand.__init__(self, b'filemodify')
self.path = check_path(path)
self.mode = mode
self.dataref = dataref
self.data = data
        self._binary = ['data']  # attribute names are str keys in __dict__
def __bytes__(self):
return self.to_string(include_file_contents=True)
    def __str__(self):
        # __str__ must return text; decode the byte rendering produced by
        # to_string() (file contents are omitted to keep the repr short).
        return self.to_string(include_file_contents=False).decode('utf-8')
def _format_mode(self, mode):
if mode in (0o755, 0o100755):
return b'755'
elif mode in (0o644, 0o100644):
return b'644'
elif mode == 0o40000:
return b'040000'
elif mode == 0o120000:
return b'120000'
elif mode == 0o160000:
return b'160000'
else:
raise AssertionError('Unknown mode %o' % mode)
def to_string(self, include_file_contents=False):
datastr = b''
if stat.S_ISDIR(self.mode):
dataref = b'-'
elif self.dataref is None:
dataref = b'inline'
if include_file_contents:
datastr = (
('\ndata %d\n' % len(self.data)).encode('ascii') +
self.data)
else:
dataref = self.dataref
path = format_path(self.path)
return b' '.join(
[b'M', self._format_mode(self.mode), dataref, path + datastr])
class FileDeleteCommand(FileCommand):
def __init__(self, path):
FileCommand.__init__(self, b'filedelete')
self.path = check_path(path)
def __bytes__(self):
return b' '.join([b'D', format_path(self.path)])
class FileCopyCommand(FileCommand):
def __init__(self, src_path, dest_path):
FileCommand.__init__(self, b'filecopy')
self.src_path = check_path(src_path)
self.dest_path = check_path(dest_path)
def __bytes__(self):
return b' '.join(
[b'C', format_path(self.src_path, quote_spaces=True),
format_path(self.dest_path)])
class FileRenameCommand(FileCommand):
def __init__(self, old_path, new_path):
FileCommand.__init__(self, b'filerename')
self.old_path = check_path(old_path)
self.new_path = check_path(new_path)
def __bytes__(self):
return b' '.join([
b'R',
format_path(self.old_path, quote_spaces=True),
format_path(self.new_path)]
)
class FileDeleteAllCommand(FileCommand):
def __init__(self):
FileCommand.__init__(self, b'filedeleteall')
def __bytes__(self):
return b'deleteall'
class NoteModifyCommand(FileCommand):
def __init__(self, from_, data):
super(NoteModifyCommand, self).__init__(b'notemodify')
self.from_ = from_
self.data = data
self._binary = ['data']
def __bytes__(self):
return (b'N inline :' + self.from_ +
('\ndata %d\n' % len(self.data)).encode('ascii') + self.data)
def check_path(path):
"""Check that a path is legal.
:return: the path if all is OK
:raise ValueError: if the path is illegal
"""
if path is None or path == b'' or path.startswith(b'/'):
raise ValueError("illegal path '%s'" % path)
if not isinstance(path, bytes):
raise TypeError("illegal type for path '%r'" % path)
return path
def format_path(p, quote_spaces=False):
"""Format a path in utf8, quoting it if necessary."""
if b'\n' in p:
        # str.replace keeps the replacement literal; re.sub would interpret
        # b'\\n' as a template escape and put a raw newline straight back.
        p = p.replace(b'\n', b'\\n')
quote = True
    else:
        quote = p.startswith(b'"') or (quote_spaces and b' ' in p)
    if quote:
        extra = b' ' if GIT_FAST_IMPORT_NEEDS_EXTRA_SPACE_AFTER_QUOTE else b''
        p = b'"' + p + b'"' + extra
return p
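# Minimal usage sketch (not part of the upstream API surface): shows the
# quoting rules implemented above, assuming the module default of
# GIT_FAST_IMPORT_NEEDS_EXTRA_SPACE_AFTER_QUOTE = False. Kept as an uncalled
# helper so importing this module stays side-effect free.
def _demo_format_path():
    # Plain paths pass through untouched.
    assert format_path(b'dir/file.txt') == b'dir/file.txt'
    # The source side of copy/rename commands asks for space quoting.
    assert format_path(b'file 1.txt', quote_spaces=True) == b'"file 1.txt"'
    # Embedded newlines always force quoting, with the LF escaped as \n.
    assert format_path(b'a\nb') == b'"a\\nb"'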
def format_who_when(fields):
"""Format tuple of name,email,secs-since-epoch,utc-offset-secs as bytes."""
offset = fields[3]
if offset < 0:
offset_sign = b'-'
offset = abs(offset)
else:
offset_sign = b'+'
offset_hours = offset // 3600
offset_minutes = offset // 60 - offset_hours * 60
offset_str = (
offset_sign +
('%02d%02d' % (offset_hours, offset_minutes)).encode('ascii'))
name = fields[0]
if name == b'':
sep = b''
else:
sep = b' '
name = utf8_bytes_string(name)
email = fields[1]
email = utf8_bytes_string(email)
return b''.join(
(name, sep, b'<', email, b'> ',
("%d" % fields[2]).encode('ascii'), b' ', offset_str))
def format_property(name, value):
"""Format the name and value (both unicode) of a property as a string."""
result = b''
utf8_name = utf8_bytes_string(name)
result = b'property ' + utf8_name
if value is not None:
utf8_value = utf8_bytes_string(value)
result += (b' ' + ('%d' % len(utf8_value)).encode('ascii') +
b' ' + utf8_value)
return result
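# End-to-end sketch (illustrative only, not part of the upstream API): build a
# minimal commit the way a fast-import stream would carry it, plus a one-line
# check of format_property() above. Assumes helpers.utf8_bytes_string passes
# bytes through unchanged. Nothing here runs on import.
def _demo_commit_round_trip():
    committer = (b'Jane', b'jane@example.com', 1234567890, 0)
    commit = CommitCommand(
        ref=b'refs/heads/master', mark=b'1', author=None,
        committer=committer, message=b'hi\n', from_=None, merges=None,
        file_iter=[FileModifyCommand(b'a.txt', 0o100644, None, b'x')])
    data = bytes(commit)
    assert data.startswith(b'commit refs/heads/master\nmark :1\n')
    assert b'\nM 644 inline a.txt\ndata 1\nx' in data
    assert format_property(u'flag', None) == b'property flag'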
| jelmer/python-fastimport | fastimport/commands.py | Python | gpl-2.0 | 16,185 |
# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# GUIDefines.py - common definitions of GUI-related constants for GUIScripts
#button flags
IE_GUI_BUTTON_NORMAL = 0x00000004 #default button, doesn't stick
IE_GUI_BUTTON_NO_IMAGE = 0x00000001
IE_GUI_BUTTON_PICTURE = 0x00000002
IE_GUI_BUTTON_SOUND = 0x00000004
IE_GUI_BUTTON_CAPS = 0x00000008 #capitalize all the text
IE_GUI_BUTTON_CHECKBOX = 0x00000010 #or radio button
IE_GUI_BUTTON_RADIOBUTTON= 0x00000020 #sticks in a state
IE_GUI_BUTTON_DEFAULT = 0x00000040 #enter key triggers it
IE_GUI_BUTTON_ANIMATED = 0x00000080 # the button is animated
#these bits are hardcoded in the .chu structure, don't move them
IE_GUI_BUTTON_ALIGN_LEFT = 0x00000100
IE_GUI_BUTTON_ALIGN_RIGHT= 0x00000200
IE_GUI_BUTTON_ALIGN_TOP = 0x00000400
IE_GUI_BUTTON_ALIGN_BOTTOM = 0x00000800
IE_GUI_BUTTON_ALIGN_ANCHOR = 0x00001000
IE_GUI_BUTTON_LOWERCASE = 0x00002000
IE_GUI_BUTTON_MULTILINE = 0x00004000 # don't set the SINGLE_LINE font rendering flag
#end of hardcoded section
IE_GUI_BUTTON_DRAGGABLE = 0x00008000
IE_GUI_BUTTON_NO_TEXT = 0x00010000 # don't draw button label
IE_GUI_BUTTON_PLAYRANDOM = 0x00020000 # the button animation is random
IE_GUI_BUTTON_PLAYONCE = 0x00040000 # the button animation won't restart
IE_GUI_BUTTON_PLAYALWAYS = 0x00080000 # animation will play when game is paused
IE_GUI_BUTTON_CENTER_PICTURES = 0x00100000 # center the button's PictureList
IE_GUI_BUTTON_BG1_PAPERDOLL = 0x00200000 # BG1-style paperdoll
IE_GUI_BUTTON_HORIZONTAL = 0x00400000 # horizontal clipping of overlay
IE_GUI_BUTTON_CANCEL = 0x00800000 # escape key triggers it
#scrollbar flags
IE_GUI_SCROLLBAR_DEFAULT = 0x00000040 # mousewheel triggers it (same value as default button)
#textarea flags
IE_GUI_TEXTAREA_AUTOSCROLL = 0x05000001
IE_GUI_TEXTAREA_HISTORY = 0x05000002
IE_GUI_TEXTAREA_EDITABLE = 0x05000004
#gui control types
IE_GUI_BUTTON = 0
IE_GUI_PROGRESS = 1
IE_GUI_SLIDER = 2
IE_GUI_EDIT = 3
IE_GUI_TEXTAREA = 5
IE_GUI_LABEL = 6
IE_GUI_SCROLLBAR = 7
IE_GUI_WORLDMAP = 8
IE_GUI_MAP = 9
#events
IE_GUI_BUTTON_ON_PRESS = 0x00000000
IE_GUI_MOUSE_OVER_BUTTON = 0x00000001
IE_GUI_MOUSE_ENTER_BUTTON = 0x00000002
IE_GUI_MOUSE_LEAVE_BUTTON = 0x00000003
IE_GUI_BUTTON_ON_SHIFT_PRESS= 0x00000004
IE_GUI_BUTTON_ON_RIGHT_PRESS= 0x00000005
IE_GUI_BUTTON_ON_DRAG_DROP = 0x00000006
IE_GUI_BUTTON_ON_DRAG_DROP_PORTRAIT = 0x00000007
IE_GUI_BUTTON_ON_DRAG = 0x00000008
IE_GUI_BUTTON_ON_DOUBLE_PRESS = 0x00000009
IE_GUI_PROGRESS_END_REACHED = 0x01000000
IE_GUI_SLIDER_ON_CHANGE = 0x02000000
IE_GUI_EDIT_ON_CHANGE = 0x03000000
IE_GUI_EDIT_ON_DONE = 0x03000001
IE_GUI_EDIT_ON_CANCEL = 0x03000002
IE_GUI_TEXTAREA_ON_CHANGE = 0x05000000
IE_GUI_TEXTAREA_ON_SELECT = 0x05000001
IE_GUI_LABEL_ON_PRESS = 0x06000000
IE_GUI_SCROLLBAR_ON_CHANGE = 0x07000000
IE_GUI_WORLDMAP_ON_PRESS = 0x08000000
IE_GUI_MOUSE_ENTER_WORLDMAP = 0x08000002
IE_GUI_MAP_ON_PRESS = 0x09000000
IE_GUI_MAP_ON_RIGHT_PRESS = 0x09000005
IE_GUI_MAP_ON_DOUBLE_PRESS = 0x09000008
#common states
IE_GUI_CONTROL_FOCUSED = 0x7f000080
#button states
IE_GUI_BUTTON_ENABLED = 0x00000000
IE_GUI_BUTTON_UNPRESSED = 0x00000000
IE_GUI_BUTTON_PRESSED = 0x00000001
IE_GUI_BUTTON_SELECTED = 0x00000002
IE_GUI_BUTTON_DISABLED = 0x00000003
# Like DISABLED, but processes MouseOver events and draws UNPRESSED bitmap
IE_GUI_BUTTON_LOCKED = 0x00000004
# Draws DISABLED bitmap, but it isn't disabled
IE_GUI_BUTTON_FAKEDISABLED = 0x00000005
# Draws PRESSED bitmap, but it isn't shifted
IE_GUI_BUTTON_FAKEPRESSED = 0x00000006
#edit field states
IE_GUI_EDIT_NUMBER = 0x030000001
#mapcontrol states (add 0x090000000 if used with SetControlStatus)
IE_GUI_MAP_NO_NOTES = 0
IE_GUI_MAP_VIEW_NOTES = 1
IE_GUI_MAP_SET_NOTE = 2
IE_GUI_MAP_REVEAL_MAP = 3
# !!! Keep these synchronized with WorldMapControl.h !!!
# WorldMap label colors
IE_GUI_WMAP_COLOR_BACKGROUND = 0
IE_GUI_WMAP_COLOR_NORMAL = 1
IE_GUI_WMAP_COLOR_SELECTED = 2
IE_GUI_WMAP_COLOR_NOTVISITED = 3
# !!! Keep these synchronized with Font.h !!!
IE_FONT_ALIGN_LEFT = 0x00
IE_FONT_ALIGN_CENTER = 0x01
IE_FONT_ALIGN_RIGHT = 0x02
IE_FONT_ALIGN_BOTTOM = 0x04
IE_FONT_ALIGN_TOP = 0x10 # Single-Line and Multi-Line Text
IE_FONT_ALIGN_MIDDLE = 0x20 #Only for single line Text
IE_FONT_SINGLE_LINE = 0x40
OP_SET = 0
OP_AND = 1
OP_OR = 2
OP_XOR = 3
OP_NAND = 4
# Window position anchors/alignments
# !!! Keep these synchronized with Window.h !!!
WINDOW_TOPLEFT = 0x00
WINDOW_CENTER = 0x01
WINDOW_ABSCENTER = 0x02
WINDOW_RELATIVE = 0x04
WINDOW_SCALE = 0x08
WINDOW_BOUNDED = 0x10
# GameScreen flags
GS_PARTYAI = 1
GS_SMALLDIALOG = 0
GS_MEDIUMDIALOG = 2
GS_LARGEDIALOG = 6
GS_DIALOGMASK = 6
GS_DIALOG = 8
GS_HIDEGUI = 16
GS_OPTIONPANE = 32
GS_PORTRAITPANE = 64
GS_MAPNOTE = 128
# GameControl screen flags
# !!! Keep these synchronized with GameControl.h !!!
SF_DISABLEMOUSE = 1
SF_CENTERONACTOR = 2
SF_ALWAYSCENTER = 4
SF_GUIENABLED = 8
SF_LOCKSCROLL = 16
# GameControl target modes
# !!! Keep these synchronized with GameControl.h !!!
TARGET_MODE_NONE = 0
TARGET_MODE_TALK = 1
TARGET_MODE_ATTACK = 2
TARGET_MODE_CAST = 3
TARGET_MODE_DEFEND = 4
TARGET_MODE_PICK = 5
GA_SELECT = 16
GA_NO_DEAD = 32
GA_POINT = 64
GA_NO_HIDDEN = 128
GA_NO_ALLY = 256
GA_NO_ENEMY = 512
GA_NO_NEUTRAL = 1024
GA_NO_SELF = 2048
# Game features, for Interface::SetFeatures()
# Defined in globals.h
GF_ALL_STRINGS_TAGGED = 1
# Shadow color for ShowModal()
# !!! Keep these synchronized with Interface.h !!!
MODAL_SHADOW_NONE = 0
MODAL_SHADOW_GRAY = 1
MODAL_SHADOW_BLACK = 2
# Flags for SetVisible()
# !!! Keep these synchronized with Interface.h !!!
#WINDOW_INVALID = -1
WINDOW_INVISIBLE = 0
WINDOW_VISIBLE = 1
WINDOW_GRAYED = 2
WINDOW_FRONT = 3
# character resource directories
# !!! Keep these synchronized with Interface.h !!!
CHR_PORTRAITS = 0
CHR_SOUNDS = 1
CHR_EXPORTS = 2
CHR_SCRIPTS = 3
# Flags for GameSelectPC()
# !!! Keep these synchronized with Game.h !!!
SELECT_NORMAL = 0x00
SELECT_REPLACE = 0x01
SELECT_QUIET = 0x02
# Spell types
# !!! Keep these synchronized with Spellbook.h !!!
IE_SPELL_TYPE_PRIEST = 0
IE_SPELL_TYPE_WIZARD = 1
IE_SPELL_TYPE_INNATE = 2
# IWD2 spell types
IE_IWD2_SPELL_BARD = 0
IE_IWD2_SPELL_CLERIC = 1
IE_IWD2_SPELL_DRUID = 2
IE_IWD2_SPELL_PALADIN = 3
IE_IWD2_SPELL_RANGER = 4
IE_IWD2_SPELL_SORCERER = 5
IE_IWD2_SPELL_WIZARD = 6
IE_IWD2_SPELL_DOMAIN = 7
IE_IWD2_SPELL_INNATE = 8
IE_IWD2_SPELL_SONG = 9
IE_IWD2_SPELL_SHAPE = 10
# Item Flags bits
# !!! Keep these synchronized with Item.h !!!
IE_ITEM_CRITICAL = 0x00000001
IE_ITEM_TWO_HANDED = 0x00000002
IE_ITEM_MOVABLE = 0x00000004
IE_ITEM_DISPLAYABLE = 0x00000008
IE_ITEM_CURSED = 0x00000010
IE_ITEM_NOT_COPYABLE = 0x00000020
IE_ITEM_MAGICAL = 0x00000040
IE_ITEM_BOW = 0x00000080
IE_ITEM_SILVER = 0x00000100
IE_ITEM_COLD_IRON = 0x00000200
IE_ITEM_STOLEN = 0x00000400
IE_ITEM_CONVERSABLE = 0x00000800
IE_ITEM_PULSATING = 0x00001000
IE_ITEM_UNSELLABLE = (IE_ITEM_CRITICAL | IE_ITEM_STOLEN)
# CREItem (SlotItem) Flags bits
# !!! Keep these synchronized with Inventory.h !!!
IE_INV_ITEM_IDENTIFIED = 0x01
IE_INV_ITEM_UNSTEALABLE = 0x02
IE_INV_ITEM_STOLEN = 0x04
IE_INV_ITEM_STEEL = 0x04 # pst only
IE_INV_ITEM_UNDROPPABLE = 0x08
# GemRB extensions
IE_INV_ITEM_ACQUIRED = 0x10
IE_INV_ITEM_DESTRUCTIBLE = 0x20
IE_INV_ITEM_EQUIPPED = 0x40
IE_INV_ITEM_STACKED = 0x80
# these come from the original item bits
IE_INV_ITEM_CRITICAL = 0x100
IE_INV_ITEM_TWOHANDED = 0x200
IE_INV_ITEM_MOVABLE = 0x400
IE_INV_ITEM_UNKNOWN800 = 0x800
IE_INV_ITEM_CURSED = 0x1000
IE_INV_ITEM_UNKNOWN2000 = 0x2000
IE_INV_ITEM_MAGICAL = 0x4000
IE_INV_ITEM_BOW = 0x8000
IE_INV_ITEM_SILVER = 0x10000
IE_INV_ITEM_COLDIRON = 0x20000
IE_INV_ITEM_STOLEN2 = 0x40000
IE_INV_ITEM_CONVERSIBLE = 0x80000
IE_INV_ITEM_PULSATING = 0x100000
#repeat key flags
GEM_RK_DOUBLESPEED = 1
GEM_RK_DISABLE = 2
GEM_RK_QUADRUPLESPEED = 4
SHOP_BUY = 1
SHOP_SELL = 2
SHOP_ID = 4
SHOP_STEAL = 8
SHOP_SELECT = 0x40
SHOP_FULL = 0x8000
#game constants
# !!! Keep this synchronized with Video.h !!!
TOOLTIP_DELAY_FACTOR = 250
#game strings
STR_LOADMOS = 0
STR_AREANAME = 1
STR_TEXTSCREEN = 2
#game integers
SV_BPP = 0
SV_WIDTH = 1
SV_HEIGHT = 2
SV_GAMEPATH = 3
SV_TOUCH = 4
# GUIEnhancements bits
GE_SCROLLBARS = 1
GE_TRY_IDENTIFY_ON_TRANSFER = 2
GE_ALWAYS_OPEN_CONTAINER_ITEMS = 4
# Log Levels
# !!! Keep this synchronized with System/Logging.h !!!
# no need for LOG_INTERNAL here since its internal to the logger class
LOG_NONE = -1 # here just for the scripts, not needed in core
LOG_FATAL = 0
LOG_ERROR = 1
LOG_WARNING = 2
LOG_MESSAGE = 3
LOG_COMBAT = 4
LOG_DEBUG = 5
# GetTableValue return modes
GTV_STR = 0
GTV_INT = 1
GTV_STAT = 2
GTV_REF = 3
# UpdateActionsWindow action levels
UAW_STANDARD = 0
UAW_EQUIPMENT = 1
UAW_SPELLS = 2
UAW_INNATES = 3
UAW_QWEAPONS = 4
UAW_ALLMAGE = 5
UAW_SKILLS = 6
UAW_QSPELLS = 7
UAW_QSHAPES = 8
UAW_QSONGS = 9
UAW_BOOK = 10
UAW_2DASPELLS = 11
UAW_SPELLS_DIRECT = 12
UAW_QITEMS = 13
# item extended header location field
ITEM_LOC_WEAPON = 1 # show on quick weapon ability selection
ITEM_LOC_EQUIPMENT = 3 # show on quick item ability selection
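# Quick sanity sketch (illustrative helper, not part of the engine API): the
# GS_* dialog sizes share a two-bit field, so GS_DIALOGMASK extracts just the
# dialog-size portion of a packed GameScreen flag set. Not called on import.
def _demo_dialog_mask():
    flags = GS_PARTYAI | GS_LARGEDIALOG | GS_HIDEGUI
    assert (flags & GS_DIALOGMASK) == GS_LARGEDIALOG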
| Tomsod/gemrb | gemrb/GUIScripts/GUIDefines.py | Python | gpl-2.0 | 10,331 |
# This Python file uses the following encoding: utf-8
__author__ = 'JackRao'
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def add_layer(inputs, in_size, out_size, activation_function=None):
    # One fully connected layer: outputs = activation(inputs . W + b)
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # small positive bias
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    # A prediction is correct when the argmax over the 10 class scores matches
    # the one-hot label.
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result
# placeholders: 784 = 28x28 flattened pixels, 10 = digit classes
xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])
# add output layer
prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
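# Hedged note (not in the original script): taking log() of a softmax output by
# hand can hit log(0) for confident predictions. TF 1.x ships a fused,
# numerically stable alternative that expects raw logits, i.e. a final layer
# built with activation_function=None:
#   logits = add_layer(xs, 784, 10, activation_function=None)
#   cross_entropy = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))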
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
if i % 50 == 0:
        print(compute_accuracy(
            mnist.test.images, mnist.test.labels))
| caterzy/JackPlayer | TFTest/Classification.py | Python | gpl-2.0 | 1,630 |
# coding=UTF-8
### Some global variables
CONF_NAME = 'server.conf'
LIB_DIRNAME = 'lib'
TEMPLATES_DIRNAME = 'templates'
DEBUG = True
# Import all stuff we need
import os
import sys
import socket
import cherrypy
import datetime
from mako.template import Template
from mako.lookup import TemplateLookup
import schemaish, validatish, formish
from formish.renderer import _default_renderer
from pkg_resources import resource_filename
# Import the local copy of the OOOP module
current_folder = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(current_folder, LIB_DIRNAME, 'ooop'))
from ooop import OOOP
# Transform relative path to absolute
template_folder = os.path.join(current_folder, TEMPLATES_DIRNAME)
class MakoHandler(cherrypy.dispatch.LateParamPageHandler):
""" Callable which sets response.body.
Source: http://tools.cherrypy.org/wiki/Mako
"""
def __init__(self, template, next_handler):
self.template = template
self.next_handler = next_handler
def __call__(self):
env = globals().copy()
env.update(self.next_handler())
return self.template.render(**env)
class MakoLoader(object):
def __init__(self):
self.lookups = {}
def __call__(self, filename, directories=[template_folder], module_directory=None,
collection_size=-1, output_encoding='utf-8', input_encoding='utf-8',
encoding_errors='replace'):
# Always add formish's Mako templates
directories.append(resource_filename('formish', 'templates/mako'))
# Find the appropriate template lookup.
key = (tuple(directories), module_directory)
try:
lookup = self.lookups[key]
except KeyError:
lookup = TemplateLookup(directories=directories,
module_directory=module_directory,
collection_size=collection_size,
input_encoding=input_encoding,
output_encoding=output_encoding,
encoding_errors=encoding_errors)
self.lookups[key] = lookup
cherrypy.request.lookup = lookup
# Replace the current handler.
cherrypy.request.template = t = lookup.get_template(filename)
cherrypy.request.handler = MakoHandler(t, cherrypy.request.handler)
def redirect_home_on_error(status, message, traceback, version):
""" Callable to redirect to home page on any HTTP error
"""
home_url = "http://www.example.com"
    # This is an ugly intermediate page to go back to the parent app and escape from the iframe we are supposed to be in.
return """
<html>
<body onload="window.top.location.href = '%s';">
<h1>Redirecting to the parent website...</h1>
</body>
</html>
""" % home_url
def main():
    # Here is the default config for static content
conf = { '/static': { 'tools.staticdir.on' : True
, 'tools.staticdir.dir': os.path.join(current_folder, 'static')
}
, '/static/formish.css': { 'tools.staticfile.on' : True
, 'tools.staticfile.filename': resource_filename('formish', 'css/formish.css')
}
, '/static/formish.js' : { 'tools.staticfile.on' : True
, 'tools.staticfile.filename': resource_filename('formish', 'js/formish.js')
}
, '/favicon.png' : { 'tools.staticfile.on' : True
, 'tools.staticfile.filename': os.path.join(current_folder, 'static/favicon.png')
}
}
# Desactivate encoding to bypass CherryPy 3.2 new defaults (see: http://www.cherrypy.org/wiki/UpgradeTo32#Responseencoding)
cherrypy.config.update({'tools.encode.on': False})
# Load and apply the global config file
conf_file = os.path.join(current_folder, CONF_NAME)
cherrypy.config.update(conf_file)
# Only show default error page and traceback in debug mode
if DEBUG:
cherrypy.config.update({'autoreload.on': True})
else:
cherrypy.config.update({ 'autoreload.on' : False
, 'request.show_tracebacks': False
, 'error_page.default' : os.path.join(current_folder, 'static/error.html')
# Alternatively, we can call a method to handle generic HTTP errors
#, 'error_page.default' : redirect_home_on_error
# Treat 503 connectivity errors as maintenance
, 'error_page.503' : os.path.join(current_folder, 'static/maintenance.html')
})
# Monkey patch convertish to let it unfold multiple checkbox widgets encoded with dottedish
from convertish.convert import NumberToStringConverter
from dottedish.dottedlist import DottedList, unwrap_list
legacy_to_type = NumberToStringConverter.to_type
def to_type_wrapper(self_class, value, converter_options={}):
# Force decoding of dotted notation
if type(value) == DottedList:
value = unwrap_list(value)
if type(value) == type([]) and len(value) == 1:
value = value[0]
return legacy_to_type(self_class, value, converter_options)
NumberToStringConverter.to_type = to_type_wrapper
from convertish.convert import DateToStringConverter
# Monkey patch convertish again, but this time to parse our french-localized dates
legacy_parseDate = DateToStringConverter.parseDate
def parseDate_wrapper(self_class, value):
return legacy_parseDate(self_class, '-'.join(value.strip().split('/')[::-1]))
DateToStringConverter.parseDate = parseDate_wrapper
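    # Illustration of the reordering above (pure string munging):
    #   '31/12/2020'.split('/')[::-1] -> ['2020', '12', '31'] -> '2020-12-31'
    # i.e. the French dd/mm/yyyy form is rewritten to the ISO form the stock
    # convertish parser expects.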
    # This patch mirrors the one above, letting convertish render our datetime objects with our localized format
def from_type_replacement(self_class, value, converter_options={}):
return value is None and None or value.strftime('%d/%m/%Y')
DateToStringConverter.from_type = from_type_replacement
# Open a connection to our local OpenERP instance
try:
openerp = OOOP( user = 'admin'
, pwd = 'admin'
, dbname = 'kev_test'
, uri = 'http://localhost'
, port = 8069 # We are targetting the HTTP web service here
)
except (socket.timeout, socket.error):
raise cherrypy.HTTPError(503)
# Setup our Mako decorator
loader = MakoLoader()
cherrypy.tools.mako = cherrypy.Tool('on_start_resource', loader)
    # Let the default formish Mako renderer look at our local directory first.
    # This lets us override the default formish Mako templates.
_default_renderer.lookup.directories.insert(0, template_folder)
# Import our application logic
from app import app
# Start the CherryPy server
cherrypy.quickstart(app(openerp), config=conf)
if __name__ == '__main__':
main()
| kdeldycke/cherrypy_mako_formish_ooop_boilerplate | server.py | Python | gpl-2.0 | 7,319 |
#!/usr/bin/python
import platform
import sys
from os import path
from os import system
import sqlite3
#from scatterhoard.dbhoard import initDB
#Configure wack usage based on OS. / for linux/unix/macos and \ for Windows
wack = "/"
systemPlatform = 'Linux'
if platform.system() == 'Windows':
wack = "\\"
systemPlatform = 'Windows'
system("mode 200,200")
#Number of times a chunk from a file is backed up. Default is 3.
numOfChunkCopies = 3
#sctr.db is the default database. It must exist as the main config data is stored there. When using other databases (as a way to have separate namespaces)
#You set the workingDatabase option to the other database from within config table in db sctr.db
#If ./sctr.db is not found workingDatabase defaults to sctr.db and is then created automatically during initialize() on program startup.
#path is the path to the directory, alter if you have a specific location for your database instead of the current working directory which is default
#retrieveWriteDir is the directory retrieved files will be written to. Default is the cwd of the executable python script.
#Database types includes MySQL, PostGres, Sqlite3, nosql
if not path.isfile('.' + wack + 'sctr.db'):
workingDatabase = "sctr.db"
databasePath = "."
retrieveWriteDir = "."
dbType = "sqlite3"
else:
results = []
c = sqlite3.connect('.' + wack + 'sctr.db')
cursor = c.cursor()
q = "select * from config"
cursor.execute(q)
c.commit()
results = cursor.fetchall()
c.close()
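    #Column layout assumed from the indexing below (inferred from this file
    #only, not documented elsewhere): [1]=retrieveWriteDir, [2]=workingDatabase,
    #[3]=databasePath, [4]=dbType, [6]=numOfChunkCopies, [7]=sshCom,
    #[8]=bytesStored, [9]=baseBytesOfFiles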
workingDatabase = results[0][2]
databasePath = results[0][3]
if workingDatabase != "sctr.db":
try:
c = sqlite3.connect(databasePath + wack + workingDatabase)
cursor = c.cursor()
q = "select * from config"
cursor.execute(q)
c.commit()
results = cursor.fetchall()
c.close()
databasePath = results[0][3]
        except Exception:
workingDatabase = 'sctr.db'
retrieveWriteDir = results[0][1]
dbType = results[0][4]
numOfChunkCopies = results[0][6]
bytesStored = results[0][8]
baseBytesOfFiles = results[0][9]
sshCom = results[0][7]
#Select Database. Includes current working directory. Alter if you have a specific directory.
SELECTED_DB = databasePath + wack + workingDatabase
#interactive cli switch
command = False
#Whether to add tags to files being backed up. Useful to turn off if you're doing a large batch that don't need tags...
addTagsCheck = False
#When clicking "search" in the retrieve tab whether the search sorts alphabetically or by most recently added
sortByBatchCheckBox = False
#Flag when running acorn command in gui
acornRunning = False
#flag when a bad read/write IO function is detected
failedIO = False
#array showing what storage locations should be treated as read only... locations added when errors in writing occur.
readOnlyLoc = []
#Job lists for retrieve() and backUpRun()
#these possibly should be moved into main program...
backJobs = []
retrieveJobs = []
tagJobs = []
tagLock = False # used for processing tagJobs, prevents mutple queueRuns stumbling over eachother
#Threading object. Used for running localhost webserver in background
threadH = None
#local webserver port when using the web UI
#webPort = 8127
#GUi availability via Tkinter
#imagePreview requires python-pil.imagetk package under linux
gui = False
guiObj = None
imagePreview = False
#httpd object that serves web UI. Accessed from a thread to prevent non blocking. Currently not used.
httpd = None
#error queue is an array of error messages. Rather than being dumped to stdout some functions will append to this array instead.
#holds 1000 errors before removing old messages
errQ = []
#sshclient variables, passwords, hosts
#multidimensional. Append a list to sshCreds such that
# [host, username, password, pkey, port]
sshCreds = []
#connection objects. Whenever using ssh functions look through the sshConnections list of objects first
#to determine if a live connection exists to that storage location.
sshConnections = []
#If paramiko is not available this is false. Prevents using ssh storage locations
sshAvailable = False
#If pkey fails to produce a valid key the first time, it will not continuously read from ~/.ssh/id_rsa
sshPkeyFailed = False
| grumblor/scatter-hoard | scatterhoard/shconfig.py | Python | gpl-2.0 | 4,180 |
import datetime
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from applications import settings
try:
from django.utils import timezone
now = timezone.now
except ImportError:
now = datetime.datetime.now
def action_handler(verb, **kwargs):
"""
Handler function to create Action instance upon action signal call.
"""
from applications.models import Action
kwargs.pop('signal', None)
actor = kwargs.pop('sender')
check_actionable_model(actor)
    # We must store the untranslated string
# If verb is an ugettext_lazyed string, fetch the original string
if hasattr(verb, '_proxy____args'):
verb = verb._proxy____args[0]
newaction = Action(
actor_content_type=ContentType.objects.get_for_model(actor),
actor_object_id=actor.pk,
verb=unicode(verb),
public=bool(kwargs.pop('public', True)),
description=kwargs.pop('description', None),
timestamp=kwargs.pop('timestamp', now())
)
for opt in ('target', 'action_object'):
obj = kwargs.pop(opt, None)
        if obj is not None:
check_actionable_model(obj)
setattr(newaction, '%s_object_id' % opt, obj.pk)
setattr(newaction, '%s_content_type' % opt,
ContentType.objects.get_for_model(obj))
if settings.USE_JSONFIELD and len(kwargs):
newaction.data = kwargs
newaction.save()
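# Usage sketch (hypothetical caller, django-activity-stream style): this
# handler is meant to be wired to an `action` signal, e.g.
#   action.send(request.user, verb='joined', target=group)
# which arrives here as action_handler(verb='joined', sender=request.user,
# target=group, ...).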
| meletakis/collato | esn/applications/actions.py | Python | gpl-2.0 | 1,489 |