code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M
# This file is distributed under the terms of the GNU General Public license.
# Copyright (C) 1999 Aloril (See the file COPYING for details).
import time
from mind.Goal import Goal
# goals for minds
def false(_): return False
def true(_): return True
class Delayed(Goal):
"""Will delay execution of sub goals until the specified time."""
def __init__(self, time: float, sub_goals: list, desc="A delayed goal."):
Goal.__init__(self, desc=desc, fulfilled=self.is_right_time, sub_goals=sub_goals)
self.time = time
def is_right_time(self, me):
# Return "false" when the time is right
is_right = time.time() < self.time
return is_right
class OneShot(Goal):
"""Will remove itself after the first successful execution of its subgoals."""
def __init__(self, sub_goals, desc="Executed once."):
Goal.__init__(self, desc=desc, sub_goals=sub_goals)
def check_goal_recursively(self, me, depth, debug_info):
res, debug_info = super().check_goal_recursively(me, depth, debug_info)
if res:
self.irrelevant = True
return res, debug_info
class DelayedOneShot(Goal):
"""Combines delayed execution with one shot. Useful when you want to perform one action once after a certain time."""
def __init__(self, sub_goals, desc="Executed once after a delay"):
Goal.__init__(self, desc=desc, sub_goals=[OneShot(sub_goals=[Delayed(time=time.time() + 1, sub_goals=sub_goals)])])
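# A minimal usage sketch (the NPC mind "me" and the `wave` goal below are
# hypothetical, for illustration only):
#
#   greet_later = DelayedOneShot(
#       sub_goals=[wave],
#       desc="Wave once, roughly a second from now.")
#
# The wrapping OneShot marks itself irrelevant after the first successful
# check, while the inner Delayed reports itself fulfilled (True) until
# time.time() passes the one-second target set above.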
class Condition(Goal):
"""
A conditional goal which first executes a function, and then sets the subgoals to one of two possibilities.
If the condition function returns None then none of the subgoals will be executed.
"""
def __init__(self, condition_fn, goals_true, goals_false, desc="condition"):
Goal.__init__(self, desc=desc, fulfilled=self.assess_condition)
self.condition_fn = condition_fn
self.goals_true = goals_true
self.goals_false = goals_false
def assess_condition(self, me):
result = self.condition_fn(me)
if result is None:
return True
if result:
self.sub_goals = self.goals_true
else:
self.sub_goals = self.goals_false
return False
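# Usage sketch (the condition function and goals are hypothetical): pick a
# different set of subgoals depending on the NPC's situation; a condition
# returning None skips the subgoals entirely.
#
#   patrol_or_rest = Condition(
#       condition_fn=lambda me: me.get_knowledge('focus', 'outdoors'),
#       goals_true=[patrol_goal],
#       goals_false=[rest_goal],
#       desc="Patrol when outdoors, otherwise rest.")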
class Sequence(Goal):
"""A goal which will check on all subgoals in order."""
def __init__(self, sub_goals, desc="Sequence of goals"):
Goal.__init__(self, desc=desc, sub_goals=sub_goals)
def get_reach(me):
reach = 0
own_reach = me.entity.get_prop_float('reach')
if own_reach:
reach += own_reach
attached_current = me.get_attached_entity("hand_primary")
if attached_current:
attached_reach = attached_current.get_prop_float('reach')
if attached_reach:
reach += attached_reach
return reach
def get_focused_location(me, what):
thing = get_focused_thing(me, what)
if thing:
return thing.location
return None
def get_focused_thing(me, what):
focus_id = me.get_knowledge('focus', what)
if focus_id is None:
return None
thing = me.map.get(focus_id)
if thing is None:
me.remove_knowledge('focus', what)
return None
return thing
def get_task(me, task_name):
"""Gets the task by the name from the 'tasks' property, if it exists."""
tasks_prop = me.entity.get_prop_map('tasks')
if tasks_prop and task_name in tasks_prop:
return tasks_prop[task_name]
| worldforge/cyphesis | data/rulesets/basic/scripts/mind/goals/common/common.py | Python | gpl-2.0 | 3,486 |
'''
VidSpot urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urlparse
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class VidSpotResolver(UrlResolver):
name = "vidspot"
domains = ["vidspot.net"]
pattern = '(?://|\.)(vidspot\.net)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = helpers.get_hidden(html)
html = self.net.http_POST(url, data).content
r = re.search('"sources"\s*:\s*\[(.*?)\]', html, re.DOTALL)
if r:
fragment = r.group(1)
stream_url = None
for match in re.finditer('"file"\s*:\s*"([^"]+)', fragment):
stream_url = match.group(1)
if stream_url:
stream_url = '%s?%s&direct=false' % (stream_url.split('?')[0], urlparse.urlparse(stream_url).query)
return stream_url + helpers.append_headers({'User-Agent': common.IE_USER_AGENT})
else:
raise ResolverError('could not find file')
else:
raise ResolverError('could not find sources')
def get_url(self, host, media_id):
return 'http://vidspot.net/embed-%s.html' % (media_id)
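# Usage sketch (the media id is a made-up placeholder; host/media_id are
# normally produced by matching `pattern` against a page URL):
#
#   resolver = VidSpotResolver()
#   stream = resolver.get_media_url('vidspot.net', 'abc123xyz0')
#
# get_media_url() raises ResolverError when the embed page contains no
# "sources" list, so callers should be ready to catch it.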
| dejay313/dojostreams | script.module.urlresolver/lib/urlresolver/plugins/vidspot.py | Python | gpl-2.0 | 2,031 |
# Django settings for barista project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Sinan Midillili', '[email protected]'),
)
DEFAULT_FROM_EMAIL = '[email protected]'  # no trailing comma: a comma here would turn the setting into a tuple
SERVER_EMAIL = '[email protected]'
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Istanbul'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media/files/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = 'media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
# STATIC_ROOT = os.path.join(os.path.realpath(os.path.dirname( __file__ )), 'media/' )
# STATIC_ROOT = os.path.join( os.path.dirname(__file__), 'media/')
# print STATIC_ROOT
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/media/'
# Additional locations of static files
# STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# ("suit/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/suit/')),
# ("static/css/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/css/')),
# ("static/images/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/images/')),
# ("static/js/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/js/')),
# ("static/markitup/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/markitup/')),
# )
# List of finder classes that know how to find static files in
# various locations.
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
#
# )
# Make this unique, and don't share it with anybody.
SECRET_KEY = '94*hza*y@ba!rcq#kalendermesrepcg8%)2%uye9x$1(%1w^x*e93'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
#
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
ROOT_URLCONF = 'barista.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'barista.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.realpath(os.path.dirname(__file__)), 'templates/'),
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'gunicorn',
'suit',
'barista.restaurants',
'django_extensions',
'django_kibrit',
'django.contrib.admin',
'django.contrib.admindocs',
)
SUIT_CONFIG = {
'ADMIN_NAME': 'Barista',
'SHOW_REQUIRED_ASTERISK': True
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
},
}
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'THERE IS A PASSWORD HERE'
EMAIL_USE_TLS = True
KIBRIT_PATH = "/home/snn/Projects/barista/src/barista"
TEMPLATE_CONTEXT_PROCESSORS += ('django_kibrit.context_processors.revision',)
POSTGIS_VERSION = (1, 5, 3)
try:
from settings_local import *
except ImportError:
pass
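# The optional settings_local.py imported above lets a deployment override
# any setting in this file. A minimal sketch (all values are examples):
#
#   # settings_local.py
#   DEBUG = False
#   ALLOWED_HOSTS = ['barista.example.com']
#   EMAIL_HOST_PASSWORD = 'real-password-goes-here'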
| sinanm89/barista | build/barista-site/barista/settings.py | Python | gpl-2.0 | 6,858 |
#!/usr/bin/env python3
import os
import shutil
import subprocess
import sys
if os.environ.get('DESTDIR'):
install_root = os.environ.get('DESTDIR') + os.path.abspath(sys.argv[1])
else:
install_root = sys.argv[1]
if not os.environ.get('DESTDIR'):
schemadir = os.path.join(install_root, 'glib-2.0', 'schemas')
print('Compile gsettings schemas...')
subprocess.call(['glib-compile-schemas', schemadir])
# FIXME: Meson is unable to copy a generated target file:
# https://groups.google.com/forum/#!topic/mesonbuild/3iIoYPrN4P0
dst_dir = os.path.join(install_root, 'wayland-sessions')
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
src = os.path.join(install_root, 'xsessions', 'gnome.desktop')
dst = os.path.join(dst_dir, 'gnome.desktop')
shutil.copyfile(src, dst)
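# This script is normally registered in meson.build and run at install
# time, roughly like (the variable name `datadir` is an assumption):
#
#   meson.add_install_script('meson_post_install.py', datadir)
#
# The only contract is that argv[1] is the data install root containing
# the glib-2.0/schemas and xsessions directories used above.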
| GNOME/gnome-session | meson_post_install.py | Python | gpl-2.0 | 789 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module provides functionality to work with zip files."""
# Perhaps all methods should work with a wildcard to limit searches in some
# way (examples: *.po, base.xlf, pootle-terminology.tbx)
#TODO: consider also providing directories as we currently provide files
#TODO: refactor with existing zip code (xpi.py, etc.)
from os import path
from zipfile import ZipFile
from translate.storage import factory
from translate.storage import directory
from translate.misc import wStringIO
class ZIPFile(directory.Directory):
"""This class represents a ZIP file like a directory."""
def __init__(self, filename=None):
self.filename = filename
self.filedata = []
def unit_iter(self):
"""Iterator over all the units in all the files in this zip file."""
for dirname, filename in self.file_iter():
strfile = wStringIO.StringIO(self.archive.read(path.join(dirname, filename)))
strfile.filename = filename
store = factory.getobject(strfile)
#TODO: don't regenerate all the storage objects
for unit in store.unit_iter():
yield unit
def scanfiles(self):
"""Populate the internal file data."""
self.filedata = []
self.archive = ZipFile(self.filename)
for completename in self.archive.namelist():
dir, name = path.split(completename)
self.filedata.append((dir, name))
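# Usage sketch ("translations.zip" is a hypothetical archive):
#
#   zfile = ZIPFile("translations.zip")
#   zfile.scanfiles()
#   for unit in zfile.unit_iter():
#       print unit.source
#
# scanfiles() is called explicitly here because it is what sets up
# self.archive; the parent Directory class may also trigger it lazily.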
| mozilla/verbatim | vendor/lib/python/translate/storage/zip.py | Python | gpl-2.0 | 2,203 |
# module includes
import elliptic
import heat
import IRT
print "Loading comatmor version 0.0.1"
| fameyer/comatmor | src/comatmor/__init__.py | Python | gpl-2.0 | 96 |
# coding=utf-8
import random
import time
import threading
import unittest
from lru_cache import LruCache
class TestLruCache(unittest.TestCase):
def test_cache_normal(self):
a = []
@LruCache(maxsize=2, timeout=1)
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_none(self):
a = []
@LruCache(maxsize=2, timeout=1)
def foo(num):
a.append(num)
return None
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_when_timeout(self):
a = []
@LruCache(maxsize=2, timeout=1)
def foo(num):
a.append(num)
return num
foo(2)
time.sleep(2)
foo(2)
self.assertEqual(a, [2, 2])
def test_cache_when_cache_is_full(self):
a = []
@LruCache(maxsize=2, timeout=1)
def foo(num):
a.append(num)
return num
foo(1)
foo(2)
foo(3)
foo(1)
self.assertEqual(a, [1, 2, 3, 1])
def test_cache_with_multi_thread(self):
a = []
@LruCache(maxsize=10, timeout=1)
def foo(num):
a.append(num)
return num
for i in xrange(10):
threading.Thread(target=foo, args=(i, )).start()
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
foo(random.randint(0, 9))
self.assertEqual(set(a), set(range(10)))
def test_cache_with_multi_thread_two_func(self):
a = []
@LruCache(maxsize=10, timeout=1)
def foo(num):
a.append(num)
return num
b = []
@LruCache(maxsize=10, timeout=1)
def bar(num):
b.append(num)
return num + 1
for i in xrange(10):
threading.Thread(target=foo, args=(i, )).start()
threading.Thread(target=bar, args=(i, )).start()
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
feed = random.randint(0, 9)
self.assertEqual(foo(feed), feed)
self.assertEqual(bar(feed), feed + 1)
self.assertEqual(set(a), set(range(10)))
self.assertEqual(set(b), set(range(10)))
def test_cache_when_timeout_and_maxsize_is_none(self):
a = []
@LruCache()
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_when_timeout_is_none(self):
a = []
@LruCache(maxsize=10)
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_normal(self):
a = []
@LruCache(timeout=2)
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_timeout(self):
a = []
@LruCache(timeout=1)
def foo(num):
a.append(num)
return num
foo(1)
time.sleep(2)
foo(1)
self.assertEqual(a, [1, 1])
def test_cache_when_only_maxsize_is_none_normal_method(self):
a = []
class Func(object):
@LruCache(timeout=2)
def foo(self, num):
a.append(num)
return num
fun = Func()
fun.foo(1)
fun.foo(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_normal_method_timeout(self):
a = []
class Func(object):
@LruCache(timeout=1)
def foo(self, num):
a.append(num)
return num
fun = Func()
fun.foo(1)
time.sleep(2)
fun.foo(1)
self.assertEqual(a, [1, 1])
def test_invalidate(self):
a = []
@LruCache()
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
foo.invalidate(1)
foo(1)
self.assertEqual(a, [1, 1])
if __name__ == "__main__":
unittest.main()
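# The lru_cache module under test is not included here. A minimal sketch
# of a decorator that would satisfy these tests (illustrative only, not
# the real implementation):
#
#   import threading
#   from collections import OrderedDict
#
#   class LruCache(object):
#       def __init__(self, maxsize=None, timeout=None):
#           self.maxsize, self.timeout = maxsize, timeout
#           self.lock, self.store = threading.Lock(), OrderedDict()
#
#       def __call__(self, func):
#           def wrapper(*args):
#               with self.lock:
#                   hit = self.store.get(args)
#                   if hit and (self.timeout is None or
#                               time.time() - hit[1] < self.timeout):
#                       self.store[args] = self.store.pop(args)  # refresh LRU order
#                       return hit[0]
#               value = func(*args)
#               with self.lock:
#                   self.store[args] = (value, time.time())
#                   if self.maxsize and len(self.store) > self.maxsize:
#                       self.store.popitem(last=False)  # evict the oldest entry
#               return value
#           wrapper.invalidate = lambda *args: self.store.pop(args, None)
#           return wrapper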
| Backflipz/plugin.video.excubed | resources/lib/cache/tests.py | Python | gpl-2.0 | 4,445 |
# -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2013 Guake authors
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
import inspect
import time
# You can put calls to p() everywhere in this page to inspect timing
# g_start = time.time()
# def p():
# print(time.time() - g_start, __file__, inspect.currentframe().f_back.f_lineno)
import logging
import os
import signal
import subprocess
import sys
import uuid
from locale import gettext as _
from optparse import OptionParser
log = logging.getLogger(__name__)
from guake.globals import NAME
from guake.globals import bindtextdomain
from guake.support import print_support
from guake.utils import restore_preferences
from guake.utils import save_preferences
# When we are in the document generation on readthedocs, we do not have paths.py generated
try:
from guake.paths import LOCALE_DIR
bindtextdomain(NAME, LOCALE_DIR)
except: # pylint: disable=bare-except
pass
def main():
"""Parses the command line parameters and decide if dbus methods
should be called or not. If there is already a guake instance
running it will be used and a True value will be returned,
otherwise, false will be returned.
"""
# Force TERM to xterm-256color for compatibility with some old command line programs
os.environ["TERM"] = "xterm-256color"
# Force use of the X11 backend under Wayland
os.environ["GDK_BACKEND"] = "x11"
# do not use version keywords here, pbr might be slow to find the version of the Guake module
parser = OptionParser()
parser.add_option(
'-V',
'--version',
dest='version',
action='store_true',
default=False,
help=_('Show Guake version number and exit')
)
parser.add_option(
'-v',
'--verbose',
dest='verbose',
action='store_true',
default=False,
help=_('Enable verbose logging')
)
parser.add_option(
'-f',
'--fullscreen',
dest='fullscreen',
action='store_true',
default=False,
help=_('Put Guake in fullscreen mode')
)
parser.add_option(
'-t',
'--toggle-visibility',
dest='show_hide',
action='store_true',
default=False,
help=_('Toggles the visibility of the terminal window')
)
parser.add_option(
'--show',
dest="show",
action='store_true',
default=False,
help=_('Shows Guake main window')
)
parser.add_option(
'--hide',
dest='hide',
action='store_true',
default=False,
help=_('Hides Guake main window')
)
parser.add_option(
'-p',
'--preferences',
dest='show_preferences',
action='store_true',
default=False,
help=_('Shows Guake preference window')
)
parser.add_option(
'-a',
'--about',
dest='show_about',
action='store_true',
default=False,
help=_('Shows Guake\'s about info')
)
parser.add_option(
'-n',
'--new-tab',
dest='new_tab',
action='store',
default='',
help=_('Add a new tab (with current directory set to NEW_TAB)')
)
parser.add_option(
'-s',
'--select-tab',
dest='select_tab',
action='store',
default='',
help=_('Select a tab (SELECT_TAB is the index of the tab)')
)
parser.add_option(
'-g',
'--selected-tab',
dest='selected_tab',
action='store_true',
default=False,
help=_('Return the selected tab index.')
)
parser.add_option(
'-l',
'--selected-tablabel',
dest='selected_tablabel',
action='store_true',
default=False,
help=_('Return the selected tab label.')
)
parser.add_option(
'--split-vertical',
dest='split_vertical',
action='store_true',
default=False,
help=_('Split the selected tab vertically.')
)
parser.add_option(
'--split-horizontal',
dest='split_horizontal',
action='store_true',
default=False,
help=_('Split the selected tab horizontally.')
)
parser.add_option(
'-e',
'--execute-command',
dest='command',
action='store',
default='',
help=_('Execute an arbitrary command in the selected tab.')
)
parser.add_option(
'-i',
'--tab-index',
dest='tab_index',
action='store',
default='0',
help=_('Specify the tab to rename. Default is 0. Can be used to select tab by UUID.')
)
parser.add_option(
'--bgcolor',
dest='bgcolor',
action='store',
default='',
help=_('Set the hexadecimal (#rrggbb) background color of '
'the selected tab.')
)
parser.add_option(
'--fgcolor',
dest='fgcolor',
action='store',
default='',
help=_('Set the hexadecimal (#rrggbb) foreground color of the '
'selected tab.')
)
parser.add_option(
'--change-palette',
dest='palette_name',
action='store',
default='',
help=_('Change Guake palette scheme')
)
parser.add_option(
'--rename-tab',
dest='rename_tab',
metavar='TITLE',
action='store',
default='',
help=_(
'Rename the specified tab by --tab-index. Reset to default if TITLE is '
'a single dash "-".'
)
)
parser.add_option(
'-r',
'--rename-current-tab',
dest='rename_current_tab',
metavar='TITLE',
action='store',
default='',
help=_('Rename the current tab. Reset to default if TITLE is a '
'single dash "-".')
)
parser.add_option(
'-q',
'--quit',
dest='quit',
action='store_true',
default=False,
help=_('Tells Guake to go away =(')
)
parser.add_option(
'-u',
'--no-startup-script',
dest='execute_startup_script',
action='store_false',
default=True,
help=_('Do not execute the start up script')
)
parser.add_option(
'--save-preferences',
dest='save_preferences',
action='store',
default=None,
help=_('Save Guake preferences to this filename')
)
parser.add_option(
'--restore-preferences',
dest='restore_preferences',
action='store',
default=None,
help=_('Restore Guake preferences from this file')
)
parser.add_option(
'--support',
dest='support',
action='store_true',
default=False,
help=_('Show support information')
)
# checking mandatory dependencies
missing_deps = False
try:
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
except ValueError:
print("[ERROR] missing mandatory dependency: GtK 3.0")
missing_deps = True
try:
gi.require_version('Vte', '2.91') # vte-0.42
except ValueError:
print("[ERROR] missing mandatory dependency: Vte >= 0.42")
missing_deps = True
try:
gi.require_version('Keybinder', '3.0')
except ValueError:
print("[ERROR] missing mandatory dependency: Keybinder 3")
missing_deps = True
try:
import cairo
except ImportError:
print("[ERROR] missing mandatory dependency: cairo")
missing_deps = True
if missing_deps:
print(
"[ERROR] missing at least one system dependencies. "
"You need to install additional packages for Guake to run"
)
print(
"[ERROR] On Debian/Ubuntu you need to install the following libraries:\n"
" sudo apt-get install -y --no-install-recommends \\\n"
" gir1.2-keybinder-3.0 \\\n"
" gir1.2-notify-0.7 \\\n"
" gir1.2-vte-2.91 \\\n"
" gir1.2-wnck-3.0 \\\n"
" libkeybinder-3.0-0 \\\n"
" libutempter0 \\\n"
" python3 \\\n"
" python3-cairo \\\n"
" python3-dbus \\\n"
" python3-gi \\\n"
" python3-pbr \\\n"
" python3-pip"
)
sys.exit(1)
options = parser.parse_args()[0]
if options.version:
from guake import gtk_version
from guake import guake_version
from guake import vte_version
from guake import vte_runtime_version
print('Guake Terminal: {}'.format(guake_version()))
print('VTE: {}'.format(vte_version()))
print('VTE runtime: {}'.format(vte_runtime_version()))
print('Gtk: {}'.format(gtk_version()))
sys.exit(0)
if options.save_preferences and options.restore_preferences:
parser.error('options --save-preferences and --restore-preferences are mutually exclusive')
if options.save_preferences:
save_preferences(options.save_preferences)
sys.exit(0)
elif options.restore_preferences:
restore_preferences(options.restore_preferences)
sys.exit(0)
if options.support:
print_support()
sys.exit(0)
import dbus
from guake.dbusiface import DBUS_NAME
from guake.dbusiface import DBUS_PATH
from guake.dbusiface import DbusManager
from guake.guake_logging import setupLogging
instance = None
# Trying to get an already running instance of guake. If it is not
# possible, lets create a new instance. This function will return
# a boolean value depending on this decision.
try:
bus = dbus.SessionBus()
remote_object = bus.get_object(DBUS_NAME, DBUS_PATH)
already_running = True
except dbus.DBusException:
# can now configure the logging
setupLogging(options.verbose)
# COLORTERM is an environment variable set by some terminal emulators such as
# gnome-terminal.
# To avoid confusing applications running inside Guake, clean up COLORTERM at startup.
if "COLORTERM" in os.environ:
del os.environ['COLORTERM']
log.info("Guake not running, starting it")
# late loading of the Guake object, to speed up dbus comm
from guake.guake_app import Guake
instance = Guake()
remote_object = DbusManager(instance)
already_running = False
only_show_hide = True
if options.fullscreen:
remote_object.fullscreen()
if options.show:
remote_object.show_from_remote()
if options.hide:
remote_object.hide_from_remote()
if options.show_preferences:
remote_object.show_prefs()
only_show_hide = options.show
if options.new_tab:
remote_object.add_tab(options.new_tab)
only_show_hide = options.show
if options.select_tab:
selected = int(options.select_tab)
tab_count = int(remote_object.get_tab_count())
if 0 <= selected < tab_count:
remote_object.select_tab(selected)
else:
sys.stderr.write('invalid index: %d\n' % selected)
only_show_hide = options.show
if options.selected_tab:
selected = remote_object.get_selected_tab()
sys.stdout.write('%d\n' % selected)
only_show_hide = options.show
if options.selected_tablabel:
selectedlabel = remote_object.get_selected_tablabel()
sys.stdout.write('%s\n' % selectedlabel)
only_show_hide = options.show
if options.split_vertical:
remote_object.v_split_current_terminal()
only_show_hide = options.show
if options.split_horizontal:
remote_object.h_split_current_terminal()
only_show_hide = options.show
if options.command:
remote_object.execute_command(options.command)
only_show_hide = options.show
if options.tab_index and options.rename_tab:
try:
remote_object.rename_tab_uuid(str(uuid.UUID(options.tab_index)), options.rename_tab)
except ValueError:
remote_object.rename_tab(int(options.tab_index), options.rename_tab)
only_show_hide = options.show
if options.bgcolor:
remote_object.set_bgcolor(options.bgcolor)
only_show_hide = options.show
if options.fgcolor:
remote_object.set_fgcolor(options.fgcolor)
only_show_hide = options.show
if options.palette_name:
remote_object.change_palette_name(options.palette_name)
only_show_hide = options.show
if options.rename_current_tab:
remote_object.rename_current_tab(options.rename_current_tab)
only_show_hide = options.show
if options.show_about:
remote_object.show_about()
only_show_hide = options.show
if options.quit:
try:
remote_object.quit()
return True
except dbus.DBusException:
return True
if already_running and only_show_hide:
# here we know that guake was called without any parameter and
# it is already running, so let's toggle its visibility.
remote_object.show_hide()
if options.execute_startup_script:
if not already_running:
startup_script = instance.settings.general.get_string("startup-script")
if startup_script:
log.info("Calling startup script: %s", startup_script)
pid = subprocess.Popen([startup_script],
shell=True,
stdin=None,
stdout=None,
stderr=None,
close_fds=True)
log.info("Startup script started with pid: %s", pid)
# Please ensure this is the last line !!!!
else:
log.info("--no-startup-script argument defined, so don't execute the startup script")
if already_running:
log.info("Guake is already running")
return already_running
def exec_main():
if not main():
log.debug("Running main gtk loop")
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Load gi pretty late, to speed up as much as possible the parsing of the option for DBus
# comm through command line
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
Gtk.main()
if __name__ == '__main__':
exec_main()
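# Since main() drives a running instance over D-Bus, any other process can
# do the same with the constants imported above; a minimal sketch:
#
#   import dbus
#   from guake.dbusiface import DBUS_NAME, DBUS_PATH
#   remote = dbus.SessionBus().get_object(DBUS_NAME, DBUS_PATH)
#   remote.show_hide()  # same call used for --toggle-visibility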
| mouseratti/guake | guake/main.py | Python | gpl-2.0 | 15,344 |
#!/usr/bin/env python
import sys
import json
from elasticsearch1 import Elasticsearch
def init_es(es_host, es_index):
es = Elasticsearch([ es_host ])
es.indices.delete( es_index, ignore=[400, 404] )
es.indices.create( es_index, ignore=400 )
# create mappings
with open('pcawg_summary.mapping.json', 'r') as m:
es_mapping = m.read()
es.indices.put_mapping(index=es_index, doc_type='donor', body=es_mapping)
return es
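# pcawg_summary.mapping.json is not shown here; a minimal sketch of the
# Elasticsearch 1.x style mapping this loader expects (field names beyond
# donor_unique_id are assumptions):
#
#   {"donor": {"properties": {
#       "donor_unique_id": {"type": "string", "index": "not_analyzed"}}}}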
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
es_host = 'localhost:9200'
es_index = 'pcawg_summary'
es = init_es(es_host, es_index)
with open('pcawg_summary.jsonl', 'r') as t:
for entity in t:
doc = json.loads(entity)
es.index(index=es_index, doc_type='donor', id=doc['donor_unique_id'], \
body=doc, timeout=90 )
if __name__ == "__main__":
sys.exit(main())
| ICGC-TCGA-PanCancer/pcawg-central-index | pcawg_metadata_parser/pcawg_summary.loader.py | Python | gpl-2.0 | 955 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser
import json
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import XHR
from salts_lib.utils2 import i18n
import scraper
BASE_URL = 'http://www.snagfilms.com'
SOURCE_BASE_URL = 'http://mp4.snagfilms.com'
SEARCH_URL = '/apis/search.json'
SEARCH_TYPES = {VIDEO_TYPES.MOVIE: 'film', VIDEO_TYPES.TVSHOW: 'show'}
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
self.username = kodi.get_setting('%s-username' % (self.get_name()))
self.password = kodi.get_setting('%s-password' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'SnagFilms'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=.5)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'film-container'})
if fragment:
iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
if iframe_url:
iframe_url = urlparse.urljoin(self.base_url, iframe_url[0])
headers = {'Referer': page_url}
html = self._http_get(iframe_url, headers=headers, cache_limit=.5)
sources = self._parse_sources_list(html)
for source in sources:
quality = sources[source]['quality']
host = self._get_direct_hostname(source)
stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': iframe_url})
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
match = re.search('(\d+[a-z]bps)', source)
if match:
hoster['extra'] = match.group(1)
hosters.append(hoster)
hosters.sort(key=lambda x: x.get('extra', ''), reverse=True)
return hosters
def _get_episode_url(self, season_url, video):
episode_pattern = 'data-title\s*=\s*"Season\s+0*%s\s+Episode\s+0*%s[^>]*data-permalink\s*=\s*"([^"]+)' % (video.season, video.episode)
title_pattern = 'data-title\s*=\s*"Season\s+\d+\s+Episode\s+\d+\s*(?P<title>[^"]+)[^>]+data-permalink\s*=\s*"(?P<url>[^"]+)'
return self._default_get_episode_url(season_url, video, episode_pattern, title_pattern)
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = urlparse.urljoin(self.base_url, SEARCH_URL)
referer = urlparse.urljoin(self.base_url, '/search/?q=%s')
referer = referer % (urllib.quote_plus(title))
headers = {'Referer': referer}
headers.update(XHR)
params = {'searchTerm': title, 'type': SEARCH_TYPES[video_type], 'limit': 500}
html = self._http_get(search_url, params=params, headers=headers, auth=False, cache_limit=2)
js_data = scraper_utils.parse_json(html, search_url)
if 'results' in js_data:
for result in js_data['results']:
match_year = str(result.get('year', ''))
match_url = result.get('permalink', '')
match_title = result.get('title', '')
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
results.append(result)
return results
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
name = cls.get_name()
settings.append(' <setting id="%s-username" type="text" label=" %s" default="" visible="eq(-4,true)"/>' % (name, i18n('username')))
settings.append(' <setting id="%s-password" type="text" label=" %s" option="hidden" default="" visible="eq(-5,true)"/>' % (name, i18n('password')))
return settings
def _http_get(self, url, params=None, data=None, headers=None, auth=True, method=None, cache_limit=8):
# return all uncached blank pages if no user or pass
if not self.username or not self.password:
return ''
html = super(self.__class__, self)._http_get(url, params=params, data=data, headers=headers, method=method, cache_limit=cache_limit)
if auth and not dom_parser.parse_dom(html, 'span', {'class': 'user-name'}):
log_utils.log('Logging in for url (%s)' % (url), log_utils.LOGDEBUG)
self.__login()
html = super(self.__class__, self)._http_get(url, params=params, data=data, headers=headers, method=method, cache_limit=0)
return html
def __login(self):
url = urlparse.urljoin(self.base_url, '/apis/v2/user/login.json')
data = {'email': self.username, 'password': self.password, 'rememberMe': True}
referer = urlparse.urljoin(self.base_url, '/login')
headers = {'Content-Type': 'application/json', 'Referer': referer}
headers.update(XHR)
html = super(self.__class__, self)._http_get(url, data=json.dumps(data), headers=headers, cache_limit=0)
js_data = scraper_utils.parse_json(html, url)
return js_data.get('status') == 'success'
| JamesLinEngineer/RKMC | addons/plugin.video.salts/scrapers/snagfilms_scraper.py | Python | gpl-2.0 | 6,712 |
"""okc_scraper includes all the functions needed to scrape profiles from
OKCupid"""
import requests
import cPickle as pickle
import time
from BeautifulSoup import BeautifulSoup
def authorize(username, password):
"""Log into OKCupid to scrape profiles"""
user_info = {"username": username, "password": password}
okc = requests.session()
okc.post("https://www.okcupid.com/login", data=user_info)
return okc
def getProfiles(okc):
"""Searches for profiles and returns a list of profiles (10)"""
# match_info = {"filter1": "0,63", "filter2": "2,100,18",
# "filter3": "5,2678400", "filter4": "1,1",
# "locid": "1", "custom_search": "0",
# "matchOrderBy": "SPECIAL_BLEND",
# "sa": "1", "sort_type": "0", "update_prefs": "1"}
soup = BeautifulSoup(okc.post("https://www.okcupid.com/match?filter1=0,63&filter2=2,100,18&filter3=5,2678400&filter4=1,1&locid=0&timekey=1&matchOrderBy=SPECIAL_BLEND&custom_search=0&fromWhoOnline=0&mygender=m").text)
users = soup.findAll("div", {"class": "user_info"})
return (["https://www.okcupid.com" +
user.find("a")["href"].replace("?cf=regular", "")
for user in users], soup)
def getProfile(okc, profile_link):
"""Takes a link to a profile and returns a BeautifulSoup object"""
page = BeautifulSoup(okc.get(profile_link).text)
return (page, page.find("form", {"id": "flag_form"})
.findAll("input")[0]["value"])
def getInfo(profile, profile_id):
"""Take a BeautifulSoup object corresponding to a profile's home page
and the profile's id and return a list of the profile's user info
(username, age, gender...)"""
try:
main = profile.find("div", {"id": "basic_info"}).findAll("span")
return {"id_table": {"user_id": profile_id,
"user_name": main[0].text,
"user_age": main[1].text,
"user_gender": main[2].text,
"user_orient": main[3].text,
"user_status": main[4].text,
"user_location": main[5].text}, }
except:
print profile
return {"id_table": {"user_id": profile_id,
"data": "NA"}}
def getEssays(profile, profile_id):
"""Takes a BeautifulSoup object corresponding to a profiles home
page and returns a list of the profile's essays"""
etd = {"user_id": profile_id, }
essay_index = ["self_summary", "my_life", "good_at", "first_thing",
"favorite", "six_things", "lot_time", "typical_Friday",
"most_private"]
main = profile.find("div", {"id": "main_column"})
for i in range(0, 9):
try:
etd[essay_index[i]] = (main.find("div", {"id": "essay_text_"
+ str(i)})
.getText(' '))
except:
etd[essay_index[i]] = ""
return {"essay_table": etd, }
def getLookingFor(profile, profile_id):
"""Takes a BeautifulSoup object corresponding to a profiles home
page and returns a list of the profile's looking for items"""
try:
main = (profile.find("div", {"id": "main_column"})
.find("div", {"id": "what_i_want"}).findAll("li"))
if len(main) == 4:
return {"looking_for_table": {"user_id": profile_id,
"other_user": main[0].text,
"other_age": main[1].text,
"other_location": main[2].text,
"other_type": main[3].text}, }
if len(main) == 5:
return {"looking_for_table": {"user_id": profile_id,
"other_user": main[0].text,
"other_age": main[1].text,
"other_location": main[2].text,
"other_status": main[3].text,
"other_type": main[4].text}, }
except:
print profile
return {"looking_for_table": {"user_id": profile_id,
"data": "NA"}}
def getDetails(profile, profile_id):
"""Takes a BeautifulSoup object corresponding to profiles home
page and returns a list of profile's details"""
try:
main = profile.find("div", {"id": "profile_details"}).findAll("dd")
return {"details_table": {"user_id": profile_id,
"last_online": main[0].text,
"ethnicity": main[1].text,
"height": main[2].text,
"body_type": main[3].text,
"diet": main[4].text,
"smokes": main[5].text,
"drinks": main[6].text,
"religion": main[7].text,
"sign": main[8].text,
"education": main[9].text,
"job": main[10].text,
"income": main[11].text,
"offspring": main[12].text,
"pets": main[13].text,
"speaks": main[14].text}, }
except:
print profile
return {"details_table": {"user_id": profile_id,
"data": "NA"}}
def getQuestions(okc, profile_link, profile_id):
"""Take a link to a profile and return a list the questions a user
has answered"""
# Currently this doesn't return anything. All functions need to be
# changed up to work with mysql 07/19/2013 22:50
question_list = []
question_categories = ["Ethics", "Sex", "Religion", "Lifestyle",
"Dating", "Other"]
for category in question_categories:
q = BeautifulSoup(okc.get(profile_link + "/questions?"
+ category).text)
try:
max_page = int(q.find("div", {"class": "pages clearfix"})
.findAll("li")[1].find("a").text)
except IndexError:
max_page = 1
except AttributeError:
return []
for page in range(1, max_page + 1):
q_page = BeautifulSoup(okc.get(profile_link + "/questions?"
+ category + "="
+ str(page)).text)
questions = [q for q in q_page.find("div", {"id": "questions"})
.findAll("div",
{"class":
"question public talk clearfix"})]
for question in questions:
question_id = question["id"]
qtext = question.find("p", {"class": "qtext"}).text
atext = question.find("p",
{"class":
"answer target clearfix"}).text
question_list.append({"question_table":
{"user_id": profile_id,
"question_id": question_id,
"question_text": qtext,
"user_answer": atext,
"question_category": category},
})
return question_list
def pickleDict(dict_, dir):
"""Takes in a directory and a dictionary to be pickled and pickles
the dict in the directory"""
dict_id = dict_.keys()[0]
tab_i = pickle.load(open(dir + dict_id + ".p", "rb"))
tab_i.append(dict_)
pickle.dump(tab_i, open(dir + dict_id + ".p", "wb"))
def main(okc_instance):
"""The main event, takes an okc_instance (logged in) and writes a
profile to the docs"""
profiles, soup = getProfiles(okc_instance)
locations = [l.text.split(";")[1] for l in
soup.findAll("div", {"class": "userinfo"})]
if len([l for l in locations if l == "Chicago, IL"]) > 2:
print "Possible Reset"
for profile in profiles:
prof = getProfile(okc_instance, profile)
pickleDict(getInfo(prof[0], prof[1]), "data/")
pickleDict(getEssays(prof[0], prof[1]), "data/")
pickleDict(getLookingFor(prof[0], prof[1]), "data/")
pickleDict(getDetails(prof[0], prof[1]), "data/")
time.sleep(2)
return prof[1]
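# Typical driver loop (credentials are placeholders):
#
#   okc = authorize("username", "password")
#   while True:
#       main(okc)
#       time.sleep(5)  # pause between search pages to stay polite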
| lbybee/okc_project | okc_scraper_no_q.py | Python | gpl-2.0 | 8,941 |
#
#
# Copyright (C) 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Support classes and functions for testing the cmdlib module.
"""
from cmdlib.testsupport.cmdlib_testcase import CmdlibTestCase, \
withLockedLU
from cmdlib.testsupport.config_mock import ConfigMock
from cmdlib.testsupport.iallocator_mock import patchIAllocator
from cmdlib.testsupport.livelock_mock import LiveLockMock
from cmdlib.testsupport.utils_mock import patchUtils
from cmdlib.testsupport.netutils_mock import patchNetutils, HostnameMock
from cmdlib.testsupport.processor_mock import ProcessorMock
from cmdlib.testsupport.rpc_runner_mock import CreateRpcRunnerMock, \
RpcResultsBuilder
from cmdlib.testsupport.ssh_mock import patchSsh
from cmdlib.testsupport.wconfd_mock import WConfdMock
__all__ = ["CmdlibTestCase",
"withLockedLU",
"ConfigMock",
"CreateRpcRunnerMock",
"HostnameMock",
"patchIAllocator",
"patchUtils",
"patchNetutils",
"patchSsh",
"ProcessorMock",
"RpcResultsBuilder",
"LiveLockMock",
"WConfdMock",
]
| ribag/ganeti-experiments | test/py/cmdlib/testsupport/__init__.py | Python | gpl-2.0 | 1,827 |
#!/usr/bin/python
import unittest
import apt_pkg
import apt.progress.base
class TestCache(unittest.TestCase):
"""Test invocation of apt_pkg.Cache()"""
def setUp(self):
apt_pkg.init_config()
apt_pkg.init_system()
def test_wrong_invocation(self):
"""cache_invocation: Test wrong invocation."""
apt_cache = apt_pkg.Cache(progress=None)
self.assertRaises(ValueError, apt_pkg.Cache, apt_cache)
self.assertRaises(ValueError, apt_pkg.Cache,
apt.progress.base.AcquireProgress())
self.assertRaises(ValueError, apt_pkg.Cache, 0)
def test_proper_invocation(self):
"""cache_invocation: Test correct invocation."""
apt_cache = apt_pkg.Cache(progress=None)
apt_depcache = apt_pkg.DepCache(apt_cache)
if __name__ == "__main__":
unittest.main()
| suokko/python-apt | tests/test_cache_invocation.py | Python | gpl-2.0 | 863 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with a *procedures* dictionary specifying available quantum
chemical methods.
"""
from __future__ import print_function
from __future__ import absolute_import
from . import proc
from . import interface_cfour
# never import wrappers or aliases into this file
# Procedure lookup tables
procedures = {
'energy': {
'hf' : proc.run_scf,
'scf' : proc.run_scf,
'mcscf' : proc.run_mcscf,
'dcft' : proc.run_dcft,
'mp3' : proc.select_mp3,
'mp2.5' : proc.select_mp2p5,
'mp2' : proc.select_mp2,
'omp2' : proc.select_omp2,
'scs-omp2' : proc.run_occ,
'scs(n)-omp2' : proc.run_occ,
'scs-omp2-vdw' : proc.run_occ,
'sos-omp2' : proc.run_occ,
'sos-pi-omp2' : proc.run_occ,
'omp3' : proc.select_omp3,
'scs-omp3' : proc.run_occ,
'scs(n)-omp3' : proc.run_occ,
'scs-omp3-vdw' : proc.run_occ,
'sos-omp3' : proc.run_occ,
'sos-pi-omp3' : proc.run_occ,
'olccd' : proc.select_olccd,
'omp2.5' : proc.select_omp2p5,
'dfocc' : proc.run_dfocc, # full control over dfocc
'qchf' : proc.run_qchf,
'ccd' : proc.run_dfocc,
'sapt0' : proc.run_sapt,
'ssapt0' : proc.run_sapt,
'sapt2' : proc.run_sapt,
'sapt2+' : proc.run_sapt,
'sapt2+(3)' : proc.run_sapt,
'sapt2+3' : proc.run_sapt,
'sapt2+(ccd)' : proc.run_sapt,
'sapt2+(3)(ccd)': proc.run_sapt,
'sapt2+3(ccd)' : proc.run_sapt,
'sapt2+dmp2' : proc.run_sapt,
'sapt2+(3)dmp2' : proc.run_sapt,
'sapt2+3dmp2' : proc.run_sapt,
'sapt2+(ccd)dmp2' : proc.run_sapt,
'sapt2+(3)(ccd)dmp2' : proc.run_sapt,
'sapt2+3(ccd)dmp2' : proc.run_sapt,
'sapt0-ct' : proc.run_sapt_ct,
'sapt2-ct' : proc.run_sapt_ct,
'sapt2+-ct' : proc.run_sapt_ct,
'sapt2+(3)-ct' : proc.run_sapt_ct,
'sapt2+3-ct' : proc.run_sapt_ct,
'sapt2+(ccd)-ct' : proc.run_sapt_ct,
'sapt2+(3)(ccd)-ct' : proc.run_sapt_ct,
'sapt2+3(ccd)-ct' : proc.run_sapt_ct,
'fisapt0' : proc.run_fisapt,
'ccenergy' : proc.run_ccenergy, # full control over ccenergy
'ccsd' : proc.select_ccsd,
'ccsd(t)' : proc.select_ccsd_t_,
'ccsd(at)' : proc.select_ccsd_at_,
'cc2' : proc.run_ccenergy,
'cc3' : proc.run_ccenergy,
'mrcc' : proc.run_mrcc, # interface to Kallay's MRCC program
'bccd' : proc.run_bccd,
'bccd(t)' : proc.run_bccd,
'eom-ccsd' : proc.run_eom_cc,
'eom-cc2' : proc.run_eom_cc,
'eom-cc3' : proc.run_eom_cc,
'detci' : proc.run_detci, # full control over detci
'mp' : proc.run_detci, # arbitrary order mp(n)
'zapt' : proc.run_detci, # arbitrary order zapt(n)
'cisd' : proc.select_cisd,
'cisdt' : proc.run_detci,
'cisdtq' : proc.run_detci,
'ci' : proc.run_detci, # arbitrary order ci(n)
'fci' : proc.run_detci,
'casscf' : proc.run_detcas,
'rasscf' : proc.run_detcas,
'adc' : proc.run_adc,
# 'cphf' : proc.run_libfock,
# 'cis' : proc.run_libfock,
# 'tdhf' : proc.run_libfock,
# 'cpks' : proc.run_libfock,
# 'tda' : proc.run_libfock,
# 'tddft' : proc.run_libfock,
'psimrcc' : proc.run_psimrcc,
'psimrcc_scf' : proc.run_psimrcc_scf,
'qcisd' : proc.run_fnocc,
'qcisd(t)' : proc.run_fnocc,
'mp4' : proc.select_mp4,
'mp4(sdq)' : proc.run_fnocc,
'fno-ccsd' : proc.select_fnoccsd,
'fno-ccsd(t)' : proc.select_fnoccsd_t_,
'fno-qcisd' : proc.run_fnocc,
'fno-qcisd(t)' : proc.run_fnocc,
'fno-mp3' : proc.run_fnocc,
'fno-mp4(sdq)' : proc.run_fnocc,
'fno-mp4' : proc.run_fnocc,
'fno-lccd' : proc.run_cepa,
'fno-lccsd' : proc.run_cepa,
'fno-cepa(0)' : proc.run_cepa,
'fno-cepa(1)' : proc.run_cepa,
'fno-cepa(3)' : proc.run_cepa,
'fno-acpf' : proc.run_cepa,
'fno-aqcc' : proc.run_cepa,
'fno-cisd' : proc.run_cepa,
'lccd' : proc.select_lccd,
'lccsd' : proc.run_cepa,
'cepa(0)' : proc.run_cepa,
'cepa(1)' : proc.run_cepa,
'cepa(3)' : proc.run_cepa,
'acpf' : proc.run_cepa,
'aqcc' : proc.run_cepa,
'efp' : proc.run_efp,
'dmrg-scf' : proc.run_dmrgscf,
'dmrg-caspt2' : proc.run_dmrgscf,
'dmrg-ci' : proc.run_dmrgci,
# Upon adding a method to this list, add it to the docstring in energy() below
# Aliases are discouraged. If you must add an alias to this list (e.g.,
# lccsd/cepa(0)), please search the whole driver to find uses of
# name in return values and psi variables and extend the logic to
# encompass the new alias.
},
'gradient' : {
'hf' : proc.run_scf_gradient,
'scf' : proc.run_scf_gradient,
'cc2' : proc.run_ccenergy_gradient,
'ccsd' : proc.select_ccsd_gradient,
'ccsd(t)' : proc.select_ccsd_t__gradient,
'mp2' : proc.select_mp2_gradient,
'eom-ccsd' : proc.run_eom_cc_gradient,
'dcft' : proc.run_dcft_gradient,
'omp2' : proc.select_omp2_gradient,
'omp3' : proc.select_omp3_gradient,
'mp3' : proc.select_mp3_gradient,
'mp2.5' : proc.select_mp2p5_gradient,
'omp2.5' : proc.select_omp2p5_gradient,
'lccd' : proc.select_lccd_gradient,
'olccd' : proc.select_olccd_gradient,
'ccd' : proc.run_dfocc_gradient,
# Upon adding a method to this list, add it to the docstring in optimize() below
},
'hessian' : {
# Upon adding a method to this list, add it to the docstring in frequency() below
'hf' : proc.run_scf_hessian,
'scf' : proc.run_scf_hessian,
},
'property' : {
'hf' : proc.run_scf_property,
'scf' : proc.run_scf_property,
'mp2' : proc.select_mp2_property,
'cc2' : proc.run_cc_property,
'ccsd' : proc.run_cc_property,
'eom-cc2' : proc.run_cc_property,
'eom-ccsd' : proc.run_cc_property,
'detci' : proc.run_detci_property, # full control over detci
'cisd' : proc.run_detci_property,
'cisdt' : proc.run_detci_property,
'cisdtq' : proc.run_detci_property,
'ci' : proc.run_detci_property, # arbitrary order ci(n)
'fci' : proc.run_detci_property,
'rasscf' : proc.run_detci_property,
'casscf' : proc.run_detci_property,
# Upon adding a method to this list, add it to the docstring in property() below
}}
# Will only allow energy to be run for the following methods
energy_only_methods = [x for x in procedures['energy'].keys() if 'sapt' in x]
energy_only_methods += ['adc', 'efp', 'cphf', 'tdhf', 'cis']
# Integrate DFT with driver routines
superfunc_list = proc.dft_functional.superfunctional_list
for ssuper in superfunc_list:
procedures['energy'][ssuper.name().lower()] = proc.run_dft
if not ssuper.is_c_hybrid():
procedures['property'][ssuper.name().lower()] = proc.run_dft_property
for ssuper in superfunc_list:
if ((not ssuper.is_c_hybrid()) and (not ssuper.is_c_lrc()) and (not ssuper.is_x_lrc())):
procedures['gradient'][ssuper.name().lower()] = proc.run_dft_gradient
# Integrate CFOUR with driver routines
for ssuper in interface_cfour.cfour_list():
procedures['energy'][ssuper.lower()] = interface_cfour.run_cfour
for ssuper in interface_cfour.cfour_gradient_list():
procedures['gradient'][ssuper.lower()] = interface_cfour.run_cfour
# dictionary to register pre- and post-compute hooks for driver routines
hooks = dict((k1, dict((k2, []) for k2 in ['pre', 'post'])) for k1 in ['energy', 'optimize', 'frequency'])
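# Driver routines dispatch through these tables; the lookup itself is just:
#
#   run_fn = procedures['energy']['ccsd(t)'] # -> proc.select_ccsd_t_
#
# (How the driver then invokes run_fn is defined elsewhere; this sketch
# only illustrates the table lookup.)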
| andysim/psi4 | psi4/driver/procrouting/proc_table.py | Python | gpl-2.0 | 10,293 |
#
# downloadview.py
#
# Copyright 2010 Brett Mravec <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
class DownloadView:
def __init__ (self, downloadlist):
self.downloadlist = downloadlist
downloadlist.view = self
def add_download (self, download):
print 'DownloadView.add_download (download): stub'
def update_download (self, download):
print 'DownloadView.update_download (download): stub'
def remove_download (self, download):
print 'DownloadView.remove_download (download): stub'
def get_selected (self):
print 'DownloadView.get_selected (): stub'
return []
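# A concrete view overrides the stubs above; minimal sketch (illustrative):
#
#   class ConsoleDownloadView(DownloadView):
#       def add_download (self, download):
#           print 'added', download
#       def update_download (self, download):
#           print 'updated', download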
| bmravec/DownMan | downman/gui/downloadview.py | Python | gpl-2.0 | 1,413 |
# -*- coding: utf-8 -*-
"""
Estimate the international "visibility" of countries by retrieving the
average number of articles the New York Times returns in its search query.
For each year and each country, a query is sent to the NYT API and the
number of returned hits (i.e. articles) is taken as an estimate of the
international "visibility". To ensure optimal coverage, for each country
synonyms have been defined (see CountryCodeMapper.py) and the average of
the count is taken.
----
Copyright (C) 2015 Niklas Berliner
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import sys
import os
import pickle
from time import sleep
from countryCodeMapper import CountryCodeMapper
from utils import Country, CountryContainer
from nytimesarticles import articleAPI, DeveloperOverRate
api = articleAPI('/* Your API access key here */')
# Use tmp folder to keep intermediate results. Final output will be placed there as well
tmpFolder = "../data/newspaper/raw/"
## Read the temporary folder content
done = [ int(fname[8:-2]) for fname in os.listdir(tmpFolder) if fname != "country_aggregate.p" ]
# Initialise some variables
C = CountryCodeMapper()
countries = C.countryNames()
container = CountryContainer()
# Run the scrape for the years 1980 to 2014 (including)
dates = range(1980,2015)
for date in dates:
if date in done:
print("Loading year", date)
a = pickle.load(open(tmpFolder + "country_%s.p" %str(date), "rb"))
else:
print("Processing year", date)
a = Country(date)
for idx, country in enumerate(countries):
success = False
i = 0
while i<=3 and not success:
try:
query = api.search( q = country,
begin_date = str(date) + '0101',
end_date = str(date) + '1231'
)
sleep(.1)
assert( query["status"] == "OK" )
count = query["response"]["meta"]["hits"]
a(country, count)
i += 1
success = True
except DeveloperOverRate:
print("You most probably exceeded you api key limit\n")
sys.exit()
except:
success = False
i += 1
sleep(1) # allow the server some quiet time
if not success:
print("Error in %s, %s" %(date, country))
# Store the year as pickle in case something breaks during the run
pickle.dump(a, open(tmpFolder + "country_%s.p" %str(date), "wb"))
# Save the original data as csv file
a.save(tmpFolder + "country_%s.csv" %str(date))
# Add the country to the container
container(a)
pickle.dump(container, open(tmpFolder + "country_aggregate.p", "wb"))
# Save everything to a csv file. The columns will be the countries, the rows
# will be the years. One column contains the years (sanity check to ensure
# that the row order is not messed up).
container.save(tmpFolder + "/NYT_scrape.csv")
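# Downstream-use sketch (hedged): once the run above has completed, the
# aggregated results can be reloaded from the pickle instead of re-querying
# the NYT api, e.g.:
#
#     import pickle
#     container = pickle.load(open(tmpFolder + "country_aggregate.p", "rb"))
#     container.save(tmpFolder + "/NYT_scrape.csv")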
| nberliner/delveData | lib/countryMentionNYT.py | Python | gpl-2.0 | 3,856 |
# -*- coding: utf-8 -*-
from mock import patch
import os
import pprint
import shutil
import subprocess
import unittest
from pyspatialite import dbapi2 as db
import qgis.core # Need to import this before PyQt to ensure QGIS parts work
from PyQt4.QtSql import QSqlQuery, QSqlDatabase
from Roadnet.database import connect_and_open
from Roadnet.tests.integration.roadnet_test_cases import QgisTestCase
import Roadnet.roadnet_exceptions as rn_except
from Roadnet.ramp import wdm
from Roadnet.bin import shapefile_attributes
this_dir = os.path.dirname(os.path.abspath(__file__))
SQL_SCRIPT = """
INSERT INTO rdpoly VALUES (
1, 11, 1, 'CGWAY', 'LAR', NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'C119/10', '/CGWAY/', '/CGWAY//', NULL, NULL, 11111,
GeomFromText("MULTIPOLYGON(((287500 691400, 287500 691500, 287600 691500, 287600 691400 )))", 27700) );
INSERT INTO rdpoly VALUES (
2, 11, 2, 'FTWAY', 'LAF', NULL, NULL, NULL, 'E', 1, 1, NULL, 'C119/10', '/FTWAY/1', '/FTWAY/E/1', NULL, NULL, 22222,
GeomFromText("MULTIPOLYGON(((288000 691400, 288000 691500, 288100 691500, 288100 691400 )))", 27700) );
INSERT INTO rdpoly VALUES (
3, 11, 3, 'FTWAY', 'LAF', NULL, NULL, NULL, 'E', 2, 2, NULL, 'C119/10', '/FTWAY/2', '/FTWAY/E/2', NULL, NULL, 33333,
GeomFromText("MULTIPOLYGON(((287500 691900, 287500 692000, 287600 692000, 287600 691900 )))", 27700) );
INSERT INTO rdpoly VALUES (
4, 11, 4, 'FTWAY', 'LAF', NULL, NULL, NULL, 'S', 1, 1, NULL, 'C119/20', '/FTWAY/1', '/FTWAY/S/1', NULL, NULL, 44444,
GeomFromText("MULTIPOLYGON(((287800 692200, 287800 692300, 287900 692300, 287900 692200 )))", 27700) );
INSERT INTO mcl VALUES (
1, 20574, NULL, 14305470, NULL, NULL, NULL, 'Grangemouth', NULL, NULL, NULL, NULL, NULL, 'F-5470', 60,
'Test MCL One',
NULL, 30, 'U', 'FT', 'Public', 11111, 'U', NULL, NULL,
GeomFromText("MULTILINESTRING((0 0,0 1,0 2))", 27700) );
INSERT INTO mcl VALUES (
2, 20573, NULL, 14305470, NULL, NULL, NULL, 'Grangemouth', NULL, NULL, NULL, NULL, NULL, 'F-5470', 50,
'Test MCL Two',
NULL, 30, 'U', 'FT', 'Public', 22222, 'U', NULL, NULL,
GeomFromText("MULTILINESTRING((293166.277 680074.52,293180.28 680074.606,293181.610 680074.83))", 27700) );
INSERT INTO mcl VALUES (
3, 18163, NULL, 14305470, NULL, NULL, NULL, 'Grangemouth', NULL, NULL, NULL, NULL, NULL, 'F-5470', 40,
'Test MCL Three',
NULL, 30, 'U', 'FT', 'Public', 33333, 'U', NULL, NULL,
GeomFromText("MULTILINESTRING((293141.8919999999 680074.376,293166.2779999999 680074.5219999999))", 27700) );
INSERT INTO mcl VALUES (
4, 18163, NULL, 14305470, NULL, NULL, NULL, 'Grangemouth', NULL, NULL, NULL, NULL, NULL, 'F-5470', 40,
'Test MCL Four',
NULL, 30, 'U', 'FT', 'Public', 44444, 'U', NULL, NULL,
GeomFromText("MULTILINESTRING((293141.8919999999 680074.376,293166.2779999999 680074.5219999999))", 27700) );
"""
class TestWDMExports(QgisTestCase):
empty_db_path = os.path.join('database_files', 'roadnet_empty.sqlite')
test_db_path = os.path.join(this_dir, 'roadnet_test.sqlite')
test_directory = os.path.join(this_dir, 'test_dir')
db = None
def setUp(self):
super(TestWDMExports, self).setUp()
# Make copy of empty database to work on
shutil.copy(self.empty_db_path, self.test_db_path)
# Populate with example data
conn = db.connect(self.test_db_path)
curs = conn.cursor()
try:
curs.executescript(SQL_SCRIPT)
finally:
conn.close()
# Open connection for tests
self.tidy()
os.makedirs(self.test_directory)
self.db = connect_and_open(self.test_db_path, 'integration_testing')
def tearDown(self):
super(TestWDMExports, self).tearDown()
if self.db: # Just in case self.db doesn't get set
self.db.close()
del self.db
QSqlDatabase.removeDatabase('integration_testing')
if os.path.exists(self.test_db_path):
os.remove(self.test_db_path)
def tidy(self):
shutil.rmtree(self.test_directory, ignore_errors=True)
def test_query_db_for_features_success(self):
# Arrange and Act
q = wdm.query_db_for_features('FTWAY', self.db)
# Assert
try:
self.assertTrue(isinstance(q, QSqlQuery),
"An active QSqlQuery wasn't returned ({})".format(type(q)))
finally:
q.finish()
del q
def test_ftway_export_returns_three_features(self):
# Arrange
features_query = wdm.query_db_for_features('FTWAY', self.db)
vlayer = wdm.create_temp_layer_in_memory()
# Act
wdm.add_features_to_vlayer(features_query, vlayer)
# Assert
expected = 3
count = vlayer.featureCount()
self.assertEqual(
expected, count,
"Number of exported FTWAY features was not {} ({})".format(expected, count))
@patch.object(rn_except.QMessageBoxWarningError, 'show_message_box')
def test_exported_attributes(self, mock_error):
# Arrange
outfile_names = {'CGWAY': 'RAMPEXPORT_Carriageway.shp',
'CYCLE': 'RAMPEXPORT_Cycleway_Path.shp',
'FTWAY': 'RAMPEXPORT_Footway.shp'}
expected_attributes = {
'CGWAY': [['1', 'CGWAY', 'LAR', '', '', '', '2.000000000000000',
'11111', '14305470', 'F-5470', '60', 'Test MCL One', '',
'30', 'U', 'U', '']],
'CYCLE': [],
'FTWAY': [
['2', 'FTWAY', 'LAF', 'E', '1', '1', '', '22222', '14305470',
'F-5470', '50', 'Test MCL Two', '', '30', 'U', 'U', ''],
['3', 'FTWAY', 'LAF', 'E', '2', '2', '', '33333', '14305470',
'F-5470', '40', 'Test MCL Three', '', '30', 'U', 'U', ''],
['4', 'FTWAY', 'LAF', 'S', '1', '1', '', '44444', '14305470',
'F-5470', '40', 'Test MCL Four', '', '30', 'U', 'U', '']]}
# Act
for element_type in outfile_names:
shapefile_path = os.path.join(self.test_directory,
outfile_names[element_type])
wdm.export(element_type, self.db, self.test_directory)
attr = shapefile_attributes.get_ogr2csv(shapefile_path)
# Assert
print("-------------")
print("Expected")
pprint.pprint(expected_attributes[element_type])
print("")
print("Actual")
pprint.pprint(attr)
print("-------------")
self.assertEqual(expected_attributes[element_type], attr)
def test_create_sql_command_without_length(self):
# Arrange
expected = """
SELECT AsBinary(rdpoly.geometry) AS geometry, rd_pol_id, element, hierarchy,
desc_2, desc_3, ref_3, currency_flag, feature_length, r_usrn,
mcl_ref, usrn, lor_ref_1, lor_ref_2, lor_desc, lane_number,
speed_limit, rural_urban_id, street_classification, carriageway
FROM rdpoly
LEFT OUTER JOIN mcl
ON rdpoly.mcl_cref = mcl.mcl_ref
WHERE element = "FTWAY"
AND rdpoly.symbol IN (11, 12);"""
# Act
sql = wdm.create_sql_command("FTWAY")
# Assert
self.assertEqual(expected, sql)
def test_create_sql_command_with_length(self):
# Arrange
expected = """
SELECT AsBinary(rdpoly.geometry) AS geometry, rd_pol_id, element, hierarchy,
desc_2, desc_3, ref_3, currency_flag, GLength(mcl.geometry) AS feature_length, r_usrn,
mcl_ref, usrn, lor_ref_1, lor_ref_2, lor_desc, lane_number,
speed_limit, rural_urban_id, street_classification, carriageway
FROM rdpoly
LEFT OUTER JOIN mcl
ON rdpoly.mcl_cref = mcl.mcl_ref
WHERE element = "CGWAY"
AND rdpoly.symbol IN (11, 12);"""
# Act
sql = wdm.create_sql_command("CGWAY")
# Assert
self.assertEqual(expected, sql)
def get_ogr_output_feature_count(shapefile_path):
cmd = ["ogrinfo", shapefile_path, "-al"]
ogr_output = subprocess.check_output(cmd)
for line in ogr_output.split('\n'):
if line.startswith("Feature Count"):
count = line.split(':')[1]
count = count.strip()
return int(count)
raise RuntimeError('Feature Count line not found in {}'.format(shapefile_path))
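# Usage sketch (hedged): requires the GDAL 'ogrinfo' binary on PATH, e.g.
#     get_ogr_output_feature_count('test_dir/RAMPEXPORT_Footway.shp')  # -> 3
# (hypothetical path; the count matches the FTWAY fixtures above)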
if __name__ == '__main__':
unittest.main()
| thinkWhere/Roadnet | tests/integration/test_wdm.py | Python | gpl-2.0 | 8,439 |
#!/usr/bin/python2
import check
from fractions import gcd
# Algorithm taken from en.wikipedia.org/wiki/Line-line-_intersection
# All code written by Joel Williamson
## intersection: Int Int Int Int Int Int Int Int -> (union "parallel" (tuple Int Int Int Int))
##
## Purpose: Treating the input as 4 pairs of integers, each representing the
## endpoint of a line, returns the intersection of the two lines, or
## "parallel" if they are parallel
##
## Effects:
##
## Example: intersection(-15,15,15,-15,-10,-10,10,10) => [0,1,0,1]
def intersection(x1, y1, x2, y2, x3, y3, x4, y4):
x_numerator = ((x1*y2-y1*x2)*(x3-x4) - (x1-x2)*(x3*y4-y3*x4))
denominator = (x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)
if (denominator == 0) :
return "parallel"
x_gcd = gcd(x_numerator,denominator)
y_numerator = (x1*y2-y1*x2)*(y3-y4)-(y1-y2)*(x3*y4-y3*x4)
y_gcd = gcd(y_numerator,denominator)
return (x_numerator/x_gcd,denominator/x_gcd,
y_numerator/y_gcd,denominator/y_gcd)
## Tests:
check.expect('Sample test', intersection(-15,15,15,-15,-10,-10,10,10), (0,1,0,1))
check.expect('Parallel', intersection(-10,-10,10,10,-20,-10,0,10),"parallel")
## point_range: (listof Int) (listof Int) (listof Int) (listof Int) (optional (tuple Int Int Int Int))
## -> (iterable (tuple Int Int Int Int))
##
## Purpose: Merges four lists of equal length into an iterable of points,
## optionally starting after the point specified by (init_x1,init_y1,initx2,inity2)
##
## Example: i_p = point_range([1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16])
## i_p.next() = (1,5,9,13)
## i_p.next() = (2,6,10,14)
def point_range(X1, Y1, X2, Y2, (init_x1, init_y1, init_x2, init_y2)=(None, None, None, None)):
if (init_x1 == None) :
started = True
else :
started = False
for i in range(len(X1)) :
if (not started and not((X1[i],Y1[i],X2[i],Y2[i]) == (init_x1,init_y1,init_x2,init_y2))) :
continue
elif (not started) :
started = True
continue
yield (X1[i],Y1[i],X2[i],Y2[i])
## pieces: Int Int (listof Int) (listof Int) (listof Int) (listof Int) -> Int
##
## Purpose: pieces takes the radius of a circle, N is the number of lines dividing
## the circle and the four lists correspond to the endpoints of the lines
## It produces the number of segments the lines divide the circle into.
##
## Effects:
##
## Examples: pieces(10,3,[-15,1,10],[15,12,4],[15,-6,-10],[-15,-12,-8]) => 7
## pieces(10,3,[0,-11,-11],[11,3,-1],[0,11,11],[-11,3,7]) => 6
def pieces(R, N, X1, Y1, X2, Y2):
segments = 1
for l1 in point_range(X1,Y1,X2,Y2) :
segments += 1
intersections = {}
for l2 in point_range(X1,Y1,X2,Y2,(l1[0],l1[1],l1[2],l1[3])) :
inter = intersection(l1[0],l1[1],l1[2],l1[3],l2[0],l2[1],l2[2],l2[3])
if (inter == "parallel") :
continue
if inter in intersections :
continue
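            # Note: in Python 2 these are integer (floor) divisions, so the
            # circle test below is approximate for non-integer intersection
            # points; an exact rational comparison would be
            # (inter[0]*inter[3])**2 + (inter[2]*inter[1])**2 < (R*inter[1]*inter[3])**2.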
if ((inter[0]*inter[0])/(inter[1]*inter[1]) + (inter[2]*inter[2])/(inter[3]*inter[3]) >= R*R) :
continue
intersections[inter] = True
segments += 1
return segments
## Tests:
check.expect('Example 1',pieces(10,3,[-15,1,10],[15,12,4],[15,-6,-10],[-15,-12,-8]),7)
check.expect('Example 2',pieces(10,3,[0,-11,-11],[11,3,-1],[0,11,11],[-11,3,7]),6)
# Be sure to do lots more of your own testing!
| joelwilliamson/cs234 | a1/a01q2b.py | Python | gpl-2.0 | 3,280 |
# -*- coding: utf-8 -*-
__author__ = 'Eren Turkay <[email protected]>'
from scrapy import log
from scrapy.http import Request
from scrapy.exceptions import CloseSpider
from datetime import datetime
from . import GenericSozlukSpider
from ..items import Girdi
class ItusozlukBaslikSpider(GenericSozlukSpider):
name = 'itusozluk'
def __init__(self, **kwargs):
super(ItusozlukBaslikSpider, self).__init__(**kwargs)
self.allowed_domains = ['itusozluk.com']
def parse(self, response):
self.log("PARSING: %s" % response.request.url, level=log.INFO)
items_to_scrape = response.xpath('//*[@id="entry-list"]/li/article')
if len(items_to_scrape) == 0:
self.log("!!! No item to parse found. It may indicate a problem with HTML !!!",
level=log.ERROR)
raise CloseSpider('no_item_found')
for sel in items_to_scrape:
girdi_id = sel.xpath('./footer/div[@class="entrymenu"]/@data-info').extract()[0].split(',')[0]
baslik_id = response.xpath('//*[@id="canonical_url"]/@value').re(r'--(\d*)')[0]
baslik = response.xpath('//*[@id="title"]/a/text()').extract()[0]
date = sel.xpath('./footer/div[2]/time/a/text()').re(r'\d{2}[.]\d{2}[.]\d{4} \d{2}[:]\d{2}')[0]
text = sel.xpath('string(./div)').extract()[0]
nick = sel.css('a.yazarlink').xpath('text()').extract()[0]
item = Girdi()
item['source'] = self.name
item['baslik'] = baslik
item['girdi_id'] = girdi_id
item['baslik_id'] = baslik_id
item['datetime'] = datetime.strptime(date, '%d.%m.%Y %H:%M')
item['text'] = text
item['nick'] = nick
yield item
current_url = response.request.url.split('/sayfa')[0]
title_re = response.xpath('//title').re(r'sayfa (\d*)')
current_page = int(title_re[0]) if title_re else 1
page_count = int(response.xpath('//a[@rel="last"]')[0].xpath('text()').extract()[0])
next_page = current_page + 1
if page_count >= next_page:
# if current_page < 2:
            yield Request('%s/sayfa/%s' % (current_url, next_page))
| eren/sozlukcrawl | sozlukcrawl/spiders/itusozluk.py | Python | gpl-2.0 | 2234 |
import osclib.api
import osc.conf
import os
import os.path
import vcr
import pytest
import requests
TESTROOT = os.path.join(pytest.config.rootdir, "osclib-tests")
OSCRC = os.path.join(TESTROOT, "oscrc", "oscrc_test_api")
VCRROOT = os.path.join(TESTROOT, "fixtures", "vcr")
def test_default_api(monkeypatch):
"""
default_api() should return a valid Api instance based on oscrc
"""
monkeypatch.setenv("OSC_CONFIG", OSCRC)
api = osclib.api.default_api()
assert isinstance(api, osclib.api.Api)
assert api.apiurl == "https://obs.example.com"
assert api.username == "grace"
def test_new_api():
"""
new Api instances should properly read authentication info
from osc config, or accept it from arguments.
"""
osc.conf.get_config(override_conffile=OSCRC)
api = osclib.api.Api("https://obs.example.com")
assert api.username == "grace"
api = osclib.api.Api("https://obs.example.org")
assert api.username == "sally"
api = osclib.api.Api("https://notobs.example.org", username="deborah", password="estrin")
assert api.apiurl == "https://notobs.example.org"
assert api.username == "deborah"
assert api.password == "estrin"
with pytest.raises(Exception):
osclib.api.Api("https://notobs.example.org")
with pytest.raises(Exception):
osclib.api.Api("https://obs.example.com", password="onlypassword")
@vcr.use_cassette(os.path.join(VCRROOT, "test_request.yaml"), filter_headers=['authorization'])
def test_request():
"""
Let's download a thing from the api.
This test assumes that the user running it has a valid oscrc file
with entries for api.opensuse.org.
"""
# first clear out osc.conf settings
osc.conf.get_config()
# download a thing
api = osclib.api.Api("https://api.opensuse.org")
r = api.request(["source", "openSUSE:Factory", "osc"])
assert isinstance(r, requests.Response)
assert 'name="osc"' in r.text
# check that we get to handle bad status
r = api.request(["source", "openSUSE:Factory", "does not exist"])
assert not r.ok
# check that method, query and data is supported
r = api.request(["source", "openSUSE:Factory", "does not exist"],
method="POST",
query={"hello": "world"},
data="see a thing")
assert isinstance(r, requests.Response)
assert not r.ok
@vcr.use_cassette(os.path.join(VCRROOT, "test_request.yaml"), filter_headers=['authorization'])
def test_get_xml():
"""
get_xml() should return a xml document, or raise an exception
TODO maybe get_xml should always return xml document?
This test assumes that the user running it has a valid oscrc file
with entries for api.opensuse.org.
"""
# first clear out osc.conf settings
osc.conf.get_config()
api = osclib.api.Api("https://api.opensuse.org")
root = api.get_xml(["source", "openSUSE:Factory", "osc"])
assert root is not None
assert root.tag == "directory"
assert root.get("name") == "osc"
with pytest.raises(requests.exceptions.HTTPError):
root = api.get_xml(["source", "openSUSE:Factory", "does not exist"])
| matejcik/osc | osclib-tests/test_api.py | Python | gpl-2.0 | 3,172 |
r"""A proxy enabling multiple wiring guide instances to interact with the same
SpiNNaker boards.
A very simple protocol is used between the client and server. Clients may send
the following new-line delimited commands to the server:
* ``VERSION,[versionstring]\n`` The server will disconnect any client with an
incompatible version number reported for ``[versionstring]``. Returns
``OK\n``.
* ``LED,[c],[f],[b],[lednum],[state]\n`` Turn on or off the specified LED. Note
that the LED remains switched on while *any* client wants it to be on.
Returns ``OK\n``.
* ``TARGET,[c],[f],[b],[link]\n`` Discover what link is at the other end of the
supplied link. Returns ``[c],[f],[b],[link]\n`` or ``None\n`` if no link is
connected. Note that links are represented by their number, not their name.
"""
import traceback
import socket
import select
from collections import defaultdict
import logging
from six import iteritems
from spinner.version import __version__
from spinner.topology import Direction
DEFAULT_PORT = 6512
class ProxyError(Exception):
"""Exception raised when the proxy cannot connect."""
pass
class ProxyServer(object):
"""A proxy server enabling multiple wiring guide instances to interact with
the same SpiNNaker boards.
"""
def __init__(self, bmp_controller, wiring_probe,
hostname="", port=DEFAULT_PORT):
self.bmp_controller = bmp_controller
self.wiring_probe = wiring_probe
# Open a TCP socket
self.server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.server_sock.bind((hostname, port))
self.server_sock.listen(5)
self.client_socks = []
# A buffer for unprocessed data received from each client
self.client_buffer = {}
# For each LED, maintains a set of clients which have turned it on
self.led_setters = defaultdict(set)
def add_client(self, sock, addr):
"""Register a new client."""
logging.info("New connection {} from {}".format(sock, addr))
self.client_socks.append(sock)
# Create buffer for received data (and schedule its deletion upon
# disconnection)
self.client_buffer[sock] = b""
def remove_client(self, sock):
"""Disconnect and cleanup after a particular child."""
logging.info("Closing socket {}".format(sock))
# Remove buffer
self.client_buffer.pop(sock)
# Turn off any LEDs left on by the client
for (c, f, b, led), socks in iteritems(self.led_setters):
if sock in socks:
self.set_led(sock, c, f, b, led, False)
# Close socket
self.client_socks.remove(sock)
sock.close()
def set_led(self, sock, c, f, b, led, state):
"""Set the state of a diagnostic LED.
An LED is turned on if at least one client has turned it on. An LED is only
turned off if all clients which have turned the LED on have also turned it
off again.
"""
setters = self.led_setters[(c, f, b, led)]
cur_led_state = bool(setters)
if state:
setters.add(sock)
else:
setters.discard(sock)
new_led_state = bool(setters)
if cur_led_state != new_led_state:
self.bmp_controller.set_led(led, new_led_state, c, f, b)
def handle_version(self, sock, args):
"""Handle "VERSION" commands.
This command contains, as the argument, the SpiNNer version number of the
remote client. If the version of the client does not match the server, the
client is disconnected.
Arguments: vX.Y.Z
Returns: OK
"""
# Check for identical version
assert args.decode("ascii") == __version__
sock.send(b"OK\n")
def handle_led(self, sock, args):
"""Handle "LED" commands.
Set the state of a diagnostic LED on a board.
Arguments: c,f,b,led,state
Returns: OK
"""
c, f, b, led, state = map(int, args.split(b","))
self.set_led(sock, c, f, b, led, state)
sock.send(b"OK\n")
def handle_target(self, sock, args):
"""Handle "TARGET" commands.
Determine what is at the other end of a given link.
Arguments: c,f,b,d
Returns: c,f,b,d or None
"""
c, f, b, d = map(int, args.split(b","))
target = self.wiring_probe.get_link_target(c, f, b, d)
if target is None:
sock.send(b"None\n")
else:
sock.send("{},{},{},{}\n".format(*map(int, target)).encode("ascii"))
def process_data(self, sock, data):
"""Process data received from a socket."""
# Prepend any previously unprocessed data
data = self.client_buffer[sock] + data
# Handle any received commands. If a command fails (or is invalid) the
# connection is dropped.
try:
while b"\n" in data:
line, _, data = data.partition(b"\n")
logging.debug("Handling command {} from {}".format(line, sock))
cmd, _, args = line.partition(b",")
# If an unrecognised command arrives, this lookup will fail and get
# caught by the exception handler, printing an error and disconnecting
# the client.
{
b"VERSION": self.handle_version,
b"LED": self.handle_led,
b"TARGET": self.handle_target,
}[cmd](sock, args)
except Exception as e:
logging.exception(
"Disconnected client {} due to bad command (above)".format(sock))
self.remove_client(sock)
return
# Retain any remaining unprocessed data
self.client_buffer[sock] = data
def main(self):
logging.info("Starting proxy server...")
try:
while True:
ready, _1, _2 = select.select([self.server_sock] + self.client_socks, [], [])
for sock in ready:
if sock is self.server_sock:
# New client connected!
self.add_client(*self.server_sock.accept())
else:
# Data arrived from a client
try:
data = sock.recv(1024)
except (IOError, OSError) as exc:
logging.error(
"Socket {} failed to receive: {}".format(sock, exc))
# Cause socket to get closed
data = b"" # pragma: no branch
if len(data) == 0:
# Connection closed
self.remove_client(sock)
else:
self.process_data(sock, data)
except KeyboardInterrupt:
# Disconnect all clients (also cleans up LED states, etc.)
for sock in self.client_socks:
self.remove_client(sock)
logging.info("Proxy server terminated cleanly.")
class ProxyClient(object):
"""A client for the ProxyServer object defined above.
This object implements a BMPController-compatible ``set_led`` method and
WiringProbe compatible ``get_link_target`` method and thus may be substituted
for the above when these functions are all that are required, e.g. for the
InteractiveWiringGuide.
"""
def __init__(self, hostname, port=DEFAULT_PORT):
"""Connect to a running ProxyServer."""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((hostname, port))
# A receive buffer
self.buf = b""
# Check for protocol version compatibility.
self.check_version()
def recvline(self):
"""Wait for a full line to be received from the server."""
while b"\n" not in self.buf:
data = self.sock.recv(1024)
self.buf += data
if len(data) == 0:
raise ProxyError("Remote server closed the connection.")
line, _, self.buf = self.buf.partition(b"\n")
return line
def check_version(self):
"""Check that the remote server has a compatible protocol version."""
self.sock.send("VERSION,{}\n".format(__version__).encode("ascii"))
if self.recvline() != b"OK":
raise ProxyError("Remote server has incompatible protocol version")
def set_led(self, led, state, c, f, b):
"""Set the state of an LED on the remote machine."""
self.sock.send("LED,{},{},{},{},{}\n".format(
c, f, b, led, int(state)).encode("ascii"))
if self.recvline() != b"OK":
raise ProxyError("Got unexpected response to LED command.")
def get_link_target(self, c, f, b, d):
"""Discover the other end of a specified link on a remote machine."""
self.sock.send("TARGET,{},{},{},{}\n".format(
c, f, b, int(d)).encode("ascii"))
response = self.recvline()
if response == b"None":
return None
else:
try:
c, f, b, d = map(int, response.split(b","))
return (c, f, b, Direction(d))
except ValueError:
raise ProxyError("Got unexpected response to TARGET command.")
| SpiNNakerManchester/SpiNNer | spinner/proxy.py | Python | gpl-2.0 | 8,272 |
# Copyright (C) 2013-2015 Red Hat, Inc.
# Copyright (C) 2015 Thomas Spura
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnfpluginscore import _, logger
import dnf
import dnf.cli
import dnf.exceptions
import dnf.i18n
import dnf.subject
import dnfpluginscore
import hawkey
import itertools
import os
import shutil
import copy
import glob
import subprocess
import sys
class BuildLocal(dnf.Plugin):
name = 'buildlocal'
def __init__(self, base, cli):
super(BuildLocal, self).__init__(base, cli)
self.base = base
self.cli = cli
if self.cli is not None:
self.cli.register_command(BuildLocalCommand)
class BuildLocalCommand(dnf.cli.Command):
aliases = ['buildlocal']
summary = _('Locally build and install package')
usage = _('PACKAGE...')
def __init__(self, cli):
super(BuildLocalCommand, self).__init__(cli)
self.opts = None
self.parser = None
def configure(self, args):
# setup sack and populate it with enabled repos
demands = self.cli.demands
demands.sack_activation = True
demands.available_repos = True
def run(self, args):
"""Execute the util action here."""
# Setup ArgumentParser to handle util
# You must only add options not used by dnf already
self.parser = dnfpluginscore.ArgumentParser(self.aliases[0])
self.parser.add_argument('packages', nargs='+',
help=_('packages to install'))
self.parser.add_argument("--source", #action='store_true',
help=_('download the src.rpm instead'))
self.parser.add_argument(
'--destdir',
help=_('download path, default is current dir'))
self.parser.add_argument(
'--resolve', action='store_true',
help=_('resolve and download needed dependencies'))
# parse the options/args
# list available options/args on errors & exit
self.opts = self.parser.parse_args(args)
# show util help & exit
if self.opts.help_cmd:
print(self.parser.format_help())
return
locations = self._download_source(self.opts.packages)
pkgs = copy.deepcopy(self.opts.packages)
for loc in locations:
ret = subprocess.call(["mock", loc])
if ret != 0:
print("Building failed. Aborting...")
sys.exit(1)
# Install possible packages before continuing
g = glob.glob("/var/lib/mock/fedora-*/result/*[0-9]*.rpm")
print("FILES IN GLOB", g)
# save to cache
# inspect what to install and collect full list while shrinking pkgs
print(locations)
        assert len(pkgs) == 0, "Not all packages were correctly installed %s" % pkgs
# do the install
return
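        # NOTE: everything below this return is currently unreachable;
        # it appears to be retained from an earlier download-style workflow.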
if self.opts.source:
locations = self._download_source(self.opts.packages)
else:
locations = self._download_rpms(self.opts.packages)
if self.opts.destdir:
dest = self.opts.destdir
else:
dest = dnf.i18n.ucd(os.getcwd())
self._move_packages(dest, locations)
def _download_rpms(self, pkg_specs):
"""Download packages to dnf cache."""
if self.opts.resolve:
pkgs = self._get_packages_with_deps(pkg_specs)
else:
pkgs = self._get_packages(pkg_specs)
self.base.download_packages(pkgs, self.base.output.progress)
locations = sorted([pkg.localPkg() for pkg in pkgs])
return locations
def _download_source(self, pkg_specs):
"""Download source packages to dnf cache."""
pkgs = self._get_packages(pkg_specs)
source_pkgs = self._get_source_packages(pkgs)
self._enable_source_repos()
pkgs = self._get_packages(source_pkgs, source=True)
self.base.download_packages(pkgs, self.base.output.progress)
locations = sorted([pkg.localPkg() for pkg in pkgs])
return locations
def _get_packages(self, pkg_specs, source=False):
"""Get packages matching pkg_specs."""
if source:
queries = map(self._get_query_source, pkg_specs)
else:
queries = map(self._get_query, pkg_specs)
pkgs = list(itertools.chain(*queries))
return pkgs
def _get_packages_with_deps(self, pkg_specs, source=False):
"""Get packages matching pkg_specs and the deps."""
pkgs = self._get_packages(pkg_specs)
goal = hawkey.Goal(self.base.sack)
for pkg in pkgs:
goal.install(pkg)
rc = goal.run()
if rc:
pkgs = goal.list_installs()
return pkgs
else:
logger.debug(_('Error in resolve'))
return []
@staticmethod
def _get_source_packages(pkgs):
"""Get list of source rpm names for a list of packages."""
source_pkgs = set()
for pkg in pkgs:
if pkg.sourcerpm:
source_pkgs.add(pkg.sourcerpm)
logger.debug(' --> Package : %s Source : %s',
str(pkg), pkg.sourcerpm)
elif pkg.arch == 'src':
source_pkgs.add("%s-%s.src.rpm" % (pkg.name, pkg.evr))
else:
logger.info(_("No source rpm defined for %s"), str(pkg))
return list(source_pkgs)
def _enable_source_repos(self):
"""Enable source repositories for enabled binary repositories.
Don't disable the binary ones because they can contain SRPMs as well
(this applies to COPR and to user-managed repos).
The dnf sack will be reloaded.
"""
# enable the source repos
for repo in self.base.repos.iter_enabled():
source_repo_id = '%s-source' % repo.id
if source_repo_id in self.base.repos:
source_repo = self.base.repos[source_repo_id]
logger.info(_('enabled %s repository'), source_repo.id)
source_repo.enable()
# reload the sack
self.base.fill_sack()
def _get_query(self, pkg_spec):
"""Return a query to match a pkg_spec."""
subj = dnf.subject.Subject(pkg_spec)
q = subj.get_best_query(self.base.sack)
q = q.available()
q = q.latest()
return q
def _get_query_source(self, pkg_spec):
""""Return a query to match a source rpm file name."""
pkg_spec = pkg_spec[:-4] # skip the .rpm
nevra = hawkey.split_nevra(pkg_spec)
q = self.base.sack.query()
q = q.available()
q = q.latest()
q = q.filter(name=nevra.name, version=nevra.version,
release=nevra.release, arch=nevra.arch)
return q
@staticmethod
def _move_packages(target, locations):
"""Move the downloaded package to target."""
if not os.path.exists(target):
os.makedirs(target)
for pkg in locations:
shutil.copy(pkg, target)
os.unlink(pkg)
return target
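# Invocation sketch (hedged): with this file installed on dnf's plugin path,
# the command defined above would be run as, e.g.:
#
#     dnf buildlocal <package> --resolve --destdir /tmp/rpms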
| tomspur/dnf-plugins-buildlocal | buildlocal.py | Python | gpl-2.0 | 8,142 |
from testscenarios import TestWithScenarios
import unittest
from geocode.geocode import GeoCodeAccessAPI
class GeoCodeTests(TestWithScenarios, unittest.TestCase):
scenarios = [
(
"Scenario - 1: Get latlng from address",
{
'address': "Sydney NSW",
'latlng': (-33.8674869, 151.2069902),
'method': "geocode",
}
),
(
"Scenario - 2: Get address from latlng",
{
'address': "Sydney NSW",
'latlng': (-33.8674869, 151.2069902),
'method': "address",
}
),
]
def setUp(self):
self.api = GeoCodeAccessAPI()
def test_geocode(self):
if self.method == 'geocode':
expected_address = self.address
expected_lat = self.latlng[0]
expected_lng = self.latlng[1]
geocode = self.api.get_geocode(expected_address)
self.assertAlmostEqual(geocode.lat, expected_lat, delta=5)
self.assertAlmostEqual(geocode.lng, expected_lng, delta=5)
self.assertIn(expected_address, geocode.address)
else:
expected_address = self.address
expected_lat = self.latlng[0]
expected_lng = self.latlng[1]
address = self.api.get_address(lat=expected_lat, lng=expected_lng)
self.assertIn(expected_address, address)
def tearDown(self):
pass
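# Direct-use sketch (hedged) of the API exercised by the scenarios above:
#
#     api = GeoCodeAccessAPI()
#     print(api.get_address(lat=-33.8674869, lng=151.2069902))
#     print(api.get_geocode("Sydney NSW").address)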
if __name__ == "__main__":
unittest.main()
| saleem-latif/GeoCode | tests/unittest_geocode.py | Python | gpl-2.0 | 1,541 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
package/module TEST
Description of the test.
Author: PABLO PIZARRO @ github.com/ppizarror
Date: AUGUST 2016
License: GPLv2
"""
__author__ = "ppizarror"
# Library imports
# noinspection PyUnresolvedReferences
from _testpath import * # @UnusedWildImport
import unittest
# Test constants
DISABLE_HEAVY_TESTS = True
DISABLE_HEAVY_TESTS_MSG = "Heavy tests were disabled"
VERBOSE = False
# Load arguments from the console
if __name__ == '__main__':
from bin.arguments import argument_parser_factory
argparser = argument_parser_factory("Template Test", verbose=True, version=True,
enable_skipped_test=True).parse_args()
DISABLE_HEAVY_TESTS = argparser.enableHeavyTest
VERBOSE = argparser.verbose
# UnitTest class
class ModuleTest(unittest.TestCase):
def setUp(self):
"""
        Start of the tests.
:return: void
:rtype: None
"""
pass
# noinspection PyMethodMayBeStatic
def testA(self):
"""
        Example test.
:return: void
:rtype: None
"""
pass
@unittest.skipIf(DISABLE_HEAVY_TESTS, DISABLE_HEAVY_TESTS_MSG)
def testSkipped(self):
"""
        Example of a skipped test.
:return: void
:rtype: None
"""
pass
# Main test
if __name__ == '__main__':
runner = unittest.TextTestRunner()
itersuite = unittest.TestLoader().loadTestsFromTestCase(ModuleTest)
runner.run(itersuite)
| ppizarror/korektor | test/_template.py | Python | gpl-2.0 | 1,642 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import serial
gSerialName = '/dev/ttyS0'
gBaudrate = 9600
gTimeout = 0
gRequestByte = 1
if __name__ == "__main__":
ser = serial.Serial(
port = gSerialName,
baudrate = gBaudrate,
bytesize = serial.EIGHTBITS,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
timeout = gTimeout,
xonxoff = False,
rtscts = False,
writeTimeout = None,
dsrdtr = False,
interCharTimeout = None)
    print 'waiting for message... ',
print ser.portstr + ',',
print str(ser.timeout) + ',',
print ser.baudrate
while True:
r = ser.read(gRequestByte)
if 0 != len(r):
print repr(r)
print
ser.close()
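# Companion sender sketch (hedged; assumes a second port such as /dev/ttyS1
# wired null-modem to the reader above):
#
#     import serial
#     tx = serial.Serial(port='/dev/ttyS1', baudrate=9600)
#     tx.write('hello')
#     tx.close()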
| yougukepp/openwrt | radio_tools/radio_tools/cli/serialServer.py | Python | gpl-2.0 | 822 |
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vendula Poncova <[email protected]>
#
import unittest
from blivet.devices import DiskDevice
from blivet.formats import get_format
from blivet.size import Size
from pyanaconda.modules.common.constants.objects import DISK_SELECTION
from pyanaconda.modules.common.errors.storage import UnavailableStorageError
from pyanaconda.modules.common.structures.validation import ValidationReport
from pyanaconda.modules.storage.disk_selection import DiskSelectionModule
from pyanaconda.modules.storage.disk_selection.selection_interface import DiskSelectionInterface
from pyanaconda.storage.initialization import create_storage
from tests.nosetests.pyanaconda_tests import check_dbus_property
class DiskSelectionInterfaceTestCase(unittest.TestCase):
"""Test DBus interface of the disk selection module."""
def setUp(self):
"""Set up the module."""
self.disk_selection_module = DiskSelectionModule()
self.disk_selection_interface = DiskSelectionInterface(self.disk_selection_module)
def _test_dbus_property(self, *args, **kwargs):
check_dbus_property(
self,
DISK_SELECTION,
self.disk_selection_interface,
*args, **kwargs
)
def selected_disks_property_test(self):
"""Test the selected disks property."""
self._test_dbus_property(
"SelectedDisks",
["sda", "sdb"]
)
def validate_selected_disks_test(self):
"""Test ValidateSelectedDisks."""
storage = create_storage()
self.disk_selection_module.on_storage_changed(storage)
dev1 = DiskDevice(
"dev1",
exists=False,
size=Size("15 GiB"),
fmt=get_format("disklabel")
)
dev2 = DiskDevice(
"dev2",
exists=False,
parents=[dev1],
size=Size("6 GiB"),
fmt=get_format("disklabel")
)
dev3 = DiskDevice(
"dev3",
exists=False,
parents=[dev2],
size=Size("6 GiB"),
fmt=get_format("disklabel")
)
storage.devicetree._add_device(dev1)
storage.devicetree._add_device(dev2)
storage.devicetree._add_device(dev3)
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks([])
)
self.assertEqual(report.is_valid(), True)
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1"])
)
self.assertEqual(report.is_valid(), False)
self.assertEqual(report.error_messages, [
"You selected disk dev1, which contains devices that also use "
"unselected disks dev2, dev3. You must select or de-select "
"these disks as a set."
])
self.assertEqual(report.warning_messages, [])
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1", "dev2"])
)
self.assertEqual(report.is_valid(), False)
self.assertEqual(report.error_messages, [
"You selected disk dev1, which contains devices that also "
"use unselected disk dev3. You must select or de-select "
"these disks as a set.",
"You selected disk dev2, which contains devices that also "
"use unselected disk dev3. You must select or de-select "
"these disks as a set."
])
self.assertEqual(report.warning_messages, [])
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1", "dev2", "dev3"])
)
self.assertEqual(report.is_valid(), True)
def exclusive_disks_property_test(self):
"""Test the exclusive disks property."""
self._test_dbus_property(
"ExclusiveDisks",
["sda", "sdb"]
)
def ignored_disks_property_test(self):
"""Test the ignored disks property."""
self._test_dbus_property(
"IgnoredDisks",
["sda", "sdb"]
)
def protected_disks_property_test(self):
"""Test the protected disks property."""
self._test_dbus_property(
"ProtectedDevices",
["sda", "sdb"]
)
def disk_images_property_test(self):
"""Test the protected disks property."""
self._test_dbus_property(
"DiskImages",
{
"image_1": "/path/1",
"image_2": "/path/2"
}
)
def get_usable_disks_test(self):
"""Test the GetUsableDisks method."""
with self.assertRaises(UnavailableStorageError):
self.disk_selection_interface.GetUsableDisks()
self.disk_selection_module.on_storage_changed(create_storage())
self.assertEqual(self.disk_selection_interface.GetUsableDisks(), [])
| atodorov/anaconda | tests/nosetests/pyanaconda_tests/module_disk_select_test.py | Python | gpl-2.0 | 5,967 |
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This is the Create_Modify_Interface function (along with its helpers).
It is used by WebSubmit for the "Modify Bibliographic Information" action.
"""
__revision__ = "$Id$"
import os
import re
import time
import pprint
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionError
from invenio.websubmit_functions.Retrieve_Data import Get_Field
from invenio.errorlib import register_exception
def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""):
"""Read a field's value from its corresponding text file in 'cur_dir' (if it exists) into memory.
Delete the text file after having read-in its value.
This function is called on the reload of the modify-record page. This way, the field in question
can be populated with the value last entered by the user (before reload), instead of always being
populated with the value still found in the DB.
"""
fld_val = ""
if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK):
fp = open( "%s/%s" % (cur_dir, fld), "r" )
fld_val = fp.read()
fp.close()
try:
os.unlink("%s/%s"%(cur_dir, fld))
except OSError:
# Cannot unlink file - ignore, let WebSubmit main handle this
pass
fld_val = fld_val.strip()
return fld_val
def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid):
"""Read a field's value from the record stored in the DB.
This function is called when the Create_Modify_Interface function is called for the first time
when modifying a given record, and field values must be retrieved from the database.
"""
fld_val = ""
if fieldcode != "":
for next_field_code in [x.strip() for x in fieldcode.split(",")]:
fld_val += "%s\n" % Get_Field(next_field_code, recid)
fld_val = fld_val.rstrip('\n')
return fld_val
def Create_Modify_Interface_transform_date(fld_val):
"""Accept a field's value as a string. If the value is a date in one of the following formats:
DD Mon YYYY (e.g. 23 Apr 2005)
YYYY-MM-DD (e.g. 2005-04-23)
...transform this date value into "DD/MM/YYYY" (e.g. 23/04/2005).
"""
if re.search("^[0-9]{2} [a-z]{3} [0-9]{4}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%d %b %Y"))
except (ValueError, TypeError):
# bad date format:
pass
elif re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%Y-%m-%d"))
except (ValueError,TypeError):
# bad date format:
pass
return fld_val
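# For example (per the docstring above):
#   Create_Modify_Interface_transform_date("23 Apr 2005") -> "23/04/2005"
#   Create_Modify_Interface_transform_date("2005-04-23")  -> "23/04/2005"
# Any other format is returned unchanged.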
def Create_Modify_Interface(parameters, curdir, form, user_info=None):
"""
Create an interface for the modification of a document, based on
the fields that the user has chosen to modify. This avoids having
to redefine a submission page for the modifications, but rely on
the elements already defined for the initial submission i.e. SBI
action (The only page that needs to be built for the modification
is the page letting the user specify a document to modify).
This function should be added at step 1 of your modification
    workflow, after the functions that retrieve the report number and
    record id (Get_Report_Number, Get_Recid). Functions at step 2 are
    the ones executed upon successful submission of the form.
Create_Modify_Interface expects the following parameters:
* "fieldnameMBI" - the name of a text file in the submission
working directory that contains a list of the names of the
WebSubmit fields to include in the Modification interface.
These field names are separated by"\n" or "+".
Given the list of WebSubmit fields to be included in the
modification interface, the values for each field are retrieved
for the given record (by way of each WebSubmit field being
configured with a MARC Code in the WebSubmit database). An HTML
FORM is then created. This form allows a user to modify certain
field values for a record.
    The file referenced by 'fieldnameMBI' is usually generated from a
    multiple select form field: users can then select one or several
    fields to modify.
Note that the function will display WebSubmit Response elements,
but will not be able to set an initial value: this must be done by
    the Response element itself.
Additionally the function creates an internal field named
'Create_Modify_Interface_DONE' on the interface, that can be
retrieved in curdir after the form has been submitted.
This flag is an indicator for the function that displayed values
should not be retrieved from the database, but from the submitted
values (in case the page is reloaded). You can also rely on this
value when building your WebSubmit Response element in order to
    retrieve the value either from the record, or from the submission
directory.
"""
global sysno,rn
t = ""
# variables declaration
fieldname = parameters['fieldnameMBI']
# Path of file containing fields to modify
the_globals = {
'doctype' : doctype,
'action' : action,
'act' : action, ## for backward compatibility
'step' : step,
'access' : access,
'ln' : ln,
'curdir' : curdir,
'uid' : user_info['uid'],
'uid_email' : user_info['email'],
'rn' : rn,
'last_step' : last_step,
'action_score' : action_score,
'__websubmit_in_jail__' : True,
'form': form,
'sysno': sysno,
'user_info' : user_info,
'__builtins__' : globals()['__builtins__'],
'Request_Print': Request_Print
}
if os.path.exists("%s/%s" % (curdir, fieldname)):
fp = open( "%s/%s" % (curdir, fieldname), "r" )
fieldstext = fp.read()
fp.close()
fieldstext = re.sub("\+","\n", fieldstext)
fields = fieldstext.split("\n")
else:
res = run_sql("SELECT fidesc FROM sbmFIELDDESC WHERE name=%s", (fieldname,))
if len(res) == 1:
fields = res[0][0].replace(" ", "")
fields = re.findall("<optionvalue=.*>", fields)
regexp = re.compile("""<optionvalue=(?P<quote>['|"]?)(?P<value>.*?)(?P=quote)""")
fields = [regexp.search(x) for x in fields]
fields = [x.group("value") for x in fields if x is not None]
fields = [x for x in fields if x not in ("Select", "select")]
else:
raise InvenioWebSubmitFunctionError("cannot find fields to modify")
#output some text
t = t+"<CENTER bgcolor=\"white\">The document <B>%s</B> has been found in the database.</CENTER><br />Please modify the following fields:<br />Then press the 'END' button at the bottom of the page<br />\n" % rn
for field in fields:
subfield = ""
value = ""
marccode = ""
text = ""
# retrieve and display the modification text
t = t + "<FONT color=\"darkblue\">\n"
res = run_sql("SELECT modifytext FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res)>0:
t = t + "<small>%s</small> </FONT>\n" % res[0][0]
# retrieve the marc code associated with the field
res = run_sql("SELECT marccode FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
marccode = res[0][0]
# then retrieve the previous value of the field
if os.path.exists("%s/%s" % (curdir, "Create_Modify_Interface_DONE")):
# Page has been reloaded - get field value from text file on server, not from DB record
value = Create_Modify_Interface_getfieldval_fromfile(curdir, field)
else:
# First call to page - get field value from DB record
value = Create_Modify_Interface_getfieldval_fromDBrec(marccode, sysno)
# If field is a date value, transform date into format DD/MM/YYYY:
value = Create_Modify_Interface_transform_date(value)
res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
element_type = res[0][3]
numcols = res[0][6]
numrows = res[0][5]
size = res[0][4]
maxlength = res[0][7]
val = res[0][8]
fidesc = res[0][9]
if element_type == "T":
text = "<TEXTAREA name=\"%s\" rows=%s cols=%s wrap>%s</TEXTAREA>" % (field, numrows, numcols, value)
elif element_type == "F":
text = "<INPUT TYPE=\"file\" name=\"%s\" size=%s maxlength=\"%s\">" % (field, size, maxlength)
elif element_type == "I":
value = re.sub("[\n\r\t]+", "", value)
text = "<INPUT name=\"%s\" size=%s value=\"%s\"> " % (field, size, val)
text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value)
elif element_type == "H":
text = "<INPUT type=\"hidden\" name=\"%s\" value=\"%s\">" % (field, val)
text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value)
elif element_type == "S":
values = re.split("[\n\r]+", value)
text = fidesc
if re.search("%s\[\]" % field, fidesc):
multipletext = "[]"
else:
multipletext = ""
if len(values) > 0 and not(len(values) == 1 and values[0] == ""):
text += "<SCRIPT>\n"
text += "var i = 0;\n"
text += "el = document.forms[0].elements['%s%s'];\n" % (field, multipletext)
text += "max = el.length;\n"
for val in values:
text += "var found = 0;\n"
text += "var i=0;\n"
text += "while (i != max) {\n"
text += " if (el.options[i].value == \"%s\" || el.options[i].text == \"%s\") {\n" % (val, val)
text += " el.options[i].selected = true;\n"
text += " found = 1;\n"
text += " }\n"
text += " i=i+1;\n"
text += "}\n"
#text += "if (found == 0) {\n"
#text += " el[el.length] = new Option(\"%s\", \"%s\", 1,1);\n"
#text += "}\n"
text += "</SCRIPT>\n"
elif element_type == "D":
text = fidesc
elif element_type == "R":
try:
co = compile(fidesc.replace("\r\n", "\n"), "<string>", "exec")
## Note this exec is safe WRT global variable because the
## Create_Modify_Interface has already been parsed by
## execfile within a protected environment.
the_globals['text'] = ''
exec co in the_globals
text = the_globals['text']
except:
msg = "Error in evaluating response element %s with globals %s" % (pprint.pformat(field), pprint.pformat(globals()))
register_exception(req=None, alert_admin=True, prefix=msg)
raise InvenioWebSubmitFunctionError(msg)
else:
text = "%s: unknown field type" % field
t = t + "<small>%s</small>" % text
# output our flag field
t += '<input type="hidden" name="Create_Modify_Interface_DONE" value="DONE\n" />'
# output some more text
t = t + "<br /><br /><CENTER><small><INPUT type=\"button\" width=400 height=50 name=\"End\" value=\"END\" onClick=\"document.forms[0].step.value = 2;user_must_confirm_before_leaving_page = false;document.forms[0].submit();\"></small></CENTER></H4>"
return t
| pombredanne/invenio | modules/websubmit/lib/functions/Create_Modify_Interface.py | Python | gpl-2.0 | 12,904 |
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, duration, eit)-tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
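# Usage sketch (hedged; 'ev' stands for an EPG event object as used above,
# and the service reference string is hypothetical):
#
#     begin, end, name, description, eit = parseEvent(ev)
#     entry = RecordTimerEntry(ServiceReference(service_ref_string), begin, end,
#                              name, description, eit)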
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
receiveRecordEvents = False
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
recordings = NavigationInstance.instance.getRecordings()
if not recordings: # no more recordings exist
rec_time = NavigationInstance.instance.RecordTimer.getNextRecordingTime()
if rec_time > 0 and (rec_time - time()) < 360:
print "another recording starts in", rec_time - time(), "seconds... do not shutdown yet"
else:
print "no starting records in the next 360 seconds... immediate shutdown"
RecordTimerEntry.shutdown() # immediate shutdown
elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop(default_yes = True):
if not RecordTimerEntry.receiveRecordEvents:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
# send fake event.. to check if another recordings are running or
# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
# send normal notification for the case the user leave the standby now..
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1, onSessionOpenCallback=RecordTimerEntry.stopTryQuitMainloop, default_yes = default_yes)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers == True:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.log_entries = []
self.resetState()
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
begin_shortdate = strftime("%Y%m%d", localtime(self.begin))
print "begin_date: ", begin_date
print "service_name: ", service_name
print "name:", self.name
print "description: ", self.description
filename = begin_date + " - " + service_name
if self.name:
if config.usage.setup_level.index >= 2: # expert+
if config.recording.filename_composition.value == "short":
filename = begin_shortdate + " - " + self.name
elif config.recording.filename_composition.value == "long":
filename += " - " + self.name + " - " + self.description
else:
filename += " - " + self.name # standard
else:
filename += " - " + self.name
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
if not self.dirname or not Directories.fileExists(self.dirname, 'w'):
if self.dirname:
self.dirnameHadToFallback = True
dirname = defaultMoviePath()
else:
dirname = self.dirname
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
#begin_date + " - " + service_name + description)
def tryPrepare(self):
if self.justplay:
return True
else:
self.calculateFilename()
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
return False
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime=self.begin+(self.end-self.begin)/2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
self.description = evt.getShortDescription()
if self.description == "":
						self.description = evt.getExtendedDescription()
event_id = evt.getEventId()
else:
event_id = -1
else:
event_id = self.eit
if event_id is None:
event_id = -1
prep_res=self.record_service.prepare(self.Filename + ".ts", self.begin, self.end, event_id, self.name.replace("\n", ""), self.description.replace("\n", ""), ' '.join(self.tags))
if prep_res:
if prep_res == -255:
self.log(4, "failed to write meta information")
else:
self.log(2, "'prepare' failed: error %d" % prep_res)
				# we must calculate our start time before the stopRecordService call because in Screens/Standby.py
				# TryQuitMainloop tries to get the next start time in the evEnd event handler...
self.do_backoff()
self.start_prepare = time() + self.backoff
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
return False
return True
def do_backoff(self):
if self.backoff == 0:
self.backoff = 5
else:
self.backoff *= 2
if self.backoff > 100:
self.backoff = 100
self.log(10, "backoff: retry in %d seconds" % self.backoff)
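	# Retry pacing implied by do_backoff above: successive failures wait
	# 5, 10, 20, 40, 80 and then a capped 100 seconds between attempts.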
def activate(self):
next_state = self.state + 1
self.log(5, "activating state %d" % next_state)
if next_state == self.StatePrepared:
if self.tryPrepare():
self.log(6, "prepare ok, waiting for begin")
# create file to "reserve" the filename
# because another recording at the same time on another service can try to record the same event
				# i.e. cable / sat: the second recording then needs its own extension. If we create the file
				# here, calculateFilename is happy.
if not self.justplay:
open(self.Filename + ".ts", "w").close()
# Give the Trashcan a chance to clean up
try:
Trashcan.instance.cleanIfIdle()
except Exception, e:
print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
print "[TIMER] Error:", e
# fine. it worked, resources are allocated.
self.next_activation = self.begin
self.backoff = 0
return True
self.log(7, "prepare failed")
if self.first_try_prepare:
self.first_try_prepare = False
cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_ref and not cur_ref.getPath():
if not config.recording.asktozap.value:
self.log(8, "asking user to zap away")
Notifications.AddNotificationWithCallback(self.failureCB, MessageBox, _("A timer failed to record!\nDisable TV and try again?\n"), timeout=20)
else: # zap without asking
self.log(9, "zap without asking")
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.failureCB(True)
elif cur_ref:
self.log(8, "currently running service is not a live service.. so stop it makes no sense")
else:
self.log(8, "currently no service running... so we dont need to stop it")
return False
elif next_state == self.StateRunning:
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.justplay:
if Screens.Standby.inStandby:
self.log(11, "wakeup and zap")
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
#wakeup standby
Screens.Standby.inStandby.Power()
else:
self.log(11, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
return True
else:
self.log(11, "start recording")
record_res = self.record_service.start()
if record_res:
self.log(13, "start record returned %d" % record_res)
self.do_backoff()
# retry
self.begin = time() + self.backoff
return False
return True
elif next_state == self.StateEnded:
old_end = self.end
if self.setAutoincreaseEnd():
self.log(12, "autoincrase recording %d minute(s)" % int((self.end - old_end)/60))
self.state -= 1
return True
self.log(12, "stop recording")
if not self.justplay:
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
if self.afterEvent == AFTEREVENT.STANDBY:
if not Screens.Standby.inStandby: # not already in standby
Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished record timer wants to set your\nBox to standby. Do that now?"), timeout = 20)
elif self.afterEvent == AFTEREVENT.DEEPSTANDBY:
if not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
if Screens.Standby.inStandby: # in standby
RecordTimerEntry.TryQuitMainloop() # start shutdown handling without screen
else:
Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished record timer wants to shut down\nyour Box. Shutdown now?"), timeout = 20)
return True
def setAutoincreaseEnd(self, entry = None):
if not self.autoincrease:
return False
if entry is None:
new_end = int(time()) + self.autoincreasetime
else:
			new_end = entry.begin - 30
dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
dummyentry.disabled = self.disabled
timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
new_end = simulTimerList[1].begin
del simulTimerList
			new_end -= 30 # leave 30 seconds of preparation time
del dummyentry
if new_end <= time():
return False
self.end = new_end
return True
def sendStandbyNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def getNextActivation(self):
if self.state == self.StateEnded:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end }[next_state]
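	# Scheduling sketch derived from the mapping above: a timer advances
	# StateWaiting -> StatePrepared at start_prepare, then StateRunning at
	# begin, then StateEnded at end; activate() performs each transition.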
def failureCB(self, answer):
if answer == True:
self.log(13, "ok, zapped away")
#NavigationInstance.instance.stopUserServices()
NavigationInstance.instance.playService(self.service_ref.ref)
else:
self.log(14, "user didn't want to zap away, record will probably fail")
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) != int(self.start_prepare):
self.log(15, "record time changed, start prepare is now: %s" % ctime(self.start_prepare))
def gotRecordEvent(self, record, event):
# TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
if self.__record_service.__deref__() != record.__deref__():
return
self.log(16, "record event %d" % event)
if event == iRecordableService.evRecordWriteError:
print "WRITE ERROR on recording, disk full?"
# show notification. the 'id' will make sure that it will be
# displayed only once, even if more timers are failing at the
# same time. (which is very likely in case of disk fullness)
Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
# ok, the recording has been stopped. we need to properly note
# that in our state, with also keeping the possibility to re-try.
# TODO: this has to be done.
elif event == iRecordableService.evStart:
text = _("A record has been started:\n%s") % self.name
if self.dirnameHadToFallback:
text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
if config.usage.show_message_when_recording_starts.value:
Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 8)
# we have record_service as property to automatically subscribe to record service events
def setRecordService(self, service):
if self.__record_service is not None:
print "[remove callback]"
NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
self.__record_service = service
if self.__record_service is not None:
print "[add callback]"
NavigationInstance.instance.record_event.append(self.gotRecordEvent)
record_service = property(lambda self: self.__record_service, setRecordService)
def createTimer(xml):
begin = int(xml.get("begin"))
end = int(xml.get("end"))
serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
description = xml.get("description").encode("utf-8")
repeated = xml.get("repeated").encode("utf-8")
disabled = long(xml.get("disabled") or "0")
justplay = long(xml.get("justplay") or "0")
afterevent = str(xml.get("afterevent") or "nothing")
afterevent = {
"nothing": AFTEREVENT.NONE,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"auto": AFTEREVENT.AUTO
}[afterevent]
eit = xml.get("eit")
if eit and eit != "None":
		eit = long(eit)
else:
eit = None
location = xml.get("location")
if location and location != "None":
location = location.encode("utf-8")
else:
location = None
tags = xml.get("tags")
if tags and tags != "None":
tags = tags.encode("utf-8").split(' ')
else:
tags = None
name = xml.get("name").encode("utf-8")
#filename = xml.get("filename").encode("utf-8")
entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags)
entry.repeated = int(repeated)
for l in xml.findall("log"):
time = int(l.get("time"))
code = int(l.get("code"))
msg = l.text.strip().encode("utf-8")
entry.log_entries.append((time, code, msg))
return entry
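# A minimal sketch of the <timer> element createTimer() parses; every value
# below is invented for illustration, and 'location'/'tags' are optional:
#
# <timer begin="1300000000" end="1300003600" serviceref="1:0:1:1234:5678:1:C00000:0:0:0:"
#        name="News" description="Evening news" afterevent="nothing"
#        eit="None" disabled="0" justplay="0" repeated="0">
#     <log code="15" time="1300000100">record time changed</log>
# </timer>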
class RecordTimer(timer.Timer):
def __init__(self):
timer.Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w):
# when activating a timer which has already passed,
		# simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = RecordTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
self.timer_list.remove(w)
			# has this timer reached the last state?
if w.state < RecordTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = RecordTimerEntry.StateWaiting
self.addTimerEntry(w)
else:
# Remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value)
insort(self.processed_timers, w)
self.stateChanged(w)
def isRecording(self):
isRunning = False
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
isRunning = True
return isRunning
def loadTimer(self):
# TODO: PATH!
if not Directories.fileExists(self.Filename):
return
try:
doc = xml.etree.cElementTree.parse(self.Filename)
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
print "timers.xml failed to load!"
try:
import os
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "timers.xml not found!"
return
root = doc.getroot()
# put out a message when at least one timer overlaps
checkit = True
for timer in root.findall("timer"):
newTimer = createTimer(timer)
if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
				checkit = False # one message is enough; don't repeat it for every overlapping timer
def saveTimer(self):
#root_element = xml.etree.cElementTree.Element('timers')
#root_element.text = "\n"
#for timer in self.timer_list + self.processed_timers:
# some timers (instant records) don't want to be saved.
# skip them
#if timer.dontSave:
#continue
#t = xml.etree.cElementTree.SubElement(root_element, 'timers')
#t.set("begin", str(int(timer.begin)))
#t.set("end", str(int(timer.end)))
#t.set("serviceref", str(timer.service_ref))
#t.set("repeated", str(timer.repeated))
#t.set("name", timer.name)
#t.set("description", timer.description)
#t.set("afterevent", str({
# AFTEREVENT.NONE: "nothing",
# AFTEREVENT.STANDBY: "standby",
# AFTEREVENT.DEEPSTANDBY: "deepstandby",
# AFTEREVENT.AUTO: "auto"}))
#if timer.eit is not None:
# t.set("eit", str(timer.eit))
#if timer.dirname is not None:
# t.set("location", str(timer.dirname))
#t.set("disabled", str(int(timer.disabled)))
#t.set("justplay", str(int(timer.justplay)))
#t.text = "\n"
#t.tail = "\n"
#for time, code, msg in timer.log_entries:
#l = xml.etree.cElementTree.SubElement(t, 'log')
#l.set("time", str(time))
#l.set("code", str(code))
#l.text = str(msg)
#l.tail = "\n"
#doc = xml.etree.cElementTree.ElementTree(root_element)
#doc.write(self.Filename)
		entries = []
		entries.append('<?xml version="1.0" ?>\n')
		entries.append('<timers>\n')
		for timer in self.timer_list + self.processed_timers:
			if timer.dontSave:
				continue
			entries.append('<timer')
			entries.append(' begin="' + str(int(timer.begin)) + '"')
			entries.append(' end="' + str(int(timer.end)) + '"')
			entries.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
			entries.append(' repeated="' + str(int(timer.repeated)) + '"')
			entries.append(' name="' + str(stringToXML(timer.name)) + '"')
			entries.append(' description="' + str(stringToXML(timer.description)) + '"')
			entries.append(' afterevent="' + str(stringToXML({
				AFTEREVENT.NONE: "nothing",
				AFTEREVENT.STANDBY: "standby",
				AFTEREVENT.DEEPSTANDBY: "deepstandby",
				AFTEREVENT.AUTO: "auto"
				}[timer.afterEvent])) + '"')
			if timer.eit is not None:
				entries.append(' eit="' + str(timer.eit) + '"')
			if timer.dirname is not None:
				entries.append(' location="' + str(stringToXML(timer.dirname)) + '"')
			if timer.tags is not None:
				entries.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
			entries.append(' disabled="' + str(int(timer.disabled)) + '"')
			entries.append(' justplay="' + str(int(timer.justplay)) + '"')
			entries.append('>\n')
			if config.recording.debug.value:
				for logtime, code, msg in timer.log_entries:
					entries.append('<log')
					entries.append(' code="' + str(code) + '"')
					entries.append(' time="' + str(logtime) + '"')
					entries.append('>')
					entries.append(str(stringToXML(msg)))
					entries.append('</log>\n')
			entries.append('</timer>\n')
		entries.append('</timers>\n')
		f = open(self.Filename, "w")
		for x in entries:
			f.write(x)
		f.close()
def getNextZapTime(self):
now = time()
for timer in self.timer_list:
if not timer.justplay or timer.begin < now:
continue
return timer.begin
return -1
def getNextRecordingTime(self):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if timer.justplay or next_act < now:
continue
return next_act
return -1
def isNextRecordAfterEventActionAuto(self):
now = time()
t = None
for timer in self.timer_list:
if timer.justplay or timer.begin < now:
continue
if t is None or t.begin == timer.begin:
t = timer
if t.afterEvent == AFTEREVENT.AUTO:
return True
return False
	def record(self, entry, ignoreTSC=False, dosave=True): # called by loadTimer with dosave=False
timersanitycheck = TimerSanityCheck(self.timer_list,entry)
if not timersanitycheck.check():
if ignoreTSC != True:
print "timer conflict detected!"
print timersanitycheck.getSimulTimerList()
return timersanitycheck.getSimulTimerList()
else:
print "ignore timer conflict"
elif timersanitycheck.doubleCheck():
print "ignore double timer"
return None
entry.timeChanged()
print "[Timer] Record " + str(entry)
		entry.timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return None
def isInTimer(self, eventid, begin, duration, service):
time_match = 0
chktime = None
chktimecmp = None
chktimecmp_end = None
end = begin + duration
refstr = str(service)
for x in self.timer_list:
check = x.service_ref.ref.toString() == refstr
if not check:
sref = x.service_ref.ref
parent_sid = sref.getUnsignedData(5)
parent_tsid = sref.getUnsignedData(6)
if parent_sid and parent_tsid: # check for subservice
sid = sref.getUnsignedData(1)
tsid = sref.getUnsignedData(2)
sref.setUnsignedData(1, parent_sid)
sref.setUnsignedData(2, parent_tsid)
sref.setUnsignedData(5, 0)
sref.setUnsignedData(6, 0)
check = sref.toCompareString() == refstr
num = 0
if check:
check = False
event = eEPGCache.getInstance().lookupEventId(sref, eventid)
num = event and event.getNumOfLinkageServices() or 0
sref.setUnsignedData(1, sid)
sref.setUnsignedData(2, tsid)
sref.setUnsignedData(5, parent_sid)
sref.setUnsignedData(6, parent_tsid)
for cnt in range(num):
subservice = event.getLinkageService(sref, cnt)
if sref.toCompareString() == subservice.toCompareString():
check = True
break
if check:
if x.repeated != 0:
if chktime is None:
chktime = localtime(begin)
chktimecmp = chktime.tm_wday * 1440 + chktime.tm_hour * 60 + chktime.tm_min
chktimecmp_end = chktimecmp + (duration / 60)
time = localtime(x.begin)
for y in (0, 1, 2, 3, 4, 5, 6):
if x.repeated & (1 << y) and (x.begin <= begin or begin <= x.begin <= end):
timecmp = y * 1440 + time.tm_hour * 60 + time.tm_min
if timecmp <= chktimecmp < (timecmp + ((x.end - x.begin) / 60)):
time_match = ((timecmp + ((x.end - x.begin) / 60)) - chktimecmp) * 60
elif chktimecmp <= timecmp < chktimecmp_end:
time_match = (chktimecmp_end - timecmp) * 60
else: #if x.eit is None:
if begin <= x.begin <= end:
diff = end - x.begin
if time_match < diff:
time_match = diff
elif x.begin <= begin <= x.end:
diff = x.end - begin
if time_match < diff:
time_match = diff
if time_match:
break
return time_match
def removeEntry(self, entry):
print "[Timer] Remove " + str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
print "state: ", entry.state
print "in processed: ", entry in self.processed_timers
print "in running: ", entry in self.timer_list
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
# now the timer should be in the processed_timers list. remove it from there.
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
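# A hedged usage sketch (not part of the original module; it assumes a live
# enigma2 session where NavigationInstance and the config tree are set up):
#
#   rt = RecordTimer()
#   entry = RecordTimerEntry(service_ref, begin, end, "name", "description", None)
#   conflicts = rt.record(entry)  # None on success, else the conflicting timers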
| openpli-arm/enigma2-arm | RecordTimer.py | Python | gpl-2.0 | 26,731 |
import unittest
import mock
import blivet
from pykickstart.constants import CLEARPART_TYPE_ALL, CLEARPART_TYPE_LINUX, CLEARPART_TYPE_NONE
from parted import PARTITION_NORMAL
from blivet.flags import flags
DEVICE_CLASSES = [
blivet.devices.DiskDevice,
blivet.devices.PartitionDevice
]
@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
class ClearPartTestCase(unittest.TestCase):
def setUp(self):
flags.testing = True
def test_should_clear(self):
""" Test the Blivet.should_clear method. """
b = blivet.Blivet()
DiskDevice = blivet.devices.DiskDevice
PartitionDevice = blivet.devices.PartitionDevice
# sda is a disk with an existing disklabel containing two partitions
sda = DiskDevice("sda", size=100000, exists=True)
sda.format = blivet.formats.get_format("disklabel", device=sda.path,
exists=True)
sda.format._parted_disk = mock.Mock()
sda.format._parted_device = mock.Mock()
sda.format._parted_disk.configure_mock(partitions=[])
b.devicetree._add_device(sda)
# sda1 is a partition containing an existing ext4 filesystem
sda1 = PartitionDevice("sda1", size=500, exists=True,
parents=[sda])
sda1._parted_partition = mock.Mock(**{'type': PARTITION_NORMAL,
'getFlag.return_value': 0})
sda1.format = blivet.formats.get_format("ext4", mountpoint="/boot",
device=sda1.path,
exists=True)
b.devicetree._add_device(sda1)
# sda2 is a partition containing an existing vfat filesystem
sda2 = PartitionDevice("sda2", size=10000, exists=True,
parents=[sda])
sda2._parted_partition = mock.Mock(**{'type': PARTITION_NORMAL,
'getFlag.return_value': 0})
sda2.format = blivet.formats.get_format("vfat", mountpoint="/foo",
device=sda2.path,
exists=True)
b.devicetree._add_device(sda2)
# sdb is an unpartitioned disk containing an xfs filesystem
sdb = DiskDevice("sdb", size=100000, exists=True)
sdb.format = blivet.formats.get_format("xfs", device=sdb.path,
exists=True)
b.devicetree._add_device(sdb)
# sdc is an unformatted/uninitialized/empty disk
sdc = DiskDevice("sdc", size=100000, exists=True)
b.devicetree._add_device(sdc)
# sdd is a disk containing an existing disklabel with no partitions
sdd = DiskDevice("sdd", size=100000, exists=True)
sdd.format = blivet.formats.get_format("disklabel", device=sdd.path,
exists=True)
b.devicetree._add_device(sdd)
#
# clearpart type none
#
b.config.clear_part_type = CLEARPART_TYPE_NONE
self.assertFalse(b.should_clear(sda1),
msg="type none should not clear any partitions")
self.assertFalse(b.should_clear(sda2),
msg="type none should not clear any partitions")
b.config.initialize_disks = False
self.assertFalse(b.should_clear(sda),
msg="type none should not clear non-empty disks")
self.assertFalse(b.should_clear(sdb),
msg="type none should not clear formatting from "
"unpartitioned disks")
self.assertFalse(b.should_clear(sdc),
msg="type none should not clear empty disk without "
"initlabel")
self.assertFalse(b.should_clear(sdd),
msg="type none should not clear empty partition table "
"without initlabel")
b.config.initialize_disks = True
self.assertFalse(b.should_clear(sda),
msg="type none should not clear non-empty disks even "
"with initlabel")
self.assertFalse(b.should_clear(sdb),
msg="type non should not clear formatting from "
"unpartitioned disks even with initlabel")
self.assertTrue(b.should_clear(sdc),
msg="type none should clear empty disks when initlabel "
"is set")
self.assertTrue(b.should_clear(sdd),
msg="type none should clear empty partition table when "
"initlabel is set")
#
# clearpart type linux
#
b.config.clear_part_type = CLEARPART_TYPE_LINUX
self.assertTrue(b.should_clear(sda1),
msg="type linux should clear partitions containing "
"ext4 filesystems")
self.assertFalse(b.should_clear(sda2),
msg="type linux should not clear partitions "
"containing vfat filesystems")
b.config.initialize_disks = False
self.assertFalse(b.should_clear(sda),
msg="type linux should not clear non-empty disklabels")
self.assertTrue(b.should_clear(sdb),
msg="type linux should clear linux-native whole-disk "
"formatting regardless of initlabel setting")
self.assertFalse(b.should_clear(sdc),
msg="type linux should not clear unformatted disks "
"unless initlabel is set")
self.assertFalse(b.should_clear(sdd),
msg="type linux should not clear disks with empty "
"partition tables unless initlabel is set")
b.config.initialize_disks = True
self.assertFalse(b.should_clear(sda),
msg="type linux should not clear non-empty disklabels")
self.assertTrue(b.should_clear(sdb),
msg="type linux should clear linux-native whole-disk "
"formatting regardless of initlabel setting")
self.assertTrue(b.should_clear(sdc),
msg="type linux should clear unformatted disks when "
"initlabel is set")
self.assertTrue(b.should_clear(sdd),
msg="type linux should clear disks with empty "
"partition tables when initlabel is set")
sda1.protected = True
self.assertFalse(b.should_clear(sda1),
msg="protected devices should never be cleared")
self.assertFalse(b.should_clear(sda),
msg="disks containing protected devices should never "
"be cleared")
sda1.protected = False
#
# clearpart type all
#
b.config.clear_part_type = CLEARPART_TYPE_ALL
self.assertTrue(b.should_clear(sda1),
msg="type all should clear all partitions")
self.assertTrue(b.should_clear(sda2),
msg="type all should clear all partitions")
b.config.initialize_disks = False
self.assertTrue(b.should_clear(sda),
msg="type all should initialize all disks")
self.assertTrue(b.should_clear(sdb),
msg="type all should initialize all disks")
self.assertTrue(b.should_clear(sdc),
msg="type all should initialize all disks")
self.assertTrue(b.should_clear(sdd),
msg="type all should initialize all disks")
b.config.initialize_disks = True
self.assertTrue(b.should_clear(sda),
msg="type all should initialize all disks")
self.assertTrue(b.should_clear(sdb),
msg="type all should initialize all disks")
self.assertTrue(b.should_clear(sdc),
msg="type all should initialize all disks")
self.assertTrue(b.should_clear(sdd),
msg="type all should initialize all disks")
sda1.protected = True
self.assertFalse(b.should_clear(sda1),
msg="protected devices should never be cleared")
self.assertFalse(b.should_clear(sda),
msg="disks containing protected devices should never "
"be cleared")
sda1.protected = False
#
# clearpart type list
#
# TODO
def tearDown(self):
flags.testing = False
def test_initialize_disk(self):
"""
magic partitions
non-empty partition table
"""
pass
def test_recursive_remove(self):
"""
protected device at various points in stack
"""
pass
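# Convenience entry point so the module can also be run standalone; upstream
# presumably drives this file through its own test runner instead.
if __name__ == "__main__":
    unittest.main()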
| atodorov/blivet | tests/clearpart_test.py | Python | gpl-2.0 | 9,228 |
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <[email protected]>
#
# Entry-type module for 'u-boot-spl-nodtb.bin'
#
from entry import Entry
from blob import Entry_blob
class Entry_u_boot_spl_nodtb(Entry_blob):
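    """U-Boot SPL binary built without the device tree blob appended."""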
def __init__(self, image, etype, node):
Entry_blob.__init__(self, image, etype, node)
def GetDefaultFilename(self):
return 'spl/u-boot-spl-nodtb.bin'
| ev3dev/u-boot | tools/binman/etype/u_boot_spl_nodtb.py | Python | gpl-2.0 | 431 |
# Working Unit Test Benches for Network Simulator
# Last Revised: 14 November 2015 by Sushant Sundaresh & Sith Domrongkitchaiporn
'''
IMPORTANT: Please turn off logging (MEASUREMENT_ENABLE = False) in constants.py
before running these testbenches.
'''
# Unit Testing Framework
import unittest
# Test Modules
import reporter, node, host, link, router
import flow, event_simulator, event, events
import link_buffer, packet
import constants
from static_flow_test_node import *
import visualize
class testMeasurementAnalysis (unittest.TestCase):
'''
Tests visualize.py time-averaging function
'''
def test_time_averaging (self):
self.assertTrue(visualize.test_windowed_time_average())
class TestStaticDataSinkFlow (unittest.TestCase):
'''
### Might break for dynamic TCP ###
if this is implemented on receiver side as well
Create Flow Data Sink
Create Static_Data_Sink_Test_Node
Tell Flow its number or expected packets
Create Event Simulator
For now:
Ask flow to receive a packet, check that Ack has same packet ID
Ask flow to receive the same packet again, should get same result.
'''
sim = "" # event simulator
f = "" # flow, data source, static
n = "" # test node
def setUp (self):
self.f = flow.Data_Sink("f1sink","h2","h1",\
3*constants.DATA_PACKET_BITWIDTH, 1.0)
self.n = Static_Data_Sink_Test_Node ("h2","f1sink")
self.sim = event_simulator.Event_Simulator({"f1sink":self.f,"h2":self.n})
self.f.set_flow_size(2)
def test_basic_ack (self):
packets = [ packet.Packet("f1source","h1","h2","",0,0), \
packet.Packet("f1source","h1","h2","",1,0)]
self.n.receive(packets[0])
self.assertEqual(self.n.head_of_tx_buff(),0)
self.n.receive(packets[1])
self.assertEqual(self.n.head_of_tx_buff(),1)
# Two packets received, two packets acknowledged
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
# Repeated packets just get repeated acks
self.n.receive(packets[1])
self.assertEqual(self.n.head_of_tx_buff(),1)
class TestStaticDataSourceFlow (unittest.TestCase):
'''
### Will break for dynamic TCP ###
Assumes Flow (Data Source) Window Size
hard-coded to 2
Create Flow Data Source
Create Static_Data_Source_Test_Node
Create Event Simulator
Start Flow -> pokes tcp -> sends two packets to Node
Check that these were sent to Node
Fake Acks through Node to Flow
Check that this updates Node Tx_Buffer (more sends from Flow)
Check what Timeout Does
'''
sim = "" # event simulator
f = "" # flow, data source, static
n = "" # test node
def setUp (self):
self.f = flow.Data_Source("f1","h1","h2",\
3*constants.DATA_PACKET_BITWIDTH, 1.0)
self.n = Static_Data_Source_Test_Node ("h1","f1")
self.sim = event_simulator.Event_Simulator({"f1":self.f,"h1":self.n})
def test_static_flow_source (self):
# The first static flow source implementation
# just has packets/acks have the same id.
# There is no chance of 'duplicate acks' to indicate loss
self.f.start() # do this manually so don't have to run simulator
self.assertEqual(self.n.head_of_tx_buff(),0)
packet1 = self.n.tx_buff[0]
self.assertEqual(self.n.head_of_tx_buff(),1)
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
self.n.receive(packet.Packet("","h2","h1",\
constants.DATA_PACKET_ACKNOWLEDGEMENT_TYPE,\
0,constants.DATA_ACK_BITWIDTH))
self.assertEqual(self.n.head_of_tx_buff(),2)
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
self.f.time_out(packet1)
# check that next packet has id 1
self.assertEqual(self.n.head_of_tx_buff(),1)
class TestLinkTransmissionEvents(unittest.TestCase):
sim = "" # simulator
link = "" # link
lNode = "" # left node
rNode = "" # right node
lPs = [] # left packets
rPs = [] # right packets
# Create Event Simulator
# Create Link & Nodes (not Hosts, so don't need working Flow) on either side
# Create three packets from either side, to the other, and send them.
def setUp (self):
self.lNode = node.Node("h1")
self.rNode = node.Node("h2")
# don't need flow, as no packet timeouts created to callback to flow
# and node receive is a dummy function
for i in 1, 2, 3:
self.lPs.append(packet.Packet("","h1","h2","data",i,1000)) # 1000kbit
self.rPs.append(packet.Packet("","h2","h1","data",i,1000))
self.link = link.Link("l1", "h1", "h2", 1000.0, 10.0, 3000.0)
# 1000kbit/ms, 10 ms prop delay, 3000kbit buffers
self.sim = event_simulator.Event_Simulator({"l1":self.link, \
"h1":self.lNode, \
"h2":self.rNode})
# Order the packet sends 2L-2R-L-R
# Run Sim Forward
# Watch for transmission events in EventSimulator, with proper timestamp
# Watch for propagation events in EventSimulator, with proper timestamp
# Make sure these are sequential, with only one Tx event at a time in
# the queue, and two propagations in each direction chained, and one isolated.
# Note this tests most events we're trying to deal with.
def test_packet_callbacks_and_timing (self):
self.link.send(self.rPs.pop(0),"h2") # right going packets
# are favored in time tie breaks
self.link.send(self.rPs.pop(0),"h2")
self.link.send(self.rPs.pop(0),"h2")
self.link.send(self.lPs.pop(0),"h1")
# all have timestamp 0.0
# so link should switch directions
# between each packet
# Confirm Handle_Packet_Transmission events show up in EventSim
# with proper timestamps
self.assertTrue(self.sim.get_current_time() == 0)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 1)
# right packet1 load
# into channel at
# 1ms going h2->h1
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 11)
# propagation done
# direction switched
# next packet loaded
# LTR
self.assertTrue(self.link.transmission_direction == constants.LTR)
# next event is a load (12)
# then a propagation (22)
# then
# the next event should be
# both remaining h2 packets
# loaded, as left buffer
# is empty
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 12)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 22)
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event()
self.sim.run_next_event() # two loads
self.assertTrue(self.sim.get_current_time() == 24)
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event() # two propagations
self.sim.run_next_event()
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.assertTrue(self.sim.get_current_time() == 34)
class TestLinkBuffer(unittest.TestCase):
# test variables
l = "" # a link buffer
p = "" # a packet exactly half the size of the buffer
s = "" # event simulator
def setUp (self):
c = 100 # buffer capacity in bits
self.s = event_simulator.Event_Simulator({})
self.l = link_buffer.LinkBuffer(c)
self.l.set_event_simulator(self.s)
self.p = packet.Packet("","","","","",c/2)
def test_enqueue_dequeue (self):
self.assertTrue(self.l.can_enqueue(self.p))
self.l.enqueue(self.p)
self.assertTrue(self.l.can_enqueue(self.p))
self.l.enqueue(self.p)
self.assertFalse(self.l.can_enqueue(self.p))
self.l.enqueue(self.p) # dropped
self.l.enqueue(self.p) # dropped
self.assertTrue(self.l.can_dequeue())
self.assertTrue( isinstance(self.l.dequeue(),packet.Packet) )
self.assertTrue(self.l.can_dequeue())
self.assertTrue( isinstance(self.l.dequeue(),packet.Packet) )
self.assertFalse(self.l.can_dequeue())
with self.assertRaises(ValueError):
self.l.dequeue()
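# Capacity arithmetic behind the test above: each packet is c/2 = 50 bits
# against a 100-bit buffer, so exactly two packets fit and later ones drop.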
class TestReporter(unittest.TestCase):
# Set ID of reporter
def test_get_id(self):
ID = "H1"
r = reporter.Reporter(ID)
r.log("Hello World!")
self.assertEqual(r.get_id(), ID)
class TestNode(unittest.TestCase):
    # Set ID of node through super initialization
def test_init(self):
ID = "H2"
n = node.Node(ID)
n.log("Hello World!")
self.assertEqual(n.get_id(), ID)
# Should not break, as receive is a dummy function
def test_receive(self):
ID = "H2"
n = node.Node(ID)
n.receive(0)
class TestEventSimulator(unittest.TestCase):
def test_init_and_basic_simulation (self):
e = event_simulator.Event_Simulator({"h1":host.Host("h1",["l1"]),\
"h2":host.Host("h2",["l1"]),\
"f1":flow.Data_Source("f1", "h1", "h2", 20, 1)})
self.assertEqual(e.get_current_time(), 0.0)
self.assertFalse(e.are_flows_done())
self.assertEqual(e.get_element("h1").get_id(), "h1")
self.assertEqual(e.get_element("h2").get_id(), "h2")
self.assertEqual(e.get_element("f1").get_id(), "f1")
e.request_event(event.Event().set_completion_time(1.0))
e.request_event(event.Event().set_completion_time(2.0))
e.request_event(event.Event().set_completion_time(0.5))
e.request_event(event.Event().set_completion_time(1.5))
e.request_event(event.Event().set_completion_time(0.2))
''' Now event heap should be ordered 0.2, 0.5, 1, 1.5, 2 '''
e.run_next_event()
self.assertEqual(e.get_current_time(), 0.2)
e.run_next_event()
self.assertEqual(e.get_current_time(), 0.5)
e.run_next_event()
self.assertEqual(e.get_current_time(), 1.0)
e.run_next_event()
self.assertEqual(e.get_current_time(), 1.5)
e.run_next_event()
self.assertEqual(e.get_current_time(), 2.0)
class TestHost(unittest.TestCase):
    # Set ID of host through super initialization
def test_init(self):
ID = "H1"
Links = ["L1"]
h = host.Host(ID,Links)
h.log("Hello World!")
self.assertEqual(h.get_id(), ID)
with self.assertRaises(ValueError):
h2 = host.Host(ID,["L1","L2"])
class TestLink(unittest.TestCase):
ID = ""
left = ""
right = ""
rate = ""
delay = ""
buff = ""
l = ""
def setUp(self):
self.ID = "L1"
self.left = "H1"
self.right = "H2"
self.rate = "10"
self.delay = "10"
self.buff = "64"
self.l = link.Link(self.ID,self.left,self.right,self.rate,self.delay,self.buff)
    # Set ID of link through super initialization
def test_get_id(self):
self.assertEqual(self.l.get_id(), self.ID)
def test_get_left(self):
self.assertEqual(self.l.get_left(),self.left)
def test_get_right(self):
self.assertEqual(self.l.get_right(),self.right)
def test_get_rate(self):
self.assertEqual(self.l.get_rate(),float(self.rate))
def test_get_delay(self):
self.assertEqual(self.l.get_delay(),float(self.delay))
def test_get_buff(self):
self.assertEqual(self.l.get_buff(),float(self.buff) * 8.0) # bytes to bits
class TestRouter(unittest.TestCase):
    # Set ID of router through super initialization
def test_init(self):
ID = "R1"
links = ["H1","H2","H3"]
r = router.Router(ID,links)
self.assertEqual(r.get_id(), ID)
self.assertEqual(r.get_link(),links)
class TestFlow(unittest.TestCase):
    # Set ID of flow through super initialization
def test_init(self):
ID = "F1"
source = "H1"
dest = "H2"
size = "20"
start = "1"
f = flow.Flow(ID,source,dest,size,start)
self.assertEqual(f.get_id(), ID)
self.assertEqual(f.get_source(), source)
self.assertEqual(f.get_dest(), dest)
self.assertEqual(f.get_size(), int(size) * 8.0 * 1000.0) # MByte -> KBit
self.assertEqual(f.get_start(), int(start) * 1000) # s to ms
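    # Worked example of the conversions checked above: a "20" MByte flow is
    # 20 * 8 * 1000 = 160000 kbit, and a "1" second start time is 1000 ms.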
# Run Specific Tests
if __name__ == "__main__":
reporter_suite = unittest.TestLoader().loadTestsFromTestCase(TestReporter)
node_suite = unittest.TestLoader().loadTestsFromTestCase(TestNode)
host_suite = unittest.TestLoader().loadTestsFromTestCase(TestHost)
link_suite = unittest.TestLoader().loadTestsFromTestCase(TestLink)
router_suite = unittest.TestLoader().loadTestsFromTestCase(TestRouter)
flow_suite = unittest.TestLoader().loadTestsFromTestCase(TestFlow)
sim_suite = unittest.TestLoader().loadTestsFromTestCase(TestEventSimulator)
linkbuffer_suite = unittest.TestLoader().loadTestsFromTestCase(TestLinkBuffer)
link_tx_suite = unittest.TestLoader().loadTestsFromTestCase(TestLinkTransmissionEvents)
static_flow_data_source_suite = \
unittest.TestLoader().loadTestsFromTestCase(TestStaticDataSourceFlow)
static_flow_data_sink_suite = \
unittest.TestLoader().loadTestsFromTestCase(TestStaticDataSinkFlow)
visualize_suite = \
unittest.TestLoader().loadTestsFromTestCase(testMeasurementAnalysis)
test_suites = [reporter_suite, node_suite, host_suite, link_suite,\
router_suite, flow_suite, sim_suite, linkbuffer_suite,\
link_tx_suite,static_flow_data_source_suite,\
static_flow_data_sink_suite, visualize_suite]
for suite in test_suites:
unittest.TextTestRunner(verbosity=2).run(suite)
print "\n\n\n" | sssundar/NetworkSimulator | Code/Python/unit_test_benches.py | Python | gpl-2.0 | 12,927 |
#!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
# www.pythonhosted.org/setuptools/setuptools.html
setup(
name="lines",
version="1.4.0",
description="Program for plotting powder diffraction patterns and background subtraction",
author="Stef Smeets",
author_email="[email protected]",
license="GPL",
url="https://github.com/stefsmeets/lines",
classifiers=[
'Programming Language :: Python :: 2.7',
],
packages=["lines", "zeolite_database"],
install_requires=[
"matplotlib<3.0",
"numpy>=1.10",
"scipy>=0.16",
],
package_data={
"": ["LICENCE", "readme.md", "setup.py"],
"zeolite_database": ["*.cif"],
},
entry_points={
'console_scripts': [
'lines = lines.lines:main',
'cif2xy = lines.cif2xy:main',
]
}
)
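# A minimal install/usage sketch (the script names come from entry_points
# above; 'pattern.xy' and 'some_zeolite.cif' are invented example files):
#   pip install .
#   lines pattern.xy
#   cif2xy some_zeolite.cif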
| stefsmeets/lines | setup.py | Python | gpl-2.0 | 911 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
[tests/stdlib/test_help.py]
Test the help command.
"""
import unittest
#import os
#from ergonomica import ergo, ENV
class TestHelp(unittest.TestCase):
"""Tests the 'help' command."""
def test_list_commands(self):
"""
Tests listing all commands using the 'help commands' command.
"""
| ergonomica/ergonomica | tests/stdlib/test_help.py | Python | gpl-2.0 | 375 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Source: "Les recettes Python de Tyrtamos" (Tyrtamos' Python recipes)
http://python.jpvweb.com/mesrecettespython/doku.php?id=date_de_paques
"""
class jourferie:
def datepaques(self,an):
"""Calcule la date de Pâques d'une année donnée an (=nombre entier)"""
a=an//100
b=an%100
c=(3*(a+25))//4
d=(3*(a+25))%4
e=(8*(a+11))//25
f=(5*a+b)%19
g=(19*f+c-e)%30
h=(f+11*g)//319
j=(60*(5-d)+b)//4
k=(60*(5-d)+b)%4
m=(2*j-k-g+h)%7
n=(g-h+m+114)//31
p=(g-h+m+114)%31
jour=p+1
mois=n
return [jour, mois, an]
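        # Worked example: datepaques(2024) returns [31, 3, 2024], since
        # Easter Sunday fell on 31 March 2024.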
def dateliste(self,c, sep='/'):
"""Transforme une date chaîne 'j/m/a' en une date liste [j,m,a]"""
j, m, a = c.split(sep)
return [int(j), int(m), int(a)]
def datechaine(self,d, sep='/'):
"""Transforme une date liste=[j,m,a] en une date chaîne 'jj/mm/aaaa'"""
return ("%02d" + sep + "%02d" + sep + "%0004d") % (d[0], d[1], d[2])
def jourplus(self,d, n=1):
"""Donne la date du nième jour suivant d=[j, m, a] (n>=0)"""
j, m, a = d
fm = [0,31,28,31,30,31,30,31,31,30,31,30,31]
        if (a%4==0 and a%100!=0) or a%400==0: # leap year?
fm[2] = 29
for i in xrange(0,n):
j += 1
if j > fm[m]:
j = 1
m += 1
if m>12:
m = 1
a += 1
return [j,m,a]
def jourmoins(self,d, n=-1):
"""Donne la date du nième jour précédent d=[j, m, a] (n<=0)"""
j, m, a = d
fm = [0,31,28,31,30,31,30,31,31,30,31,30,31]
        if (a%4==0 and a%100!=0) or a%400==0: # leap year?
fm[2] = 29
for i in xrange(0,abs(n)):
j -= 1
if j < 1:
m -= 1
if m<1:
m = 12
a -= 1
j = fm[m]
return [j,m,a]
def numjoursem(self,d):
"""Donne le numéro du jour de la semaine d'une date d=[j,m,a]
lundi=1, mardi=2, ..., dimanche=7
Algorithme de Maurice Kraitchik (1882?1957)"""
j, m, a = d
if m<3:
m += 12
a -= 1
n = (j +2*m + (3*(m+1))//5 +a + a//4 - a//100 + a//400 +2) % 7
return [6, 7, 1, 2, 3, 4, 5][n]
def joursem(self,d):
"""Donne le jour de semaine en texte à partir de son numéro
lundi=1, mardi=2, ..., dimanche=7"""
return ["", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi",
"dimanche"][self.numjoursem(d)]
def joursferiesliste(self,an, sd=0):
"""Liste des jours fériés France en date-liste de l'année an (nb entier).
sd=0 (=defaut): tous les jours fériés.
sd=1: idem sans les sammedis-dimanches.
sd=2: tous + les 2 jours fériés supplémentaires d'Alsace-Moselle.
sd=3: idem sd=2 sans les samedis-dimanches"""
        F = [] # list of holiday dates, as date-lists d=[j,m,a]
        L = [] # list of the corresponding holiday labels
dp = self.datepaques(an)
        # Jour de l'an (New Year's Day)
d = [1,1,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Jour de l'an")
        # Vendredi saint (Good Friday, Alsace-Moselle only)
d = self.jourmoins(dp, -2)
if (sd==0) or (sd==2):
#if sd>=2:
F.append(d)
L.append(u"Vendredi saint (Alsace-Moselle)")
        # Dimanche de Paques (Easter Sunday)
d = dp
if (sd==0) or (sd==2):
F.append(d)
L.append(u"Dimanche de Paques")
        # Lundi de Paques (Easter Monday)
d = self.jourplus(dp, +1)
F.append(d)
L.append(u"Lundi de Paques")
        # Fête du travail (Labour Day)
d = [1,5,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Fete du travail")
        # Victoire des allies 1945 (V-E Day)
d = [8,5,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Victoire des allies 1945")
        # Jeudi de l'Ascension (Ascension Thursday)
d = self.jourplus(dp, +39)
F.append(d)
L.append(u"Jeudi de l'Ascension")
        # Dimanche de Pentecote (Whit Sunday)
d = self.jourplus(dp, +49)
if (sd==0) or (sd==2):
F.append(d)
L.append(u"Dimanche de Pentecote")
        # Lundi de Pentecote (Whit Monday)
d = self.jourplus(d, +1)
F.append(d)
L.append(u"Lundi de Pentecote")
        # Fete Nationale (Bastille Day)
d = [14,7,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Fete Nationale")
        # Assomption (Assumption)
d = [15,8,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Assomption")
        # Toussaint (All Saints' Day)
d = [1,11,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Toussaint")
        # Armistice 1918 (Armistice Day)
d = [11,11,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Armistice 1918")
        # Jour de Noel (Christmas Day)
d = [25,12,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Jour de Noel")
        # Saint Etienne (St. Stephen's Day, Alsace only)
d = [26,12,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Saint-Etienne (Alsace)")
return F, L
def joursferies(self,an, sd=0, sep='/'):
"""Liste des jours fériés France en date-chaine de l'année an (nb entier).
sd=0 (=defaut): tous les jours fériés.
sd=1: idem sans les sammedis-dimanches.
sd=2: tous + les 2 jours fériés supplémentaires d'Alsace-Moselle.
sd=3: idem sd=2 sans les samedis-dimanches"""
C = []
J = []
F, L = self.joursferiesliste(an, sd)
for i in xrange(0,len(F)):
            C.append(self.datechaine(F[i])) # convert the date-lists into date-strings
            J.append(self.joursem(F[i])) # add the weekday name
return C, J, L
def estferie(self,d,sd=0):
"""estferie(d,sd=0): => dit si une date d=[j,m,a] donnée est fériée France
si la date est fériée, renvoie son libellé
sinon, renvoie une chaine vide"""
j,m,a = d
F, L = self.joursferiesliste(a, sd)
for i in xrange(0, len(F)):
if j==F[i][0] and m==F[i][1] and a==F[i][2]:
return L[i]
return "False"
| guiguiabloc/api-domogeek | Holiday.py | Python | gpl-2.0 | 6,861 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Make osversion.osmajor_id non-NULLable
Revision ID: 5ab66e956c6b
Revises: 286ed23a5c1b
Create Date: 2017-12-20 15:54:38.825703
"""
# revision identifiers, used by Alembic.
revision = '5ab66e956c6b'
down_revision = '286ed23a5c1b'
from alembic import op
from sqlalchemy import Integer
def upgrade():
op.alter_column('osversion', 'osmajor_id', existing_type=Integer, nullable=False)
def downgrade():
op.alter_column('osversion', 'osmajor_id', existing_type=Integer, nullable=True)
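# Hedged usage note: with a configured Alembic environment this migration is
# applied with `alembic upgrade 5ab66e956c6b` and reverted with
# `alembic downgrade 286ed23a5c1b`.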
| beaker-project/beaker | Server/bkr/server/alembic/versions/5ab66e956c6b_osversion_osmajor_id_non_nullable.py | Python | gpl-2.0 | 743 |
"""
Functions performing URL trimming and cleaning
"""
## This file is available from https://github.com/adbar/courlan
## under GNU GPL v3 license
import logging
import re
from collections import OrderedDict
from urllib.parse import parse_qs, urlencode, urlparse, ParseResult
from .filters import validate_url
from .settings import ALLOWED_PARAMS, CONTROL_PARAMS,\
TARGET_LANG_DE, TARGET_LANG_EN
PROTOCOLS = re.compile(r'https?://')
SELECTION = re.compile(r'(https?://[^">&? ]+?)(?:https?://)|(?:https?://[^/]+?/[^/]+?[&?]u(rl)?=)(https?://[^"> ]+)')
MIDDLE_URL = re.compile(r'https?://.+?(https?://.+?)(?:https?://|$)')
NETLOC_RE = re.compile(r'(?<=\w):(?:80|443)')
PATH1 = re.compile(r'/+')
PATH2 = re.compile(r'^(?:/\.\.(?![^/]))+')
def clean_url(url, language=None):
'''Helper function: chained scrubbing and normalization'''
try:
        return normalize_url(scrub_url(url), language=language)
except (AttributeError, ValueError):
return None
def scrub_url(url):
'''Strip unnecessary parts and make sure only one URL is considered'''
# trim
# https://github.com/cocrawler/cocrawler/blob/main/cocrawler/urls.py
# remove leading and trailing white space and unescaped control chars
url = url.strip('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \r\n')
# clean the input string
    url = re.sub(r'[ \t]+', '', url)
# <![CDATA[http://www.urbanlife.de/item/260-bmw-i8-hybrid-revolution-unter-den-sportwagen.html]]>
if url.startswith('<![CDATA['): # re.match(r'<!\[CDATA\[', url):
url = url.replace('<![CDATA[', '') # url = re.sub(r'^<!\[CDATA\[', '', url)
url = url.replace(']]>', '') # url = re.sub(r'\]\]>$', '', url)
# markup rests
url = re.sub(r'</?a>', '', url)
# &
if '&' in url:
url = url.replace('&', '&')
#if '"' in link:
# link = link.split('"')[0]
# double/faulty URLs
protocols = PROTOCOLS.findall(url)
    if len(protocols) > 1 and 'web.archive.org' not in url:
logging.debug('double url: %s %s', len(protocols), url)
match = SELECTION.match(url)
if match and validate_url(match.group(1))[0] is True:
url = match.group(1)
logging.debug('taking url: %s', url)
else:
match = MIDDLE_URL.match(url)
if match and validate_url(match.group(1))[0] is True:
url = match.group(1)
logging.debug('taking url: %s', url)
# too long and garbled URLs e.g. due to quotes URLs
# https://github.com/cocrawler/cocrawler/blob/main/cocrawler/urls.py
if len(url) > 500: # arbitrary choice
match = re.match(r'(.*?)[<>"\'\r\n ]', url)
if match:
url = match.group(1)
if len(url) > 500:
logging.debug('invalid-looking link %s of length %d',
url[:50] + '...', len(url))
# trailing ampersand
url = url.strip('&')
# trailing slashes in URLs without path or in embedded URLs
if url.count('/') == 3 or url.count('://') > 1:
url = url.rstrip('/')
# lower
# url = url.lower()
return url
def clean_query(parsed_url, strict=False, language=None):
'''Strip unwanted query elements'''
if len(parsed_url.query) > 0:
qdict = parse_qs(parsed_url.query)
newqdict = OrderedDict()
for qelem in sorted(qdict.keys()):
teststr = qelem.lower()
# control param
if strict is True and \
teststr not in ALLOWED_PARAMS and teststr not in CONTROL_PARAMS:
continue
# control language
if language is not None and teststr in CONTROL_PARAMS:
found_lang = str(qdict[qelem][0])
if (language == 'de' and found_lang not in TARGET_LANG_DE) or \
(language == 'en' and found_lang not in TARGET_LANG_EN) or \
found_lang != language:
logging.debug('bad lang: %s %s %s', language, qelem, found_lang)
raise ValueError
# insert
newqdict[qelem] = qdict[qelem]
newstring = urlencode(newqdict, doseq=True)
parsed_url = parsed_url._replace(query=newstring)
return parsed_url
def normalize_url(parsed_url, strict=False, language=None):
'''Takes a URL string or a parsed URL and returns a (basically) normalized URL string'''
if not isinstance(parsed_url, ParseResult):
parsed_url = urlparse(parsed_url)
# port
if parsed_url.port is not None and parsed_url.port in (80, 443):
parsed_url = parsed_url._replace(netloc=NETLOC_RE.sub('', parsed_url.netloc))
# path: https://github.com/saintamh/alcazar/blob/master/alcazar/utils/urls.py
newpath = PATH1.sub('/', parsed_url.path)
# Leading /../'s in the path are removed
newpath = PATH2.sub('', newpath)
# fragment
if strict is True:
newfragment = ''
else:
newfragment = parsed_url.fragment
# lowercase + remove fragments
parsed_url = parsed_url._replace(
scheme=parsed_url.scheme.lower(),
netloc=parsed_url.netloc.lower(),
path=newpath,
fragment=newfragment
)
# strip unwanted query elements
parsed_url = clean_query(parsed_url, strict, language)
# rebuild
return parsed_url.geturl()
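# Illustrative sketch (example URL invented, result hand-traced rather than
# executed): the scheme and netloc are lowercased, the default port dropped,
# and duplicate slashes in the path collapsed.
#   normalize_url('HTTPS://www.Example.org:443/test//sub')
#   -> 'https://www.example.org/test/sub'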
| adbar/url-tools | courlan/clean.py | Python | gpl-2.0 | 5,520 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:sw=4:expandtab:
# Copyright 2008 Mark Mitchell
# License: see __license__ below.
__doc__ = """
Reads a GraphicsMagick source file and parses the specially formatted
comment blocks which precede each function and writes the information
obtained from the comment block into a reStructuredText file.
Usage:
format_c_api_docs.py [options] SRCFILE OUTFILE
SRCFILE is the path to a Graphicsmagick API .c file.
For example: ./magick/animate.c
OUTFILE is the path where the reStructuredText file is written.
Options:
-h --help -- Print this help message
-w --whatis-file -- The path to a file containing "whatis" information for
the source files. The format of this file is:
* one line per source file
* source filename (without directory paths) and whatis text
are separated by whitespace
* blank lines are ignored
* lines starting with '#' are ignored
-i --include-rst -- Comma-separated list of file paths to be objects of reST
..include:: directives inserted in OUTFILE.
The default is the single file 'api_hyperlinks.rst'
Example of whatis file format:
animate.c Interactively animate an image sequence
annotate.c Annotate an image with text
"""
__copyright__ = "2008, Mark Mitchell"
__license__ = """
Copyright 2008, Mark Mitchell
Permission is hereby granted, free of charge, to any person obtaining
a copy of this Software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
The Software is provided "as is", without warranty of any kind,
express or implied, including but not limited to the warranties of
merchantability, fitness for a particular purpose and noninfringement.
In no event shall the authors or copyright holders be liable for any
claim, damages or other liability, whether in an action of contract,
tort or otherwise, arising from, out of or in connection with Software
or the use or other dealings in the Software.
"""
import sys
import getopt
import os, os.path
import re
import textwrap
# Key words to replace with HTML links
keywords = {
'AffineMatrix' : '`AffineMatrix`_',
'BlobInfo' : '`BlobInfo`_',
'Cache' : '`Cache`_',
'ChannelType' : '`ChannelType`_',
'ChromaticityInfo' : '`ChromaticityInfo`_',
'ClassType' : '`ClassType`_',
'ClipPathUnits' : '`ClipPathUnits`_',
'ColorPacket' : '`ColorPacket`_',
'ColorspaceType' : '`ColorspaceType`_',
'ComplianceType' : '`ComplianceType`_',
'CompositeOperator' : '`CompositeOperator`_',
'CompressionType' : '`CompressionType`_',
'DecorationType' : '`DecorationType`_',
'DrawContext' : '`DrawContext`_',
'DrawInfo' : '`DrawInfo`_',
'ErrorHandler' : '`ErrorHandler`_',
'ExceptionInfo' : '`ExceptionInfo`_',
'ExceptionType' : '`ExceptionType`_',
'FillRule' : '`FillRule`_',
'FilterTypes' : '`FilterTypes`_',
'FrameInfo' : '`FrameInfo`_',
'GravityType' : '`GravityType`_',
'Image' : '`Image`_',
'ImageInfo' : '`ImageInfo`_',
'ImageType' : '`ImageType`_',
'InterlaceType' : '`InterlaceType`_',
'LayerType' : '`LayerType`_',
'MagickInfo' : '`MagickInfo`_',
'MonitorHandler' : '`MonitorHandler`_',
'MontageInfo' : '`MontageInfo`_',
'NoiseType' : '`NoiseType`_',
'PaintMethod' : '`PaintMethod`_',
'PixelPacket' : '`PixelPacket`_',
'PointInfo' : '`PointInfo`_',
'ProfileInfo' : '`ProfileInfo`_',
'QuantizeInfo' : '`QuantizeInfo`_',
'Quantum' : '`Quantum`_',
'QuantumType' : '`QuantumType`_',
'RectangleInfo' : '`RectangleInfo`_',
'RegistryType' : '`RegistryType`_',
'RenderingIntent' : '`RenderingIntent`_',
'ResolutionType' : '`ResolutionType`_',
'ResourceType' : '`ResourceType`_',
'SegmentInfo' : '`SegmentInfo`_',
'SignatureInfo' : '`SignatureInfo`_',
'StorageType' : '`StorageType`_',
'StreamHandler' : '`StreamHandler`_',
'StretchType' : '`StretchType`_',
'StyleType' : '`StyleType`_',
'TypeMetric' : '`TypeMetric`_',
'ViewInfo' : '`ViewInfo`_',
'VirtualPixelMethod' : '`VirtualPixelMethod`_',
'MagickXResourceInfo' : '`MagickXResourceInfo`_',
}
state_init = 0
state_found_fcncomment = 1
state_found_fcntitle = 2
state_found_fcndoc = 3
state_more_prototype = 4
state_found_prototype = 5
state_found_private = 6
state_parmdescr = 7
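# Parser state machine, in the order the states are normally visited:
#   init -> found_fcncomment -> found_fcntitle -> found_fcndoc
#        -> (more_prototype ->) found_prototype -> parmdescr
# Private APIs ('+' title lines) detour through found_private back to init.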
def warn(msg):
print >> sys.stderr, msg
def debugtrace(msg):
print >> sys.stdout, msg
def nodebugtrace(msg):
pass
dtrace = nodebugtrace
#dtrace = debugtrace
# extract and save function title. example:
# + X M a g i c k C o m m a n d %
# % X A n i m a t e B a c k g r o u n d I m a g e %
# Lines starting with '+' are private APIs which should not appear in
# in the output.
re_func_title = re.compile(r'^[+|%]\s+((\w )+)\s*%')
def proto_pretty(line):
"""fixes up inconsistent spaces in C function prototypes"""
line = re.sub(r',', ' , ', line)
line = re.sub(r'\(', ' ( ', line)
line = re.sub(r'\)', ' ) ', line)
line = re.sub(r'\*', ' * ', line)
line = re.sub(r'\s+', ' ', line)
line = re.sub(r'\(\s+\*', '(*', line)
line = re.sub(r' ,', ',', line)
line = re.sub(r' \(', '(', line)
line = re.sub(r'\) ', ')', line)
line = re.sub(r' \* ', ' *', line)
    line = re.sub(r'^\s*', '', line)
return line
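# Hand-traced example of the normalisation above (not executed, so treat it
# as illustrative):
#   proto_pretty('void f( Image * image,const char *text )')
#   -> 'void f( Image *image, const char *text )'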
class Paragraph:
"Paragraphs consist of one or more lines of text."
def __init__(self):
self.lines = []
def __str__(self):
#return '\n'.join(self.lines)
return '\n'.join([line.strip() for line in self.lines])
class Prototype:
def __init__(self):
self.lines = []
def __str__(self):
proto = ' '.join(self.lines)
proto = proto_pretty(proto)
# escape all the '*' chars
proto = re.sub(r'\*', '\\*', proto)
# escape all the '_' chars
proto = re.sub(r'_', '\\_', proto)
# now replace keywords with hyperlinks
for k,v in keywords.iteritems():
proto = re.sub(r'^%s ' % k, '%s ' % v, proto)
proto = re.sub(r' %s ' % k, ' %s ' % v, proto)
# make some attempt to wrap the text nicely
openparen_index = proto.find('(')
if openparen_index > 0:
fcn = proto[:openparen_index+1]
indent_len = len(fcn) + 3
toomuch = (2 * fcn.count('\\')) + (3 * fcn.count('`_'))
if toomuch > 0: # account for the space following the opening paren
toomuch -= 1
indent_len -= toomuch
params = proto[openparen_index+1:].split(',')
params = [p.strip() for p in params]
max_param_len = 0
for x in params:
if len(x) > max_param_len:
max_param_len = len(x)
wrap_width = max(96, max_param_len + indent_len)
proto_lines = []
line = fcn + ' '
while params:
x = params.pop(0)
if len(line) + len(x) > wrap_width:
proto_lines.append(line)
line = ' ' * indent_len
line += x
if params:
line += ', '
proto_lines.append(line)
proto = '\n '.join(proto_lines)
return ".. parsed-literal::\n\n %s" % proto
class ListItem:
"""List items are used for parameter descriptions, and consist of the
parameter name and one or more lines of description text."""
def __init__(self, name):
self.name = name
self.lines = []
def __str__(self):
s = []
s.append('%s:' % self.name)
for line in self.lines:
s.append(' %s' % line.strip())
return '\n'.join(s)
class Function:
def __init__(self, name):
self.name = name
self.prototype = None
# Description is a list, the items of which are either Paragraph or
# ListItem or Prototype instances.
self.description = []
def __str__(self):
lines = []
lines.append('')
lines.append('')
lines.append(self.name)
lines.append('=' * len(self.name))
lines.append('')
lines.append('Synopsis')
lines.append('--------')
lines.append(str(self.prototype))
lines.append('')
lines.append('Description')
lines.append('-----------')
for item in self.description:
lines.append(str(item))
lines.append('')
return '\n'.join(lines)
def parse(srcfilepath):
list_item = None
proto = None
para = None
func = None
functions = []
state = state_init
linecnt = 0
ftitle = None
f = file(srcfilepath, 'r')
for line in f:
linecnt += 1
if not (line.startswith('%') or line.startswith('+') or re.search(r'\*/', line)):
continue
line = line.strip()
if state == state_init:
# Find first line of function title/comment block
if line.startswith('%%%%%%%%'):
dtrace('Line %d: start of function comment block ############' % linecnt)
state = state_found_fcncomment
continue
elif state == state_found_fcncomment:
# Search for the function name, with spaces between each letter
if line.startswith('%%%%%%%%'):
warn('Line %d: WARNING: no function name found, found start of function comment block instead.' % linecnt)
state = state_init
continue
m = re_func_title.search(line)
if m:
if line.startswith('+'):
dtrace('Line %d: private API' % linecnt)
# private API, skip it
state = state_found_private
else:
# public API, process it
ftitle = re.sub(' ', '', m.group(1))
dtrace('Line %d: public API %s' % (linecnt, ftitle))
func = Function(ftitle)
functions.append(func)
state = state_found_fcntitle
continue
elif state == state_found_private:
# skip to end of function title block
if line.startswith('%%%%%%%%'):
dtrace('Line %d: end of private function comment block' % linecnt)
state = state_init
continue
elif state == state_found_fcntitle:
# skip to first line following end of function title block.
# lines of the function title block start with and end with '%'.
if not re.match(r'%.+%', line):
dtrace('Line %d: end of public function comment block %s' % (linecnt, ftitle))
state = state_found_fcndoc
# fall through
elif state == state_found_fcndoc:
# extract function prototype
if line.startswith('% '):
line = re.sub(r'^%\s{0,2}', '', line, 1)
# if empty args (), it's not the prototype, but the one-line summary
if re.search(r'%s\(\)' % ftitle, line):
if para is None:
dtrace('Line %d: found_fcndoc start paragraph ()' % linecnt)
para = Paragraph()
func.description.append(para)
para.lines.append(line)
                # is this the only line of the prototype?
elif re.search(r'%s\([^)]+\)$' % ftitle, line):
if para:
dtrace('Line %d: found_fcndoc end paragraph by proto ()' % linecnt)
para = None
dtrace('Line %d: one-line prototype' % linecnt)
proto = Prototype()
proto.lines.append(line)
func.description.append(proto)
func.prototype = proto
proto = None
state = state_found_prototype
                # is this the first line of a multi-line prototype?
elif re.search(r'%s\([^)]*$' % ftitle, line):
if para:
dtrace('Line %d: found_fcndoc end paragraph by proto (' % linecnt)
para = None
dtrace('Line %d: first line of multi-line prototype' % linecnt)
proto = Prototype()
proto.lines.append(line)
func.description.append(proto)
func.prototype = proto
state = state_more_prototype
else:
if para is None:
dtrace('Line %d: found_fcndoc start paragraph' % linecnt)
para = Paragraph()
func.description.append(para)
para.lines.append(line)
else:
if line.startswith('%%%%%%%%'):
warn('Line %d: WARNING: no prototype found for %s, found start of function comment block instead.' % (linecnt, ftitle))
state = state_found_fcncomment
continue
if line.strip() == '%':
# empty line terminates paragraph
if para:
dtrace('Line %d: found_fcndoc end paragraph by blank line' % linecnt)
para = None
if proto:
dtrace('Line %d: found_fcndoc end proto by blank line' % linecnt)
proto = None
continue
elif state == state_more_prototype:
if re.match(r'%.+%', line):
# really this should raise a warning of "incomplete prototype"
continue
line = re.sub(r'^%\s{0,2}', '', line, 1)
if re.search(r'^\s*$', line):
dtrace('Line %d: end of more prototype' % linecnt)
state = state_found_prototype
else:
func.prototype.lines.append(line)
continue
elif state == state_found_prototype:
dtrace('Line %d: found prototype of function %s' % (linecnt, ftitle))
func.prototype.lines.append(';')
#print 'Function %s' % func.name
#print 'Synopsis'
#print ' '.join(func.prototype)
#print
# Process parm description.
# Description consists of two kinds of texts: paragraphs, and lists.
# Lists consist of list items. List items are one or more lines.
# List items are separated by blank lines. The first line of a list
# item starts with 'o '.
# Paragraphs consist of one or more lines which don't start with 'o '.
# Paragraphs are separated from each other and from adjacent list items
# by blank lines.
# In theory, a line which starts with 'o ' which is not preceded by a
# blank line is illegal syntax.
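            # For example (illustrative), a parameter description block in the
            # C source looks roughly like:
            #
            #   %  A description of the format follows.
            #   %
            #   %    o image: the image.
            #   %
            #   %    o exception: return any errors or warnings in this structure.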
para = None
state = state_parmdescr
# fall through
elif state == state_parmdescr:
if line.endswith('*/'):
# end of function comment block
dtrace('Line %d: end of parmdescr ************' % linecnt)
if list_item:
func.description.append(list_item)
list_item = None
if para:
func.description.append(para)
dtrace('Line %d: parmdescr end paragraph ()' % linecnt)
para = None
func = None
state = state_init
continue
line = re.sub(r'^%\s{0,2}', '', line, 1)
if line:
# look for list item, which starts with 'o'
m = re.search(r'^\s+o\s+([^:]+:|o|[0-9]\.)\s(.*)', line)
if m:
# first line of list item
if list_item: # if blank lines separate list items, this should never evaluate true
dtrace('Line %d: surprising end of list item' % linecnt)
func.description.append(list_item)
list_item = None
dtrace('Line %d: start list item' % linecnt)
list_item = ListItem(m.group(1).strip().rstrip(':'))
list_item.lines.append(m.group(2))
else:
# either a line of paragraph or subsequent line of list item
if list_item:
# subsequent line of list item
list_item.lines.append(line)
else:
# line of paragraph
if list_item: # if blank lines after list items, this should never evaluate true
dtrace('Line %d: end of list item, end of list' % linecnt)
func.description.append(list_item)
list_item = None
if para is None:
dtrace('Line %d: parmdescr start paragraph' % linecnt)
para = Paragraph()
para.lines.append(line)
else:
# empty line, two cases:
# 1. terminate multi-line list item
# 2. terminate multi-line paragraph
if list_item:
dtrace('Line %d: parmdescr end of list item by blank line' % linecnt)
func.description.append(list_item)
list_item = None
elif para:
# terminate any paragraph
dtrace('Line %d: parmdescr end of paragraph by blank line' % linecnt)
func.description.append(para)
para = None
continue
f.close()
return functions
def process_srcfile(srcfilepath, basename, whatis, outfile, include_rst):
"""outfile is a file object open for writing"""
functions = parse(srcfilepath)
print >> outfile, "=" * len(basename)
print >> outfile, basename
print >> outfile, "=" * len(basename)
if whatis:
print >> outfile, "-" * len(whatis)
print >> outfile, whatis
print >> outfile, "-" * len(whatis)
print >> outfile
print >> outfile, '.. contents:: :depth: 1'
print >> outfile
for x in include_rst:
print >> outfile, '.. include:: %s' % x
print >> outfile
# print all functions found in this source file
for func in functions:
print >> outfile, func
#para = para.strip() # trim leading and trailing whitespace
#para = re.sub(r'\s+', ' ', para) # canonicalize inner whitespace
#para = re.sub(r"""([a-zA-Z0-9][.!?][)'"]*) """, '\1 ', para) # Fix sentence ends
def find_val(key, keyval_file):
val = None
f = file(keyval_file, 'r')
cnt = 0
for line in f:
cnt += 1
if not line.strip():
continue
if line.startswith('#'):
continue
try:
k, v = line.split(None, 1)
except ValueError:
print >> sys.stderr, "Line %u of %s: improper format" % (cnt, keyval_file)
return None
if k == key:
val = v
break
f.close()
    if val is None:
        return None
    return val.strip()
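# The whatis file is a plain "key value" listing keyed by source file name,
# e.g. (illustrative):
#
#   animate.c    Interactively animate an image sequence
#   annotate.c   Annotate an image with text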
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
# parse command line options
try:
opts, posn_args = getopt.getopt(argv, 'hw:i:',
['help',
'whatis-file=',
'include-rst=',
])
except getopt.GetoptError, msg:
print msg
print __doc__
return 1
# process options
whatis_file = None
include_rst = ['api_hyperlinks.rst']
for opt, val in opts:
if opt in ("-h", "--help"):
print __doc__
return 0
if opt in ("-w", "--whatis-file"):
whatis_file = val
if opt in ("-i", "--include-rst"):
include_rst = [x for x in val.split(',') if x]
if len(posn_args) != 2:
print >> sys.stderr, 'Missing arguments'
print >> sys.stderr, __doc__
return 1
srcfile_path = posn_args[0]
outfile_path = posn_args[1]
srcfile = os.path.basename(srcfile_path)
base, ext = os.path.splitext(srcfile)
if whatis_file:
whatis = find_val(srcfile, whatis_file)
else:
whatis = None
fout = file(outfile_path, 'w')
process_srcfile(srcfile_path, base, whatis, fout, include_rst)
fout.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| kazuyaujihara/osra_vs | GraphicsMagick/scripts/format_c_api_doc.py | Python | gpl-2.0 | 21,967 |
import sys
def bye():
sys.exit(40) # Crucial error: abort now!
try:
bye()
except:
print('got it') # Oops--we ignored the exit
print('continuing...')
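# Since Python 2.5, SystemExit derives from BaseException rather than
# Exception, so "except Exception:" would let the exit through; the bare
# except above does intercept it, and the script prints "got it" then
# "continuing..." instead of terminating with status 40.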
| simontakite/sysadmin | pythonscripts/learningPython/exiter2.py | Python | gpl-2.0 | 208 |
#!/usr/bin/env python
################################################################################
#
# Project Euler - Problem 6
#
# The sum of the squares of the first ten natural numbers is,
#
# 1^2 + 2^2 + ... + 10^2 = 385
# The square of the sum of the first ten natural numbers is,
#
# (1 + 2 + ... + 10)^2 = 55^2 = 3025
# Hence the difference between the sum of the squares of the first ten natural
# numbers and the square of the sum is 3025 - 385 = 2640
#
# Find the difference between the sum of the squares of the first one hundred
# natural numbers and the square of the sum.
#
# Joaquin Derrac - [email protected]
#
################################################################################
if __name__ == "__main__":
sum_one_hundred = sum([x for x in range(1, 101)])
sum_one_hundred_squared = sum_one_hundred * sum_one_hundred
sum_squared = sum([x ** 2 for x in range(1, 101)])
solution = sum_one_hundred_squared - sum_squared
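    # Sanity check using the closed forms n(n+1)/2 and n(n+1)(2n+1)/6
    # (added illustration, not part of the original solution):
    n = 100
    assert sum_one_hundred == n * (n + 1) // 2
    assert sum_squared == n * (n + 1) * (2 * n + 1) // 6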
print(solution)
| carrdelling/project_euler | problem6.py | Python | gpl-2.0 | 996 |
#!/usr/bin/python
# Ubuntu Tweak - PyGTK based desktop configure tool
#
# Copyright (C) 2007-2008 TualatriX <[email protected]>
#
# Ubuntu Tweak is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Ubuntu Tweak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ubuntu Tweak; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import gtk
import gconf
from common.settings import *
from common.factory import GconfKeys
class Config:
    #FIXME: This class should be a generic config getter and setter
__client = gconf.Client()
def set_value(self, key, value):
if not key.startswith("/"):
key = GconfKeys.keys[key]
if type(value) == int:
self.__client.set_int(key, value)
elif type(value) == float:
self.__client.set_float(key, value)
elif type(value) == str:
self.__client.set_string(key, value)
elif type(value) == bool:
self.__client.set_bool(key, value)
def get_value(self, key, default = None):
if not key.startswith("/"):
key = GconfKeys.keys[key]
try:
value = self.__client.get_value(key)
except:
if default is not None:
self.set_value(key, default)
return default
else:
return None
else:
return value
def set_pair(self, key, type1, type2, value1, value2):
if not key.startswith("/"):
key = GconfKeys.keys[key]
self.__client.set_pair(key, type1, type2, value1, value2)
def get_pair(self, key):
if not key.startswith("/"):
key = GconfKeys.keys[key]
value = self.__client.get(key)
if value:
return value.to_string().strip('()').split(',')
else:
return (0, 0)
def get_string(self, key):
if not key.startswith("/"):
key = GconfKeys.keys[key]
string = self.get_value(key)
if string:
return string
else:
return '0'
def get_client(self):
return self.__client
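# Minimal usage sketch (hypothetical key; assumes a running GConf daemon and
# that non-"/" keys appear in GconfKeys.keys):
#
#   config = Config()
#   config.set_value('/apps/example/width', 740)
#   width = config.get_value('/apps/example/width', default=740)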
class TweakSettings:
'''Manage the settings of ubuntu tweak'''
config = Config()
url = 'tweak_url'
version = 'tweak_version'
toolbar_size = 'toolbar_size'
toolbar_color = 'toolbar_color'
toolbar_font_color = 'toolbar_font_color'
window_size= 'window_size'
window_height = 'window_height'
window_width = 'window_width'
show_donate_notify = 'show_donate_notify'
default_launch = 'default_launch'
check_update = 'check_update'
power_user = 'power_user'
need_save = True
@classmethod
def get_power_user(cls):
return cls.config.get_value(cls.power_user, default=False)
@classmethod
    def set_power_user(cls, value):
        cls.config.set_value(cls.power_user, value)
@classmethod
def get_check_update(cls):
return cls.config.get_value(cls.check_update, default = True)
@classmethod
    def set_check_update(cls, value):
        cls.config.set_value(cls.check_update, value)
@classmethod
def get_toolbar_color(cls, instance = False):
color = cls.config.get_value(cls.toolbar_color)
        if color is None:
if instance:
return gtk.gdk.Color(32767, 32767, 32767)
return (0.5, 0.5, 0.5)
else:
try:
color = gtk.gdk.color_parse(color)
if instance:
return color
red, green, blue = color.red/65535.0, color.green/65535.0, color.blue/65535.0
return (red, green, blue)
except:
return (0.5, 0.5, 0.5)
@classmethod
def set_toolbar_color(cls, color):
cls.config.set_value(cls.toolbar_color, color)
@classmethod
def get_toolbar_font_color(cls, instance = False):
color = cls.config.get_value(cls.toolbar_font_color)
        if color is None:
if instance:
return gtk.gdk.Color(65535, 65535, 65535)
return (1, 1, 1)
else:
try:
color = gtk.gdk.color_parse(color)
if instance:
return color
red, green, blue = color.red/65535.0, color.green/65535.0, color.blue/65535.0
return (red, green, blue)
except:
return (1, 1, 1)
@classmethod
def set_toolbar_font_color(cls, color):
cls.config.set_value(cls.toolbar_font_color, color)
@classmethod
def set_default_launch(cls, id):
cls.config.set_value(cls.default_launch, id)
@classmethod
def get_default_launch(cls):
return cls.config.get_value(cls.default_launch)
@classmethod
    def set_show_donate_notify(cls, value):
        return cls.config.set_value(cls.show_donate_notify, value)
@classmethod
def get_show_donate_notify(cls):
value = cls.config.get_value(cls.show_donate_notify, default = True)
return value
@classmethod
def set_url(cls, url):
return cls.config.set_value(cls.url, url)
@classmethod
def get_url(cls):
return cls.config.get_string(cls.url)
@classmethod
def set_version(cls, version):
return cls.config.set_value(cls.version, version)
@classmethod
def get_version(cls):
return cls.config.get_string(cls.version)
@classmethod
def set_paned_size(cls, size):
cls.config.set_value(cls.toolbar_size, size)
@classmethod
def get_paned_size(cls):
position = cls.config.get_value(cls.toolbar_size)
if position:
return position
else:
return 150
@classmethod
def set_window_size(cls, width, height):
cls.config.set_value(cls.window_width, width)
cls.config.set_value(cls.window_height, height)
@classmethod
def get_window_size(cls):
width = cls.config.get_value(cls.window_width)
height = cls.config.get_value(cls.window_height)
if width and height:
height, width = int(height), int(width)
return (width, height)
else:
return (740, 480)
@classmethod
def get_icon_theme(cls):
return cls.config.get_value('/desktop/gnome/interface/icon_theme')
if __name__ == '__main__':
print Config().get_value('show_donate_notify')
| tualatrix/ubuntu-tweak-old | src/common/config.py | Python | gpl-2.0 | 6,864 |
# Copyright (C) 2013-2021 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import xorn.storage
rev = xorn.storage.Revision()
ob0 = rev.add_object(xorn.storage.Line())
ob1, = rev.get_objects()
ob2 = rev.add_object(xorn.storage.Line())
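# ob0 and ob1 are distinct Python wrappers for the same stored object, so they
# must compare equal and hash identically; ob2 refers to a different object.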
assert ob0 is not ob1
assert ob0 == ob1
assert hash(ob0) == hash(ob1)
assert ob0 is not ob2
assert ob0 != ob2
assert hash(ob0) != hash(ob2)
assert ob1 is not ob2
assert ob1 != ob2
assert hash(ob1) != hash(ob2)
| rlutz/xorn | tests/cpython/storage/ob_equality.py | Python | gpl-2.0 | 1,119 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'c:/steganography/main.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1024, 576)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.group_image = QtGui.QGroupBox(self.centralwidget)
self.group_image.setGeometry(QtCore.QRect(10, 10, 1001, 291))
self.group_image.setObjectName(_fromUtf8("group_image"))
self.lbl_image = QtGui.QLabel(self.group_image)
self.lbl_image.setGeometry(QtCore.QRect(180, 20, 451, 261))
self.lbl_image.setAutoFillBackground(False)
self.lbl_image.setFrameShape(QtGui.QFrame.Panel)
self.lbl_image.setFrameShadow(QtGui.QFrame.Raised)
self.lbl_image.setText(_fromUtf8(""))
self.lbl_image.setScaledContents(True)
self.lbl_image.setObjectName(_fromUtf8("lbl_image"))
self.lbl_filename = QtGui.QLabel(self.group_image)
self.lbl_filename.setGeometry(QtCore.QRect(10, 20, 161, 21))
self.lbl_filename.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_filename.setObjectName(_fromUtf8("lbl_filename"))
self.btn_load = QtGui.QPushButton(self.group_image)
self.btn_load.setGeometry(QtCore.QRect(10, 50, 161, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_load.setFont(font)
self.btn_load.setObjectName(_fromUtf8("btn_load"))
self.lbl_spacing = QtGui.QLabel(self.group_image)
self.lbl_spacing.setGeometry(QtCore.QRect(20, 150, 71, 21))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.lbl_spacing.setFont(font)
self.lbl_spacing.setObjectName(_fromUtf8("lbl_spacing"))
self.box_spacing = QtGui.QSpinBox(self.group_image)
self.box_spacing.setGeometry(QtCore.QRect(90, 150, 71, 22))
self.box_spacing.setMinimum(1)
self.box_spacing.setMaximum(100)
self.box_spacing.setProperty("value", 32)
self.box_spacing.setObjectName(_fromUtf8("box_spacing"))
self.radio_decode = QtGui.QRadioButton(self.group_image)
self.radio_decode.setGeometry(QtCore.QRect(20, 120, 151, 17))
self.radio_decode.setChecked(False)
self.radio_decode.setObjectName(_fromUtf8("radio_decode"))
self.radio_encode = QtGui.QRadioButton(self.group_image)
self.radio_encode.setGeometry(QtCore.QRect(20, 90, 141, 17))
self.radio_encode.setChecked(True)
self.radio_encode.setObjectName(_fromUtf8("radio_encode"))
self.verticalLayoutWidget = QtGui.QWidget(self.group_image)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(640, 20, 160, 131))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.layout_labels = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.layout_labels.setSpacing(12)
self.layout_labels.setObjectName(_fromUtf8("layout_labels"))
self.lbl_height = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.lbl_height.setFont(font)
self.lbl_height.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_height.setObjectName(_fromUtf8("lbl_height"))
self.layout_labels.addWidget(self.lbl_height)
self.lbl_width = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.lbl_width.setFont(font)
self.lbl_width.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_width.setObjectName(_fromUtf8("lbl_width"))
self.layout_labels.addWidget(self.lbl_width)
self.lbl_format = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.lbl_format.setFont(font)
self.lbl_format.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_format.setObjectName(_fromUtf8("lbl_format"))
self.layout_labels.addWidget(self.lbl_format)
self.lbl_size = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.lbl_size.setFont(font)
self.lbl_size.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_size.setObjectName(_fromUtf8("lbl_size"))
self.layout_labels.addWidget(self.lbl_size)
self.lbl_max_length = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.lbl_max_length.setFont(font)
self.lbl_max_length.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_max_length.setObjectName(_fromUtf8("lbl_max_length"))
self.layout_labels.addWidget(self.lbl_max_length)
self.verticalLayoutWidget_2 = QtGui.QWidget(self.group_image)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(810, 20, 181, 130))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.layout_values = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.layout_values.setSpacing(12)
self.layout_values.setObjectName(_fromUtf8("layout_values"))
self.lbl_height_value = QtGui.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(9)
self.lbl_height_value.setFont(font)
self.lbl_height_value.setObjectName(_fromUtf8("lbl_height_value"))
self.layout_values.addWidget(self.lbl_height_value)
self.lbl_width_value = QtGui.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(9)
self.lbl_width_value.setFont(font)
self.lbl_width_value.setObjectName(_fromUtf8("lbl_width_value"))
self.layout_values.addWidget(self.lbl_width_value)
self.lbl_format_value = QtGui.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(9)
self.lbl_format_value.setFont(font)
self.lbl_format_value.setObjectName(_fromUtf8("lbl_format_value"))
self.layout_values.addWidget(self.lbl_format_value)
self.lbl_size_value = QtGui.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(9)
self.lbl_size_value.setFont(font)
self.lbl_size_value.setObjectName(_fromUtf8("lbl_size_value"))
self.layout_values.addWidget(self.lbl_size_value)
self.lbl_max_length_value = QtGui.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(9)
self.lbl_max_length_value.setFont(font)
self.lbl_max_length_value.setObjectName(_fromUtf8("lbl_max_length_value"))
self.layout_values.addWidget(self.lbl_max_length_value)
self.lbl_spacing_info = QtGui.QLabel(self.group_image)
self.lbl_spacing_info.setGeometry(QtCore.QRect(20, 180, 141, 71))
self.lbl_spacing_info.setWordWrap(True)
self.lbl_spacing_info.setObjectName(_fromUtf8("lbl_spacing_info"))
self.lbl_status = QtGui.QLabel(self.group_image)
self.lbl_status.setGeometry(QtCore.QRect(640, 160, 351, 121))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Consolas"))
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.lbl_status.setFont(font)
self.lbl_status.setFrameShape(QtGui.QFrame.Panel)
self.lbl_status.setFrameShadow(QtGui.QFrame.Sunken)
self.lbl_status.setLineWidth(2)
self.lbl_status.setScaledContents(False)
self.lbl_status.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_status.setWordWrap(True)
self.lbl_status.setIndent(-1)
self.lbl_status.setObjectName(_fromUtf8("lbl_status"))
self.group_message = QtGui.QGroupBox(self.centralwidget)
self.group_message.setGeometry(QtCore.QRect(10, 310, 1001, 261))
self.group_message.setObjectName(_fromUtf8("group_message"))
self.text_message = QtGui.QTextEdit(self.group_message)
self.text_message.setGeometry(QtCore.QRect(180, 20, 811, 191))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Consolas"))
font.setPointSize(9)
self.text_message.setFont(font)
self.text_message.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.text_message.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.text_message.setObjectName(_fromUtf8("text_message"))
self.btn_load_text_file = QtGui.QPushButton(self.group_message)
self.btn_load_text_file.setGeometry(QtCore.QRect(10, 22, 161, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_load_text_file.setFont(font)
self.btn_load_text_file.setObjectName(_fromUtf8("btn_load_text_file"))
self.lbl_num_characters = QtGui.QLabel(self.group_message)
self.lbl_num_characters.setGeometry(QtCore.QRect(180, 220, 811, 20))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Consolas"))
font.setPointSize(10)
self.lbl_num_characters.setFont(font)
self.lbl_num_characters.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_num_characters.setObjectName(_fromUtf8("lbl_num_characters"))
self.lbl_message_info = QtGui.QLabel(self.group_message)
self.lbl_message_info.setGeometry(QtCore.QRect(10, 60, 151, 91))
self.lbl_message_info.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.lbl_message_info.setWordWrap(True)
self.lbl_message_info.setObjectName(_fromUtf8("lbl_message_info"))
self.lbl_allowed_symbols = QtGui.QLabel(self.group_message)
self.lbl_allowed_symbols.setGeometry(QtCore.QRect(20, 140, 151, 101))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Consolas"))
font.setPointSize(12)
self.lbl_allowed_symbols.setFont(font)
self.lbl_allowed_symbols.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_allowed_symbols.setWordWrap(True)
self.lbl_allowed_symbols.setObjectName(_fromUtf8("lbl_allowed_symbols"))
self.btn_process = QtGui.QPushButton(self.group_message)
self.btn_process.setGeometry(QtCore.QRect(830, 220, 161, 31))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.btn_process.setFont(font)
self.btn_process.setAcceptDrops(False)
self.btn_process.setAutoFillBackground(False)
self.btn_process.setAutoDefault(True)
self.btn_process.setDefault(True)
self.btn_process.setObjectName(_fromUtf8("btn_process"))
self.lbl_spacing_info_2 = QtGui.QLabel(self.centralwidget)
self.lbl_spacing_info_2.setGeometry(QtCore.QRect(890, 0, 131, 20))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(109, 109, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(109, 109, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
self.lbl_spacing_info_2.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(7)
self.lbl_spacing_info_2.setFont(font)
self.lbl_spacing_info_2.setWordWrap(True)
self.lbl_spacing_info_2.setObjectName(_fromUtf8("lbl_spacing_info_2"))
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Nick\'s Image Steganography", None))
self.group_image.setTitle(_translate("MainWindow", "Image Settings", None))
self.lbl_filename.setText(_translate("MainWindow", "<no image selected>", None))
self.btn_load.setText(_translate("MainWindow", "Load Image", None))
self.lbl_spacing.setText(_translate("MainWindow", "Spacing:", None))
self.box_spacing.setToolTip(_translate("MainWindow", "Default: 32", None))
self.radio_decode.setText(_translate("MainWindow", "Decode Image", None))
self.radio_encode.setText(_translate("MainWindow", "Encode Message", None))
self.lbl_height.setText(_translate("MainWindow", "Height:", None))
self.lbl_width.setText(_translate("MainWindow", "Width:", None))
self.lbl_format.setText(_translate("MainWindow", "Format:", None))
self.lbl_size.setText(_translate("MainWindow", "Size:", None))
self.lbl_max_length.setText(_translate("MainWindow", "Max Message Length:", None))
self.lbl_height_value.setText(_translate("MainWindow", "0 px", None))
self.lbl_width_value.setText(_translate("MainWindow", "0 px", None))
self.lbl_format_value.setText(_translate("MainWindow", "NONE", None))
self.lbl_size_value.setText(_translate("MainWindow", "0 bytes", None))
self.lbl_max_length_value.setText(_translate("MainWindow", "0 characters", None))
self.lbl_spacing_info.setText(_translate("MainWindow", "This value selects how many pixels are skipped for every encoded pixel. Lower values will affect the image more.", None))
self.lbl_status.setText(_translate("MainWindow", "This mode allows you to select an image file and enter a message below. When you are finished, click Process.", None))
self.group_message.setTitle(_translate("MainWindow", "Message", None))
self.btn_load_text_file.setText(_translate("MainWindow", "Load Text File", None))
self.lbl_num_characters.setText(_translate("MainWindow", "0 / 0 characters", None))
self.lbl_message_info.setText(_translate("MainWindow", "Enter the message you would like to encode into the box. Whitespace characters will be converted into spaces. English letters, numbers, and spaces are supported, plus the following characters: ", None))
self.lbl_allowed_symbols.setText(_translate("MainWindow", "!\"#$%&\'()\\ *+-,/:;<=> ?@[]^_`{|}~", None))
self.btn_process.setText(_translate("MainWindow", "Process", None))
self.lbl_spacing_info_2.setText(_translate("MainWindow", "Copyright © 2015 Nick Klose", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| nklose/Steganography | gui_main.py | Python | gpl-2.0 | 15,883 |
__author__ = 'george'
from baseclass import Plugin
import time
from apscheduler.scheduler import Scheduler
class AreWeDone(Plugin):
def __init__(self, skype):
super(AreWeDone, self).__init__(skype)
self.command = "arewedoneyet"
self.sched = Scheduler()
self.sched.start()
self.sched.add_cron_job(self.set_topic, hour="*", minute=2, day_of_week="monun")
def message_received(self, args, status, msg):
cur_time = time.localtime()
if cur_time.tm_mday == 31 or cur_time.tm_mday == 1:
time_left = 1 - cur_time.tm_mday % 31
hours_left = 23 - cur_time.tm_hour
mins_left = 59 - cur_time.tm_min
msg.Chat.SendMessage("%d days, %d hours and %d mins left until we are done" % (time_left, hours_left, mins_left))
print "%d days, %d hours and %d mins left until we are done" % (time_left, hours_left, mins_left)
else:
msg.Chat.SendMessage("You are now done. Please visit http://www.nav.no for more information")
def set_topic(self):
channel = "#stigrk85/$jvlomax;b43a0c90a2592b9b"
chat = self.skype.Chat(channel)
cur_time = time.localtime()
days_left = 1 - cur_time.tm_mday % 31
time_left = 24 - cur_time.tm_hour + days_left * 24
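        # Worked example: at 13:00 on the 31st, days_left = 1 - (31 % 31) = 1,
        # so time_left = 24 - 13 + 24 = 35 hours (until midnight ending the
        # 1st); at 13:00 on the 1st, days_left = 0 and time_left = 11 hours.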
if cur_time.tm_hour >= 21 or cur_time.tm_hour < 6:
tod = "night"
else:
tod= "day"
if days_left > 0:
left = "second"
else:
left = "final"
if cur_time.tm_mday == 1:
chat.SendMessage("/topic {} of the {} day - {} hours remain".format(tod, left, time_left))
else:
chat.SendMessage("Congratulations, You have survived. Please visit http://www.nav.no for more information".format(tod, left, time_left))
| jvlomax/Beaker-bot | plugins/arewedone.py | Python | gpl-2.0 | 1,837 |
ucodes = {
"U0001" : "High Speed CAN Communication Bus" ,
"U0002" : "High Speed CAN Communication Bus (Performance)" ,
"U0003" : "High Speed CAN Communication Bus (Open)" ,
"U0004" : "High Speed CAN Communication Bus (Low)" ,
"U0005" : "High Speed CAN Communication Bus (High)" ,
"U0006" : "High Speed CAN Communication Bus (Open)" ,
"U0007" : "High Speed CAN Communication Bus (Low)" ,
"U0008" : "High Speed CAN Communication Bus (High)" ,
"U0009" : "High Speed CAN Communication Bus (shorted to Bus)" ,
"U0010" : "Medium Speed CAN Communication Bus" ,
"U0011" : "Medium Speed CAN Communication Bus (Performance)" ,
"U0012" : "Medium Speed CAN Communication Bus (Open)" ,
"U0013" : "Medium Speed CAN Communication Bus (Low)" ,
"U0014" : "Medium Speed CAN Communication Bus (High)" ,
"U0015" : "Medium Speed CAN Communication Bus (Open)" ,
"U0016" : "Medium Speed CAN Communication Bus (Low)" ,
"U0017" : "Medium Speed CAN Communication Bus (High)" ,
"U0018" : "Medium Speed CAN Communication Bus (shorted to Bus)" ,
"U0019" : "Low Speed CAN Communication Bus" ,
"U0020" : "Low Speed CAN Communication Bus (Performance)" ,
"U0021" : "Low Speed CAN Communication Bus (Open)" ,
"U0022" : "Low Speed CAN Communication Bus (Low)" ,
"U0023" : "Low Speed CAN Communication Bus (High)" ,
"U0024" : "Low Speed CAN Communication Bus (Open)" ,
"U0025" : "Low Speed CAN Communication Bus (Low)" ,
"U0026" : "Low Speed CAN Communication Bus (High)" ,
"U0027" : "Low Speed CAN Communication Bus (shorted to Bus)" ,
"U0028" : "Vehicle Communication Bus A" ,
"U0029" : "Vehicle Communication Bus A (Performance)" ,
"U0030" : "Vehicle Communication Bus A (Open)" ,
"U0031" : "Vehicle Communication Bus A (Low)" ,
"U0032" : "Vehicle Communication Bus A (High)" ,
"U0033" : "Vehicle Communication Bus A (Open)" ,
"U0034" : "Vehicle Communication Bus A (Low)" ,
"U0035" : "Vehicle Communication Bus A (High)" ,
"U0036" : "Vehicle Communication Bus A (shorted to Bus A)" ,
"U0037" : "Vehicle Communication Bus B" ,
"U0038" : "Vehicle Communication Bus B (Performance)" ,
"U0039" : "Vehicle Communication Bus B (Open)" ,
"U0040" : "Vehicle Communication Bus B (Low)" ,
"U0041" : "Vehicle Communication Bus B (High)" ,
"U0042" : "Vehicle Communication Bus B (Open)" ,
"U0043" : "Vehicle Communication Bus B (Low)" ,
"U0044" : "Vehicle Communication Bus B (High)" ,
"U0045" : "Vehicle Communication Bus B (shorted to Bus B)" ,
"U0046" : "Vehicle Communication Bus C" ,
"U0047" : "Vehicle Communication Bus C (Performance)" ,
"U0048" : "Vehicle Communication Bus C (Open)" ,
"U0049" : "Vehicle Communication Bus C (Low)" ,
"U0050" : "Vehicle Communication Bus C (High)" ,
"U0051" : "Vehicle Communication Bus C (Open)" ,
"U0052" : "Vehicle Communication Bus C (Low)" ,
"U0053" : "Vehicle Communication Bus C (High)" ,
"U0054" : "Vehicle Communication Bus C (shorted to Bus C)" ,
"U0055" : "Vehicle Communication Bus D" ,
"U0056" : "Vehicle Communication Bus D (Performance)" ,
"U0057" : "Vehicle Communication Bus D (Open)" ,
"U0058" : "Vehicle Communication Bus D (Low)" ,
"U0059" : "Vehicle Communication Bus D (High)" ,
"U0060" : "Vehicle Communication Bus D (Open)" ,
"U0061" : "Vehicle Communication Bus D (Low)" ,
"U0062" : "Vehicle Communication Bus D (High)" ,
"U0063" : "Vehicle Communication Bus D (shorted to Bus D)" ,
"U0064" : "Vehicle Communication Bus E" ,
"U0065" : "Vehicle Communication Bus E (Performance)" ,
"U0066" : "Vehicle Communication Bus E (Open)" ,
"U0067" : "Vehicle Communication Bus E (Low)" ,
"U0068" : "Vehicle Communication Bus E (High)" ,
"U0069" : "Vehicle Communication Bus E (Open)" ,
"U0070" : "Vehicle Communication Bus E (Low)" ,
"U0071" : "Vehicle Communication Bus E (High)" ,
"U0072" : "Vehicle Communication Bus E (shorted to Bus E)" ,
"U0073" : "Control Module Communication Bus Off" ,
"U0074" : "Reserved by J2012" ,
"U0075" : "Reserved by J2012" ,
"U0076" : "Reserved by J2012" ,
"U0077" : "Reserved by J2012" ,
"U0078" : "Reserved by J2012" ,
"U0079" : "Reserved by J2012" ,
"U0080" : "Reserved by J2012" ,
"U0081" : "Reserved by J2012" ,
"U0082" : "Reserved by J2012" ,
"U0083" : "Reserved by J2012" ,
"U0084" : "Reserved by J2012" ,
"U0085" : "Reserved by J2012" ,
"U0086" : "Reserved by J2012" ,
"U0087" : "Reserved by J2012" ,
"U0088" : "Reserved by J2012" ,
"U0089" : "Reserved by J2012" ,
"U0090" : "Reserved by J2012" ,
"U0091" : "Reserved by J2012" ,
"U0092" : "Reserved by J2012" ,
"U0093" : "Reserved by J2012" ,
"U0094" : "Reserved by J2012" ,
"U0095" : "Reserved by J2012" ,
"U0096" : "Reserved by J2012" ,
"U0097" : "Reserved by J2012" ,
"U0098" : "Reserved by J2012" ,
"U0099" : "Reserved by J2012" ,
"U0100" : "Lost Communication With ECM/PCM A" ,
"U0101" : "Lost Communication with TCM" ,
"U0102" : "Lost Communication with Transfer Case Control Module" ,
"U0103" : "Lost Communication With Gear Shift Module" ,
"U0104" : "Lost Communication With Cruise Control Module" ,
"U0105" : "Lost Communication With Fuel Injector Control Module" ,
"U0106" : "Lost Communication With Glow Plug Control Module" ,
"U0107" : "Lost Communication With Throttle Actuator Control Module" ,
"U0108" : "Lost Communication With Alternative Fuel Control Module" ,
"U0109" : "Lost Communication With Fuel Pump Control Module" ,
"U0110" : "Lost Communication With Drive Motor Control Module" ,
"U0111" : "Lost Communication With Battery Energy Control Module 'A'" ,
"U0112" : "Lost Communication With Battery Energy Control Module 'B'" ,
"U0113" : "Lost Communication With Emissions Critical Control Information" ,
"U0114" : "Lost Communication With Four-Wheel Drive Clutch Control Module" ,
"U0115" : "Lost Communication With ECM/PCM B" ,
"U0116" : "Reserved by J2012" ,
"U0117" : "Reserved by J2012" ,
"U0118" : "Reserved by J2012" ,
"U0119" : "Reserved by J2012" ,
"U0120" : "Reserved by J2012" ,
"U0121" : "Lost Communication With Anti-Lock Brake System (ABS) Control Module" ,
"U0122" : "Lost Communication With Vehicle Dynamics Control Module" ,
"U0123" : "Lost Communication With Yaw Rate Sensor Module" ,
"U0124" : "Lost Communication With Lateral Acceleration Sensor Module" ,
"U0125" : "Lost Communication With Multi-axis Acceleration Sensor Module" ,
"U0126" : "Lost Communication With Steering Angle Sensor Module" ,
"U0127" : "Lost Communication With Tire Pressure Monitor Module" ,
"U0128" : "Lost Communication With Park Brake Control Module" ,
"U0129" : "Lost Communication With Brake System Control Module" ,
"U0130" : "Lost Communication With Steering Effort Control Module" ,
"U0131" : "Lost Communication With Power Steering Control Module" ,
"U0132" : "Lost Communication With Ride Level Control Module" ,
"U0133" : "Reserved by J2012" ,
"U0134" : "Reserved by J2012" ,
"U0135" : "Reserved by J2012" ,
"U0136" : "Reserved by J2012" ,
"U0137" : "Reserved by J2012" ,
"U0138" : "Reserved by J2012" ,
"U0139" : "Reserved by J2012" ,
"U0140" : "Lost Communication With Body Control Module" ,
"U0141" : "Lost Communication With Body Control Module 'A'" ,
"U0142" : "Lost Communication With Body Control Module 'B'" ,
"U0143" : "Lost Communication With Body Control Module 'C'" ,
"U0144" : "Lost Communication With Body Control Module 'D'" ,
"U0145" : "Lost Communication With Body Control Module 'E'" ,
"U0146" : "Lost Communication With Gateway 'A'" ,
"U0147" : "Lost Communication With Gateway 'B'" ,
"U0148" : "Lost Communication With Gateway 'C'" ,
"U0149" : "Lost Communication With Gateway 'D'" ,
"U0150" : "Lost Communication With Gateway 'E'" ,
"U0151" : "Lost Communication With Restraints Control Module" ,
"U0152" : "Lost Communication With Side Restraints Control Module Left" ,
"U0153" : "Lost Communication With Side Restraints Control Module Right" ,
"U0154" : "Lost Communication With Restraints Occupant Sensing Control Module" ,
"U0155" : "Lost Communication With Instrument Panel Cluster (IPC) Control Module" ,
"U0156" : "Lost Communication With Information Center 'A'" ,
"U0157" : "Lost Communication With Information Center 'B'" ,
"U0158" : "Lost Communication With Head Up Display" ,
"U0159" : "Lost Communication With Parking Assist Control Module" ,
"U0160" : "Lost Communication With Audible Alert Control Module" ,
"U0161" : "Lost Communication With Compass Module" ,
"U0162" : "Lost Communication With Navigation Display Module" ,
"U0163" : "Lost Communication With Navigation Control Module" ,
"U0164" : "Lost Communication With HVAC Control Module" ,
"U0165" : "Lost Communication With HVAC Control Module Rear" ,
"U0166" : "Lost Communication With Auxiliary Heater Control Module" ,
"U0167" : "Lost Communication With Vehicle Immobilizer Control Module" ,
"U0168" : "Lost Communication With Vehicle Security Control Module" ,
"U0169" : "Lost Communication With Sunroof Control Module" ,
"U0170" : "Lost Communication With 'Restraints System Sensor A'" ,
"U0171" : "Lost Communication With 'Restraints System Sensor B'" ,
"U0172" : "Lost Communication With 'Restraints System Sensor C'" ,
"U0173" : "Lost Communication With 'Restraints System Sensor D'" ,
"U0174" : "Lost Communication With 'Restraints System Sensor E'" ,
"U0175" : "Lost Communication With 'Restraints System Sensor F'" ,
"U0176" : "Lost Communication With 'Restraints System Sensor G'" ,
"U0177" : "Lost Communication With 'Restraints System Sensor H'" ,
"U0178" : "Lost Communication With 'Restraints System Sensor I'" ,
"U0179" : "Lost Communication With 'Restraints System Sensor J'" ,
"U0180" : "Lost Communication With Automatic Lighting Control Module" ,
"U0181" : "Lost Communication With Headlamp Leveling Control Module" ,
"U0182" : "Lost Communication With Lighting Control Module Front" ,
"U0183" : "Lost Communication With Lighting Control Module Rear" ,
"U0184" : "Lost Communication With Radio" ,
"U0185" : "Lost Communication With Antenna Control Module" ,
"U0186" : "Lost Communication With Audio Amplifier" ,
"U0187" : "Lost Communication With Digital Disc Player/Changer Module 'A'" ,
"U0188" : "Lost Communication With Digital Disc Player/Changer Module 'B'" ,
"U0189" : "Lost Communication With Digital Disc Player/Changer Module 'C'" ,
"U0190" : "Lost Communication With Digital Disc Player/Changer Module 'D'" ,
"U0191" : "Lost Communication With Television" ,
"U0192" : "Lost Communication With Personal Computer" ,
"U0193" : "Lost Communication With 'Digital Audio Control Module A'" ,
"U0194" : "Lost Communication With 'Digital Audio Control Module B'" ,
"U0195" : "Lost Communication With Subscription Entertainment Receiver Module" ,
"U0196" : "Lost Communication With Rear Seat Entertainment Control Module" ,
"U0197" : "Lost Communication With Telephone Control Module" ,
"U0198" : "Lost Communication With Telematic Control Module" ,
"U0199" : "Lost Communication With 'Door Control Module A'" ,
"U0200" : "Lost Communication With 'Door Control Module B'" ,
"U0201" : "Lost Communication With 'Door Control Module C'" ,
"U0202" : "Lost Communication With 'Door Control Module D'" ,
"U0203" : "Lost Communication With 'Door Control Module E'" ,
"U0204" : "Lost Communication With 'Door Control Module F'" ,
"U0205" : "Lost Communication With 'Door Control Module G'" ,
"U0206" : "Lost Communication With Folding Top Control Module" ,
"U0207" : "Lost Communication With Moveable Roof Control Module" ,
"U0208" : "Lost Communication With 'Seat Control Module A'" ,
"U0209" : "Lost Communication With 'Seat Control Module B'" ,
"U0210" : "Lost Communication With 'Seat Control Module C'" ,
"U0211" : "Lost Communication With 'Seat Control Module D'" ,
"U0212" : "Lost Communication With Steering Column Control Module" ,
"U0213" : "Lost Communication With Mirror Control Module" ,
"U0214" : "Lost Communication With Remote Function Actuation" ,
"U0215" : "Lost Communication With 'Door Switch A'" ,
"U0216" : "Lost Communication With 'Door Switch B'" ,
"U0217" : "Lost Communication With 'Door Switch C'" ,
"U0218" : "Lost Communication With 'Door Switch D'" ,
"U0219" : "Lost Communication With 'Door Switch E'" ,
"U0220" : "Lost Communication With 'Door Switch F'" ,
"U0221" : "Lost Communication With 'Door Switch G'" ,
"U0222" : "Lost Communication With 'Door Window Motor A'" ,
"U0223" : "Lost Communication With 'Door Window Motor B'" ,
"U0224" : "Lost Communication With 'Door Window Motor C'" ,
"U0225" : "Lost Communication With 'Door Window Motor D'" ,
"U0226" : "Lost Communication With 'Door Window Motor E'" ,
"U0227" : "Lost Communication With 'Door Window Motor F'" ,
"U0228" : "Lost Communication With 'Door Window Motor G'" ,
"U0229" : "Lost Communication With Heated Steering Wheel Module" ,
"U0230" : "Lost Communication With Rear Gate Module" ,
"U0231" : "Lost Communication With Rain Sensing Module" ,
"U0232" : "Lost Communication With Side Obstacle Detection Control Module Left" ,
"U0233" : "Lost Communication With Side Obstacle Detection Control Module Right" ,
"U0234" : "Lost Communication With Convenience Recall Module" ,
"U0235" : "Lost Communication With Cruise Control Front Distance Range Sensor" ,
"U0300" : "Internal Control Module Software Incompatibility" ,
"U0301" : "Software Incompatibility with ECM/PCM" ,
"U0302" : "Software Incompatibility with Transmission Control Module" ,
"U0303" : "Software Incompatibility with Transfer Case Control Module" ,
"U0304" : "Software Incompatibility with Gear Shift Control Module" ,
"U0305" : "Software Incompatibility with Cruise Control Module" ,
"U0306" : "Software Incompatibility with Fuel Injector Control Module" ,
"U0307" : "Software Incompatibility with Glow Plug Control Module" ,
"U0308" : "Software Incompatibility with Throttle Actuator Control Module" ,
"U0309" : "Software Incompatibility with Alternative Fuel Control Module" ,
"U0310" : "Software Incompatibility with Fuel Pump Control Module" ,
"U0311" : "Software Incompatibility with Drive Motor Control Module" ,
"U0312" : "Software Incompatibility with Battery Energy Control Module A" ,
"U0313" : "Software Incompatibility with Battery Energy Control Module B" ,
"U0314" : "Software Incompatibility with Four-Wheel Drive Clutch Control Module" ,
"U0315" : "Software Incompatibility with Anti-Lock Brake System Control Module" ,
"U0316" : "Software Incompatibility with Vehicle Dynamics Control Module" ,
"U0317" : "Software Incompatibility with Park Brake Control Module" ,
"U0318" : "Software Incompatibility with Brake System Control Module" ,
"U0319" : "Software Incompatibility with Steering Effort Control Module" ,
"U0320" : "Software Incompatibility with Power Steering Control Module" ,
"U0321" : "Software Incompatibility with Ride Level Control Module" ,
"U0322" : "Software Incompatibility with Body Control Module" ,
"U0323" : "Software Incompatibility with Instrument Panel Control Module" ,
"U0324" : "Software Incompatibility with HVAC Control Module" ,
"U0325" : "Software Incompatibility with Auxiliary Heater Control Module" ,
"U0326" : "Software Incompatibility with Vehicle Immobilizer Control Module" ,
"U0327" : "Software Incompatibility with Vehicle Security Control Module" ,
"U0328" : "Software Incompatibility with Steering Angle Sensor Module" ,
"U0329" : "Software Incompatibility with Steering Column Control Module" ,
"U0330" : "Software Incompatibility with Tire Pressure Monitor Module" ,
"U0331" : "Software Incompatibility with Body Control Module 'A'" ,
"U0400" : "Invalid Data Received" ,
"U0401" : "Invalid Data Received From ECM/PCM" ,
"U0402" : "Invalid Data Received From Transmission Control Module" ,
"U0403" : "Invalid Data Received From Transfer Case Control Module" ,
"U0404" : "Invalid Data Received From Gear Shift Control Module" ,
"U0405" : "Invalid Data Received From Cruise Control Module" ,
"U0406" : "Invalid Data Received From Fuel Injector Control Module" ,
"U0407" : "Invalid Data Received From Glow Plug Control Module" ,
"U0408" : "Invalid Data Received From Throttle Actuator Control Module" ,
"U0409" : "Invalid Data Received From Alternative Fuel Control Module" ,
"U0410" : "Invalid Data Received From Fuel Pump Control Module" ,
"U0411" : "Invalid Data Received From Drive Motor Control Module" ,
"U0412" : "Invalid Data Received From Battery Energy Control Module A" ,
"U0413" : "Invalid Data Received From Battery Energy Control Module B" ,
"U0414" : "Invalid Data Received From Four-Wheel Drive Clutch Control Module" ,
"U0415" : "Invalid Data Received From Anti-Lock Brake System Control Module" ,
"U0416" : "Invalid Data Received From Vehicle Dynamics Control Module" ,
"U0417" : "Invalid Data Received From Park Brake Control Module" ,
"U0418" : "Invalid Data Received From Brake System Control Module" ,
"U0419" : "Invalid Data Received From Steering Effort Control Module" ,
"U0420" : "Invalid Data Received From Power Steering Control Module" ,
"U0421" : "Invalid Data Received From Ride Level Control Module" ,
"U0422" : "Invalid Data Received From Body Control Module" ,
"U0423" : "Invalid Data Received From Instrument Panel Control Module" ,
"U0424" : "Invalid Data Received From HVAC Control Module" ,
"U0425" : "Invalid Data Received From Auxiliary Heater Control Module" ,
"U0426" : "Invalid Data Received From Vehicle Immobilizer Control Module" ,
"U0427" : "Invalid Data Received From Vehicle Security Control Module" ,
"U0428" : "Invalid Data Received From Steering Angle Sensor Module" ,
"U0429" : "Invalid Data Received From Steering Column Control Module" ,
"U0430" : "Invalid Data Received From Tire Pressure Monitor Module" ,
"U0431" : "Invalid Data Received From Body Control Module 'A'"
}
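# Example lookup (illustrative):
#
#   ucodes.get("U0100")  # -> "Lost Communication With ECM/PCM A"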
| lkarsten/pyobd | network_codes.py | Python | gpl-2.0 | 18,096 |
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Copyright Mercurial Contributors
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from typing import TypeVar, Callable, List, Tuple, Optional
from . import mdiff
from .thirdparty import attr
F = TypeVar("F")
L = TypeVar("L")
def annotate(
base: F,
parents: Callable[[F], List[F]],
decorate: Callable[[F], Tuple[List[L], bytes]],
diffopts: mdiff.diffopts,
skip: Optional[Callable[[F], bool]] = None,
) -> Tuple[List[L], bytes]:
"""annotate algorithm
base: starting point, usually a fctx.
parents: get parents from F.
decorate: get (lines, text) from F.
Return (lines, text) for 'base'.
"""
# This algorithm would prefer to be recursive, but Python is a
# bit recursion-hostile. Instead we do an iterative
# depth-first search.
# 1st DFS pre-calculates pcache and needed
visit = [base]
pcache = {}
needed = {base: 1}
while visit:
f = visit.pop()
if f in pcache:
continue
pl = parents(f)
pcache[f] = pl
for p in pl:
needed[p] = needed.get(p, 0) + 1
if p not in pcache:
visit.append(p)
# 2nd DFS does the actual annotate
visit[:] = [base]
hist = {}
while visit:
f = visit[-1]
if f in hist:
visit.pop()
continue
ready = True
pl = pcache[f]
for p in pl:
if p not in hist:
ready = False
visit.append(p)
if ready:
visit.pop()
curr = decorate(f)
skipchild = False
if skip is not None:
skipchild = skip(f)
curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild, diffopts)
for p in pl:
if needed[p] == 1:
del hist[p]
del needed[p]
else:
needed[p] -= 1
hist[f] = curr
del pcache[f]
return hist[base]
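# Minimal usage sketch (illustrative; names like `fctx` and
# `decorated_lines_for` are hypothetical stand-ins -- real callers pass
# filectx objects and diff options taken from the repository configuration):
#
#   lines, text = annotate(
#       base=fctx,
#       parents=lambda f: f.parents(),
#       decorate=lambda f: (decorated_lines_for(f), f.data()),
#       diffopts=mdiff.diffopts(),
#   )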
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
r"""
Given parent and child fctxes and annotate data for parents, for all lines
in either parent that match the child, annotate the child with the parent's
data.
Additionally, if `skipchild` is True, replace all other lines with parent
annotate data as well such that child is never blamed for any lines.
See test-annotate.py for unit tests.
"""
pblocks = [
(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
for parent in parents
]
if skipchild:
# Need to iterate over the blocks twice -- make it a list
pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
# Mercurial currently prefers p2 over p1 for annotate.
# TODO: change this?
for parent, blocks in pblocks:
for (a1, a2, b1, b2), t in blocks:
# Changed blocks ('!') or blocks made only of blank lines ('~')
# belong to the child.
if t == "=":
child[0][b1:b2] = parent[0][a1:a2]
if skipchild:
# Now try and match up anything that couldn't be matched,
# Reversing pblocks maintains bias towards p2, matching above
# behavior.
pblocks.reverse()
# The heuristics are:
# * Work on blocks of changed lines (effectively diff hunks with -U0).
# This could potentially be smarter but works well enough.
# * For a non-matching section, do a best-effort fit. Match lines in
# diff hunks 1:1, dropping lines as necessary.
# * Repeat the last line as a last resort.
# First, replace as much as possible without repeating the last line.
remaining = [(parent, []) for parent, _blocks in pblocks]
for idx, (parent, blocks) in enumerate(pblocks):
for (a1, a2, b1, b2), _t in blocks:
if a2 - a1 >= b2 - b1:
for bk in range(b1, b2):
if child[0][bk].fctx == childfctx:
ak = min(a1 + (bk - b1), a2 - 1)
child[0][bk] = attr.evolve(parent[0][ak], skip=True)
else:
remaining[idx][1].append((a1, a2, b1, b2))
# Then, look at anything left, which might involve repeating the last
# line.
for parent, blocks in remaining:
for a1, a2, b1, b2 in blocks:
for bk in range(b1, b2):
if child[0][bk].fctx == childfctx:
ak = min(a1 + (bk - b1), a2 - 1)
child[0][bk] = attr.evolve(parent[0][ak], skip=True)
return child
| facebookexperimental/eden | eden/scm/edenscm/mercurial/annotate.py | Python | gpl-2.0 | 4,963 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file exports all NetworkInspectors.
"""
import os
import glob
import nupic
# Import NetworkInspector and NetworkInspectorHandler
from nupic.analysis.inspectors.network.NetworkInspector import *
# Create networkInspectors as a list of all network inspector subclasses
files = [os.path.splitext(os.path.split(x)[1])[0] for x in
glob.glob(os.path.join(os.path.split(__file__)[0], '*.py'))]
files.remove('__init__')
files.remove('NetworkInspector')
#files = [(f, f[:-1]) for f in files if f.endswith('2')]
files = [(f, f) for f in files]
for f in files:
exec('from nupic.analysis.inspectors.network.%s import %s' % (f[0], f[1]))
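    # e.g. for a module file FooInspector.py this executes (illustrative):
    #   from nupic.analysis.inspectors.network.FooInspector import FooInspector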
networkInspectors = map(eval, [f[1] for f in files])
| tkaitchuck/nupic | py/nupic/analysis/inspectors/network/__init__.py | Python | gpl-3.0 | 1,690 |
## Copyright 2009 Laurent Bovet <[email protected]>
## Jordi Puigsegur <[email protected]>
##
## This file is part of wfrog
##
## wfrog is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import log
import yaml
import inspect
import sys
import os.path
import copy
from Cheetah.Template import Template
wfrog_version = "0.8.2.99-git"
class Configurer(object):
default_filename = None
module_map = None
log_configurer = log.LogConfigurer()
logger = logging.getLogger('config')
def __init__(self, module_map):
self.module_map = module_map
self.extensions = {}
def add_options(self, opt_parser):
opt_parser.add_option("-f", "--config", dest="config",
help="Configuration file (in yaml)", metavar="CONFIG_FILE")
opt_parser.add_option("-s", "--settings", dest="settings",
help="Settings file (in yaml)", metavar="SETTINGS_FILE")
opt_parser.add_option("-H", action="store_true", dest="help_list", help="Gives help on the configuration file and the list of possible config !elements in the yaml config file")
opt_parser.add_option("-E", dest="help_element", metavar="ELEMENT", help="Gives help about a config !element")
opt_parser.add_option("-e", "--extensions", dest="extension_names", metavar="MODULE1,MODULE2,...", help="Comma-separated list of modules containing custom configuration elements")
self.log_configurer.add_options(opt_parser)
def configure(self, options, component, config_file, settings_file=None, embedded=False):
self.config_file = config_file
self.settings_file = settings_file
if options.extension_names:
for ext in options.extension_names.split(","):
self.logger.debug("Loading extension module '"+ext+"'")
self.extensions[ext]=__import__(ext)
if options.help_list:
if component.__doc__ is not None:
print component.__doc__
for (k,v) in self.module_map:
print k
print "-"*len(k) +"\n"
self.print_help(v)
if options.extension_names:
print "Extensions"
print "----------\n"
for ext in self.extensions:
print "[" + ext + "]"
print
self.print_help(self.extensions[ext])
# Adds logger documentation
print self.log_configurer.__doc__
print " Use option -H ELEMENT for help on a particular !element"
sys.exit()
if options.help_element:
element = options.help_element
            if element[0] != '!':
element = '!' + element
desc = {}
for(k,v) in self.module_map:
desc.update(self.get_help_desc(v))
if len(desc) == 0:
for ext in self.extensions:
desc.update(self.get_help_desc(self.extensions[ext]))
if desc.has_key(element):
print
print element + " [" + desc[element][1] +"]"
print " " + desc[element][0]
print
else:
print "Element "+element+" not found or not documented"
sys.exit()
if not embedded and options.config:
self.config_file = options.config
settings_warning=False
if self.settings_file is None:
if options.settings is not None:
self.settings_file = options.settings
else:
settings_warning=True
self.settings_file = os.path.dirname(self.config_file)+'/../../wfcommon/config/default-settings.yaml'
settings = yaml.load( file(self.settings_file, 'r') )
variables = {}
variables['settings']=settings
config = yaml.load( str(Template(file=file(self.config_file, "r"), searchList=[variables])))
if settings is not None:
context = copy.deepcopy(settings)
else:
context = {}
context['_yaml_config_file'] = self.config_file
context['os']=sys.platform
if not embedded:
self.log_configurer.configure(options, config, context)
self.logger.info("Starting wfrog " + wfrog_version)
if settings_warning:
self.logger.warn('User settings are missing. Loading default ones. Run \'wfrog -S\' for user settings setup.')
self.logger.info("Loaded settings file " + os.path.normpath(self.settings_file))
self.logger.debug('Loaded settings %s', repr(settings))
self.logger.debug("Loaded config file " + os.path.normpath(self.config_file))
if config.has_key('init'):
for k,v in config['init'].iteritems():
self.logger.debug("Initializing "+k)
try:
v.init(context=context)
except AttributeError:
                    pass # In case the element has no init method
return ( config, context )
def print_help(self, module):
desc = self.get_help_desc(module, summary=True)
sorted = desc.keys()
sorted.sort()
for k in sorted:
print k
print " " + desc[k][0]
print
def get_help_desc(self, module, summary=False):
self.logger.debug("Getting info on module '"+module.__name__+"'")
elements = inspect.getmembers(module, lambda l : inspect.isclass(l) and yaml.YAMLObject in inspect.getmro(l))
desc={}
for element in elements:
self.logger.debug("Getting doc of "+element[0])
# Gets the documentation of the first superclass
superclass = inspect.getmro(element[1])[1]
fulldoc=superclass.__doc__
            # Add the doc of the super-super-class if _element_doc is set
if hasattr(inspect.getmro(superclass)[1], "_element_doc") and inspect.getmro(superclass)[1].__doc__ is not None:
fulldoc = fulldoc + inspect.getmro(superclass)[1].__doc__
firstline=fulldoc.split(".")[0]
self.logger.debug(firstline)
module_name = module.__name__.split('.')[-1]
if summary:
desc[element[1].yaml_tag] = [ firstline, module_name ]
else:
desc[element[1].yaml_tag] = [ fulldoc, module_name ]
return desc
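# --- Hedged usage sketch (editor's addition, not part of wfrog itself) ---
# A minimal driver for Configurer; the module map entries, component and file
# names below are illustrative assumptions, not wfrog's actual wiring:
#
#     from optparse import OptionParser
#     opt_parser = OptionParser()
#     configurer = Configurer(module_map=[('renderer', my_renderer_module)])
#     configurer.add_options(opt_parser)
#     (options, args) = opt_parser.parse_args()
#     config, context = configurer.configure(options, component=my_component,
#                                            config_file='wfrog.yaml')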
| wfrog/wfrog | wfcommon/config.py | Python | gpl-3.0 | 7,089 |
"""Multidict implementation.
HTTP Headers and URL query strings require a specific data structure:
multidict. It behaves mostly like a dict but it can have
several values for the same key.
"""
import os
__all__ = ('MultiDictProxy', 'CIMultiDictProxy',
'MultiDict', 'CIMultiDict', 'upstr', 'istr')
__version__ = '2.1.5'
if bool(os.environ.get('MULTIDICT_NO_EXTENSIONS')):
from ._multidict_py import (MultiDictProxy,
CIMultiDictProxy,
MultiDict,
CIMultiDict,
upstr, istr)
else:
try:
from ._multidict import (MultiDictProxy,
CIMultiDictProxy,
MultiDict,
CIMultiDict,
upstr, istr)
except ImportError: # pragma: no cover
from ._multidict_py import (MultiDictProxy,
CIMultiDictProxy,
MultiDict,
CIMultiDict,
upstr, istr)
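# --- Hedged usage sketch (editor's addition) ---
# The module docstring promises several values per key; a minimal illustration
# using the standard multidict API (exact behaviour lives in the C extension
# or the pure-Python fallback imported above):
#
#     d = MultiDict()
#     d.add('tag', 'a')
#     d.add('tag', 'b')
#     d.getall('tag')   # -> ['a', 'b']
#     d['tag']          # -> 'a', plain lookup returns the first value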
| DivineHime/seishirou | lib/multidict/__init__.py | Python | gpl-3.0 | 1,162 |
""" DIRAC FileCatalog mix-in class to manage directory metadata
"""
# pylint: disable=protected-access
import six
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Time import queryTime
class DirectoryMetadata(object):
def __init__(self, database=None):
self.db = database
def setDatabase(self, database):
self.db = database
##############################################################################
#
# Manage Metadata fields
#
def addMetadataField(self, pName, pType, credDict):
"""Add a new metadata parameter to the Metadata Database.
:param str pName: parameter name
:param str pType: parameter type in the MySQL notation
:return: S_OK/S_ERROR, Value - comment on a positive result
"""
result = self.db.fmeta.getFileMetadataFields(credDict)
if not result["OK"]:
return result
if pName in result["Value"]:
return S_ERROR("The metadata %s is already defined for Files" % pName)
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
if pName in result["Value"]:
if pType.lower() == result["Value"][pName].lower():
return S_OK("Already exists")
return S_ERROR(
"Attempt to add an existing metadata with different type: %s/%s" % (pType, result["Value"][pName])
)
valueType = pType
if pType.lower()[:3] == "int":
valueType = "INT"
elif pType.lower() == "string":
valueType = "VARCHAR(128)"
elif pType.lower() == "float":
valueType = "FLOAT"
elif pType.lower() == "date":
valueType = "DATETIME"
elif pType == "MetaSet":
valueType = "VARCHAR(64)"
req = "CREATE TABLE FC_Meta_%s ( DirID INTEGER NOT NULL, Value %s, PRIMARY KEY (DirID), INDEX (Value) )" % (
pName,
valueType,
)
result = self.db._query(req)
if not result["OK"]:
return result
result = self.db.insertFields("FC_MetaFields", ["MetaName", "MetaType"], [pName, pType])
if not result["OK"]:
return result
metadataID = result["lastRowId"]
result = self.__transformMetaParameterToData(pName)
if not result["OK"]:
return result
return S_OK("Added new metadata: %d" % metadataID)
def deleteMetadataField(self, pName, credDict):
"""Remove metadata field
:param str pName: meta parameter name
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR
"""
req = "DROP TABLE FC_Meta_%s" % pName
result = self.db._update(req)
error = ""
if not result["OK"]:
error = result["Message"]
req = "DELETE FROM FC_MetaFields WHERE MetaName='%s'" % pName
result = self.db._update(req)
if not result["OK"]:
if error:
result["Message"] = error + "; " + result["Message"]
return result
def getMetadataFields(self, credDict):
"""Get all the defined metadata fields
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value is the metadata:metadata type dictionary
"""
return self._getMetadataFields(credDict)
def _getMetadataFields(self, credDict):
"""Get all the defined metadata fields as they are defined in the database
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value is the metadata:metadata type dictionary
"""
req = "SELECT MetaName,MetaType FROM FC_MetaFields"
result = self.db._query(req)
if not result["OK"]:
return result
metaDict = {}
for row in result["Value"]:
metaDict[row[0]] = row[1]
return S_OK(metaDict)
def addMetadataSet(self, metaSetName, metaSetDict, credDict):
"""Add a new metadata set with the contents from metaSetDict
:param str metaSetName: metaSet name
:param dict metaSetDict: contents of the meta set definition
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaTypeDict = result["Value"]
# Check the sanity of the metadata set contents
for key in metaSetDict:
if key not in metaTypeDict:
return S_ERROR("Unknown key %s" % key)
result = self.db.insertFields("FC_MetaSetNames", ["MetaSetName"], [metaSetName])
if not result["OK"]:
return result
metaSetID = result["lastRowId"]
req = "INSERT INTO FC_MetaSets (MetaSetID,MetaKey,MetaValue) VALUES %s"
vList = []
for key, value in metaSetDict.items():
vList.append("(%d,'%s','%s')" % (metaSetID, key, str(value)))
vString = ",".join(vList)
result = self.db._update(req % vString)
return result
def getMetadataSet(self, metaSetName, expandFlag, credDict):
"""Get fully expanded contents of the metadata set
:param str metaSetName: metaSet name
        :param bool expandFlag: flag whether to expand the metaset recursively
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value dictionary of the meta set definition contents
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaTypeDict = result["Value"]
req = "SELECT S.MetaKey,S.MetaValue FROM FC_MetaSets as S, FC_MetaSetNames as N "
req += "WHERE N.MetaSetName='%s' AND N.MetaSetID=S.MetaSetID" % metaSetName
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK({})
resultDict = {}
for key, value in result["Value"]:
if key not in metaTypeDict:
return S_ERROR("Unknown key %s" % key)
if expandFlag:
if metaTypeDict[key] == "MetaSet":
result = self.getMetadataSet(value, expandFlag, credDict)
if not result["OK"]:
return result
resultDict.update(result["Value"])
else:
resultDict[key] = value
else:
resultDict[key] = value
return S_OK(resultDict)
#############################################################################################
#
# Set and get directory metadata
#
#############################################################################################
def setMetadata(self, dPath, metaDict, credDict):
"""Set the value of a given metadata field for the the given directory path
:param str dPath: directory path
:param dict metaDict: dictionary with metadata
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
result = self.db.dtree.findDir(dPath)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % dPath)
dirID = result["Value"]
dirmeta = self.getDirectoryMetadata(dPath, credDict, ownData=False)
if not dirmeta["OK"]:
return dirmeta
for metaName, metaValue in metaDict.items():
if metaName not in metaFields:
result = self.setMetaParameter(dPath, metaName, metaValue, credDict)
if not result["OK"]:
return result
continue
# Check that the metadata is not defined for the parent directories
if metaName in dirmeta["Value"]:
return S_ERROR("Metadata conflict detected for %s for directory %s" % (metaName, dPath))
result = self.db.insertFields("FC_Meta_%s" % metaName, ["DirID", "Value"], [dirID, metaValue])
if not result["OK"]:
if result["Message"].find("Duplicate") != -1:
req = "UPDATE FC_Meta_%s SET Value='%s' WHERE DirID=%d" % (metaName, metaValue, dirID)
result = self.db._update(req)
if not result["OK"]:
return result
else:
return result
return S_OK()
def removeMetadata(self, dPath, metaData, credDict):
"""Remove the specified metadata for the given directory
:param str dPath: directory path
:param dict metaData: metadata dictionary
:param dict credDict: client credential dictionary
:return: standard Dirac result object
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
result = self.db.dtree.findDir(dPath)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % dPath)
dirID = result["Value"]
failedMeta = {}
for meta in metaData:
if meta in metaFields:
# Indexed meta case
req = "DELETE FROM FC_Meta_%s WHERE DirID=%d" % (meta, dirID)
result = self.db._update(req)
if not result["OK"]:
failedMeta[meta] = result["Value"]
else:
# Meta parameter case
req = "DELETE FROM FC_DirMeta WHERE MetaKey='%s' AND DirID=%d" % (meta, dirID)
result = self.db._update(req)
if not result["OK"]:
failedMeta[meta] = result["Value"]
        if failedMeta:
            metaExample = list(failedMeta)[0]
            result = S_ERROR("Failed to remove %d metadata, e.g. %s" % (len(failedMeta), failedMeta[metaExample]))
            result["FailedMetadata"] = failedMeta
            return result
        else:
            return S_OK()
def setMetaParameter(self, dPath, metaName, metaValue, credDict):
"""Set an meta parameter - metadata which is not used in the the data
search operations
:param str dPath: directory name
:param str metaName: meta parameter name
:param str metaValue: meta parameter value
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR
"""
result = self.db.dtree.findDir(dPath)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % dPath)
dirID = result["Value"]
result = self.db.insertFields(
"FC_DirMeta", ["DirID", "MetaKey", "MetaValue"], [dirID, metaName, str(metaValue)]
)
return result
def getDirectoryMetaParameters(self, dpath, credDict, inherited=True):
"""Get meta parameters for the given directory
        :param str dpath: directory name
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value dictionary of meta parameters
"""
if inherited:
result = self.db.dtree.getPathIDs(dpath)
if not result["OK"]:
return result
pathIDs = result["Value"]
dirID = pathIDs[-1]
else:
result = self.db.dtree.findDir(dpath)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % dpath)
dirID = result["Value"]
pathIDs = [dirID]
if len(pathIDs) > 1:
pathString = ",".join([str(x) for x in pathIDs])
req = "SELECT DirID,MetaKey,MetaValue from FC_DirMeta where DirID in (%s)" % pathString
else:
req = "SELECT DirID,MetaKey,MetaValue from FC_DirMeta where DirID=%d " % dirID
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK({})
metaDict = {}
for _dID, key, value in result["Value"]:
if key in metaDict:
if isinstance(metaDict[key], list):
metaDict[key].append(value)
else:
                    metaDict[key] = [metaDict[key], value]
else:
metaDict[key] = value
return S_OK(metaDict)
def getDirectoryMetadata(self, path, credDict, inherited=True, ownData=True):
"""Get metadata for the given directory aggregating metadata for the directory itself
and for all the parent directories if inherited flag is True. Get also the non-indexed
metadata parameters.
:param str path: directory name
:param dict credDict: client credential dictionary
:param bool inherited: flag to include metadata from the parent directories
:param bool ownData: flag to include metadata for the directory itself
:return: S_OK/S_ERROR, Value dictionary of metadata
"""
result = self.db.dtree.getPathIDs(path)
if not result["OK"]:
return result
pathIDs = result["Value"]
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
metaDict = {}
metaOwnerDict = {}
metaTypeDict = {}
dirID = pathIDs[-1]
if not inherited:
pathIDs = pathIDs[-1:]
if not ownData:
pathIDs = pathIDs[:-1]
pathString = ",".join([str(x) for x in pathIDs])
for meta in metaFields:
req = "SELECT Value,DirID FROM FC_Meta_%s WHERE DirID in (%s)" % (meta, pathString)
result = self.db._query(req)
if not result["OK"]:
return result
if len(result["Value"]) > 1:
return S_ERROR("Metadata conflict for %s for directory %s" % (meta, path))
if result["Value"]:
metaDict[meta] = result["Value"][0][0]
if int(result["Value"][0][1]) == dirID:
metaOwnerDict[meta] = "OwnMetadata"
else:
metaOwnerDict[meta] = "ParentMetadata"
metaTypeDict[meta] = metaFields[meta]
# Get also non-searchable data
result = self.getDirectoryMetaParameters(path, credDict, inherited)
if result["OK"]:
metaDict.update(result["Value"])
for meta in result["Value"]:
metaOwnerDict[meta] = "OwnParameter"
result = S_OK(metaDict)
result["MetadataOwner"] = metaOwnerDict
result["MetadataType"] = metaTypeDict
return result
def __transformMetaParameterToData(self, metaName):
"""Relocate the meta parameters of all the directories to the corresponding
indexed metadata table
:param str metaName: name of the parameter to transform
:return: S_OK/S_ERROR
"""
req = "SELECT DirID,MetaValue from FC_DirMeta WHERE MetaKey='%s'" % metaName
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK()
dirDict = {}
for dirID, meta in result["Value"]:
dirDict[dirID] = meta
dirList = list(dirDict)
# Exclude child directories from the list
for dirID in dirList:
result = self.db.dtree.getSubdirectoriesByID(dirID)
if not result["OK"]:
return result
if not result["Value"]:
continue
childIDs = list(result["Value"])
for childID in childIDs:
if childID in dirList:
                    dirList.remove(childID)
insertValueList = []
for dirID in dirList:
insertValueList.append("( %d,'%s' )" % (dirID, dirDict[dirID]))
req = "INSERT INTO FC_Meta_%s (DirID,Value) VALUES %s" % (metaName, ", ".join(insertValueList))
result = self.db._update(req)
if not result["OK"]:
return result
req = "DELETE FROM FC_DirMeta WHERE MetaKey='%s'" % metaName
result = self.db._update(req)
return result
############################################################################################
#
# Find directories corresponding to the metadata
#
def __createMetaSelection(self, value, table=""):
"""Create an SQL selection element for the given meta value
:param dict value: dictionary with selection instructions suitable for the database search
:param str table: table name
:return: selection string
"""
if isinstance(value, dict):
selectList = []
for operation, operand in value.items():
if operation in [">", "<", ">=", "<="]:
if isinstance(operand, list):
return S_ERROR("Illegal query: list of values for comparison operation")
if isinstance(operand, six.integer_types):
selectList.append("%sValue%s%d" % (table, operation, operand))
elif isinstance(operand, float):
selectList.append("%sValue%s%f" % (table, operation, operand))
else:
selectList.append("%sValue%s'%s'" % (table, operation, operand))
elif operation == "in" or operation == "=":
if isinstance(operand, list):
vString = ",".join(["'" + str(x) + "'" for x in operand])
selectList.append("%sValue IN (%s)" % (table, vString))
else:
selectList.append("%sValue='%s'" % (table, operand))
elif operation == "nin" or operation == "!=":
if isinstance(operand, list):
vString = ",".join(["'" + str(x) + "'" for x in operand])
selectList.append("%sValue NOT IN (%s)" % (table, vString))
else:
selectList.append("%sValue!='%s'" % (table, operand))
selectString = " AND ".join(selectList)
elif isinstance(value, list):
vString = ",".join(["'" + str(x) + "'" for x in value])
selectString = "%sValue in (%s)" % (table, vString)
else:
if value == "Any":
selectString = ""
else:
selectString = "%sValue='%s' " % (table, value)
return S_OK(selectString)
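    # Hedged examples (editor's addition) of how selection values map to SQL,
    # following the branches above (table prefix "M." as used by the callers):
    #   {'>': 100}         -> "M.Value>100"
    #   {'in': ['a', 'b']} -> "M.Value IN ('a','b')"
    #   ['x', 'y']         -> "M.Value in ('x','y')"
    #   'Any'              -> ""   (no constraint: metadata merely defined)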
def __findSubdirByMeta(self, metaName, value, pathSelection="", subdirFlag=True):
"""Find directories for the given metaName datum. If the the metaName datum type is a list,
combine values in OR. In case the metaName datum is 'Any', finds all the subdirectories
for which the metaName datum is defined at all.
:param str metaName: metadata name
:param dict,list value: dictionary with selection instructions suitable for the database search
:param str pathSelection: directory path selection string
        :param bool subdirFlag: flag to include subdirectories
:return: S_OK/S_ERROR, Value list of found directories
"""
result = self.__createMetaSelection(value, "M.")
if not result["OK"]:
return result
selectString = result["Value"]
req = " SELECT M.DirID FROM FC_Meta_%s AS M" % metaName
if pathSelection:
req += " JOIN ( %s ) AS P WHERE M.DirID=P.DirID" % pathSelection
if selectString:
if pathSelection:
req += " AND %s" % selectString
else:
req += " WHERE %s" % selectString
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK([])
dirList = []
for row in result["Value"]:
dirID = row[0]
dirList.append(dirID)
# if subdirFlag:
# result = self.db.dtree.getSubdirectoriesByID( dirID )
# if not result['OK']:
# return result
# dirList += result['Value']
if subdirFlag:
result = self.db.dtree.getAllSubdirectoriesByID(dirList)
if not result["OK"]:
return result
dirList += result["Value"]
return S_OK(dirList)
def __findSubdirMissingMeta(self, metaName, pathSelection):
"""Find directories not having the given meta datum defined
:param str metaName: metadata name
:param str pathSelection: directory path selection string
:return: S_OK,S_ERROR , Value list of directories
"""
result = self.__findSubdirByMeta(metaName, "Any", pathSelection)
if not result["OK"]:
return result
dirList = result["Value"]
table = self.db.dtree.getTreeTable()
dirString = ",".join([str(x) for x in dirList])
if dirList:
req = "SELECT DirID FROM %s WHERE DirID NOT IN ( %s )" % (table, dirString)
else:
req = "SELECT DirID FROM %s" % table
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK([])
dirList = [x[0] for x in result["Value"]]
return S_OK(dirList)
def __expandMetaDictionary(self, metaDict, credDict):
"""Update the dictionary with metadata query by expand metaSet type metadata
:param dict metaDict: metaDict to be expanded
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR , Value dictionary of metadata
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaTypeDict = result["Value"]
resultDict = {}
extraDict = {}
for key, value in metaDict.items():
if key not in metaTypeDict:
# return S_ERROR( 'Unknown metadata field %s' % key )
extraDict[key] = value
continue
keyType = metaTypeDict[key]
if keyType != "MetaSet":
resultDict[key] = value
else:
result = self.getMetadataSet(value, True, credDict)
if not result["OK"]:
return result
mDict = result["Value"]
for mk, mv in mDict.items():
if mk in resultDict:
return S_ERROR("Contradictory query for key %s" % mk)
else:
resultDict[mk] = mv
result = S_OK(resultDict)
result["ExtraMetadata"] = extraDict
return result
def __checkDirsForMetadata(self, metaName, value, pathString):
"""Check if any of the given directories conform to the given metadata
        :param str metaName: metadata name
:param dict,list value: dictionary with selection instructions suitable for the database search
:param str pathString: string of comma separated directory names
:return: S_OK/S_ERROR, Value directory ID
"""
result = self.__createMetaSelection(value, "M.")
if not result["OK"]:
return result
selectString = result["Value"]
if selectString:
req = "SELECT M.DirID FROM FC_Meta_%s AS M WHERE %s AND M.DirID IN (%s)" % (
metaName,
selectString,
pathString,
)
else:
req = "SELECT M.DirID FROM FC_Meta_%s AS M WHERE M.DirID IN (%s)" % (metaName, pathString)
result = self.db._query(req)
if not result["OK"]:
return result
elif not result["Value"]:
return S_OK(None)
elif len(result["Value"]) > 1:
return S_ERROR("Conflict in the directory metadata hierarchy")
else:
return S_OK(result["Value"][0][0])
@queryTime
def findDirIDsByMetadata(self, queryDict, path, credDict):
"""Find Directories satisfying the given metadata and being subdirectories of
the given path
:param dict queryDict: dictionary containing query data
:param str path: starting directory path
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value list of selected directory IDs
"""
pathDirList = []
pathDirID = 0
pathString = "0"
if path != "/":
result = self.db.dtree.getPathIDs(path)
if not result["OK"]:
# as result[Value] is already checked in getPathIDs
return result
pathIDs = result["Value"]
pathDirID = pathIDs[-1]
pathString = ",".join([str(x) for x in pathIDs])
result = self.__expandMetaDictionary(queryDict, credDict)
if not result["OK"]:
return result
metaDict = result["Value"]
# Now check the meta data for the requested directory and its parents
finalMetaDict = dict(metaDict)
for meta in metaDict:
result = self.__checkDirsForMetadata(meta, metaDict[meta], pathString)
if not result["OK"]:
return result
elif result["Value"] is not None:
# Some directory in the parent hierarchy is already conforming with the
# given metadata, no need to check it further
del finalMetaDict[meta]
if finalMetaDict:
pathSelection = ""
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True, requestString=True)
if not result["OK"]:
return result
pathSelection = result["Value"]
dirList = []
first = True
for meta, value in finalMetaDict.items():
if value == "Missing":
result = self.__findSubdirMissingMeta(meta, pathSelection)
else:
result = self.__findSubdirByMeta(meta, value, pathSelection)
if not result["OK"]:
return result
mList = result["Value"]
if first:
dirList = mList
first = False
else:
newList = []
for d in dirList:
if d in mList:
newList.append(d)
dirList = newList
else:
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True)
if not result["OK"]:
return result
pathDirList = list(result["Value"])
finalList = []
dirSelect = False
if finalMetaDict:
dirSelect = True
finalList = dirList
if pathDirList:
finalList = list(set(dirList) & set(pathDirList))
else:
if pathDirList:
dirSelect = True
finalList = pathDirList
result = S_OK(finalList)
if finalList:
result["Selection"] = "Done"
elif dirSelect:
result["Selection"] = "None"
else:
result["Selection"] = "All"
return result
@queryTime
def findDirectoriesByMetadata(self, queryDict, path, credDict):
"""Find Directory names satisfying the given metadata and being subdirectories of
the given path
:param dict queryDict: dictionary containing query data
:param str path: starting directory path
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value list of selected directory paths
"""
result = self.findDirIDsByMetadata(queryDict, path, credDict)
if not result["OK"]:
return result
dirIDList = result["Value"]
dirNameDict = {}
if dirIDList:
result = self.db.dtree.getDirectoryPaths(dirIDList)
if not result["OK"]:
return result
dirNameDict = result["Value"]
elif result["Selection"] == "None":
dirNameDict = {0: "None"}
elif result["Selection"] == "All":
dirNameDict = {0: "All"}
return S_OK(dirNameDict)
def findFilesByMetadata(self, metaDict, path, credDict):
"""Find Files satisfying the given metadata
:param dict metaDict: dictionary with the selection metadata
:param str path: starting directory path
:param dict credDict: client credential dictionary
        :return: S_OK/S_ERROR, Value list of files in selected directories
"""
result = self.findDirectoriesByMetadata(metaDict, path, credDict)
if not result["OK"]:
return result
dirDict = result["Value"]
dirList = list(dirDict)
fileList = []
result = self.db.dtree.getFilesInDirectory(dirList, credDict)
if not result["OK"]:
return result
for _fileID, dirID, fname in result["Value"]:
fileList.append(dirDict[dirID] + "/" + os.path.basename(fname))
return S_OK(fileList)
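    # Hedged usage sketch (editor's addition); the field names and path below
    # are illustrative only:
    #   result = catalog.findFilesByMetadata({'Energy': {'>': 100}, 'Type': 'MC'},
    #                                        '/some/dir', credDict)
    #   if result['OK']:
    #       print(result['Value'])   # list of matching file paths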
def findFileIDsByMetadata(self, metaDict, path, credDict, startItem=0, maxItems=25):
"""Find Files satisfying the given metadata
:param dict metaDict: dictionary with the selection metadata
:param str path: starting directory path
:param dict credDict: client credential dictionary
:param int startItem: offset in the file list
        :param int maxItems: max number of files to return
        :return: S_OK/S_ERROR, Value list of file IDs in selected directories
"""
result = self.findDirIDsByMetadata(metaDict, path, credDict)
if not result["OK"]:
return result
dirList = result["Value"]
return self.db.dtree.getFileIDsInDirectoryWithLimits(dirList, credDict, startItem, maxItems)
################################################################################################
#
# Find metadata compatible with other metadata in order to organize dynamically updated metadata selectors
def __findCompatibleDirectories(self, metaName, value, fromDirs):
"""Find directories compatible with the given metaName datum.
Optionally limit the list of compatible directories to only those in the
fromDirs list
:param str metaName: metadata name
:param dict,list value: dictionary with selection instructions suitable for the database search
:param list fromDirs: list of directories to choose from
:return: S_OK/S_ERROR, Value list of selected directories
"""
# The directories compatible with the given metaName datum are:
# - directory for which the datum is defined
# - all the subdirectories of the above directory
# - all the directories in the parent hierarchy of the above directory
# Find directories defining the metaName datum and their subdirectories
result = self.__findSubdirByMeta(metaName, value, subdirFlag=False)
if not result["OK"]:
return result
selectedDirs = result["Value"]
if not selectedDirs:
return S_OK([])
result = self.db.dtree.getAllSubdirectoriesByID(selectedDirs)
if not result["OK"]:
return result
subDirs = result["Value"]
# Find parent directories of the directories defining the metaName datum
parentDirs = []
for psub in selectedDirs:
result = self.db.dtree.getPathIDsByID(psub)
if not result["OK"]:
return result
parentDirs += result["Value"]
# Constrain the output to only those that are present in the input list
resDirs = parentDirs + subDirs + selectedDirs
if fromDirs:
resDirs = list(set(resDirs) & set(fromDirs))
return S_OK(resDirs)
def __findDistinctMetadata(self, metaList, dList):
"""Find distinct metadata values defined for the list of the input directories.
Limit the search for only metadata in the input list
:param list metaList: list of metadata names
:param list dList: list of directories to limit the selection
:return: S_OK/S_ERROR, Value dictionary of metadata
"""
if dList:
dString = ",".join([str(x) for x in dList])
else:
dString = None
metaDict = {}
for meta in metaList:
req = "SELECT DISTINCT(Value) FROM FC_Meta_%s" % meta
if dString:
req += " WHERE DirID in (%s)" % dString
result = self.db._query(req)
if not result["OK"]:
return result
if result["Value"]:
metaDict[meta] = []
for row in result["Value"]:
metaDict[meta].append(row[0])
return S_OK(metaDict)
def getCompatibleMetadata(self, queryDict, path, credDict):
"""Get distinct metadata values compatible with the given already defined metadata
:param dict queryDict: dictionary containing query data
:param str path: starting directory path
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value dictionary of metadata
"""
pathDirID = 0
if path != "/":
result = self.db.dtree.findDir(path)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % path)
pathDirID = int(result["Value"])
pathDirs = []
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True)
if not result["OK"]:
return result
if result["Value"]:
pathDirs = list(result["Value"])
result = self.db.dtree.getPathIDsByID(pathDirID)
if not result["OK"]:
return result
if result["Value"]:
pathDirs += result["Value"]
# Get the list of metadata fields to inspect
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
comFields = list(metaFields)
# Commented out to return compatible data also for selection metadata
# for m in metaDict:
# if m in comFields:
# del comFields[comFields.index( m )]
result = self.__expandMetaDictionary(queryDict, credDict)
if not result["OK"]:
return result
metaDict = result["Value"]
fromList = pathDirs
anyMeta = True
if metaDict:
anyMeta = False
for meta, value in metaDict.items():
result = self.__findCompatibleDirectories(meta, value, fromList)
if not result["OK"]:
return result
cdirList = result["Value"]
if cdirList:
fromList = cdirList
else:
fromList = []
break
if anyMeta or fromList:
result = self.__findDistinctMetadata(comFields, fromList)
else:
result = S_OK({})
return result
def removeMetadataForDirectory(self, dirList, credDict):
"""Remove all the metadata for the given directory list
:param list dirList: list of directory paths
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value Successful/Failed dictionaries
"""
if not dirList:
return S_OK({"Successful": {}, "Failed": {}})
failed = {}
successful = {}
dirs = dirList
if not isinstance(dirList, list):
dirs = [dirList]
dirListString = ",".join([str(d) for d in dirs])
# Get the list of metadata fields to inspect
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
for meta in metaFields:
req = "DELETE FROM FC_Meta_%s WHERE DirID in ( %s )" % (meta, dirListString)
result = self.db._query(req)
if not result["OK"]:
failed[meta] = result["Message"]
else:
successful[meta] = "OK"
return S_OK({"Successful": successful, "Failed": failed})
| DIRACGrid/DIRAC | src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/DirectoryMetadata/DirectoryMetadata.py | Python | gpl-3.0 | 37,350 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Written by Bram Cohen
import re
from BitTorrent import BTFailure
allowed_path_re = re.compile(r'^[^/\\.~][^/\\]*$')
ints = (long, int)
def check_info(info, check_paths=True):
if type(info) != dict:
raise BTFailure, 'bad metainfo - not a dictionary'
pieces = info.get('pieces')
if type(pieces) != str or len(pieces) % 20 != 0:
raise BTFailure, 'bad metainfo - bad pieces key'
piecelength = info.get('piece length')
if type(piecelength) not in ints or piecelength <= 0:
raise BTFailure, 'bad metainfo - illegal piece length'
name = info.get('name')
if type(name) != str:
raise BTFailure, 'bad metainfo - bad name'
if not allowed_path_re.match(name):
raise BTFailure, 'name %s disallowed for security reasons' % name
if info.has_key('files') == info.has_key('length'):
raise BTFailure, 'single/multiple file mix'
if info.has_key('length'):
length = info.get('length')
if type(length) not in ints or length < 0:
raise BTFailure, 'bad metainfo - bad length'
else:
files = info.get('files')
if type(files) != list:
raise BTFailure, 'bad metainfo - "files" is not a list of files'
for f in files:
if type(f) != dict:
raise BTFailure, 'bad metainfo - bad file value'
length = f.get('length')
if type(length) not in ints or length < 0:
raise BTFailure, 'bad metainfo - bad length'
path = f.get('path')
if type(path) != list or path == []:
raise BTFailure, 'bad metainfo - bad path'
for p in path:
if type(p) != str:
raise BTFailure, 'bad metainfo - bad path dir'
if check_paths and not allowed_path_re.match(p):
raise BTFailure, 'path %s disallowed for security reasons' % p
f = ['/'.join(x['path']) for x in files]
f.sort()
i = iter(f)
try:
name2 = i.next()
while True:
name1 = name2
name2 = i.next()
if name2.startswith(name1):
if name1 == name2:
raise BTFailure, 'bad metainfo - duplicate path'
elif name2[len(name1)] == '/':
raise BTFailure('bad metainfo - name used as both '
'file and subdirectory name')
except StopIteration:
pass
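# Hedged illustration (editor's addition): the smallest single-file info dict
# that passes check_info; 'pieces' must be a whole number of 20-byte SHA-1
# digests, and all values below are placeholders:
#
#     info = {'pieces': '\x00' * 20,
#             'piece length': 262144,
#             'name': 'example.bin',
#             'length': 100000}
#     check_info(info)   # returns None; raises BTFailure on bad data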
def check_message(message, check_paths=True):
if type(message) != dict:
raise BTFailure, 'bad metainfo - wrong object type'
check_info(message.get('info'), check_paths)
if type(message.get('announce')) != str:
raise BTFailure, 'bad metainfo - no announce URL string'
def check_peers(message):
if type(message) != dict:
raise BTFailure
if message.has_key('failure reason'):
if type(message['failure reason']) != str:
raise BTFailure, 'non-text failure reason'
return
if message.has_key('warning message'):
if type(message['warning message']) != str:
raise BTFailure, 'non-text warning message'
peers = message.get('peers')
if type(peers) == list:
for p in peers:
if type(p) != dict:
raise BTFailure, 'invalid entry in peer list'
if type(p.get('ip')) != str:
raise BTFailure, 'invalid entry in peer list'
port = p.get('port')
            if type(port) not in ints or port <= 0:
raise BTFailure, 'invalid entry in peer list'
if p.has_key('peer id'):
peerid = p.get('peer id')
if type(peerid) != str or len(peerid) != 20:
raise BTFailure, 'invalid entry in peer list'
elif type(peers) != str or len(peers) % 6 != 0:
raise BTFailure, 'invalid peer list'
interval = message.get('interval', 1)
if type(interval) not in ints or interval <= 0:
raise BTFailure, 'invalid announce interval'
minint = message.get('min interval', 1)
if type(minint) not in ints or minint <= 0:
raise BTFailure, 'invalid min announce interval'
if type(message.get('tracker id', '')) != str:
raise BTFailure, 'invalid tracker id'
npeers = message.get('num peers', 0)
if type(npeers) not in ints or npeers < 0:
raise BTFailure, 'invalid peer count'
dpeers = message.get('done peers', 0)
if type(dpeers) not in ints or dpeers < 0:
raise BTFailure, 'invalid seed count'
last = message.get('last', 0)
if type(last) not in ints or last < 0:
raise BTFailure, 'invalid "last" entry'
| santazhang/BitTorrent-4.0.0-GPL | BitTorrent/btformats.py | Python | gpl-3.0 | 5,378 |
from __future__ import absolute_import
from .MockPrinter import MockPrinter
import mock
from random import random
class M201_Tests(MockPrinter):
def setUp(self):
self.printer.path_planner.native_planner.setAcceleration = mock.Mock()
self.printer.axis_config = self.printer.AXIS_CONFIG_XY
self.printer.speed_factor = 1.0
def exercise(self):
values = {}
gcode = "M201"
for i, v in enumerate(self.printer.acceleration):
axis = self.printer.AXES[i]
values[axis] = round(random() * 9000.0, 0)
gcode += " {:s}{:.0f}".format(axis, values[axis])
self.execute_gcode(gcode)
return {
"values": values,
"call_args": self.printer.path_planner.native_planner.setAcceleration.call_args[0][0]
}
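  # Hedged note (editor's addition): exercise() issues a command such as
  # "M201 X4821 Y903 Z7210 E1345" (one random value per configured axis) and
  # captures the acceleration vector handed to the mocked native planner.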
def test_gcodes_M201_all_axes_G21_mm(self):
test_data = self.exercise()
for i, axis in enumerate(self.printer.AXES):
expected = round(test_data["values"][axis] * self.printer.factor / 3600.0, 4)
result = test_data["call_args"][i]
self.assertEqual(expected, result,
axis + ": expected {:.0f} but got {:.0f}".format(expected, result))
def test_gcodes_M201_all_axes_G20_inches(self):
self.printer.factor = 25.4
test_data = self.exercise()
for i, axis in enumerate(self.printer.AXES):
expected = round(test_data["values"][axis] * self.printer.factor / 3600.0, 4)
result = test_data["call_args"][i]
self.assertEqual(expected, result,
axis + ": expected {:.0f} but got {:.0f}".format(expected, result))
def test_gcodes_M201_CoreXY(self):
self.printer.axis_config = self.printer.AXIS_CONFIG_CORE_XY
while True: # account for remote possibility of two equal random numbers for X and Y
test_data = self.exercise()
if test_data["values"]["X"] != test_data["values"]["Y"]:
break
self.assertEqual(
test_data["call_args"][0], test_data["call_args"][1],
"For CoreXY mechanics, X & Y values must match. But X={}, Y={} (mm/min / 3600)".format(
test_data["call_args"][0], test_data["call_args"][1]))
def test_gcodes_M201_H_belt(self):
self.printer.axis_config = self.printer.AXIS_CONFIG_H_BELT
while True: # account for remote possibility of two equal random numbers for X and Y
test_data = self.exercise()
if test_data["values"]["X"] != test_data["values"]["Y"]:
break
self.assertEqual(
test_data["call_args"][0], test_data["call_args"][1],
"For H-Belt mechanics, X & Y values must match. But X={}, Y={} (mm/min / 3600)".format(
test_data["call_args"][0], test_data["call_args"][1]))
def test_gcodes_M201_Delta(self):
self.printer.axis_config = self.printer.AXIS_CONFIG_DELTA
    while True: # account for super, ultra-duper remote possibility of three equal random numbers for X, Y and Z
test_data = self.exercise()
if (test_data["values"]["X"] + test_data["values"]["Y"] + test_data["values"]["Y"]) != (
test_data["values"]["X"] * 3):
break
self.assertEqual(
test_data["call_args"][0] + test_data["call_args"][1] + test_data["call_args"][2],
test_data["call_args"][0] * 3,
"For CoreXY mechanics, X & Y values must match. But X={}, Y={} (mm/min / 3600)".format(
test_data["call_args"][0], test_data["call_args"][1], test_data["call_args"][2]))
| intelligent-agent/redeem | tests/gcode/test_M201.py | Python | gpl-3.0 | 3,414 |
###################################################################################################
#
# PySpice - A Spice Package for Python
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
import logging
####################################################################################################
from ..Tools.StringTools import join_list, join_dict
from .NgSpice.Shared import NgSpiceShared
from .Server import SpiceServer
####################################################################################################
_module_logger = logging.getLogger(__name__)
####################################################################################################
class CircuitSimulation:
"""Define and generate the spice instruction to perform a circuit simulation.
.. warning:: In some cases NgSpice can perform several analyses one after the other. This case
is partially supported.
"""
_logger = _module_logger.getChild('CircuitSimulation')
##############################################
def __init__(self, circuit,
temperature=27,
nominal_temperature=27,
pipe=True,
):
self._circuit = circuit
self._options = {} # .options
self._initial_condition = {} # .ic
self._saved_nodes = ()
self._analysis_parameters = {}
self.temperature = temperature
self.nominal_temperature = nominal_temperature
if pipe:
self.options('NOINIT')
self.options(filetype='binary')
##############################################
@property
def circuit(self):
return self._circuit
##############################################
def options(self, *args, **kwargs):
for item in args:
self._options[str(item)] = None
for key, value in kwargs.items():
self._options[str(key)] = str(value)
##############################################
@property
def temperature(self):
return self._options['TEMP']
@temperature.setter
def temperature(self, value):
self._options['TEMP'] = value
##############################################
@property
def nominal_temperature(self):
return self._options['TNOM']
@nominal_temperature.setter
def nominal_temperature(self, value):
self._options['TNOM'] = value
##############################################
def initial_condition(self, **kwargs):
""" Set initial condition for voltage nodes.
Usage: initial_condition(node_name1=value, ...)
"""
for key, value in kwargs.items():
self._initial_condition['V({})'.format(str(key))] = str(value)
# Fixme: .nodeset
##############################################
def save(self, *args):
# Fixme: pass Node for voltage node, Element for source branch current, ...
"""Set the list of saved vectors.
If no *.save* line is given, then the default set of vectors is saved (node voltages and
voltage source branch currents). If *.save* lines are given, only those vectors specified
are saved.
Node voltages may be saved by giving the node_name or *v(node_name)*. Currents through an
independent voltage source (including inductor) are given by *i(source_name)* or
*source_name#branch*. Internal device data are accepted as *@dev[param]*.
If you want to save internal data in addition to the default vector set, add the parameter
*all* to the additional vectors to be saved.
"""
self._saved_nodes = list(args)
##############################################
@property
def save_currents(self):
""" Save all currents. """
return self._options.get('SAVECURRENTS', False)
@save_currents.setter
def save_currents(self, value):
if value:
self._options['SAVECURRENTS'] = True
else:
del self._options['SAVECURRENTS']
##############################################
def reset_analysis(self):
self._analysis_parameters.clear()
##############################################
def operating_point(self):
"""Compute the operating point of the circuit with capacitors open and inductors shorted."""
self._analysis_parameters['op'] = ''
##############################################
def dc_sensitivity(self, output_variable):
"""Compute the sensitivity of the DC operating point of a node voltage or voltage-source branch
current to all non-zero device parameters.
General form:
.. code::
.sens outvar
Examples:
.. code::
.SENS V(1, OUT)
.SENS I(VTEST)
"""
self._analysis_parameters['sens'] = (output_variable,)
##############################################
def ac_sensitivity(self, output_variable,
start_frequency, stop_frequency, number_of_points, variation):
"""Compute the sensitivity of the AC values of a node voltage or voltage-source branch
current to all non-zero device parameters.
General form:
.. code::
.sens outvar ac dec nd fstart fstop
.sens outvar ac oct no fstart fstop
.sens outvar ac lin np fstart fstop
Examples:
.. code::
            .SENS V(OUT) AC DEC 10 100 100k
"""
if variation not in ('dec', 'oct', 'lin'):
raise ValueError("Incorrect variation type")
self._analysis_parameters['sens'] = (output_variable,
variation, number_of_points, start_frequency, stop_frequency)
##############################################
def dc(self, **kwargs):
"""Compute the DC transfer fonction of the circuit with capacitors open and inductors shorted.
General form:
.. code::
.dc srcnam vstart vstop vincr [ src2 start2 stop2 incr2 ]
*srcnam* is the name of an independent voltage or current source, a resistor or the circuit
temperature. *vstart*, *vstop*, and *vincr* are the starting, final, and incrementing values
respectively.
A second source (*src2*) may optionally be specified with associated sweep parameters. In
this case, the first source is swept over its range for each value of the second source.
Examples:
.. code::
            .dc VIN 0.25 5.0 0.25
            .dc VDS 0 10 .5 VGS 0 5 1
            .dc VCE 0 10 .25 IB 0 10U 1U
.dc RLoad 1k 2k 100
.dc TEMP -15 75 5
"""
parameters = []
for variable, value_slice in kwargs.items():
variable_lower = variable.lower()
if variable_lower[0] in ('v', 'i', 'r') or variable_lower == 'temp':
parameters += [variable, value_slice.start, value_slice.stop, value_slice.step]
else:
raise NameError('Sweep variable must be a voltage/current source, '
'a resistor or the circuit temperature')
self._analysis_parameters['dc'] = parameters
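        # Hedged example (editor's addition): sweep an illustrative source
        # 'Vinput' from 0 V to 5 V in 10 mV steps using Python slice syntax,
        #     simulation.dc(Vinput=slice(0, 5, 0.01))
        # which stores ('Vinput', 0, 5, 0.01) and emits ".dc Vinput 0 5 0.01".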
##############################################
def ac(self, start_frequency, stop_frequency, number_of_points, variation):
# fixme: concise keyword ?
"""Perform a small-signal AC analysis of the circuit where all non-linear devices are linearized
around their actual DC operating point.
Note that in order for this analysis to be meaningful, at least one independent source must
have been specified with an AC value. Typically it does not make much sense to specify more
than one AC source. If you do, the result will be a superposition of all sources, thus
difficult to interpret.
Examples:
.. code::
.ac dec nd fstart fstop
.ac oct no fstart fstop
.ac lin np fstart fstop
The parameter *variation* must be either `dec`, `oct` or `lin`.
"""
if variation not in ('dec', 'oct', 'lin'):
raise ValueError("Incorrect variation type")
self._analysis_parameters['ac'] = (variation, number_of_points, start_frequency, stop_frequency)
##############################################
def transient(self, step_time, end_time, start_time=None, max_time=None,
use_initial_condition=False):
"""Perform a transient analysis of the circuit.
General Form:
.. code::
.tran tstep tstop <tstart <tmax>> <uic>
"""
if use_initial_condition:
uic = 'uic'
else:
uic = None
self._analysis_parameters['tran'] = (step_time, end_time, start_time, max_time, uic)
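        # Hedged example (editor's addition): a 1 ms transient with 1 us steps,
        #     simulation.transient(step_time=1e-6, end_time=1e-3)
        # stores (1e-06, 0.001, None, None, None); assuming join_list skips
        # None entries, the netlist line becomes ".tran 1e-06 0.001".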
##############################################
def __str__(self):
netlist = str(self._circuit)
        if self._options:
for key, value in self._options.items():
if value is not None:
netlist += '.options {} = {}\n'.format(key, value)
else:
netlist += '.options {}\n'.format(key)
        if self._initial_condition:
netlist += '.ic ' + join_dict(self._initial_condition) + '\n'
if self._saved_nodes:
netlist += '.save ' + join_list(self._saved_nodes) + '\n'
for analysis, analysis_parameters in self._analysis_parameters.items():
netlist += '.' + analysis + ' ' + join_list(analysis_parameters) + '\n'
netlist += '.end\n'
return netlist
####################################################################################################
class CircuitSimulator(CircuitSimulation):
""" This class implements a circuit simulator. Each analysis mode is performed by a method that
return the measured probes.
For *ac* and *transient* analyses, the user must specify a list of nodes using the *probes* key
argument.
"""
_logger = _module_logger.getChild('CircuitSimulator')
##############################################
def _run(self, analysis_method, *args, **kwargs):
self.reset_analysis()
if 'probes' in kwargs:
self.save(* kwargs.pop('probes'))
method = getattr(CircuitSimulation, analysis_method)
method(self, *args, **kwargs)
self._logger.debug('desk\n' + str(self))
##############################################
def operating_point(self, *args, **kwargs):
return self._run('operating_point', *args, **kwargs)
##############################################
def dc(self, *args, **kwargs):
return self._run('dc', *args, **kwargs)
##############################################
def dc_sensitivity(self, *args, **kwargs):
return self._run('dc_sensitivity', *args, **kwargs)
##############################################
def ac(self, *args, **kwargs):
return self._run('ac', *args, **kwargs)
##############################################
def transient(self, *args, **kwargs):
return self._run('transient', *args, **kwargs)
####################################################################################################
class SubprocessCircuitSimulator(CircuitSimulator):
_logger = _module_logger.getChild('SubprocessCircuitSimulator')
##############################################
def __init__(self, circuit,
temperature=27,
nominal_temperature=27,
spice_command='ngspice',
):
# Fixme: kwargs
super().__init__(circuit, temperature, nominal_temperature, pipe=True)
self._spice_server = SpiceServer()
##############################################
def _run(self, analysis_method, *args, **kwargs):
super()._run(analysis_method, *args, **kwargs)
raw_file = self._spice_server(str(self))
self.reset_analysis()
# for field in raw_file.variables:
# print field
return raw_file.to_analysis(self._circuit)
####################################################################################################
class NgSpiceSharedCircuitSimulator(CircuitSimulator):
_logger = _module_logger.getChild('NgSpiceSharedCircuitSimulator')
##############################################
def __init__(self, circuit,
temperature=27,
nominal_temperature=27,
ngspice_shared=None,
):
# Fixme: kwargs
super().__init__(circuit, temperature, nominal_temperature, pipe=False)
if ngspice_shared is None:
self._ngspice_shared = NgSpiceShared(send_data=False)
else:
self._ngspice_shared = ngspice_shared
##############################################
def _run(self, analysis_method, *args, **kwargs):
super()._run(analysis_method, *args, **kwargs)
self._ngspice_shared.load_circuit(str(self))
self._ngspice_shared.run()
self._logger.debug(str(self._ngspice_shared.plot_names))
self.reset_analysis()
if analysis_method == 'dc':
plot_name = 'dc1'
elif analysis_method == 'ac':
plot_name = 'ac1'
elif analysis_method == 'transient':
plot_name = 'tran1'
else:
raise NotImplementedError
return self._ngspice_shared.plot(plot_name).to_analysis()
####################################################################################################
#
# End
#
####################################################################################################
| thomaslima/PySpice | PySpice/Spice/Simulation.py | Python | gpl-3.0 | 14,801 |
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import time
import datetime
import sqlite3
import sickbeard
from sickbeard import db
from sickbeard import logger
from sickbeard.common import Quality
from sickbeard import helpers, exceptions, show_name_helpers, scene_exceptions
from sickbeard import name_cache
from sickbeard.exceptions import ex
#import xml.etree.cElementTree as etree
import xml.dom.minidom
from lib.tvdb_api import tvdb_api, tvdb_exceptions
from sickbeard.completparser import CompleteParser
class CacheDBConnection(db.DBConnection):
def __init__(self, providerName):
db.DBConnection.__init__(self, "cache.db")
# Create the table if it's not already there
try:
sql = "CREATE TABLE "+providerName+" (name TEXT, season NUMERIC, episodes TEXT, tvrid NUMERIC, tvdbid NUMERIC, url TEXT, time NUMERIC, quality TEXT, release_group TEXT, proper NUMERIC);"
self.connection.execute(sql)
self.connection.commit()
except sqlite3.OperationalError, e:
if str(e) != "table "+providerName+" already exists":
raise
# Create the table if it's not already there
try:
sql = "CREATE TABLE lastUpdate (provider TEXT, time NUMERIC);"
self.connection.execute(sql)
self.connection.commit()
except sqlite3.OperationalError, e:
if str(e) != "table lastUpdate already exists":
raise
class TVCache():
def __init__(self, provider):
self.provider = provider
self.providerID = self.provider.getID()
self.minTime = 10
def _getDB(self):
return CacheDBConnection(self.providerID)
def _clearCache(self):
myDB = self._getDB()
myDB.action("DELETE FROM "+self.providerID+" WHERE 1")
def _getRSSData(self):
data = None
return data
def _checkAuth(self, data):
return True
def _checkItemAuth(self, title, url):
return True
def updateCache(self):
if not self.shouldUpdate():
return
data = self._getRSSData()
# as long as the http request worked we count this as an update
if data:
self.setLastUpdate()
else:
return []
# now that we've loaded the current RSS feed lets delete the old cache
logger.log(u"Clearing "+self.provider.name+" cache and updating with new information")
self._clearCache()
if not self._checkAuth(data):
raise exceptions.AuthException("Your authentication info for "+self.provider.name+" is incorrect, check your config")
try:
parsedXML = xml.dom.minidom.parseString(data)
items = parsedXML.getElementsByTagName('item')
except Exception, e:
logger.log(u"Error trying to load "+self.provider.name+" RSS feed: "+ex(e), logger.ERROR)
logger.log(u"Feed contents: "+repr(data), logger.DEBUG)
return []
if parsedXML.documentElement.tagName != 'rss':
logger.log(u"Resulting XML from "+self.provider.name+" isn't RSS, not parsing it", logger.ERROR)
return []
for item in items:
self._parseItem(item)
def _translateLinkURL(self, url):
return url.replace('&','&')
def _parseItem(self, item):
"""Return None
parse a single rss feed item and add its info to the chache
will check for needed infos
"""
title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
url = helpers.get_xml_text(item.getElementsByTagName('link')[0])
self._checkItemAuth(title, url)
# we at least need a title and url, if one is missing stop
if not title or not url:
logger.log(u"The XML returned from the "+self.provider.name+" feed is incomplete, this result is unusable", logger.ERROR)
return
url = self._translateLinkURL(url)
logger.log(u"Adding item from RSS to cache: "+title, logger.DEBUG)
self._addCacheEntry(title, url)
def _getLastUpdate(self):
myDB = self._getDB()
sqlResults = myDB.select("SELECT time FROM lastUpdate WHERE provider = ?", [self.providerID])
if sqlResults:
lastTime = int(sqlResults[0]["time"])
else:
lastTime = 0
return datetime.datetime.fromtimestamp(lastTime)
def setLastUpdate(self, toDate=None):
if not toDate:
toDate = datetime.datetime.today()
myDB = self._getDB()
myDB.upsert("lastUpdate",
{'time': int(time.mktime(toDate.timetuple()))},
{'provider': self.providerID})
lastUpdate = property(_getLastUpdate)
def shouldUpdate(self):
# if we've updated recently then skip the update
if datetime.datetime.today() - self.lastUpdate < datetime.timedelta(minutes=self.minTime):
logger.log(u"Last update was too soon, using old cache: today()-"+str(self.lastUpdate)+"<"+str(datetime.timedelta(minutes=self.minTime)), logger.DEBUG)
return False
return True
def _addCacheEntry(self, name, url, season=None, episodes=None, tvdb_id=0, tvrage_id=0, quality=None, extraNames=[]):
"""Return False|None
        Parse the name and try to get as much info out of it as we can.
        Will use the anime regexes if this is called from fanzub.
        On a successful parse it will add the parsed info to cache.db.
        This doesn't mean the parsed result is useful.
"""
myDB = self._getDB()
show = None
if tvdb_id:
show = helpers.findCertainShow(sickbeard.showList, tvdb_id)
# if we don't have complete info then parse the filename to get it
for curName in [name] + extraNames:
cp = CompleteParser(show=show)
cpr = cp.parse(curName)
if cpr.sxxexx and cpr.parse_result:
break
else:
return False
episodeText = "|"+"|".join(map(str, cpr.episodes))+"|"
# get the current timestamp
curTimestamp = int(time.mktime(datetime.datetime.today().timetuple()))
myDB.action("INSERT INTO "+self.providerID+" (name, season, episodes, tvrid, tvdbid, url, time, quality, release_group, proper) VALUES (?,?,?,?,?,?,?,?,?,?)",
[name, cpr.season, episodeText, 0, cpr.tvdbid, url, curTimestamp, cpr.quality, cpr.release_group, int(cpr.is_proper)])
def searchCache(self, episode, manualSearch=False):
neededEps = self.findNeededEpisodes(episode, manualSearch)
return neededEps[episode]
def listPropers(self, date=None, delimiter="."):
myDB = self._getDB()
sql = "SELECT * FROM "+self.providerID+" WHERE proper = 1"
        if date is not None:
sql += " AND time >= "+str(int(time.mktime(date.timetuple())))
#return filter(lambda x: x['tvdbid'] != 0, myDB.select(sql))
return myDB.select(sql)
def findNeededEpisodes(self, episode = None, manualSearch=False):
neededEps = {}
if episode:
neededEps[episode] = []
myDB = self._getDB()
if not episode:
sqlResults = myDB.select("SELECT * FROM "+self.providerID)
else:
sqlResults = myDB.select("SELECT * FROM "+self.providerID+" WHERE tvdbid = ? AND season = ? AND episodes LIKE ?", [episode.show.tvdbid, episode.scene_season, "%|"+str(episode.scene_episode)+"|%"])
# for each cache entry
for curResult in sqlResults:
# skip non-tv crap (but allow them for Newzbin cause we assume it's filtered well)
if self.providerID != 'newzbin' and not show_name_helpers.filterBadReleases(curResult["name"]):
continue
# get the show object, or if it's not one of our shows then ignore it
showObj = helpers.findCertainShow(sickbeard.showList, int(curResult["tvdbid"]))
if not showObj:
continue
# get season and ep data (ignoring multi-eps for now)
curSeason = int(curResult["season"])
if curSeason == -1:
continue
curEp = curResult["episodes"].split("|")[1]
if not curEp:
continue
curEp = int(curEp)
curQuality = int(curResult["quality"])
# if the show says we want that episode then add it to the list
if not showObj.wantEpisode(curSeason, curEp, curQuality, manualSearch):
logger.log(u"Skipping "+curResult["name"]+" because we don't want an episode that's "+Quality.qualityStrings[curQuality], logger.DEBUG)
else:
if episode:
epObj = episode
else:
epObj = showObj.getEpisode(curSeason, curEp)
# build a result object
title = curResult["name"]
url = curResult["url"]
logger.log(u"Found result " + title + " at " + url)
result = self.provider.getResult([epObj])
result.url = url
result.name = title
result.quality = curQuality
result.release_group = curResult["release_group"]
# add it to the list
if epObj not in neededEps:
neededEps[epObj] = [result]
else:
neededEps[epObj].append(result)
return neededEps
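# --- Usage sketch (illustrative, not part of Sick Beard) ----------------------
# A concrete provider subclasses TVCache and overrides _getRSSData(); the
# `provider` object below is hypothetical:
#
#   cache = TVCache(provider)
#   cache.updateCache()                 # pull the RSS feed into cache.db
#   wanted = cache.findNeededEpisodes() # {episode: [result, ...]}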
| Pakoach/Sick-Beard-Animes | sickbeard/tvcache.py | Python | gpl-3.0 | 10,306 |
from DIRAC import S_ERROR, S_OK, gLogger
from DIRAC.DataManagementSystem.private.FTSAbstractPlacement import FTSAbstractPlacement, FTSRoute
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS3Servers
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
import random
class FTS3Placement( FTSAbstractPlacement ):
"""
This class manages all the FTS strategies, routes and what not
"""
__serverPolicy = "Random"
__nextServerID = 0
__serverList = None
__maxAttempts = 0
def __init__( self, csPath = None, ftsHistoryViews = None ):
"""
Call the init of the parent, and initialize the list of FTS3 servers
"""
self.log = gLogger.getSubLogger( "FTS3Placement" )
super( FTS3Placement, self ).__init__( csPath = csPath, ftsHistoryViews = ftsHistoryViews )
srvList = getFTS3Servers()
if not srvList['OK']:
self.log.error( srvList['Message'] )
self.__serverList = srvList.get( 'Value', [] )
self.maxAttempts = len( self.__serverList )
self.rssClient = ResourceStatus()
def getReplicationTree( self, sourceSEs, targetSEs, size, strategy = None ):
""" For multiple source to multiple destination, find the optimal replication
strategy.
:param sourceSEs : list of source SE
:param targetSEs : list of destination SE
:param size : size of the File
:param strategy : which strategy to use
        :returns: S_OK(dict) < route name : { dict with keys Ancestor, SourceSE, TargetSE, Strategy } >
For the time being, we are waiting for FTS3 to provide advisory mechanisms. So we just use
simple techniques
"""
# We will use a single random source
sourceSE = random.choice( sourceSEs )
tree = {}
for targetSE in targetSEs:
tree["%s#%s" % ( sourceSE, targetSE )] = { "Ancestor" : False, "SourceSE" : sourceSE,
"TargetSE" : targetSE, "Strategy" : "FTS3Simple" }
return S_OK( tree )
def refresh( self, ftsHistoryViews ):
"""
    Refresh, whatever that means... recalculate whatever you need,
    fetch the latest conf and what not.
"""
return super( FTS3Placement, self ).refresh( ftsHistoryViews = ftsHistoryViews )
def __failoverServerPolicy(self, attempt = 0):
"""
    Always returns the server at the given position (normally the first one)
:param attempt: position of the server in the list
"""
if attempt >= len( self.__serverList ):
raise Exception( "FTS3Placement.__failoverServerPolicy: attempt to reach non existing server index" )
return self.__serverList[attempt]
def __sequenceServerPolicy( self ):
"""
    Each time this policy is called, return the next server in the list
"""
fts3server = self.__serverList[self.__nextServerID]
self.__nextServerID = ( self.__nextServerID + 1 ) % len( self.__serverList )
return fts3server
def __randomServerPolicy(self):
"""
return a random server from the list
"""
return random.choice( self.__serverList )
def __chooseFTS3Server( self ):
"""
Choose the appropriate FTS3 server depending on the policy
"""
fts3Server = None
attempt = 0
    # FIXME : need to get the real value from RSS
ftsServerStatus = True
while not fts3Server and attempt < self.maxAttempts:
if self.__serverPolicy == 'Random':
fts3Server = self.__randomServerPolicy()
elif self.__serverPolicy == 'Sequence':
fts3Server = self.__sequenceServerPolicy()
elif self.__serverPolicy == 'Failover':
fts3Server = self.__failoverServerPolicy( attempt = attempt )
else:
self.log.error( 'Unknown server policy %s. Using Random instead' % self.__serverPolicy )
fts3Server = self.__randomServerPolicy()
if not ftsServerStatus:
self.log.warn( 'FTS server %s is not in good shape. Choose another one' % fts3Server )
fts3Server = None
attempt += 1
# FIXME : I need to get the FTS server status from RSS
# ftsStatusFromRss = rss.ftsStatusOrSomethingLikeThat
if fts3Server:
return S_OK( fts3Server )
return S_ERROR ( "Could not find an FTS3 server (max attempt reached)" )
def findRoute( self, sourceSE, targetSE ):
""" Find the appropriate route from point A to B
:param sourceSE : source SE
:param targetSE : destination SE
:returns S_OK(FTSRoute)
"""
fts3server = self.__chooseFTS3Server()
if not fts3server['OK']:
return fts3server
fts3server = fts3server['Value']
route = FTSRoute( sourceSE, targetSE, fts3server )
return S_OK( route )
def isRouteValid( self, route ):
"""
FIXME: until RSS is ready, I check manually the status
In FTS3, all routes are valid a priori.
If a route was not valid for some reason, then FTS would know it
thanks to the blacklist sent by RSS, and would deal with it itself.
:param route : FTSRoute
:returns S_OK or S_ERROR(reason)
"""
rAccess = self.rssClient.getStorageElementStatus( route.sourceSE, "ReadAccess" )
self.log.debug( "se read %s %s" % ( route.sourceSE, rAccess ) )
if not rAccess["OK"]:
self.log.error( rAccess["Message"] )
return rAccess
if rAccess["Value"][route.sourceSE]["ReadAccess"] not in ( "Active", "Degraded" ):
return S_ERROR( "Source SE is not readable" )
wAccess = self.rssClient.getStorageElementStatus( route.targetSE, "WriteAccess" )
self.log.debug( "se write %s %s" % ( route.targetSE, wAccess ) )
if not wAccess["OK"]:
self.log.error( wAccess["Message"] )
return wAccess
if wAccess["Value"][route.targetSE]["WriteAccess"] not in ( "Active", "Degraded" ):
return S_ERROR( "Target SE is not writable" )
return S_OK()
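# --- Usage sketch --------------------------------------------------------------
# Illustrative only: in production DIRAC constructs this object itself, and
# getFTS3Servers() needs a populated Configuration Service. The SE names are
# placeholders.
#
#   placement = FTS3Placement()
#   res = placement.findRoute( 'SOURCE-SE', 'TARGET-SE' )
#   if res['OK']:
#     route = res['Value']
#     print placement.isRouteValid( route )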
| vmendez/DIRAC | DataManagementSystem/private/FTS3/FTS3Placement.py | Python | gpl-3.0 | 5,870 |
# Copyright (c) 2019-2021 by Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Isolate Python 2.6 and 2.7 version-specific semantic actions here.
"""
from uncompyle6.semantics.consts import TABLE_DIRECT
def customize_for_version26_27(self, version):
########################################
# Python 2.6+
# except <condition> as <var>
# vs. older:
# except <condition> , <var>
#
# For 2.6 we use the older syntax which
# matches how we parse this in bytecode
########################################
if version > (2, 6):
TABLE_DIRECT.update({
'except_cond2': ( '%|except %c as %c:\n', 1, 5 ),
# When a generator is a single parameter of a function,
            # it doesn't need the surrounding parentheses.
'call_generator': ('%c%P', 0, (1, -1, ', ', 100)),
})
else:
TABLE_DIRECT.update({
'testtrue_then': ( 'not %p', (0, 22) ),
})
# FIXME: this should be a transformation
def n_call(node):
mapping = self._get_mapping(node)
key = node
for i in mapping[1:]:
key = key[i]
pass
if key.kind == 'CALL_FUNCTION_1':
# A function with one argument. If this is a generator,
# no parenthesis is needed.
args_node = node[-2]
if args_node == 'expr':
n = args_node[0]
if n == 'generator_exp':
node.kind = 'call_generator'
pass
pass
self.default(node)
self.n_call = n_call
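# --- Behaviour sketch -----------------------------------------------------------
# Illustrative only. With the 2.7+ table entry above, bytecode compiled from
#     except ValueError as e: ...
# deparses back to the `as` form, while on <= 2.6 the same handler keeps the
# older comma form `except ValueError, e:`. The n_call hook additionally turns
#     f(x for x in xs)
# back into a call without redundant parentheses around the generator argument.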
| rocky/python-uncompyle6 | uncompyle6/semantics/customize26_27.py | Python | gpl-3.0 | 2,225 |
# -*- coding: utf-8 -*-
#################### This file provides basic json / urlencode (plus xml / html) helpers
import sys,re
import json
from jsonpath_rw import jsonpath, parse # pip2/pip3 install jsonpath_rw
from lxml import etree
import platform
import hues # pip2/pip3 install hues -- colourised error reporting, used further down
sysstr = platform.system() ### detect the OS type (Windows/Linux). Entry points in this script assume LINUX conventions; conversion happens inside the functions
######################################### JSON
# jsonpath expressions can be obtained with the Firefox JSON-handle plugin, or from https://jsonpath.curiousconcept.com/
jsondoc=None
##################### return the matching json node
def jsonnode(jsonstr,jsonpath):
    if jsonpath[0:5]=="JSON.": ## accept JSON-handle style json paths
        jsonpath="$." + jsonpath[5:]
    if jsonpath[:1]=="@": ## direct leaf shorthand
        jsonpath="$.." + jsonpath[1:] + "[0]"
#print(jsonpath)
try:
jsonpath_expr = parse(jsonpath)
except:
return None
global jsondoc
jsondoc = json.loads(jsonstr)
    node=None # stays None when nothing matches
    for match in jsonpath_expr.find(jsondoc):
        node=match
    return node
##################### read the specified JSON node
def readjson_old(jsonstr,jsonpath): ##### jsonpath_rw misreads nodes containing Chinese text, so this original method is deprecated
    node=jsonnode(jsonstr,jsonpath)
    try:
        value=node.value
    except:
        print(u"JSON node not found: " + jsonpath)
        value=""
    return value
def initjson(jsonstr,jsonpath): # normalise the json string
    node=jsonnode(jsonstr,jsonpath)
    ### jsonpath_rw does not implement write operations, i.e.
    ### node.value=value
    ############# so writes are handled with regular expressions below
#print(dir(jsondoc))
if sys.version_info.major==2:
jsonstr=str(jsondoc).decode('unicode_escape')
else:
jsonstr=str(jsondoc)
jsonstr=jsonstr.replace("u'","'")
return(jsonstr,node)
def leftjsonpos(jsonstr,jsonpath): # position of the left edge just before the value at jsonpath
    (jsonstr,node)=initjson(jsonstr,jsonpath)
    try:
        repath=str(node.full_path)
    except:
        print(u"JSON node not found: " + jsonpath)
        return(-1,"")
    #print(repath)
    ### via a regular expression
    if repath[len(repath)-4:]==".[0]": # strip the trailing .[0] of a leaf node
        repath=repath[:len(repath)-4]
    #print(jsonstr)
    repathlist=repath.split(".")
    #print(repathlist)
    # example of a left-edge pattern: (.*l1('|\"):(.*).*('|\")l1_1('|\"): ('|\"|\[\"|\[\'))
    # matched against e.g. {'l2': {'l2_3': {}, 'l2_2': True, 'l2_1': None}, 'l1': {'l1_1': ['中文测试', 'l1_1_2'], 'l1_2': {'l1_2_1': 121}}}
    repath= "('|\"):(.*).*('|\")".join(repathlist) ## single or double quotes
    repath=".*" + repath + "('|\"): ('|\"|\[\"|\[\')" ### possible openers: ' " [" ['
    #repath=".*" + repath + "('|\"): ('|\")"
    repath="(" + repath + ")" ## the left-edge pattern
    #print(repath)
    matchs=re.match(repath,jsonstr,re.DOTALL) ### the DOTALL flag copes with newlines
    if matchs!=None:
        leftstr=matchs.groups()[0] ## left-side string
        #print(leftstr)
        return(len(leftstr),leftstr)
    else:
        ### not found
        print("Left edge of JSON node not found: " + jsonpath)
        return -1
def rightjsonpos(jsonstr,jsonpath): # position of the right edge just after the value at jsonpath
    (left,leftstr)=leftjsonpos(jsonstr,jsonpath) ## left-edge position
    if left==-1:
        print("Left edge of JSON node not found: " + jsonpath)
        return -1
    (jsonstr,node)=initjson(jsonstr,jsonpath) ## normalised full string (positions must be computed on the normalised string)
    ### closing single or double quote on the right
    pos1=jsonstr.find("'", left+1)
    pos2=jsonstr.find("\"", left+1)
    if pos2==-1:
        rightstr=jsonstr[pos1:] ## right-side string
pos=pos1
elif pos1==-1:
rightstr=jsonstr[pos2:]
pos=pos2
elif pos1<pos2:
rightstr=jsonstr[pos1:]
pos=pos1
else:
rightstr=jsonstr[pos2:]
pos=pos2
return(pos,rightstr)
def readjson(jsonstr,jsonpath):
    (left,leftstr)=leftjsonpos(jsonstr,jsonpath) ## left-edge position
    if left==-1:
        return ""
    (right,rightstr)=rightjsonpos(jsonstr,jsonpath) ## right-edge position
    if right==-1:
        return ""
    (jsonstr,node)=initjson(jsonstr,jsonpath) ## normalised full string (positions must be computed on the normalised string)
#print(left)
#print(right)
ret=jsonstr[left:right]
return ret
#### A second, purely string-based way to read a leaf node; more robust, but it only supports the @ notation
def readjson_once(jsonstr,jsonpath):
    if jsonpath[:1]=="@":
        jsonpath=jsonpath[1:]
    #print(jsonpath)
    pos=jsonstr.find("\""+jsonpath+"\"")
    tempstr=jsonstr[pos+len(jsonpath):] # find the key and slice to the end
    #print(tempstr)
    pos1=tempstr.find("}")
    pos2=tempstr.find(",")
    if pos1==-1:
        pos=pos2
    elif pos2==-1:
        pos=pos1
    else:
        pos=min(pos1,pos2) # earliest valid terminator position
    #print(pos)
    tempstr=tempstr[2:pos]
    #print(tempstr)
    tempstr=tempstr.replace(":","")
    #print(tempstr)
    if tempstr.find("\"")==-1: # numeric value
        text=str(int(tempstr))
    else: # string value
pos1=tempstr.find("\"")
pos2=tempstr[pos1+1:].find("\"")
#print(pos1,pos2)
text=tempstr[pos1+1:pos2+2]
return text
##################### write the specified JSON node ----------------- multi-node [n] paths are not handled yet
def writejson(jsonstr, jsonpath, value):
    (left,leftstr)=leftjsonpos(jsonstr,jsonpath) ## left-side string
    if left==-1:
        return jsonstr
    (right,rightstr)=rightjsonpos(jsonstr,jsonpath) ## right-side string
    if right==-1:
        return jsonstr
    #print(left)
    #print(right)
    #print("left: " + leftstr)
    #print("right: " +rightstr)
    res=leftstr +value + rightstr ## concatenate left + value + right
    ### convert single quotes to double quotes for json
    res=res.replace("'","\"")
    return res
def writejson_ffile(files, jsonpath, value): ### read a json file and modify the given value
data=open(files).read()
jsonstr=writejson(data, jsonpath, value)
return jsonstr
################################### URLCODE
def writeurlcode(data, path, value): ##### modify a value
    vardata=path+"=" + readurlcode(data, path)
    urlcodestr=data.replace(vardata, path+"=" + value)
    urlcodestr=urlcodestr.replace("\n","")
    urlcodestr=urlcodestr.replace("\r","")
    return urlcodestr
def readurlcode(data, path): ##### read a value
    value=""
    pos1=data.find(path+"=")
    #print(pos1)
    if pos1>=0 and pos1+len(path)+1<len(data): ## found, and not at the very end
        pos2=data.find("&",pos1+len(path)+1) # start searching past "path="
        #print(pos2)
        if pos2<0:
            value=data[pos1+len(path)+1:]
        else:
            value=data[pos1+len(path)+1:pos2]
    return value
def writeurlcode_ffile(files, path, value): ####### read from a file, then modify a value
    data=open(files).read()
    data=data.replace("\n","")
    data=data.replace("\r","")
    urlcodestr=writeurlcode(data, path, value)
return urlcodestr
#################################### HTML
def readhtml(data,xpath):
    etrees=etree.HTML(data)
    ##### lxml xpath quirks
    xpath=xpath.replace("html/body/","//") ### html/body/ prefixes (a firebug convention) don't work here
    xpaths=xpath.replace("/tbody/","/") ### strip every tbody
    #print(xpaths)
    ele= etrees.xpath(xpaths)
    if len(ele)==0: ### can't test against None here
        xpaths=xpath ### retry without stripping
        ele= etrees.xpath(xpaths)
    #####
    types="" ## only text extraction is supported for now
    #####
    try:
        if types=="":
            values=ele[0].text ### the element's text
        else:
            values=str(etrees.xpath(xpaths+ "/@" + types)[0]) ### the element's requested attribute
        #print(values)
    except:
        #print(u"** data capture error **")
        values="" ## return empty if the element doesn't exist
    if values==None or len(ele)==0:
        print("HTML node lookup failed: " + xpath)
        values=""
    return values
################################### auto-detect the data type
def whichtypes(data):
    ## detect the type
#print(data)
xmlre=data.count('<')
jsonre=data.count('{')
urlcodere=data.count('=')
data=data.strip()
types=""
if data[:6]=="<?xml ":
types="xml"
elif data.find("<html")>=0 and data.find("</html>")>0:
types="html"
elif xmlre>jsonre and xmlre>urlcodere:
types="xml"
elif jsonre>xmlre and jsonre>urlcodere:
types="json"
elif urlcodere>xmlre and urlcodere>jsonre:
types="urlcode"
#print(types)
return types
def writenode(data, path, value):
    ## detect the type
types=whichtypes(data)
if types=="xml":
data=writexml(data, path, value)
if types=="json":
data=writejson(data, path, value)
if types=="urlcode":
data=writeurlcode(data, path, value)
if types=="html":
hues.error("html格式只支持读节点")
return data
def readnode(data,path):
    ## detect the type
    types=whichtypes(data)
    if types=="":
        print("failed to detect the data type")
return ""
value=""
if types=="xml":
value=readxml(data,path)
if types=="json":
value=readjson(data,path)
if types=="urlcode":
value=readurlcode(data, path)
if types=="html":
value=readhtml(data,path)
return value
def writenode_ffile(files, path, value):
    ## detect the type
    data=open(files).read()
    types=whichtypes(data)
    if types=="":
        hues.error("failed to detect the file type")
return ""
if types=="xml":
data=writexml_ffile(files, path, value)
if types=="json":
data=writejson_ffile(files, path, value)
if types=="urlcode":
data=writeurlcode_ffile(files, path, value)
if types=="html":
hues.error("html格式只支持读节点")
return data
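if __name__ == '__main__':
    # Quick self-check sketch for the urlencode helpers above (the simplest of
    # the readers/writers; running it still needs this module's imports
    # installed).
    qs = "user=alice&token=abc123&lang=en"
    print(readurlcode(qs, "token"))        # -> abc123
    print(writeurlcode(qs, "lang", "zh"))  # -> user=alice&token=abc123&lang=zh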
| sheerfish999/torpedo | modules/dodata.py | Python | gpl-3.0 | 9,121 |
import json
import pytest
from plugins.fishbans import fishbans, bancount
from cloudbot import http
test_user = "notch"
test_api = """
{"success":true,"stats":{"username":"notch","uuid":"069a79f444e94726a5befca90e38aaf5","totalbans":11,"service":{"mcbans":0,"mcbouncer":11,"mcblockit":0,"minebans":0,"glizer":0}}}
"""
test_api_single = """
{"success":true,"stats":{"username":"notch","uuid":"069a79f444e94726a5befca90e38aaf5","totalbans":1,"service":{"mcbans":0,"mcbouncer":1,"mcblockit":0,"minebans":0,"glizer":0}}}
"""
test_api_none = """
{"success":true,"stats":{"username":"notch","uuid":"069a79f444e94726a5befca90e38aaf5","totalbans":0,"service":{"mcbans":0,"mcbouncer":0,"mcblockit":0,"minebans":0,"glizer":0}}}
"""
test_api_failed = """
{"success":false}
"""
bans_reply = "The user \x02notch\x02 has \x0211\x02 bans - http://fishbans.com/u/notch/"
count_reply = "Bans for \x02notch\x02: mcbouncer: \x0211\x02 - http://fishbans.com/u/notch/"
bans_reply_single = "The user \x02notch\x02 has \x021\x02 ban - http://fishbans.com/u/notch/"
bans_reply_failed = "Could not fetch ban data for notch."
count_reply_failed = "Could not fetch ban data for notch."
bans_reply_none = "The user \x02notch\x02 has no bans - http://fishbans.com/u/notch/"
count_reply_none = "The user \x02notch\x02 has no bans - http://fishbans.com/u/notch/"
def test_bans(monkeypatch):
""" tests fishbans with a successful API response having multiple bans
"""
def fake_http(url):
assert url == "http://api.fishbans.com/stats/notch/"
return json.loads(test_api)
monkeypatch.setattr(http, "get_json", fake_http)
assert fishbans(test_user) == bans_reply
def test_bans_single(monkeypatch):
""" tests fishbans with a successful API response having a single ban
"""
def fake_http(url):
assert url == "http://api.fishbans.com/stats/notch/"
return json.loads(test_api_single)
monkeypatch.setattr(http, "get_json", fake_http)
assert fishbans(test_user) == bans_reply_single
def test_bans_failed(monkeypatch):
""" tests fishbans with a failed API response
"""
def fake_http(url):
assert url == "http://api.fishbans.com/stats/notch/"
return json.loads(test_api_failed)
monkeypatch.setattr(http, "get_json", fake_http)
assert fishbans(test_user) == bans_reply_failed
def test_bans_none(monkeypatch):
""" tests fishbans with a successful API response having no bans
"""
def fake_http(url):
assert url == "http://api.fishbans.com/stats/notch/"
return json.loads(test_api_none)
monkeypatch.setattr(http, "get_json", fake_http)
assert fishbans(test_user) == bans_reply_none
def test_count(monkeypatch):
def fake_http(url):
assert url == "http://api.fishbans.com/stats/notch/"
return json.loads(test_api)
monkeypatch.setattr(http, "get_json", fake_http)
assert bancount(test_user) == count_reply
def test_count_failed(monkeypatch):
def fake_http(url):
assert url == "http://api.fishbans.com/stats/notch/"
return json.loads(test_api_failed)
monkeypatch.setattr(http, "get_json", fake_http)
assert bancount(test_user) == count_reply_failed
def test_count_no_bans(monkeypatch):
def fake_http(url):
assert url == "http://api.fishbans.com/stats/notch/"
return json.loads(test_api_none)
monkeypatch.setattr(http, "get_json", fake_http)
assert bancount(test_user) == count_reply_none
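# A minimal sketch of running just this module, assuming pytest is installed
# and the repository root is on PYTHONPATH so `plugins` and `cloudbot` resolve:
#
#   py.test plugins/test/test_fishbans.py -v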
| Zarthus/CloudBotRefresh | plugins/test/test_fishbans.py | Python | gpl-3.0 | 3,502 |
#!/usr/bin/env python
import random
from copy import copy
from cnfgen.formula.cnf import CNF
def Shuffle(F,
polarity_flips='shuffle',
variables_permutation='shuffle',
clauses_permutation='shuffle'):
"""Reshuffle the given formula F
Returns a formula logically equivalent to `F` with the
following transformations applied the following order:
1. Polarity flips, specified as one of the following
- string 'fixed': no flips is applied
- string 'shuffle': each variable is subjected
to a random flip of polarity
- a list of `-1` and `+1` of length equal to
the number of the variables in the formula `F`.
2. Variable shuffling, specified as one of the following
- string 'fixed': no shuffling
- string 'shuffle': the variable order is randomly permuted
- an sequence of integer which represents a permutation of [1,...,N],
where N is the number of the variable in the formula. The i-th variable
is mapped to the i-th index in the sequence.
3. Clauses shuffling, specified as one of the following
- string 'fixed': no shuffling
- string 'shuffle': the clauses are randomly permuted
- an sequence S of integer which represents a permutation of [0,...,M-1],
where M is the number of the variable in the formula. The i-th clause in F
is going to be at position S[i] in the new formula.
Parameters
----------
F : cnfgen.formula.cnf.CNF
formula to be shuffled
polarity_flips: string or iterable(int)
Specifies the flips of polarity
variables_permutation: string or iterable(int)
Specifies the permutation of the variables.
clauses_permutation: string or iterable(int)
Specifies the permutation of the clauses.
"""
# empty cnf
out = CNF()
out.header = copy(F.header)
if 'description' in out.header:
out.header['description'] += " (reshuffled)"
i = 1
while 'transformation {}'.format(i) in out.header:
i += 1
out.header['transformation {}'.format(i)] = "Formula reshuffling"
N = F.number_of_variables()
M = F.number_of_clauses()
out.update_variable_number(N)
# Manage the parameters
    perr = 'polarity_flips is either \'fixed\', \'shuffle\' or in {-1,+1}^N'
verr = 'variables_permutation is either \'fixed\', \'shuffle\' or a permutation of [1,...,N]'
cerr = 'clauses_permutation is either \'fixed\', \'shuffle\' or a permutation of [0,...,M-1]'
# polarity flips
if polarity_flips == 'fixed':
polarity_flips = [1] * N
elif polarity_flips == 'shuffle':
polarity_flips = [random.choice([-1, 1]) for x in range(N)]
else:
if len(polarity_flips) != N:
raise ValueError(perr)
for i in range(N):
if abs(polarity_flips[i]) != 1:
raise ValueError(perr)
# variables permutation
if variables_permutation == 'fixed':
variables_permutation = range(1, N+1)
elif variables_permutation == 'shuffle':
variables_permutation = list(range(1, N+1))
random.shuffle(variables_permutation)
else:
if len(variables_permutation) != N:
raise ValueError(verr)
tmp = sorted(variables_permutation)
for i in range(N):
if i+1 != tmp[i]:
raise ValueError(verr)
#
# permutation of clauses
#
if clauses_permutation == 'fixed':
clauses_mapping = ((i, i) for i in range(M))
elif clauses_permutation == 'shuffle':
tmp = list(range(M))
random.shuffle(tmp)
clauses_mapping = sorted(enumerate(tmp), key=lambda x: x[1])
else:
if len(clauses_permutation) != M:
raise ValueError(cerr)
tmp = sorted(clauses_permutation)
for i in range(M):
if i != tmp[i]:
raise ValueError(cerr)
clauses_mapping = sorted(enumerate(clauses_permutation), key=lambda x: x[1])
# precompute literal mapping
substitution = [None] * (2 * N + 1)
for i in range(1, N+1):
substitution[i] = polarity_flips[i-1] * variables_permutation[i-1]
substitution[-i] = -substitution[i]
# load clauses
for (old, new) in clauses_mapping:
assert new == out.number_of_clauses()
out.add_clause(substitution[lit] for lit in F[old])
return out
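if __name__ == '__main__':
    # Minimal self-check sketch, assuming the integer-literal clause API used
    # above (a clause is a list of signed variable indices).
    F = CNF()
    F.update_variable_number(3)
    F.add_clause([1, -2])
    F.add_clause([2, 3])
    G = Shuffle(F, clauses_permutation='fixed')
    print([G[i] for i in range(G.number_of_clauses())])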
| MassimoLauria/cnfgen | cnfgen/transformations/shuffle.py | Python | gpl-3.0 | 4,446 |
#!/usr/bin/python
#
# bravialib - Will Cooke - Whizzy Labs - @8none1
# http://www.whizzy.org
# Copyright Will Cooke 2016. Released under the GPL.
#
#
# My attempt to talk to the Sony Bravia web API.
#
# This is designed to be used by a long running process
# So there is a potentially slow start-up time but then it should be quick enough
# at the expense of some memory usage
#
# The TV will give you access based on the device_id and nickname once you are authorised I think.
# The TV will need to be already switched on for this to work.
#
#
# Thanks:
# https://github.com/aparraga/braviarc/
# https://docs.google.com/viewer?a=v&pid=sites&srcid=ZGlhbC1tdWx0aXNjcmVlbi5vcmd8ZGlhbHxneDoyNzlmNzY3YWJlMmY1MjZl
#
# Some useful resources:
# A tidied up packet capture I did from the iphone app: http://paste.ubuntu.com/23417464/plain/
#
#
# TODO:
# Move logging out of prints and in to logging
#
import requests
from requests.auth import HTTPBasicAuth
import json
from xml.dom import minidom
import socket
import struct
import time
class MockResponse(object):
def __init__(self, status_code):
self.status_code = status_code
class Bravia(object):
def __init__(self, hostname = None, ip_addr = None, mac_addr = None):
self.ip_addr = ip_addr
self.hostname = hostname
self.mac_addr = mac_addr # You don't *have* to specify the MAC address as once we are paired via IP we can find
# it from the TV but it will only be stored for this session. If the TV is off and you are running this script
# from cold - you will need the MAC to wake the TV up.
if self.ip_addr is None and self.hostname is not None:
self.ip_addr = self._lookup_ip_from_hostname(self.hostname)
self.device_id = "WebInterface:001"
self.nickname = "IoT Remote Controller Interface"
self.endpoint = 'http://'+self.ip_addr
self.cookies = None
self.x_auth_psk = None # If you're using PSK instead of cookies you need to set this.
self.DIAL_cookie = {}
self.packet_id = 1
self.device_friendly_name = ""
self._JSON_HEADER = {'content-type':'application/json', 'connection':'close'}
self._TIMEOUT = 10
self.remote_controller_code_lookup = {}
self.app_lookup = {}
self.input_map = {}
self.dvbt_channels = {}
self.paired = False
def _debug_request(self, r):
# Pass a Requests response in here to see what happened
print "\n\n\n"
print "------- What was sent out ---------"
print r.request.headers
print r.request.body
print "---------What came back -----------"
print r.status_code
print r.headers
print r.text
print "-----------------------------------"
print "\n\n\n"
def _lookup_ip_from_hostname(self, hostname):
ipaddr = socket.gethostbyname(hostname)
        if ipaddr != '127.0.0.1':
return ipaddr
else:
# IP lookup failed
return False
def _build_json_payload(self,method, params = [], version="1.0"):
return {"id":self.packet_id, "method":method, "params":params,
"version":version}
def is_available(self):
# Try to find out if the TV is actually on or not. Pinging the TV would require
# this script to run as root, so not doing that. This function return True or
# False depending on if the box is on or not.
payload = self._build_json_payload("getPowerStatus")
try:
# Using a shorter timeout here so we can return more quickly
r = self.do_POST(url="/sony/system", payload = payload, timeout=2)
data = r.json()
if data.has_key('result'):
if data['result'][0]['status'] == "standby":
# TV is in standby mode, and so not on.
return False
elif data['result'][0]['status'] == "active":
# TV really is on
return True
else:
# Assume it's not on.
print "Uncaught result"
return False
if data.has_key('error'):
if 404 in data['error']:
# TV is probably booting at this point - so not available yet
return False
elif 403 in data['error']:
# A 403 Forbidden is acceptable here, because it means the TV is responding to requests
return True
else:
print "Uncaught error"
return False
return True
except requests.exceptions.ConnectTimeout:
print "No response, TV is probably off"
return False
except requests.exceptions.ConnectionError:
print "TV is certainly off."
return False
except requests.exceptions.ReadTimeout:
print "TV is on but not accepting commands yet"
return False
except ValueError:
print "Didn't get back JSON as expected"
# This might lead to false negatives - need to check
return False
def do_GET(self, url=None, headers=None, auth=None, cookies=None, timeout=None):
if url is None: return False
if url[0:4] != "http": url=self.endpoint+url
if cookies is None and self.cookies is not None: cookies=self.cookies
        if headers is None: headers = {}  # must exist before the PSK header can be attached
        if self.x_auth_psk is not None: headers['X-Auth-PSK'] = self.x_auth_psk
        if timeout is None: timeout = self._TIMEOUT
        r = requests.get(url, headers=headers, cookies=cookies, auth=auth, timeout=timeout)
return r
def do_POST(self, url=None, payload=None, headers=None, auth=None, cookies=None, timeout=None):
if url is None: return False
if type(payload) is dict: payload = json.dumps(payload)
if headers is None: headers = self._JSON_HEADER # If you don't want any extra headers pass in ""
if cookies is None and self.cookies is not None: cookies=self.cookies
if self.x_auth_psk is not None: headers['X-Auth-PSK']=self.x_auth_psk
if timeout is None: timeout = self._TIMEOUT
if url[0:4] != "http": url = self.endpoint+url # if you want to pass just the path you can, otherwise pass a full url and it will be used
self.packet_id += 1 # From packet captures, this increments on each request, so its a good idea to use this method all the time
if auth is not None:
            r = requests.post(url, data=payload, headers=headers, cookies=cookies, auth=auth, timeout=timeout)
else:
r = requests.post(url, data=payload, headers=headers, cookies=cookies, timeout=timeout)
print r
return r
def connect(self):
# TODO: What if the TV is off and we can't connect?
#
# From looking at packet captures what seems to happen is:
# 1. Try and connect to the accessControl interface with the "pinRegistration" part in the payload
# 2. If you get back a 200 *and* the return data looks OK then you have already authorised
# 3. If #2 is a 200 you get back an auth token. I think that this token will expire, so we might need to
# re-connect later on - given that this script will be running for a long time. Hopefully you won't
# need to get a new PIN number ever.
# 4. If #2 was a 401 then you need to authorise, and then you do that by sending the PIN on screen as
# a base64 encoded BasicAuth using a blank username (e.g. "<username>:<password" -> ":1234")
# If that works, you should get a cookie back.
# 5. Use the cookie in all subsequent requests. Note there is an issue with this. The cookie is for
# path "/sony/" *but* the Apps are run from a path "/DIAL/sony/" so I try and fix this by adding a
# second cookie with that path and the same auth data.
if self.x_auth_psk is None: # We have not specified a PSK therefore we have to use Cookies
payload = self._build_json_payload("actRegister",
[{"clientid":self.device_id,"nickname":self.nickname},
[{"value":"no","function":"WOL"},
{"value":"no","function":"pinRegistration"}]])
try:
r = self.do_POST(url='/sony/accessControl', payload=payload)
except requests.exceptions.ConnectTimeout:
print "No response, TV is probably off"
return None, False
except requests.exceptions.ConnectionError:
print "TV is certainly off."
return None, False
if r.status_code == 200:
# Rather handily, the TV returns a 200 if the TV is in stand-by but not really on :)
try:
if "error" in r.json(): #.keys():
if "not power-on" in r.json()['error']:
# TV isn't powered up
r = self.wakeonlan()
print "TV not on! Have sent wakeonlan, probably try again in a mo."
# TODO: make this less crap
return None,False
except:
raise
# If we get here then We are already paired so get the new token
self.paired = True
self.cookies = r.cookies
# Also add the /DIAL/ path cookie
# Looks like requests doesn't handle two cookies with the same name ('auth') in one jar
# so going to have a dict for the DIAL cookie and pass around as needed. :/
a = r.headers['Set-Cookie'].split(';') # copy the cookie data headers
for each in a:
if len(each) > 0:
b = each.split('=')
self.DIAL_cookie[b[0].strip()] = b[1]
elif r.status_code == 401:
print "We are not paired!"
return r,False
elif r.status_code == 404:
# Most likely the TV hasn't booted yet
print("TV probably hasn't booted yet")
return r,False
else: return None,False
else: # We are using a PSK
self.paired = True
self.cookies = None
self.DIAL_cookie = None
r = None
# Populate some data now automatically.
print "Getting DMR info..."
self.get_dmr()
print "Getting sysem info..."
self.get_system_info()
print "Populating remote control codes..."
self.populate_controller_lookup()
print "Enumerating TV inputs..."
self.get_input_map()
print "Populating apps list..."
self.populate_apps_lookup()
print "Populating channel list..."
self.get_channel_list()
print "Matching HD channels..."
self.create_HD_chan_lookups() # You might not want to do this if you don't use Freeview in the UK
print "Done initialising TV data."
return r,True
def start_pair(self):
# This should prompt the TV to display the pairing screen
payload = self._build_json_payload("actRegister",
[{"clientid":self.device_id,"nickname":self.nickname},
[{"value":"no","function":"WOL"}]])
r = self.do_POST(url='/sony/accessControl', payload=payload)
if r.status_code == 200:
print "Probably already paired"
return r,True
if r.status_code == 401:
return r,False
else:
return None,False
def complete_pair(self, pin):
# The user should have a PIN on the screen now, pass it in here to complete the pairing process
payload = self._build_json_payload("actRegister",
[{"clientid":self.device_id, "nickname":self.nickname},
[{"value":"no", "function":"WOL"}]])
self.auth = HTTPBasicAuth('',pin) # Going to keep this in the object, just in case we need it again later
r = self.do_POST(url='/sony/accessControl', payload=payload, auth=self.auth)
if r.status_code == 200:
print("have paired")
self.paired = True
# let's call connect again to get the cookies all set up properly
a,b = self.connect()
if b is True:
return r,True
else: return r,False
else:
return None,False
def get_system_info(self):
payload = self._build_json_payload("getSystemInformation")
r = self.do_POST(url="/sony/system", payload=payload)
if r.status_code == 200:
self.system_info = r.json()['result'][0]
            if self.mac_addr is None: self.mac_addr = self.system_info['macAddr']
return self.system_info
else:
return False
def get_input_map(self):
payload = self._build_json_payload("getCurrentExternalInputsStatus")
r = self.do_POST(url="/sony/avContent", payload=payload)
if r.status_code == 200:
for each in r.json()['result'][0]:
self.input_map[each['title']] = {'label':each['label'], 'uri':each['uri']}
return True
else:
return False
def get_input_uri_from_label(self, label):
for each in self.input_map:
if self.input_map[each]['label'] == label:
return self.input_map[each]['uri']
print "Didnt match the input name."
return None
def set_external_input(self, uri):
payload = self._build_json_payload("setPlayContent", [{"uri":uri}])
r = self.do_POST(url="/sony/avContent", payload=payload)
if r.status_code == 200:
if "error" in r.json():
# Something didnt work. The JSON will tell you what.
return False
else:
return True
else:
return False
def get_dmr(self):
r = self.do_GET('http://'+self.ip_addr+':52323/dmr.xml')
self.dmr_data = minidom.parseString(r.text)
# XML. FFS. :(
self.device_friendly_name = self.dmr_data.getElementsByTagName('friendlyName')[0].childNodes[0].data
a = self.dmr_data.getElementsByTagNameNS('urn:schemas-sony-com:av','X_IRCCCode')
for each in a:
name = each.getAttribute("command")
value = each.firstChild.nodeValue
self.remote_controller_code_lookup[name.lower()] = value
# Not much more interesting stuff here really, but see: https://aydbe.com/assets/uploads/2014/11/json.txt
# and https://github.com/bunk3r/braviapy
# Maybe /sony/system/setLEDIndicatorStatus would be fun?
#"setLEDIndicatorStatus" -> {"mode":"string","status":"bool"}
# Maybe mode is a hex colour? and bool is on/off?
def populate_controller_lookup(self):
payload = self._build_json_payload("getRemoteControllerInfo")
r = self.do_POST(url='/sony/system', payload=payload)
if r.status_code == 200:
for each in r.json()['result'][1]:
self.remote_controller_code_lookup[each['name'].lower()] = each['value']
return True
else:
return False
def do_remote_control(self,action):
# Pass in the action name, such as:
# "PowerOff" "Mute" "Pause" "Play"
# You can probably guess what these would be, but if not:
# print <self>.remote_controller_code_lookup
action = action.lower()
if action in self.remote_controller_code_lookup: #.keys():
ircc_code = self.remote_controller_code_lookup[action]
else: return False
header = {'SOAPACTION': '"urn:schemas-sony-com:service:IRCC:1#X_SendIRCC"'}
url = "/sony/IRCC"
body = '<?xml version="1.0"?>' # Look at all this crap just to send a remote control code...
body += '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
body += '<s:Body>'
body += '<u:X_SendIRCC xmlns:u="urn:schemas-sony-com:service:IRCC:1">'
body += '<IRCCCode>' + ircc_code + '</IRCCCode>'
body += '</u:X_SendIRCC>'
body += '</s:Body>'
body += '</s:Envelope>'
try:
r = self.do_POST(url=url, payload=body, headers=header)
except requests.exceptions.ConnectTimeout:
print("Connect timeout error")
r = MockResponse(200)
except requests.exceptions.ConnectionError:
print("Connect error")
r = MockResponse(200)
if r.status_code == 200:
return True
else:
return False
def populate_apps_lookup(self):
# Interesting note: If you don't do this (presumably just calling the
# URL is enough) then apps won't actually launch and you will get a 404
# error back from the TV. Once you've called this it starts working.
self.app_lookup={}
r = self.do_GET(url="/DIAL/sony/applist", cookies=self.DIAL_cookie)
if r.status_code == 200:
app_xml_data = minidom.parseString(r.text.encode('utf-8'))
for each in app_xml_data.getElementsByTagName('app'):
appid = each.getElementsByTagName('id')[0].firstChild.data
appname = each.getElementsByTagName('name')[0].firstChild.data
try: iconurl = each.getElementsByTagName('icon_url')[0].firstChild.data
except: iconurl = None
self.app_lookup[appname] = {'id':appid, 'iconurl':iconurl}
return True
else:
return False
def load_app(self, app_name):
# Pass in the name of the app, the most useful ones on my telly are:
# "Amazon Instant Video" , "Netflix", "BBC iPlayer", "Demand 5"
if self.app_lookup == {}: self.populate_apps_lookup() # This must happen before apps will launch
try:
app_id = self.app_lookup[app_name]['id']
except KeyError:
return False
print "Trying to load app:", app_id
headers = {'Connection':'close'}
r = self.do_POST(url="/DIAL/apps/"+app_id, headers=headers,
cookies=self.DIAL_cookie)
print r.status_code
print r.headers
print r
if r.status_code == 201:
return True
else:
return False
def get_app_status(self):
payload = self._build_json_payload("getApplicationStatusList")
r = self.do_POST(url="/sony/appControl", payload=payload)
return r.json()
def get_channel_list(self):
# This only supports dvbt for now...
# First, we find out how many channels there are
payload = self._build_json_payload("getContentCount",
[{"target":"all", "source":"tv:dvbt"}], version="1.1")
r = self.do_POST(url="/sony/avContent", payload=payload)
chan_count = int(r.json()['result'][0]['count'])
# It seems to only return the channels in lumps of 50, and some of those returned are blank?
chunk_size = 50
loops = int(chan_count / chunk_size) + (chan_count % chunk_size > 0) # Sneaky round up trick, the mod > 0 evaluates to int 1
chunk = 0
for x in range(loops):
payload = self._build_json_payload("getContentList",
[{"stIdx":chunk, "source":"tv:dvbt", "cnt":chunk_size,
"target":"all" }], version="1.2")
r = self.do_POST(url="/sony/avContent", payload=payload)
a = r.json()['result'][0]
for each in a:
if each['title'] == "": continue # We get back some blank entries, so just ignore them
if self.dvbt_channels.has_key(each['title']):
# Channel has already been added, we only want to keep the one with the lowest chan_num.
# The TV seems to return channel data for channels it can't actually receive (e.g. out of
# area local BBC channels). Trying to tune to these gives an error.
if int(each['dispNum']) > int(self.dvbt_channels[each['title']]['chan_num']):
# This is probably not a "real" channel we care about, so skip it.
continue
#self.dvbt_channels[each['title']] = {'chan_num':each['dispNum'], 'uri':each['uri']}
else:
self.dvbt_channels[each['title']] = {'chan_num':each['dispNum'], 'uri':each['uri']}
chunk += chunk_size
def create_HD_chan_lookups(self):
# This should probably be in the script that imports this library not in
# the library itself, but I wanted this feature, so I'm chucking it in
# here. This probably only works for Freeview in the UK.
# Use case to demonstrate why this is here: You want to use Alexa to
# switch the channel. Naturally, you want the HD channel if there is
# one but you don't want to have to say "BBC ONE HD" because that would
# be stupid. So you just say "BBC ONE" and the script does the work to
# find the HD version for you.
for each in self.dvbt_channels.iteritems():
hd_version = "%s HD" % each[0] # e.g. "BBC ONE" -> "BBC ONE HD"
if hd_version in self.dvbt_channels:
# Extend the schema by adding a "hd_uri" key
self.dvbt_channels[each[0]]['hd_uri'] = self.dvbt_channels[hd_version]['uri']
def get_channel_uri(self, title):
if self.dvbt_channels == {}: self.get_channel_list()
try:
return self.dvbt_channels[title]['uri']
except KeyError:
return False
def wakeonlan(self, mac=None):
# Thanks: Taken from https://github.com/aparraga/braviarc/blob/master/braviarc/braviarc.py
# Not using another library for this as it's pretty small...
if mac is None and self.mac_addr is not None:
mac = self.mac_addr
print "Waking MAC: " + mac
addr_byte = mac.split(':')
hw_addr = struct.pack('BBBBBB', int(addr_byte[0], 16),
int(addr_byte[1], 16),
int(addr_byte[2], 16),
int(addr_byte[3], 16),
int(addr_byte[4], 16),
int(addr_byte[5], 16))
msg = b'\xff' * 6 + hw_addr * 16
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
socket_instance.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
socket_instance.sendto(msg, ('<broadcast>', 9))
socket_instance.close()
return True
def poweron(self):
# Convenience function to switch the TV on and block until it's ready
# to accept commands.
if self.paired is False:
print "You can only call this function once paired with the TV"
return False
elif self.paired is True:
ready = False
if self.is_available() is True:
# If we're already on, return now.
return True
self.wakeonlan()
for x in range(10):
if self.is_available() is True:
print "TV now available"
return True
else:
print "Didn't get a response. Trying again in 10 seconds. (Attempt "+str(x+1)+" of 10)"
time.sleep(10)
if ready is False:
print "Couldnt connect in a timely manner. Giving up"
return False
else:
return True
def get_client_ip(self):
host_ip = [(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
return host_ip
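if __name__ == '__main__':
    # Minimal usage sketch; the IP and MAC below are placeholders. Pairing is
    # only needed once -- afterwards connect() reuses the stored credentials.
    tv = Bravia(ip_addr='192.168.1.100', mac_addr='00:11:22:33:44:55')
    r, ok = tv.connect()
    if not ok:
        tv.start_pair()  # the TV now displays a PIN
        pin = raw_input("PIN shown on the TV: ")
        tv.complete_pair(pin)
    if tv.poweron():
        tv.do_remote_control('Mute')
        tv.load_app('BBC iPlayer')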
| 8none1/bravialib | bravialib.py | Python | gpl-3.0 | 24,487 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 Max Weller
## Copyright (C) 2019 DreamSourceLab <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import re
import sigrokdecode as srd
ann_cmdbit, ann_databit, ann_cmd, ann_data, ann_warning = range(5)
class Decoder(srd.Decoder):
api_version = 3
id = 'sda2506'
name = 'SDA2506'
longname = 'Siemens SDA 2506-5'
desc = 'Serial nonvolatile 1-Kbit EEPROM.'
license = 'gplv2+'
inputs = ['logic']
outputs = []
tags = ['IC', 'Memory']
channels = (
{'id': 'clk', 'name': 'CLK', 'desc': 'Clock'},
{'id': 'd', 'name': 'DATA', 'desc': 'Data'},
{'id': 'ce', 'name': 'CE#', 'desc': 'Chip-enable'},
)
annotations = (
('cmdbit', 'Command bit'),
('databit', 'Data bit'),
('cmd', 'Command'),
('data', 'Data byte'),
('warnings', 'Human-readable warnings'),
)
annotation_rows = (
('bits', 'Bits', (ann_cmdbit, ann_databit)),
('commands', 'Commands', (ann_cmd,)),
('data', 'Data', (ann_data,)),
('warnings', 'Warnings', (ann_warning,)),
)
def __init__(self):
self.samplerate = None
self.reset()
def reset(self):
self.cmdbits = []
self.databits = []
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putbit(self, ss, es, typ, value):
self.put(ss, es, self.out_ann, [typ, ['%s' % (value)]])
def putdata(self, ss, es):
value = 0
for i in range(8):
value = (value << 1) | self.databits[i]
self.put(ss, es, self.out_ann, [ann_data, ['%02X' % (value)]])
def decode_bits(self, offset, width):
out = 0
for i in range(width):
out = (out << 1) | self.cmdbits[offset + i][0]
return (out, self.cmdbits[offset + width - 1][1], self.cmdbits[offset][2])
def decode_field(self, name, offset, width):
val, ss, es = self.decode_bits(offset, width)
self.put(ss, es, self.out_ann, [ann_data, ['%s: %02X' % (name, val)]])
return val
def decode(self):
while True:
# Wait for CLK edge or CE edge.
(clk, d, ce) = self.wait([{0: 'e'}, {2: 'e'}])
if (self.matched & (0b1 << 0)) and ce == 1 and clk == 1:
# Rising clk edge and command mode.
bitstart = self.samplenum
self.wait({0: 'f'})
self.cmdbits = [(d, bitstart, self.samplenum)] + self.cmdbits
if len(self.cmdbits) > 24:
self.cmdbits = self.cmdbits[0:24]
self.putbit(bitstart, self.samplenum, ann_cmdbit, d)
elif (self.matched & (0b1 << 0)) and ce == 0 and clk == 0:
# Falling clk edge and data mode.
bitstart = self.samplenum
                (clk, d, ce) = self.wait([{'skip': int(25e-6 * self.samplerate)}, {0: 'r'}, {2: 'e'}]) # Wait up to 25 us for data ready.
if (self.matched & (0b1 << 2)) and not (self.matched & 0b011):
self.wait([{0: 'r'}, {2: 'e'}])
if len(self.databits) == 0:
self.datastart = bitstart
self.databits = [d] + self.databits
self.putbit(bitstart, self.samplenum, ann_databit, d)
if len(self.databits) == 8:
self.putdata(self.datastart, self.samplenum)
self.databits = []
elif (self.matched & (0b1 << 1)) and ce == 0:
# Chip enable edge.
try:
self.decode_field('addr', 1, 7)
self.decode_field('CB', 0, 1)
if self.cmdbits[0][0] == 0:
# Beginning read command.
self.decode_field('read', 1, 7)
self.put(self.cmdbits[7][1], self.samplenum,
self.out_ann, [ann_cmd, ['read' ]])
elif d == 0:
# Beginning write command.
self.decode_field('data', 8, 8)
addr, ss, es = self.decode_bits(1, 7)
data, ss, es = self.decode_bits(8, 8)
cmdstart = self.samplenum
self.wait({2: 'r'})
self.put(cmdstart, self.samplenum, self.out_ann,
[ann_cmd, ['Write to %02X: %02X' % (addr, data)]])
else:
# Beginning erase command.
val, ss, es = self.decode_bits(1, 7)
cmdstart = self.samplenum
self.wait({2: 'r'})
self.put(cmdstart, self.samplenum, self.out_ann,
[ann_cmd, ['Erase: %02X' % (val)]])
self.databits = []
except Exception as ex:
self.reset()
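# Command-word layout, as reconstructed from the decode_field() calls above
# (a reading aid only): bit 0 carries the CB flag, bits 1..7 the 7-bit cell
# address, and bits 8..15 the data byte used by write commands. Reads clock
# the data byte back out on falling CLK edges while CE# is held low.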
| DreamSourceLab/DSView | libsigrokdecode4DSL/decoders/sda2506/pd.py | Python | gpl-3.0 | 5,750 |
#!/usr/bin/env python
from setuptools import setup
setup(name='edith',
version='0.1.0a1',
description='Edit-distance implementation with edit-path retrieval',
author='david weil (tenuki)',
author_email='[email protected]',
url='https://github.com/tenuki/edith',
py_modules=['edith'],
license="GNU General Public License v3.0",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
]
      )
| tenuki/edith | setup.py | Python | gpl-3.0 | 554
from setuptools import setup, find_packages
from fccsmap import __version__
test_requirements = []
with open('requirements-test.txt') as f:
test_requirements = [r for r in f.read().splitlines()]
setup(
name='fccsmap',
version=__version__,
author='Joel Dubowy',
license='GPLv3+',
author_email='[email protected]',
packages=find_packages(),
scripts=[
'bin/fccsmap'
],
package_data={
'fccsmap': ['data/*.nc']
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Programming Language :: Python :: 3.8",
"Operating System :: POSIX",
"Operating System :: MacOS"
],
url='https://github.com/pnwairfire/fccsmap/',
description='supports the look-up of FCCS fuelbed information by lat/lng or vector geo spatial data.',
install_requires=[
"afscripting>=2.0.0",
# Note: numpy and gdal must now be installed manually beforehand
"shapely==1.7.1",
"pyproj==3.0.0.post1",
"rasterstats==0.15.0"
],
dependency_links=[
"https://pypi.airfire.org/simple/afscripting/",
],
tests_require=test_requirements
)
| pnwairfire/fccsmap | setup.py | Python | gpl-3.0 | 1,292 |
from __future__ import with_statement
import os
import sys
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# We need to go back a dir to get the config.
_this_dir = os.path.dirname(os.path.abspath(__file__))
_parent_dir = os.path.join(_this_dir, '../')
for _p in (_this_dir, _parent_dir):
if _p not in sys.path:
sys.path.append(_p)
from config import API, APP
# Bind some vars for our migrations to use for environmental setup
API_URL_WITH_SLASH = API.LISTEN_URL + "/"
#
# n.b. this is only currently doing API migrations
#
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def merged_ini_py_conf():
"""Update some settings that would be fetched from the ini with those from our
application config.
    This could maybe be cleaner with some clever .setdefault('key', default_value) calls.
:return: merged settings dict
"""
conf = config.get_section(config.config_ini_section)
if hasattr(API, 'SQLALCHEMY_DATABASE_URI'):
conf['sqlalchemy.url'] = API.SQLALCHEMY_DATABASE_URI
return conf
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=merged_ini_py_conf().get('sqlalchemy.url'),
target_metadata=target_metadata,
literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
merged_ini_py_conf(),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
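# Typical invocation sketch (API migrations only, as noted above), assuming an
# alembic.ini whose script_location points at this multidb/ directory:
#
#   alembic upgrade head
#   alembic revision -m "describe change"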
| tristanfisher/yams | multidb/env.py | Python | gpl-3.0 | 2,875 |
#!/usr/bin/env python
#
# !!!!!!!!! WARNING !!!!!!!!!!!!!!!
# This Script was bastardized To Read Password From /home/bspaans/.googlecode
#
#
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: [email protected] (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = '[email protected] (David Anderson)'
import http.client
import os.path
import optparse
import getpass
import base64
import sys
def get_svn_config_dir():
pass
def get_svn_auth(project_name, config_dir):
"""Return (username, password) for project_name in config_dir.
!!!!! CHANGED !!!!!!!!"""
f = open("/home/bspaans/.googlecode", 'r')
usr_data = f.read().split(":")
f.close()
return (usr_data[0], usr_data[1][:-1])
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
      error occurred.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of [email protected]. If the login provided
# is in the full user@domain form, strip it down.
if user_name.endswith('@gmail.com'):
user_name = user_name[:user_name.index('@gmail.com')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
  # b64encode requires bytes on Python 3; decode back to str for the header value.
  auth_token = base64.b64encode(('%s:%s' % (user_name, password)).encode('utf-8')).decode('ascii')
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = http.client.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
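# Example call (hypothetical file, project and credentials), sketching upload():
#   status, reason, url = upload('dist/mypkg-0.1.tar.gz', 'myproject',
#                                'alice', 's3cr3t', 'Release 0.1',
#                                labels=['Type-Archive'])
#   if status == 201:
#       print('Uploaded to %s' % url)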
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
fields is a sequence of (name, value) pairs. file is the path of
the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  # Decode as latin-1 so arbitrary bytes survive the str join below; http.client
  # re-encodes str request bodies as iso-8859-1, preserving the original bytes.
  file_content = f.read().decode('latin-1')
  f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# The upload server determines the mime-type, no need to set it.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + BOUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
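# The body built above looks roughly like this sketch, where <BOUNDARY> is the
# BOUNDARY constant and the file part comes after the form fields:
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="summary"
#
#   <summary text>
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="filename"; filename="<basename>"
#   Content-Type: application/octet-stream
#
#   <file bytes>
#   --<BOUNDARY>--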
def upload_find_auth(file_path, project_name, summary, labels=None,
config_dir=None, user_name=None, tries=1):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
If config_dir is None, try get_svn_config_dir(); if it is 'none', skip
trying the Subversion configuration entirely. If user_name is not None, use
it for the first attempt; prompt for subsequent attempts.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
config_dir: Path to Subversion configuration directory, 'none', or None.
user_name: Your Google account name.
tries: How many attempts to make.
"""
if config_dir != 'none':
# Try to load username/password from svn config for first try.
if config_dir is None:
config_dir = get_svn_config_dir()
(svn_username, password) = get_svn_auth(project_name, config_dir)
if user_name is None:
# If username was not supplied by caller, use svn config.
user_name = svn_username
else:
# Just initialize password for the first try.
password = None
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print('Please enter your googlecode.com password.')
print('** Note that this is NOT your Gmail account password! **')
print('It is the password you use to access Subversion repositories,')
print('and can be found here: http://code.google.com/hosting/settings')
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [http.client.FORBIDDEN]:
      # Use up one attempt and loop for another try.
tries = tries - 1
else:
# We're done.
break
return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('--config-dir', dest='config_dir', metavar='DIR',
help='read svn auth data from DIR'
' ("none" means not to use svn auth data)')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of labels to attach to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.config_dir, options.user)
if url:
print('The file was uploaded successfully.')
print('URL: %s' % url)
return 0
else:
print('An error occurred. Your file was not uploaded.')
print('Google Code upload server said: %s (%s)' % (reason, status))
return 1
if __name__ == '__main__':
sys.exit(main())
| anthonyt/mingus-counterpoint | googlecode_upload.py | Python | gpl-3.0 | 9,994 |
# Utilities ------------------------------------------------------------------ #
import math
def clamp(val, min, max):
if val <= min:
return min
elif val >= max:
return max
return val
def fixAngle(angle):
while angle > 180.0:
angle -= 360.0
while angle < -180.0:
angle += 360.0
return angle
def diffAngle(angle1, angle2):
return fixAngle(angle1 - angle2)
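# Quick sanity checks (illustrative):
#   clamp(5, 0, 3)         -> 3
#   fixAngle(270.0)        -> -90.0
#   diffAngle(10.0, 350.0) -> 20.0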
# Utilities ------------------------------------------------------------------ #
| CertainlyUncertain/Kinetic-Gunner-Gunner-of-Angst | utils.py | Python | gpl-3.0 | 502 |
import re, shlex
import hangups
from hangupsbot.utils import text_to_segments
from hangupsbot.handlers import handler, StopEventHandling
from hangupsbot.commands import command
default_bot_alias = '/bot'
def find_bot_alias(aliases_list, text):
"""Return True if text starts with bot alias"""
command = text.split()[0].lower()
for alias in aliases_list:
if alias.lower().startswith('regex:') and re.search(alias[6:], command, re.IGNORECASE):
return True
elif command == alias.lower():
return True
return False
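# Examples (illustrative): a literal alias and a regex alias both match, and the
# alias must be the first word of the message:
#   find_bot_alias(['/bot', 'regex:^!'], '!ping me')  -> True
#   find_bot_alias(['/bot'], 'hello /bot')            -> False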
def is_bot_alias_too_long(text):
    """Return True if the message uses the long default bot alias."""
    return default_bot_alias in text
@handler.register(priority=5, event=hangups.ChatMessageEvent)
def handle_command(bot, event):
"""Handle command messages"""
# Test if message is not empty
if not event.text:
return
# Get list of bot aliases
aliases_list = bot.get_config_suboption(event.conv_id, 'commands_aliases')
if not aliases_list:
aliases_list = [default_bot_alias]
# Test if message starts with bot alias
if not find_bot_alias(aliases_list, event.text):
return
# Test if command handling is enabled
if not bot.get_config_suboption(event.conv_id, 'commands_enabled'):
raise StopEventHandling
# Parse message
line_args = shlex.split(event.text, posix=False)
# Test if command length is sufficient
if len(line_args) < 2:
yield from event.conv.send_message(
            text_to_segments(_('{}: What can I help you with?').format(event.user.full_name))
)
raise StopEventHandling
# Test if user has permissions for running command
commands_admin_list = command.get_admin_commands(bot, event.conv_id)
if commands_admin_list and line_args[1].lower() in commands_admin_list:
admins_list = bot.get_config_suboption(event.conv_id, 'admins')
if event.user_id.chat_id not in admins_list:
yield from event.conv.send_message(
                text_to_segments(_('{}: You do not have permission.').format(event.user.full_name))
)
raise StopEventHandling
# Run command
yield from command.run(bot, event, *line_args[1:])
#Check whether the bot alias is too long or not
if is_bot_alias_too_long(event.text):
yield from event.conv.send_message(
            text_to_segments(_('**Tip**: You can use /b, /, !, etc. instead of /bot'))
)
# Prevent other handlers from processing event
raise StopEventHandling
| ildelusion/JSBot | hangupsbot/handlers/commands.py | Python | gpl-3.0 | 2,643 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from statemachine import _Statemachine
class Windows8_1StateMachine(_Statemachine):
def __init__(self, params):
_Statemachine.__init__(self, params)
def _list_share(self):
return super(Windows8_1StateMachine, self)._list_share()
def _list_running(self):
return super(Windows8_1StateMachine, self)._list_running()
def _list_drives(self):
return super(Windows8_1StateMachine, self)._list_drives()
def _list_network_drives(self):
return super(Windows8_1StateMachine, self)._list_network_drives()
def _list_sessions(self):
return super(Windows8_1StateMachine, self)._list_sessions()
def _list_scheduled_jobs(self):
return super(Windows8_1StateMachine, self)._list_scheduled_jobs()
def _list_network_adapters(self):
return super(Windows8_1StateMachine, self)._list_network_adapters()
def _list_arp_table(self):
return super(Windows8_1StateMachine, self)._list_arp_table()
def _list_route_table(self):
return super(Windows8_1StateMachine, self)._list_route_table()
def _list_sockets_network(self):
return super(Windows8_1StateMachine, self)._list_sockets_network()
def _list_sockets_services(self):
return super(Windows8_1StateMachine, self)._list_services()
def _list_kb(self):
return super(Windows8_1StateMachine, self)._list_kb()
def csv_list_drives(self):
super(Windows8_1StateMachine, self)._csv_list_drives(self._list_drives())
def csv_list_network_drives(self):
super(Windows8_1StateMachine, self)._csv_list_network_drives(self._list_network_drives())
def csv_list_share(self):
super(Windows8_1StateMachine, self)._csv_list_share(self._list_share())
def csv_list_running_proccess(self):
super(Windows8_1StateMachine, self)._csv_list_running_process(self._list_running())
def csv_hash_running_proccess(self):
        super(Windows8_1StateMachine, self)._csv_hash_running_process(self._list_running())
def csv_list_sessions(self):
super(Windows8_1StateMachine, self)._csv_list_sessions(self._list_sessions())
def csv_list_arp_table(self):
super(Windows8_1StateMachine, self)._csv_list_arp_table(self._list_arp_table())
def csv_list_route_table(self):
super(Windows8_1StateMachine, self)._csv_list_route_table(self._list_route_table())
def csv_list_sockets_networks(self):
super(Windows8_1StateMachine, self)._csv_list_sockets_network(self._list_sockets_network())
def csv_list_services(self):
super(Windows8_1StateMachine, self)._csv_list_services(self._list_services())
def csv_list_kb(self):
super(Windows8_1StateMachine, self)._csv_list_kb(self._list_kb())
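# Usage sketch (params comes from the FastIR configuration and is assumed here):
#   sm = Windows8_1StateMachine(params)
#   sm.csv_list_running_proccess()  # delegates to the parent CSV writer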
| SeungGiJeong/SK_FastIR | health/windows8_1StateMachine.py | Python | gpl-3.0 | 2,827 |
#!/usr/bin/env python
# Copyright (C) 2012 nwmaltego Developer.
# This file is part of nwmaltego - https://github.com/bostonlink/nwmaltego
# See the file 'LICENSE' for copying permission.
# Netwitness Threat to Filename Maltego transform
# Author: David Bressler (@bostonlink)
import sys
import urllib2, urllib, json
from datetime import datetime, timedelta
from lib import nwmodule
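# Invocation sketch (Maltego local transform; the argument values are made up):
#   python nw_threat_2_file_attachment.py "Suspicious Executable" "ip=10.0.0.5#x=y"
# argv[1] is the threat name and argv[2] is a '#'-separated field list, parsed below.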
# Maltego XML Header
trans_header = """<MaltegoMessage>
<MaltegoTransformResponseMessage>
<Entities>"""
# Authenticate to the NW Concentrator via HTTP basic auth
nwmodule.nw_http_auth()
# NW REST API Query amd results
risk_name = sys.argv[1]
fields = sys.argv[2].split('#')
date_t = datetime.today()
tdelta = timedelta(days=1)
diff = date_t - tdelta
diff = "'" + diff.strftime('%Y-%b-%d %H:%M:%S') + "'-'" + date_t.strftime('%Y-%b-%d %H:%M:%S') + "'"
ip = ''  # default so the Entity XML below stays valid when no ip field is given
for i in fields:
if 'ip' in i:
parse = i.split('=')
ip = parse[1]
where_clause = '(time=%s) && risk.warning="%s" && ip.src=%s || ip.dst=%s' % (diff, risk_name, ip, ip)
else:
where_clause = '(time=%s) && risk.warning="%s"' % (diff, risk_name)
field_name = 'attachment'
json_data = json.loads(nwmodule.nwValue(0, 0, 25, field_name, 'application/json', where_clause))
file_list = []
# Print the Maltego XML Header
print trans_header
for d in json_data['results']['fields']:
value = d['value'].decode('ascii')
if value in file_list:
continue
elif value == "<none>":
pass
else:
# Kind of a hack but hey it works!
print """ <Entity Type="netwitness.NWFilename">
<Value>%s</Value>
<AdditionalFields>
<Field Name="risk" DisplayName="Threat Name">%s</Field>
<Field Name="ip" DisplayName="IP Address">%s</Field>
<Field Name="metaid1" DisplayName="Meta id1">%s</Field>
<Field Name="metaid2" DisplayName="Meta id2">%s</Field>
<Field Name="type" DisplayName="Type">%s</Field>
<Field Name="count" DisplayName="Count">%s</Field>
</AdditionalFields>
</Entity>""" % (value, risk_name, ip, d['id1'], d['id2'], d['type'], d['count'])
file_list.append(value)
# Maltego transform XML footer
trans_footer = """ </Entities>
</MaltegoTransformResponseMessage>
</MaltegoMessage> """
print trans_footer
| bostonlink/nwmaltego | nw_threat_2_file_attachment.py | Python | gpl-3.0 | 2405 |
import pytest
from learn.models import Task, ProblemSet, Domain
from learn.models import Student, TaskSession, Skill
from learn.mastery import has_mastered, get_level
from learn.mastery import get_first_unsolved_mission
from learn.mastery import get_first_unsolved_phase
from learn.mastery import get_current_mission_phase
# Django DB is always needed for many-to-many relations (chunks.tasks)
@pytest.mark.django_db
def test_has_mastered__initially_not():
ps = ProblemSet.objects.create()
ps.add_task()
student = Student.objects.create()
assert not has_mastered(student, ps)
# Django DB is always needed for many-to-many relations (student.skills).
# TODO: find a way to test the following without using the DB.
@pytest.mark.django_db
def test_has_mastered__when_skill_is_1():
ps = ProblemSet.objects.create()
student = Student.objects.create()
Skill.objects.create(student=student, chunk=ps, value=1.0)
assert has_mastered(student, ps)
@pytest.mark.django_db
def test_has_mastered__mastered_parts():
m1 = ProblemSet.objects.create()
p1 = m1.add_part()
p2 = m1.add_part()
student = Student.objects.create()
Skill.objects.create(student=student, chunk=m1, value=1)
Skill.objects.create(student=student, chunk=p1, value=1)
Skill.objects.create(student=student, chunk=p2, value=1)
assert has_mastered(student, m1)
@pytest.mark.django_db
def test_has_mastered__not_when_skill_is_low():
ps = ProblemSet.objects.create()
student = Student.objects.create()
Skill.objects.create(student=student, chunk=ps, value=0.5)
assert not has_mastered(student, ps)
@pytest.mark.django_db
def test_has_mastered__not_unmastered_subchunk():
m1 = ProblemSet.objects.create()
p1 = m1.add_part()
p2 = m1.add_part()
student = Student.objects.create()
Skill.objects.create(student=student, chunk=m1, value=1)
Skill.objects.create(student=student, chunk=p1, value=1)
Skill.objects.create(student=student, chunk=p2, value=0)
assert not has_mastered(student, m1)
@pytest.mark.django_db
def test_get_first_unsolved_mission__single():
mission = ProblemSet.objects.create()
domain = Domain.objects.create()
domain.problemsets.set([mission])
student = Student.objects.create()
assert get_first_unsolved_mission(domain, student) == mission
@pytest.mark.django_db
def test_get_first_unsolved_mission__all_unsolved():
mission1 = ProblemSet.objects.create(section='1')
mission2 = ProblemSet.objects.create(section='2')
domain = Domain.objects.create()
domain.problemsets.set([mission1, mission2])
student = Student.objects.create()
assert get_first_unsolved_mission(domain, student) == mission1
@pytest.mark.django_db
def test_get_first_unsolved_mission__first_solved():
mission1 = ProblemSet.objects.create(section='1')
mission2 = ProblemSet.objects.create(section='2')
domain = Domain.objects.create()
domain.problemsets.set([mission1, mission2])
student = Student.objects.create()
Skill.objects.create(student=student, chunk=mission1, value=1)
assert get_first_unsolved_mission(domain, student) == mission2
@pytest.mark.django_db
def test_get_first_unsolved_phase__all_unsolved():
m1 = ProblemSet.objects.create()
p1 = m1.add_part()
m1.add_part()
student = Student.objects.create()
assert get_first_unsolved_phase(m1, student) == p1
@pytest.mark.django_db
def test_get_first_unsolved_phase__first_solved():
m1 = ProblemSet.objects.create()
p1 = m1.add_part()
p2 = m1.add_part()
student = Student.objects.create()
Skill.objects.create(student=student, chunk=p1, value=1)
assert get_first_unsolved_phase(m1, student) == p2
@pytest.mark.django_db
def test_get_first_unsolved_phase__all_solved():
m1 = ProblemSet.objects.create()
p1 = m1.add_part()
student = Student.objects.create()
Skill.objects.create(student=student, chunk=p1, value=1)
Skill.objects.create(student=student, chunk=m1, value=1)
    assert get_first_unsolved_phase(m1, student) is None
@pytest.mark.django_db
def test_get_mission_phase__all_solved():
domain = Domain.objects.create()
m1 = ProblemSet.objects.create()
p1 = m1.add_part()
domain.problemsets.set([m1, p1])
student = Student.objects.create()
Skill.objects.create(student=student, chunk=p1, value=1)
Skill.objects.create(student=student, chunk=m1, value=1)
assert get_current_mission_phase(domain, student) == (None, None)
@pytest.mark.django_db
def test_get_level_for_new_student():
mission = ProblemSet.objects.create()
domain = Domain.objects.create()
domain.problemsets.set([mission])
student = Student.objects.create()
assert get_first_unsolved_mission(domain, student) == mission
assert get_level(domain, student) == 1
@pytest.mark.django_db
def test_level_is_number_of_solved_missions_plus_1():
m1 = ProblemSet.objects.create()
m2 = ProblemSet.objects.create()
m3 = ProblemSet.objects.create()
domain = Domain.objects.create()
domain.problemsets.set([m1, m2, m3])
student = Student.objects.create()
Skill.objects.create(student=student, chunk=m1, value=1)
Skill.objects.create(student=student, chunk=m3, value=1)
assert get_level(domain, student) == 3
| adaptive-learning/robomission | backend/learn/tests/test_mastery.py | Python | gpl-3.0 | 5,319 |
#!/usr/bin/env python
import os
import json
from flask import Flask, abort, jsonify, request, g, url_for
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.httpauth import HTTPBasicAuth
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
# file() is Python 2 only; open() works on both Python 2 and 3.
with open('colors.json', 'r') as colors_file:
    colors = json.load(colors_file)
# initialization
app = Flask(__name__)
app.config['SECRET_KEY'] = 'the quick brown fox jumps over the lazy dog'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
# extensions
db = SQLAlchemy(app)
auth = HTTPBasicAuth()
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), index=True)
password_hash = db.Column(db.String(64))
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration=600):
s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data['id'])
return user
@auth.verify_password
def verify_password(username_or_token, password):
# first try to authenticate by token
user = User.verify_auth_token(username_or_token)
if not user:
# try to authenticate with username/password
user = User.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
@app.route('/api/users', methods=['POST'])
def new_user():
username = request.json.get('username')
password = request.json.get('password')
if username is None or password is None:
abort(400) # missing arguments
if User.query.filter_by(username=username).first() is not None:
abort(400) # existing user
user = User(username=username)
user.hash_password(password)
db.session.add(user)
db.session.commit()
return (jsonify({'username': user.username}), 201,
{'Location': url_for('get_user', id=user.id, _external=True)})
@app.route('/api/users/<int:id>', methods = ['GET'])
def get_user(id):
user = User.query.get(id)
if not user:
abort(400)
return jsonify({'username': user.username})
@app.route('/api/token', methods = ['GET'])
@auth.login_required
def get_auth_token():
print(request.headers.get('Date'))
token = g.user.generate_auth_token(600)
return jsonify({'token': token.decode('ascii'), 'duration': 600})
@app.route('/api/resource')
@auth.login_required
def get_resource():
return jsonify({'data': 'Hello, %s!' % g.user.username})
@app.route('/api/colors', methods = ['GET'])
def get_colors():
print(colors)
return jsonify( { "data" : colors })
@app.route('/api/colors/<name>', methods = ['GET'])
def get_color(name):
for color in colors:
if color["name"] == name:
return jsonify( color )
return jsonify( { 'error' : True } )
@app.route('/api/colors', methods= ['POST'])
@auth.login_required
def create_color():
print('create color')
color = {
'name': request.json['name'],
'value': request.json['value']
}
print(color)
colors.append(color)
return jsonify( color ), 201
@app.route('/api/colors/<name>', methods= ['PUT'])
@auth.login_required
def update_color(name):
    print('update color')
for color in colors:
if color["name"] == name:
color['value'] = request.json.get('value', color['value'])
print(color)
return jsonify( color )
return jsonify( { 'error' : True } )
@app.route('/api/colors/<name>', methods=['DELETE'])
@auth.login_required
def delete_color(name):
    print('delete color')
for color in colors:
if color["name"] == name:
colors.remove(color)
print(color)
return jsonify(color)
return jsonify( { 'error' : True } )
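# Usage sketch with curl (host/port assume the default app.run() settings):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"username":"alice","password":"secret"}' http://127.0.0.1:5000/api/users
#   curl -u alice:secret http://127.0.0.1:5000/api/token
#   curl -u alice:secret -X POST -H "Content-Type: application/json" \
#        -d '{"name":"teal","value":"#008080"}' http://127.0.0.1:5000/api/colors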
if __name__ == '__main__':
if not os.path.exists('db.sqlite'):
db.create_all()
app.run(debug = True)
| emilio-simoes/qt-rest-client | tools/test-service/server.py | Python | gpl-3.0 | 4,688 |
#!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# A sword KJV indexed search module.
# Copyright (C) 2012-2013 Josiah Gordon <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
from collections import defaultdict
from xml.dom.minidom import parseString
from textwrap import fill
from os.path import dirname as os_dirname
from os.path import join as os_join
import dbm
import sys
import re
import Sword
from .utils import *
data_path = os_join(os_dirname(__file__), 'data')
def book_gen():
""" A Generator function that yields book names in order.
"""
# Yield a list of all the book names in the bible.
verse_key = Sword.VerseKey('Genesis 1:1')
for testament in [1, 2]:
for book in range(1, verse_key.bookCount(testament) + 1):
yield(verse_key.bookName(testament, book))
# book_list = list(book_gen())
try:
    book_list = []
    for book in book_gen():
        book_list.append(book)
except Exception:
    # Sword may be unavailable at import time; fall back to an empty book list.
    pass
# Key function used to sort a list of verse references.
def sort_key(ref):
""" Sort verses by book.
"""
try:
book, chap_verse = ref.rsplit(' ', 1)
chap, verse = chap_verse.split(':')
val = '%02d%03d%03d' % (int(book_list.index(book)), int(chap),
int(verse))
return val
except Exception as err:
print('Error sorting "%s": %s' % (ref, err), file=sys.stderr)
sys.exit()
def parse_verse_range(verse_ref_list):
""" Uses VerseKey ParseVerseList to parse the reference list.
"""
# Make the argument a parseable string.
if isinstance(verse_ref_list, str):
verse_ref_str = verse_ref_list
else:
verse_ref_str = ' '.join(verse_ref_list)
verse_key = Sword.VerseKey()
# Parse the list.
# args: verse_list, default_key, expand_range, chapter_as_verse?
verse_list = verse_key.parseVerseList(verse_ref_str, 'Genesis 1:1', True,
False)
verse_set = set()
for i in range(verse_list.getCount()):
key = Sword.VerseKey(verse_list.getElement(i))
if key:
upper = key.getUpperBound().getText()
lower = key.getLowerBound().getText()
if upper != lower:
verse_set.update(VerseIter(lower, upper))
else:
verse_set.add(key.getText())
return verse_set
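# Example (illustrative): a range reference expands to its member verses:
#   parse_verse_range('John 3:16-17') -> {'John 3:16', 'John 3:17'}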
def add_context(ref_set, count=0):
""" Add count number of verses before and after each reference.
"""
if count == 0:
return ref_set
# Make a copy to work on.
clone_set = set(ref_set)
for ref in ref_set:
start = Sword.VerseKey(ref)
end = Sword.VerseKey(ref)
# Pass the beginning of the book.
start.decrement()
start.decrement(count - 1)
# Pass the end of the book.
end.increment()
end.increment(count - 1)
clone_set.update(VerseIter(start.getText(), end.getText()))
return clone_set
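# Example (illustrative): one verse of context on each side:
#   add_context({'John 3:16'}, 1) -> {'John 3:15', 'John 3:16', 'John 3:17'}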
def mod_to_dbm(module: str, key_iter: iter, path: str) -> str:
""" Reads all the elements of key_iter from the module and saves them to a
dbm file.
"""
lookup = Lookup(module_name=module)
dbm_name = '%s/%s.dbm' % (path, module)
with IndexDbm(dbm_name, 'nf') as dbm_file:
for key in key_iter:
dbm_file[key] = lookup.get_raw_text(key)
return dbm_name
def make_daily_dbm(path: str=INDEX_PATH) -> str:
""" Saves the daily devotional to a dbm file.
"""
from datetime import date, timedelta
    # Use a leap year so every calendar day appears, and range over all 366 days
    # (range(365) stops at Dec 30 and would miss 12.31).
    start = date(2012, 1, 1)
    date_iter = ((start + timedelta(i)).strftime('%m.%d') for i in range(366))
return mod_to_dbm('Daily', date_iter, path)
def make_strongs_dbm(path: str=INDEX_PATH) -> str:
""" Saves the StrongsReal modules as dbms.
"""
keys = IndexDict('KJV')['_strongs_']
greek_keys = (i[1:] for i in keys if i.startswith('G'))
hebrew_keys = (i[1:] for i in keys if i.startswith('H'))
greek_file = mod_to_dbm('StrongsRealGreek', greek_keys, path)
hebrew_file = mod_to_dbm('StrongsRealHebrew', hebrew_keys, path)
return '\n'.join((greek_file, hebrew_file))
def make_robinson_dbm(path: str=INDEX_PATH) -> str:
""" Save robinson morph definitions in a dbm.
"""
keys = IndexDict('KJV')['_morph_']
robinson_keys = (i for i in keys if not i.startswith('TH'))
return mod_to_dbm('Robinson', robinson_keys, path)
def make_raw_kjv_dbm(path: str=INDEX_PATH) -> str:
""" Saves the KJV modules raw text as a dbm.
"""
verse_iter = VerseIter('Genesis 1:1')
return mod_to_dbm('KJV', verse_iter, path)
class Lookup(object):
""" A generic object to lookup refrences in differend sword modules.
"""
def __init__(self, module_name='KJV', markup=Sword.FMT_PLAIN):
""" Setup the module to look up information in.
"""
markup = Sword.MarkupFilterMgr(markup)
# We don't own this or it will segfault.
markup.thisown = False
self._library = Sword.SWMgr(markup)
self._module = self._library.getModule(module_name)
self._bold_regx = re.compile(r'<b>(\w+)</b>', re.I)
self._italic_regx = re.compile(r'''
(?:<i>|<hi\s*type="italic">)
([\w\s]+)(?:</i>|</hi>)
''', re.I | re.X)
self._br_regx = re.compile(r'(<br[\s]*/>|<lb/>)[\s]?', re.I)
self._cleanup_regx = re.compile(r'<[^>]*>')
self._brace_regx = re.compile(r'\{([\W]*)([\w]*)([\W]*)\}')
self._parenthesis_regx = re.compile(r'\(([\W]*)([\w]*)([\W]*)\)')
self._bracket_regx = re.compile(r'\[([\W]*)([\w ]*)([\W]*)\]')
self._verse_ref_regx = re.compile(r'''
<scripRef[^>]*>
([^<]*)
</scripRef>
''', re.I)
def get_text(self, key):
""" Get the text at the given key in the module.
i.e. get_text('3778') returns the greek strongs.
"""
encoding = get_encoding()
self._module.setKey(Sword.SWKey(key))
item_text = self._module.renderText()
# Make the text printable.
item_text = item_text.encode(encoding, 'replace')
item_text = item_text.decode(encoding, 'replace')
return fill(item_text, screen_size()[1])
def get_raw_text(self, key):
""" Get the text at the given key in the module.
i.e. get_text('3778') returns the greek strongs.
"""
encoding = get_encoding()
self._module.setKey(Sword.SWKey(key))
item_text = self._module.getRawEntry()
# Make the text printable.
item_text = item_text.encode(encoding, 'replace')
item_text = item_text.decode(encoding, 'replace')
return item_text
def get_formatted_text(self, key):
""" Returns the formated raw text of the specified key.
"""
text = self.get_raw_text(key)
# Format and highlight the text.
text = self._bold_regx.sub('\033[1m\\1\033[m', text)
text = self._italic_regx.sub('\033[36m\\1\033[m', text)
text = self._br_regx.sub('\n', text)
text = self._bracket_regx.sub('[\\1\033[33m\\2\033[m\\3]', text)
text = self._brace_regx.sub('{\\1\033[35m\\2\033[m\\3}', text)
text = self._parenthesis_regx.sub('(\\1\033[34m\\2\033[m\\3)', text)
text = self._verse_ref_regx.sub('\033[32m\\1\033[m', text)
text = self._cleanup_regx.sub('', text)
return text
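# Usage sketch (assumes the named Sword module, e.g. KJV, is installed):
#   lookup = Lookup('KJV')
#   print(lookup.get_text('John 3:16'))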
class VerseTextIter(object):
""" An iterable object for accessing verses in the Bible. Maybe it will
be easier maybe not.
"""
def __init__(self, reference_iter, strongs=False, morph=False,
module='KJV', markup=Sword.FMT_PLAIN, render=''):
""" Initialize.
"""
markup = Sword.MarkupFilterMgr(markup)
# We don't own this or it will segfault.
markup.thisown = False
self._library = Sword.SWMgr(markup)
self._library.setGlobalOption("Headings", "On")
self._library.setGlobalOption("Cross-references", "Off")
if strongs:
self._library.setGlobalOption("Strong's Numbers", "On")
else:
self._library.setGlobalOption("Strong's Numbers", "Off")
if morph:
self._library.setGlobalOption("Morphological Tags", "On")
else:
self._library.setGlobalOption("Morphological Tags", "Off")
# Strings for finding the heading.
self._head_str = Sword.SWBuf('Heading')
self._preverse_str = Sword.SWBuf('Preverse')
self._canon_str = Sword.SWBuf('canonical')
self._module = self._library.getModule(module)
self._key = self._module.getKey()
if render.lower() == 'raw':
self._render_func = self._module.getRawEntry
elif render.lower() == 'render_raw':
self._fix_space_regx = re.compile(r'([^\.:\?!])\s+')
self._fix_end_regx = re.compile(r'\s+([\.:\?!,;])')
self._fix_start_tag_regx = re.compile(r'(<[npi]>)\s*')
self._fix_end_tag_regx = re.compile(r'\s*(</[npi]>)')
self._upper_divname_regx = re.compile(r'(\w+)([\'s]*)')
self._render_func = \
lambda: self._parse_raw(self._module.getRawEntry(),
strongs, morph)
else:
self._render_func = self._module.renderText
self._ref_iter = reference_iter
def next(self):
""" Returns the next verse reference and text.
"""
return self.__next__()
def __next__(self):
""" Returns a tuple of the next verse reference and text.
"""
# Retrieve the next reference.
verse_ref = next(self._ref_iter)
self._key.setText(verse_ref)
# Set the verse and render the text.
verse_text = self._get_text(verse_ref)
return (verse_ref, verse_text)
def __iter__(self):
""" Returns an iterator of self.
"""
return self
def _get_text(self, verse_ref):
""" Returns the verse text. Override this to produce formatted verse
text.
"""
verse_text = self._render_func()
if self._render_func == self._module.renderText:
verse_text = '%s %s' % (self._get_heading(), verse_text)
return verse_text
def _get_heading(self):
""" Returns the verse heading if there is one.
"""
attr_map = self._module.getEntryAttributesMap()
heading_list = []
head_str = self._head_str
preverse_str = self._preverse_str
canon_str = self._canon_str
if head_str in attr_map:
heading_attrs = attr_map[head_str]
if self._preverse_str in heading_attrs:
preverse_attrs = heading_attrs[preverse_str]
for k, val in preverse_attrs.items():
if canon_str in heading_attrs[k]:
if heading_attrs[k][canon_str].c_str() == 'true':
heading_list.append(val.c_str())
if heading_list:
return self._module.renderText(''.join(heading_list))
else:
return ''
def _parse_xml(self, xml_dom, strongs=False, morph=False):
""" Recursively parse all the childNodes in a xml minidom, and build
the verse text.
"""
# The string that will hold the verse.
verse_text = ''
# The name of the current tag.
name = xml_dom.localName if xml_dom.localName else ''
strongs_str = morph_str = ''
if xml_dom.attributes:
attr_dict = dict(xml_dom.attributes.items())
info_print(attr_dict, tag=4)
# Get any paragraph marker.
if 'marker' in attr_dict:
verse_text = '<p>%s</p> ' % attr_dict['marker']
else:
verse_text = ''
italic_str = '%s'
note_str = '%s'
for key, value in attr_dict.items():
# Italicize any added text.
if 'added' in value.lower():
italic_str = '<i>%s</i> '
# Label study notes.
elif 'study' in value.lower() or 'note' in name.lower():
note_str = '<n>%s</n>'
# Check for strongs.
elif 'lemma' in key.lower() and strongs:
for num in value.split():
strongs_str += ' <%s>' % num.split(':')[1]
# Check for morphology.
elif 'morph' in key.lower() and morph:
for tag in value.split():
morph_str += ' {%s}' % tag.split(':')[1]
# Recursively build the text from all the child nodes.
for node in xml_dom.childNodes:
child_s = self._parse_xml(node, strongs, morph)
if 'divine' in name.lower():
verse_text += \
' %s' % self._upper_divname_regx.sub(
lambda m: m.group(1).upper() + m.group(2),
child_s)
else:
verse_text += '%s' % child_s
if xml_dom.attributes:
return italic_str % note_str % '%s%s%s' % (verse_text, strongs_str,
morph_str)
if hasattr(xml_dom, 'data'):
info_print(xml_dom.data, tag=4)
return xml_dom.data
return verse_text.strip()
def _parse_raw(self, raw_text, strongs=False, morph=False):
""" Parse raw verse text and return a formated version.
"""
# A hack to make the raw text parse as xml.
xml_text = '''<?xml version="1.0"?>
<root xmlns="%s">
%s
</root>'''
# It works now we can parse the xml dom.
try:
parsed_xml = parseString(xml_text % ('verse', raw_text))
parsed_str = self._parse_xml(parsed_xml, strongs, morph)
except Exception as err:
print('Error %s while processing %s.\n' % (err, raw_text),
file=sys.stderr)
parsed_str = raw_text
# Make all the spacing correct.
fixed_str = self._fix_end_regx.sub('\\1', parsed_str)
fixed_str = self._fix_space_regx.sub('\\1 ', fixed_str)
fixed_str = self._fix_start_tag_regx.sub('\\1', fixed_str)
fixed_str = self._fix_end_tag_regx.sub('\\1', fixed_str)
return fixed_str.replace('\n', '')
class RawDict(object):
""" Parse raw verse text into a dictionary so it can easly be found out how
words are translated and how Strong's numbers are used.
"""
def __init__(self, reference_iter, module='KJV'):
""" Initialize the sword module.
"""
# This doesn't matter.
markup = Sword.MarkupFilterMgr(Sword.FMT_PLAIN)
# We don't own this or it will segfault.
markup.thisown = False
self._library = Sword.SWMgr(markup)
self._module = self._library.getModule(module)
self._key = self._module.getKey()
self._ref_iter = reference_iter
self._fix_space_regx = re.compile(r'([^\.:\?!])\s+')
self._fix_end_regx = re.compile(r'\s+([\.:\?!,;])')
self._remove_tag_regx = re.compile(r'(<i>\s?|\s?</i>)')
self._fix_start_tag_regx = re.compile(r'(<i>)\s*')
self._fix_end_tag_regx = re.compile(r'\s*(</i>)')
def next(self):
""" Returns the next verse reference and text.
"""
return self.__next__()
def __next__(self):
""" Returns a tuple of the next verse reference and text.
"""
# Retrieve the next reference.
verse_ref = next(self._ref_iter)
self._key.setText(verse_ref)
# Set the verse and render the text.
verse_dict = self.get_dict(verse_ref)
return (verse_ref, verse_dict)
def __iter__(self):
""" Returns an iterator of self.
"""
return self
def get_dict(self, verse_reference):
""" Lookup the verse reference in the sword module specified and
return a dictionary from it.
"""
self._key.setText(verse_reference)
raw_text = self._module.getRawEntry()
return self._get_parsed_dict(raw_text, True, True)
def _raw_to_dict(self, xml_dom, strongs=False, morph=False):
""" Recursively parse all the childNodes in a xml minidom, and build
a dictionary to use for telling what strongs numbers go to what words
and vise versa.
"""
# The dictionary that will hold the verse.
verse_dict = defaultdict(list)
verse_dict['_words'].append(defaultdict(list))
# Recursively build the text from all the child nodes.
child_s = ''
# The string that will hold the verse.
verse_text = ''
# The name of the current tag.
name = xml_dom.localName if xml_dom.localName else ''
# Build up the dictionary and verse text from the child nodes.
for node in xml_dom.childNodes:
child_s, child_d = self._raw_to_dict(node, strongs, morph)
if 'divine' in name.lower():
# Uppercase 'LORD's in the text.
verse_text += ' %s' % child_s.upper()
else:
verse_text += ' %s' % child_s
for key, value in child_d.items():
# Cleanup the items in the dictionary.
if value and not isinstance(value[0], dict):
new_list = set(value).union(verse_dict[key])
else:
new_list = value
if key == '_words':
# Update the words dictionary.
for words, lst in value[0].items():
new_list = filter(any, lst)
verse_dict['_words'][0][words].extend(new_list)
else:
# Make sure all items in the list are not None.
verse_dict[key].extend(filter(any, new_list))
if xml_dom.attributes:
attr_dict = dict(xml_dom.attributes.items())
# Cleanup and format the verse text.
verse_text = self._fix_end_regx.sub('\\1', verse_text)
verse_text = self._fix_space_regx.sub('\\1 ', verse_text)
verse_text = self._fix_start_tag_regx.sub('\\1', verse_text)
verse_text = self._fix_end_tag_regx.sub('\\1', verse_text)
verse_text = verse_text.replace('\n', '')
# Text clean of all italic tags.
clean_text = self._remove_tag_regx.sub('', verse_text)
italic_str = '%s'
# Dictionary to hold Strong's and Morphological attributes.
attrib_dict = defaultdict(list)
strongs_str = morph_str = ''
for key, value in attr_dict.items():
# Check for strongs.
if 'lemma' in key.lower():
for num in value.split():
# Get the number.
num = num.split(':')[1]
attrib_dict['strongs'].append(num)
# Associate the text with the number.
verse_dict[num].append(clean_text.strip())
if strongs:
strongs_str += ' <%s> ' % num
# Cleanup the attribute dictionary.
attrib_dict['strongs'] = list(set(attrib_dict['strongs']))
# Check for morphology.
elif 'morph' in key.lower():
for tag in value.split():
# Get the tag.
tag = tag.split(':')[1]
attrib_dict['morph'].append(tag)
# Associate the text with the tag.
verse_dict[tag].append(clean_text.strip())
if morph:
morph_str += ' {%s} ' % tag
# Cleanup the attribute dictionary.
attrib_dict['morph'] = list(set(attrib_dict['morph']))
if attrib_dict:
# Associate the numbers and tags with the text.
verse_dict['_words'][0][clean_text.strip()].append(attrib_dict)
elif 'type' in attr_dict or 'subType' in attr_dict:
_sub_type = attr_dict.get('subType', '')
_type = attr_dict.get('type', _sub_type)
            if _type.lower() == 'x-p' or 'marker' in attr_dict:
                # Get any paragraph marker ('' when the attribute is absent).
                verse_dict['_x-p'].append(attr_dict.get('marker', '').strip())
elif 'study' in _type.lower() or 'note' in name.lower():
verse_dict['_notes'].append(verse_text.strip())
if 'added' in _type.lower() or 'added' in _sub_type.lower():
if 'marker' not in attr_dict:
# Italicize any added text.
italic_str = '<i>%s</i>'
verse_dict['_added'].append(verse_text.strip())
elif 'section' in _type.lower() or \
'preverse' in _sub_type.lower():
# Add the preverse heading.
verse_dict['_preverse'].append(verse_text.strip())
else:
# Don't include unwanted tags (e.g. strongs markup and
# notes) in the text.
verse_text = ''
elif 'xmlns' in attr_dict:
verse_text = verse_text.strip()
# Include the entire verse text in the dictionary.
verse_dict['_%s' % attr_dict['xmlns']].append(verse_text)
# Build up the verse string.
temp_str = '%s%s%s' % (verse_text, strongs_str, morph_str)
verse_text = italic_str % temp_str
if hasattr(xml_dom, 'data'):
return xml_dom.data, verse_dict
return verse_text, verse_dict
def _get_parsed_dict(self, raw_text, strongs=False, morph=False):
""" Parse raw verse text and return a formated version.
"""
info_print(raw_text, tag=31)
# A hack to make the raw text parse as xml.
xml_text = '''<?xml version="1.0"?>
<root xmlns="%s">
%s
</root>''' % ('verse_text', raw_text)
# It works now we can parse the xml dom.
try:
parsed_xml = parseString(xml_text)
return self._raw_to_dict(parsed_xml, strongs, morph)
except Exception as err:
info_print('Error %s while processing %s.\n' % (err, raw_text),
tag=31)
return raw_text, {'_verse_text': [raw_text],
'_words': [defaultdict(list)]}
class VerseIter(object):
""" Iterator of verse references.
"""
def __init__(self, start, end='Revelation of John 22:21'):
""" Setup the start and end references of the range.
"""
# Make sure the range is in order.
start, end = sorted([start, end], key=sort_key)
self._verse = Sword.VerseKey(start, end)
self._end_ref = self._verse.getUpperBound().getText()
self._verse_ref = ''
def __next__(self):
""" Returns the next verse reference.
"""
# End the iteration when we reach the end of the range.
if self._verse_ref == self._end_ref:
raise StopIteration()
# Get the current verse reference.
self._verse_ref = self._verse.getText()
# Load the next verse in the range.
self._verse.increment()
# Return only the reference.
return self._verse_ref
def __iter__(self):
""" Returns an iterator of self.
"""
return self
def next(self):
""" Returns the next verse reference.
"""
return self.__next__()
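# Iteration sketch (illustrative):
#   for ref in VerseIter('John 3:16', 'John 3:18'):
#       print(ref)  # John 3:16, John 3:17, John 3:18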
class ChapterIter(VerseIter):
""" Iterates over just one chapter.
"""
def __init__(self, book='Genesis', chapter=1):
""" Setup iterator.
"""
start = Sword.VerseKey('%s %s:1' % (book, chapter))
end = Sword.VerseKey(start.clone())
end.setVerse(end.getVerseMax())
super(ChapterIter, self).__init__(start.getText(), end.getText())
class BookIter(VerseIter):
""" Iterates over just one book.
"""
def __init__(self, book='Genesis'):
""" Setup iterator.
"""
start = Sword.VerseKey('%s 1:1' % book)
end = Sword.VerseKey(start.clone())
end.setChapter(end.getChapterMax())
end.setVerse(end.getVerseMax())
super(BookIter, self).__init__(start.getText(), end.getText())
class IndexBible(object):
""" Index the bible by Strong's Numbers, Morphological Tags, and words.
"""
def __init__(self, module='KJV', path=''):
""" Initialize the index dicts.
"""
self._module_name = module
self._path = path if path else INDEX_PATH
# Remove morphological and strongs information.
self._cleanup_regx = re.compile(r'\s*(<([GH]\d*)>|\{([A-Z\d-]*)\})')
# Note removal regular expression.
self._remove_notes_regex = re.compile(r'\s?<n>\s?(.*?)\s?</n>', re.S)
self._remove_tags_regex = re.compile(r'<[/]?[pin]>')
self._non_alnum_regx = re.compile(r'\W')
self._fix_regx = re.compile(r'\s+')
self._strongs_regx = re.compile(r'\s<([GH]\d+)>', re.I)
self._morph_regx = re.compile(r'\s\{([\w-]+)\}', re.I)
self._module_dict = defaultdict(list)
# lower_case is used to store lower_case words case sensitive
# counterpart. _Words_ is for easy key lookup for partial words.
self._words_set = set()
self._strongs_set = set()
self._morph_set = set()
self._module_dict.update({'lower_case': defaultdict(list)})
self._index_dict = {
'%s_index_i' % self._module_name: self._module_dict
}
self._index_built = False
def _book_gen(self):
""" A Generator function that yields book names in order.
"""
# Yield a list of all the book names in the bible.
verse_key = Sword.VerseKey('Genesis 1:1')
for testament in [1, 2]:
for book in range(1, verse_key.bookCount(testament) + 1):
yield(verse_key.bookName(testament, book))
def _index_strongs(self, verse_ref, verse_text):
""" Update the modules strongs dictionary from the verse text.
"""
strongs_list = set(self._strongs_regx.findall(verse_text))
for strongs_num in strongs_list:
self._strongs_set.add(strongs_num)
self._module_dict[strongs_num].append(verse_ref)
def _index_morph(self, verse_ref, verse_text):
""" Update the modules mophological dictionary from the verse text.
"""
morph_list = set(self._morph_regx.findall(verse_text))
for morph_num in morph_list:
self._morph_set.add(morph_num)
self._module_dict[morph_num].append(verse_ref)
def _index_words(self, verse_ref, verse_text):
""" Update the modules word dictionary from the verse text.
"""
# Remove all the morphological and strongs stuff.
clean_text = self._cleanup_regx.sub('', verse_text)
# Remove any non-alpha-numeric stuff.
clean_text = self._non_alnum_regx.sub(' ', clean_text)
# Replace runs of one or more spaces with just a single space.
clean_text = self._fix_regx.sub(' ', clean_text).strip()
# Remove the strongs and morphological stuff in such a way that
# split words are still split (i.e. where in, instead of wherein).
# So there are split versions and non-split versions just to be sure
# that the correct one is in there.
verse_text = self._strongs_regx.sub('', verse_text)
verse_text = self._morph_regx.sub('', verse_text)
# Strip out all unicode so we can search correctly.
verse_text = verse_text.encode('ascii', 'ignore')
verse_text = verse_text.decode('ascii', 'ignore')
verse_text = self._non_alnum_regx.sub(' ', verse_text)
verse_text = self._fix_regx.sub(' ', verse_text).strip()
# Include the capitalized words for case sensitive search.
word_set = set(verse_text.split())
word_set.update(set(clean_text.split()))
for word in word_set:
if word:
self._words_set.add(word)
self._module_dict[word].append(verse_ref)
l_word = word.lower()
if l_word != word:
# Map the lowercase word to the regular word for case
# insensitive searches.
if word not in self._module_dict['lower_case'][l_word]:
self._module_dict['lower_case'][l_word].append(word)
def _index_book(self, book_name="Genesis"):
""" Creates indexes for strongs, morphology and words.
"""
book_iter = BookIter(book_name)
verse_iter = VerseTextIter(book_iter, True, True, self._module_name,
render='render_raw')
for verse_ref, verse_text in verse_iter:
info_print('\033[%dD\033[KIndexing...%s' % \
(len(verse_ref) + 20, verse_ref), end='')
# Put the entire Bible in the index, so we can pull it out
# faster.
self._module_dict[verse_ref] = verse_text
# Remove the notes so we don't search them.
verse_text = self._remove_notes_regex.sub('', verse_text)
# Remove tags so they don't mess anything up.
verse_text = self._remove_tags_regex.sub('', verse_text)
# Index everything else.
self._index_strongs(verse_ref, verse_text)
self._index_morph(verse_ref, verse_text)
self._index_words(verse_ref, verse_text)
def build_index(self):
""" Create index files of the bible for strongs numbers,
morphological tags, and case (in)sensitive words.
"""
info_print("Indexing %s could take a while..." % self._module_name)
        try:
            for book in self._book_gen():
                self._index_book(book)
        except Exception as err:
            print('\nError while indexing: %s' % err, file=sys.stderr)
self._module_dict['_words_'].extend(self._words_set)
self._module_dict['_strongs_'].extend(self._strongs_set)
self._module_dict['_morph_'].extend(self._morph_set)
info_print('\nDone.')
self._index_built = True
def write_index(self):
""" Write all the index dictionaries to their respective files. If
Any of the dictionaries is empty, then build the index.
The indexes are just json-ed dictionaries. The keys are the indexed
items and the values are the verse references that contain the key.
"""
if not self._index_built:
self.build_index()
# Build the index if it's not already built.
for name, dic in self._index_dict.items():
info_print("Writing %s.dbm..." % name)
# Save as just a plain text file. Has to be loaded all at once,
# so it is really slow.
#with open(name, 'w') as index_file:
#json.dump(dic, index_file, indent=4)
#return
# Save a dbm database that we can access without loading it all
            # into memory so it is fast.
dbm_name = '%s/%s.dbm' % (self._path, name)
with IndexDbm(dbm_name, 'nf') as index_file:
#with open(name, 'r') as i_file:
#dic =json.load(i_file)
index_file.update(dic)
| zepto/biblesearch.web | sword_search.old/sword_verses.py | Python | gpl-3.0 | 32,753 |
# Copyright (C) 2013-2018 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Check whether all features used in the code are defined
#
import sys
import os
sys.path.append(os.path.join(sys.path[0], '..', 'config'))
import featuredefs
if len(sys.argv) != 2:
print("Usage: %s FILE" % sys.argv[0])
    sys.exit(2)
fdefs = featuredefs.defs(sys.argv[1])
| mkuron/espresso | maintainer/check_features.py | Python | gpl-3.0 | 1,020 |
from pycddb.dataset import Dataset
from lingpy import Wordlist, csv2list
from lingpy.compare.partial import _get_slices
def prepare(ds):
errs = 0
wl = Wordlist(ds.raw('bds.tsv'))
W = {}
for k in wl:
value = wl[k, 'value']
tokens = wl[k, 'tokens']
doc = wl[k, 'doculect']
if value:
morphemes = []
for a, b in _get_slices(wl[k, 'tokens']):
ipa = ''.join(tokens[a:b])
morphemes += [ipa]
ipa = ' '.join(morphemes)
clpa = ds.transform(ipa, 'CLPA')
struc = ds.transform(ipa, 'Structure')
            try:
                assert len(clpa.split(' ')) == len(struc.split(' '))
            except AssertionError:
                errs += 1
                print(errs, clpa, struc)
if '«' in clpa:
errs += 1
print(errs, ipa, clpa, struc)
W[k] = [doc, wl[k, 'concept'], wl[k, 'concepticon_id'], value,
clpa, struc, wl[k, 'partial_ids']]
W[0] = ['doculect', 'concept', 'concepticon_id', 'value', 'segments', 'structure', 'cogids']
ds.write_wordlist(Wordlist(W))
def inventories(ds):
data = csv2list(ds.raw('inv.tsv'))
header = data[0]
invs = {l: [] for l in ds.languages}
for i, line in enumerate(data[1:]):
stype, sis, ipa, struc = line[1:5]
if len(struc.split()) != len(ipa.split()):
print(i+2, 'warn', struc, ' | ', ipa)
for l, n in zip(header[5:], line[5:]):
if n:
                note = '' if n == 'X' else n  # 'X' marks bare presence, with no note text
invs[l] += [[sis, ipa, struc, stype, note]]
ds.write_inventories(invs)
| digling/cddb | datasets/Allen2007/__init__.py | Python | gpl-3.0 | 1,690 |
import re
def analyzeLine(txtlines):
outline = []
lcnt = -1
for line in txtlines:
lcnt += 1
typ = None
itmText = None
spc = (len(line) -len(line.lstrip()))*' '
tls = line.lstrip()
if tls.lower().startswith('<body'):
itmText = '<BODY>'
typ = 'object'
elif tls.lower().startswith('<head'):
itmText = '<HEAD>'
typ = 'object'
elif tls.lower().startswith('<table'):
itmText = '<TABLE>'
typ = 'function'
elif tls.startswith('<!---'):
itmText =tls[5:].replace('-->','')
typ = 'heading'
# Javascript
elif tls.startswith('function '):
itmText =tls[9:].rstrip()
if itmText.endswith('{'): itmText = itmText[:-1]
typ = 'function'
elif tls.startswith('//---'):
itmText =tls[5:]
typ = 'heading'
# CSS
elif tls.startswith('/*---'):
itmText =tls[5:].split('*/')[0]
typ = 'heading'
        if itmText is not None:
outline.append([spc+itmText,typ,lcnt])
return outline
| lucidlylogicole/scope | plugins/outline/lang/html.py | Python | gpl-3.0 | 1217 |
# test driver to verify that new version of code works
import opiniongame.config as og_cfg
import opiniongame.IO as og_io
import opiniongame.coupling as og_coupling
import opiniongame.state as og_state
import opiniongame.opinions as og_opinions
import opiniongame.adjacency as og_adj
import opiniongame.selection as og_select
import opiniongame.potentials as og_pot
import opiniongame.core as og_core
import opiniongame.stopping as og_stop
import numpy as np
#
# process command line
#
cmdline = og_cfg.CmdLineArguments()
cmdline.printOut()
#
# load configuration
#
# TODO: add option to generate defaults and save to file
# TODO: interpret args to get filename if specified on cmd line
config = og_cfg.staticParameters()
config.readFromFile('staticParameters.cfg')
config.threshold = 0.01
config.printOut()
#
# seed PRNG: must do this before any random numbers are
# ever sampled during default generation
#
print("SEEDING PRNG: "+str(config.startingseed))
np.random.seed(config.startingseed)
state = og_state.WorldState.fromCmdlineArguments(cmdline, config)
#
# run
#
tau_list = np.arange(0.45, 0.9, 0.01)
alpha_list = np.arange(0.05, 0.25, 0.01)
numalphas = len(alpha_list)
numtaus = len(tau_list)
numvars = 3
resultMatrix = np.zeros((numalphas, numtaus, numvars))
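# Layout note: resultMatrix[i][j] stores, for alpha_list[i] and tau_list[j],
# [polarized outcomes, non-polarized outcomes, mean iterations] over the 100 runs.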
for (i, alpha) in enumerate(alpha_list):
config.learning_rate = alpha
print("")
for (j, tau) in enumerate(tau_list):
print((alpha, tau))
#
# functions for use by the simulation engine
#
ufuncs = og_cfg.UserFunctions(og_select.FastPairSelection,
og_stop.totalChangeStop,
og_pot.createTent(tau))
polarized = 0
notPolarized = 0
aveIters = 0
for k in range(100):
state = og_core.run_until_convergence(config, state, ufuncs)
results = og_opinions.isPolarized(state.history[-1], 0.05)
for result in results:
if result:
polarized += 1
else:
notPolarized += 1
aveIters += state.iterCount
state.reset()
state.initialOpinions = og_opinions.initialize_opinions(config.popSize, config.ntopics)
# maybe you want to do Consensus and nonConsensus. Finding consensus is easier!
        # assuming pop_size = 20, ten people at 1, nine people at 0, and one person
# at 0.5 will be polarization, but, still ...
resultMatrix[i][j][0] = polarized
resultMatrix[i][j][1] = notPolarized
resultMatrix[i][j][2] = aveIters/100.0
rdict = {}
rdict['results'] = resultMatrix
og_io.saveMatrix('output.mat', rdict)
| mjsottile/PyOpinionGame | driver_alpha_tau_study.py | Python | gpl-3.0 | 2,715 |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
import pyrotrfid
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyrotrfid'
copyright = u'GPL3'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pyrotrfid.__version__
# The full version, including alpha/beta/rc tags.
release = pyrotrfid.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'rtd'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = project + " v" + release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {'**': 'links.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyrotrfiddoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'pyrotrfid.tex', u'pyrotrfid Documentation', u'', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', 'pyrotrfid', u'pyrotrfid Documentation', [u''], 1)]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
| xapple/pyrotrfid | doc/conf.py | Python | gpl-3.0 | 7,241 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set et sw=4 fenc=utf-8:
#
# Copyright 2016 INVITE Communications Co., Ltd. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""AGI script that renders speech to text using Google Cloud Speech API
using the REST API."""
# [START import_libraries]
from __future__ import print_function
#db_insert = ("INSERT INTO `%s` (`id`, `%s`) VALUES ('%s', '%s')")
db_update = ("UPDATE `{0}` SET `{1}` = '{2}' WHERE id = {3}")
db_int = ("UPDATE `{0}` SET `{1}` = `{1}` + {2} WHERE id = {3}")
#data_insert(db_update % (newTable, 'billsec', '%s' % (billsec), warlist))
data = { 'table': 'tableName', 'field': 'billsec', 'value' : 10, 'id' : 1121 }
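# Hedged sketch of the intended substitution, using the `data` dict above:
#   db_update.format(data['table'], data['field'], data['value'], data['id'])
#   -> "UPDATE `tableName` SET `billsec` = '10' WHERE id = 1121"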
print(db_update.format(1,2,3,'カタカナ'))
| invitecomm/asterisk-ivr | pigeonhole/junk.py | Python | gpl-3.0 | 1,356 |
# -*- encoding: utf-8 -*-
'''Dependencies:
The ``scoretools`` package should not import ``instrumenttools``
at top level.
'''
from abjad.tools import systemtools
systemtools.ImportManager.import_structured_package(
__path__[0],
globals(),
)
_documentation_section = 'core' | andrewyoung1991/abjad | abjad/tools/scoretools/__init__.py | Python | gpl-3.0 | 278 |
#!/usr/bin/python
# =======================================================================
# This file is part of MCLRE.
#
# MCLRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MCLRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MCLRE. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2015 Augusto Queiroz de Macedo <[email protected]>
# =======================================================================
"""
MRBPR Runner
"""
from os import path
from argparse import ArgumentParser
import shlex
import subprocess
import multiprocessing
import logging
from run_rec_functions import read_experiment_atts
from mrbpr.mrbpr_runner import create_meta_file, run
##############################################################################
# GLOBAL VARIABLES
##############################################################################
# Define the Logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(name)s : %(message)s',
level=logging.INFO)
LOGGER = logging.getLogger('mrbpr.run_rec_mrbpr')
LOGGER.setLevel(logging.INFO)
##############################################################################
# AUXILIAR FUNCTIONS
##############################################################################
def get_mrbpr_confs():
""" Yield the MRBPR Models Configurations """
pass
##############################################################################
# MAIN
##############################################################################
if __name__ == '__main__':
# ------------------------------------------------------------------------
# Define the argument parser
PARSER = ArgumentParser(description="Script that runs the mrbpr event recommender algorithms for" \
" a given 'experiment_name' with data from a given 'region'")
PARSER.add_argument("-e", "--experiment_name", type=str, required=True,
help="The Experiment Name (e.g. recsys-15)")
PARSER.add_argument("-r", "--region", type=str, required=True,
help="The data Region (e.g. san_jose)")
PARSER.add_argument("-a", "--algorithm", type=str, required=True,
help="The algorithm name (used only to differenciate our proposed MRBPR to the others")
ARGS = PARSER.parse_args()
EXPERIMENT_NAME = ARGS.experiment_name
REGION = ARGS.region
ALGORITHM_NAME = ARGS.algorithm
LOGGER.info(ALGORITHM_NAME)
DATA_DIR = "data"
PARTITIONED_DATA_DIR = path.join(DATA_DIR, "partitioned_data")
PARTITIONED_REGION_DATA_DIR = path.join(PARTITIONED_DATA_DIR, REGION)
EXPERIMENT_DIR = path.join(DATA_DIR, "experiments", EXPERIMENT_NAME)
EXPERIMENT_REGION_DATA_DIR = path.join(EXPERIMENT_DIR, REGION)
# LOGGER.info('Defining the MRBPR relation weights file...')
subprocess.call(shlex.split("Rscript %s %s %s" %
(path.join("src", "recommender_execution", "mrbpr", "mrbpr_relation_weights.R"),
EXPERIMENT_NAME, ALGORITHM_NAME)))
# ------------------------------------------------------------------------
# Reading and Defining the Experiment Attributes
EXPERIMENT_ATTS = read_experiment_atts(EXPERIMENT_DIR)
PARALLEL_RUNS = multiprocessing.cpu_count() - 1
TRAIN_RELATION_NAMES = EXPERIMENT_ATTS['%s_relation_names' % ALGORITHM_NAME.lower()]
TRAIN_RELATION_FILES = ["%s_train.tsv" % name for name in TRAIN_RELATION_NAMES]
PARTITIONS = reversed(EXPERIMENT_ATTS['partitions'])
# ------------------------------------------------------------------------
# Reading and Defining the Experiment Attributes
META_FILE = path.join(EXPERIMENT_DIR, "%s_meetup.meta" % ALGORITHM_NAME.lower())
LOGGER.info('Creating the META relations file...')
create_meta_file(TRAIN_RELATION_NAMES, META_FILE, PARTITIONED_DATA_DIR)
# ------------------------------------------------------------------------
# Fixed parameters
# ------------------------------------------------------------------------
# Algorithm (0 - MRBPR)
ALGORITHM = 0
# Size of the Ranked list of events per User
RANK_SIZE = 100
# Save Parameters
SAVE_MODEL = 0
# Hyper Parameters
REGULARIZATION_PER_ENTITY = ""
REGULARIZATION_PER_RELATION = ""
RELATION_WEIGHTS_FILE = path.join(EXPERIMENT_DIR, "%s_relation_weights.txt" % ALGORITHM_NAME.lower())
# ------------------------------------------------------------------------
if ALGORITHM_NAME == "MRBPR":
LEARN_RATES = [0.1]
NUM_FACTORS = [300]
NUM_ITERATIONS = [1500]
elif ALGORITHM_NAME == "BPR-NET":
LEARN_RATES = [0.1]
NUM_FACTORS = [200]
NUM_ITERATIONS = [600]
else:
LEARN_RATES = [0.1]
NUM_FACTORS = [10]
NUM_ITERATIONS = [10]
MRBPR_BIN_PATH = path.join("src", "recommender_execution", "mrbpr", "mrbpr.bin")
LOGGER.info("Start running MRBPR Process Scheduler!")
run(PARTITIONED_REGION_DATA_DIR, EXPERIMENT_REGION_DATA_DIR,
REGION, ALGORITHM, RANK_SIZE, SAVE_MODEL, META_FILE,
REGULARIZATION_PER_ENTITY, REGULARIZATION_PER_RELATION,
RELATION_WEIGHTS_FILE, TRAIN_RELATION_FILES,
PARTITIONS, NUM_ITERATIONS, NUM_FACTORS, LEARN_RATES,
MRBPR_BIN_PATH, PARALLEL_RUNS, ALGORITHM_NAME)
LOGGER.info("DONE!")
| augustoqm/MCLRE | src/recommender_execution/run_rec_mrbpr.py | Python | gpl-3.0 | 5,871 |
#!/usr/bin/python3
"""
Written by: True Demon
The non-racist Kali repository grabber for all operating systems.
Git Kali uses Offensive Security's package repositories and their generous catalog
of extremely handy penetration testing tools. This project is possible because
of Offensive Security actually sticking to good practices and keeping their
packages well-organized, so thanks OffSec! :)
#TryHarder
"""
# TODO: Finish Install Script
# TODO: Categorize tool searches
# TODO: Categorization of repos is a big task to be done later
# TODO: Include package management
import argparse
import packmgr as packager
from utils import * # includes sys, os
prog_info = "GIT Kali Project"
__author__ = "True Demon"
__winstall__ = "C:\\ProgramFiles\\GitKali\\" # Default package installation directory for Windows
__linstall__ = "/usr/share" # Default package installation directory for Linux
__install__ = "" # Used to store default install directory based on OS
try:
if os.name == 'posix':
__install__ = __linstall__
if os.getuid():
print("You need to be root to install packages. Try again as sudo.")
sys.exit()
elif os.name == 'nt':
__install__ = __winstall__
from ctypes import windll
if not windll.shell32.IsUserAnAdmin():
print("You must be an administrator to install packages. Please run from an escalated cmd.")
else:
sys.stderr("Could not detect your privileges / operating system. "
"This script only supports Linux (Posix) and Windows (nt) systems.")
except OSError:
sys.stderr("Unknown Operating System detected. You must have invented this one yourself! Teach me, Senpai!")
exit()
except ImportError as e:
sys.stderr("Invalid or missing libraries: \n%s" % e)
def search(search_word):
# search function for valid packages to install
found = []
with open('kali-packages.lst', 'r') as file:
packages = file.readlines()
for p in packages:
if search_word in p.split()[0]:
found.append(p.split()[0])
if not len(found):
print(Symbol.fail + " Could not find any matching packages")
return None
print("Found packages: ")
print(' '.join(found))
def check_install_dir(install_dir=__install__):
if os.path.exists(install_dir):
try:
os.chdir(install_dir)
if os.getcwd() != install_dir:
print("Something went wrong. We can't get to your installation directory: %s" % install_dir)
sys.exit()
except OSError:
print("Somehow, you broke it. Dunno how ya did it, but a bug report would be mighty handy to figure out how!")
sys.exit(-1)
def main():
parser = argparse.ArgumentParser(prog='gitkali.py', description='The apt-like Kali package installer for Linux',
epilog=prog_info, formatter_class=argparse.RawTextHelpFormatter)
parser._positionals.title = "Commands"
parser.add_argument("command", choices=["search", "install", "update", "upgrade"],
help="search : search package list for compatible packages\n" +
"install : install specified package\n" +
"update : update package lists\n" +
"upgrade : upgrade kali packages\n\n"
)
parser.add_argument("packages", action='store', metavar='package', nargs='*', help="package(s) to upgrade/install")
parser.add_argument("-d", "--directory", action='store', default=__install__,
help="Alternate installation directory")
args = parser.parse_args()
packages = [str(p) for p in args.packages] # Converts args.package(tuple) to list of strings for ease of use
args.directory = os.path.abspath(args.directory)
if args.command == 'search':
packager.check_kali_packages()
for p in packages:
search(p)
elif args.command == 'update':
packager.get_updates()
exit()
elif args.command == 'upgrade':
packager.upgrade(packages, args.directory)
elif args.command == 'install':
        if not packages:
            print("No packages given")
            return
if '*' in packages:
# NEVER EVER EVER EVER EEEEEEEVVVVVEEEEEEEEEEEERRRRRRRRRRR DO THIS!!!
# TODO: EVENTUALLY...build a way for this to work safely...
packager.install_all(args.directory)
if args.directory != __install__: # Usually /usr/share/
check_install_dir(args.directory) # Check that the directory exists
warn_non_standard_dir(args.directory) # Warn the user that this is not advised
response = input("Do you wish to proceed?: [y/N]") # Confirm decision
if response.upper() != 'Y':
exit()
packages_to_install = packager.get_local_packages(packages)
# Returns a dictionary ex: {package_name: package_url}
for p in packages_to_install:
print("Proceeding with install: ", p)
packager.install(p, packages_to_install[p], args.directory) # install(package_name, url, into directory)
if __name__ == "__main__":
main()
| True-Demon/gitkali | gitkali.py | Python | gpl-3.0 | 5,391 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import trace
from trace_example.recurse import recurse
tracer = trace.Trace(count=True, trace=False, outfile='trace_report.dat')
tracer.runfunc(recurse, 2)
report_tracer = trace.Trace(count=False, trace=False, infile='trace_report.dat')
results = tracer.results()  # in-memory counts; report_tracer would reload them from trace_report.dat on a later run
results.write_results(summary=True, coverdir='/tmp')
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/trace/trace_report.py | Python | gpl-3.0 | 469 |
# Django settings for freudiancommits project.
import os
DEBUG = True if os.environ.get('DJANGO_DEBUG', None) == '1' else False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# Don't require email addresses
SOCIALACCOUNT_EMAIL_REQUIRED = False
SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth'
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'freudiancommits.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'freudiancommits.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'freudiancommits.main',
'freudiancommits.github',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'south',
'gunicorn',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
if 'AWS_STORAGE_BUCKET_NAME' in os.environ:
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_CUSTOM_DOMAIN = AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
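    # The s3_folder_storage backends below override the plain s3boto backends
    # above, adding per-folder (static/media) prefixes inside the bucket.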
DEFAULT_FILE_STORAGE = 's3_folder_storage.s3.DefaultStorage'
DEFAULT_S3_PATH = 'media'
STATICFILES_STORAGE = 's3_folder_storage.s3.StaticStorage'
STATIC_S3_PATH = 'static'
AWS_S3_SECURE_URLS = False
AWS_QUERYSTRING_AUTH = False
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = '//%s/%s/' % \
(AWS_STORAGE_BUCKET_NAME, DEFAULT_S3_PATH)
STATIC_ROOT = '/%s/' % STATIC_S3_PATH
STATIC_URL = '//%s/%s/' % \
(AWS_STORAGE_BUCKET_NAME, STATIC_S3_PATH)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
LOGIN_REDIRECT_URL = '/github/loading/'
| michaelmior/freudiancommits | freudiancommits/settings.py | Python | gpl-3.0 | 6,595 |
#!/usr/bin/env python3
import argparse
import pathlib
import numpy as np
def waterfall(input_filename, output_filename):
fs = 200
nfft = 8192
w = np.blackman(nfft)
x = np.fromfile(input_filename, 'int16')
x = (x[::2] + 1j*x[1::2])/2**15
freq_span = 5
nbins = round(freq_span / fs * nfft)
# In these recordings the internal reference was used, so there
# is a frequency offset
freq_offset = 11.6 if '2021-12-08T12:57:25' in input_filename.name else 0
band = int(input_filename.name.split('_')[-2].replace('kHz', ''))
# 1.6 Hz offset is at 10 MHz
freq_offset *= band / 10000
bin_offset = round(freq_offset / fs * nfft)
freq_sel = slice(nfft//2-nbins+bin_offset, nfft//2+nbins+1+bin_offset)
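    # Truncate to a whole number of FFT frames, window each frame, and keep
    # only the bins within the selected span around the carrier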
x = x[:x.size//nfft*nfft]
f = np.fft.fftshift(
np.fft.fft(w * x.reshape(-1, nfft)),
axes=1)
f = np.abs(f[:, freq_sel])**2
np.save(output_filename, f.astype('float32'))
def parse_args():
parser = argparse.ArgumentParser(
description='Make waterfalls from the December 2021 eclipse IQ data')
parser.add_argument('input_folder',
help='Input folder')
parser.add_argument('output_folder',
help='Output folder')
return parser.parse_args()
def main():
args = parse_args()
input_files = pathlib.Path(args.input_folder).glob('*.sigmf-data')
output_path = pathlib.Path(args.output_folder)
for f_in in input_files:
f_out_name = f_in.name.replace('.sigmf-data', '_waterfall.npy')
f_out = output_path / f_out_name
waterfall(f_in, f_out)
if __name__ == '__main__':
main()
| daniestevez/jupyter_notebooks | december2021_eclipse/make_waterfalls.py | Python | gpl-3.0 | 1,664 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contests', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SuspendedProblem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('suspend_init_tests', models.BooleanField(default=True)),
('problem_instance', models.OneToOneField(related_name='suspended', to='contests.ProblemInstance', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
]
| sio2project/oioioi | oioioi/suspendjudge/migrations/0001_initial.py | Python | gpl-3.0 | 760 |
#!/usr/bin/env python3
#########################################################################
# File Name: mthreading.py
# Author: ly
# Created Time: Wed 05 Jul 2017 08:46:57 PM CST
# Description:
#########################################################################
# -*- coding: utf-8 -*-
import time
import threading
def play(name,count):
for i in range(1,count):
print('%s %d in %d' %(name, i, count))
time.sleep(1)
return
if __name__=='__main__':
t1=threading.Thread(target=play, args=('t1',10))
    # Mark t1 as a daemon thread so it cannot block interpreter exit
t1.setDaemon(True)
t1.start()
print("main")
    # Wait for the child thread to finish
t1.join()
exit(1)
| LingyuGitHub/codingofly | python/threading/mthreading.py | Python | gpl-3.0 | 699 |
# flake8: noqa
# -*- coding: utf-8 -*-
###############################################
# Geosite local settings
###############################################
import os
# Outside URL
SITEURL = 'http://$DOMAIN'
OGC_SERVER['default']['LOCATION'] = os.path.join(GEOSERVER_URL, 'geoserver/')
OGC_SERVER['default']['PUBLIC_LOCATION'] = os.path.join(SITEURL, 'geoserver/')
# databases unique to site if not defined in site settings
"""
SITE_DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, '../development.db'),
},
}
"""
| simonemurzilli/geonode | geonode/contrib/geosites/site_template/local_settings_template.py | Python | gpl-3.0 | 597 |
"""
WSGI config for model_advanced project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "model_advanced.settings")
application = get_wsgi_application()
| mjiang-27/django_learn | model_advanced/model_advanced/wsgi.py | Python | gpl-3.0 | 406 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import os, subprocess, hashlib, shutil, glob, stat, sys, time
from subprocess import check_call
from tempfile import NamedTemporaryFile, mkdtemp
from zipfile import ZipFile
if __name__ == '__main__':
d = os.path.dirname
sys.path.insert(0, d(d(os.path.abspath(__file__))))
from setup import Command, __version__, installer_name, __appname__
PREFIX = "/var/www/calibre-ebook.com"
DOWNLOADS = PREFIX+"/htdocs/downloads"
BETAS = DOWNLOADS +'/betas'
USER_MANUAL = '/var/www/localhost/htdocs/'
HTML2LRF = "calibre/ebooks/lrf/html/demo"
TXT2LRF = "src/calibre/ebooks/lrf/txt/demo"
STAGING_HOST = '67.207.135.179'
STAGING_USER = 'root'
STAGING_DIR = '/root/staging'
def installers():
installers = list(map(installer_name, ('dmg', 'msi', 'tar.bz2')))
installers.append(installer_name('tar.bz2', is64bit=True))
installers.insert(0, 'dist/%s-%s.tar.xz'%(__appname__, __version__))
installers.append('dist/%s-portable-%s.zip'%(__appname__, __version__))
return installers
def installer_description(fname):
if fname.endswith('.tar.xz'):
return 'Source code'
if fname.endswith('.tar.bz2'):
bits = '32' if 'i686' in fname else '64'
return bits + 'bit Linux binary'
if fname.endswith('.msi'):
return 'Windows installer'
if fname.endswith('.dmg'):
return 'OS X dmg'
if fname.endswith('.zip'):
return 'Calibre Portable'
return 'Unknown file'
class ReUpload(Command): # {{{
    description = 'Re-upload any installers present in dist/'
sub_commands = ['upload_installers']
def pre_sub_commands(self, opts):
opts.replace = True
def run(self, opts):
for x in installers():
if os.path.exists(x):
os.remove(x)
# }}}
# Data {{{
def get_google_data():
with open(os.path.expanduser('~/work/kde/conf/googlecodecalibre'), 'rb') as f:
gc_password, ga_un, pw = f.read().strip().split('|')
return {
'username':ga_un, 'password':pw, 'gc_password':gc_password,
'path_map_server':'[email protected]',
'path_map_location':'/var/www/status.calibre-ebook.com/googlepaths',
# If you change this remember to change it in the
# status.calibre-ebook.com server as well
'project':'calibre-ebook'
}
def get_sourceforge_data():
return {'username':'kovidgoyal', 'project':'calibre'}
def send_data(loc):
subprocess.check_call(['rsync', '--inplace', '--delete', '-r', '-z', '-h', '--progress', '-e', 'ssh -x',
loc+'/', '%s@%s:%s'%(STAGING_USER, STAGING_HOST, STAGING_DIR)])
def gc_cmdline(ver, gdata):
return [__appname__, ver, 'fmap', 'googlecode',
gdata['project'], gdata['username'], gdata['password'],
gdata['gc_password'], '--path-map-server',
gdata['path_map_server'], '--path-map-location',
gdata['path_map_location']]
def sf_cmdline(ver, sdata):
return [__appname__, ver, 'fmap', 'sourceforge', sdata['project'],
sdata['username']]
def run_remote_upload(args):
print 'Running remotely:', ' '.join(args)
subprocess.check_call(['ssh', '-x', '%s@%s'%(STAGING_USER, STAGING_HOST),
'cd', STAGING_DIR, '&&', 'python', 'hosting.py']+args)
# }}}
class UploadInstallers(Command): # {{{
def add_options(self, parser):
parser.add_option('--replace', default=False, action='store_true', help=
'Replace existing installers, when uploading to google')
def run(self, opts):
all_possible = set(installers())
available = set(glob.glob('dist/*'))
files = {x:installer_description(x) for x in
all_possible.intersection(available)}
tdir = mkdtemp()
try:
self.upload_to_staging(tdir, files)
self.upload_to_sourceforge()
self.upload_to_google(opts.replace)
finally:
shutil.rmtree(tdir, ignore_errors=True)
def upload_to_staging(self, tdir, files):
os.mkdir(tdir+'/dist')
hosting = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'hosting.py')
shutil.copyfile(hosting, os.path.join(tdir, 'hosting.py'))
for f in files:
shutil.copyfile(f, os.path.join(tdir, f))
with open(os.path.join(tdir, 'fmap'), 'wb') as fo:
for f, desc in files.iteritems():
fo.write('%s: %s\n'%(f, desc))
while True:
try:
send_data(tdir)
except:
print('\nUpload to staging failed, retrying in a minute')
time.sleep(60)
else:
break
def upload_to_google(self, replace):
gdata = get_google_data()
args = gc_cmdline(__version__, gdata)
if replace:
args = ['--replace'] + args
run_remote_upload(args)
def upload_to_sourceforge(self):
sdata = get_sourceforge_data()
args = sf_cmdline(__version__, sdata)
run_remote_upload(args)
# }}}
class UploadUserManual(Command): # {{{
description = 'Build and upload the User Manual'
sub_commands = ['manual']
def build_plugin_example(self, path):
from calibre import CurrentDir
with NamedTemporaryFile(suffix='.zip') as f:
os.fchmod(f.fileno(),
stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH|stat.S_IWRITE)
with CurrentDir(path):
with ZipFile(f, 'w') as zf:
for x in os.listdir('.'):
if x.endswith('.swp'): continue
zf.write(x)
if os.path.isdir(x):
for y in os.listdir(x):
zf.write(os.path.join(x, y))
bname = self.b(path) + '_plugin.zip'
dest = '%s/%s'%(DOWNLOADS, bname)
subprocess.check_call(['scp', f.name, 'divok:'+dest])
def run(self, opts):
path = self.j(self.SRC, '..', 'manual', 'plugin_examples')
for x in glob.glob(self.j(path, '*')):
self.build_plugin_example(x)
check_call(' '.join(['rsync', '-z', '-r', '--progress',
'manual/.build/html/',
'bugs:%s'%USER_MANUAL]), shell=True)
# }}}
class UploadDemo(Command): # {{{
description = 'Rebuild and upload various demos'
def run(self, opts):
check_call(
'''ebook-convert %s/demo.html /tmp/html2lrf.lrf '''
'''--title='Demonstration of html2lrf' --authors='Kovid Goyal' '''
'''--header '''
'''--serif-family "/usr/share/fonts/corefonts, Times New Roman" '''
'''--mono-family "/usr/share/fonts/corefonts, Andale Mono" '''
''''''%self.j(self.SRC, HTML2LRF), shell=True)
check_call(
'cd src/calibre/ebooks/lrf/html/demo/ && '
'zip -j /tmp/html-demo.zip * /tmp/html2lrf.lrf', shell=True)
check_call('scp /tmp/html-demo.zip divok:%s/'%(DOWNLOADS,), shell=True)
# }}}
class UploadToServer(Command): # {{{
description = 'Upload miscellaneous data to calibre server'
def run(self, opts):
check_call('ssh divok rm -f %s/calibre-\*.tar.xz'%DOWNLOADS, shell=True)
#check_call('scp dist/calibre-*.tar.xz divok:%s/'%DOWNLOADS, shell=True)
check_call('gpg --armor --detach-sign dist/calibre-*.tar.xz',
shell=True)
check_call('scp dist/calibre-*.tar.xz.asc divok:%s/signatures/'%DOWNLOADS,
shell=True)
check_call('ssh divok bzr update /usr/local/calibre',
shell=True)
check_call('''ssh divok echo %s \\> %s/latest_version'''\
%(__version__, DOWNLOADS), shell=True)
check_call('ssh divok /etc/init.d/apache2 graceful',
shell=True)
tdir = mkdtemp()
for installer in installers():
if not os.path.exists(installer):
continue
with open(installer, 'rb') as f:
raw = f.read()
fingerprint = hashlib.sha512(raw).hexdigest()
fname = os.path.basename(installer+'.sha512')
with open(os.path.join(tdir, fname), 'wb') as f:
f.write(fingerprint)
check_call('scp %s/*.sha512 divok:%s/signatures/' % (tdir, DOWNLOADS),
shell=True)
shutil.rmtree(tdir)
# }}}
# Testing {{{
def write_files(fmap):
for f in fmap:
with open(f, 'wb') as f:
f.write(os.urandom(100))
f.write(b'a'*1000000)
with open('fmap', 'wb') as fo:
for f, desc in fmap.iteritems():
fo.write('%s: %s\n'%(f, desc))
def setup_installers():
ver = '0.0.1'
files = {x.replace(__version__, ver):installer_description(x) for x in installers()}
tdir = mkdtemp()
os.chdir(tdir)
return tdir, files, ver
def test_google_uploader():
gdata = get_google_data()
gdata['project'] = 'calibre-hosting-uploader'
gdata['path_map_location'] += '-test'
hosting = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'hosting.py')
tdir, files, ver = setup_installers()
try:
os.mkdir('dist')
write_files(files)
shutil.copyfile(hosting, 'hosting.py')
send_data(tdir)
args = gc_cmdline(ver, gdata)
print ('Doing initial upload')
run_remote_upload(args)
raw_input('Press Enter to proceed:')
print ('\nDoing re-upload')
run_remote_upload(['--replace']+args)
raw_input('Press Enter to proceed:')
nv = ver + '.1'
files = {x.replace(__version__, nv):installer_description(x) for x in installers()}
write_files(files)
send_data(tdir)
args[1] = nv
print ('\nDoing update upload')
run_remote_upload(args)
        print ("\nDon't forget to delete any remaining files in the %s project"%
            gdata['project'])
finally:
shutil.rmtree(tdir)
# }}}
if __name__ == '__main__':
test_google_uploader()
| Eksmo/calibre | setup/upload.py | Python | gpl-3.0 | 10,267 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TODO: PORASQUI: BUG: Closing the window does not stop GMapCatcher; it keeps
# running as a background process stuck in a blocking wait... It does not even
# respond to Ctrl+C.
import sys, os.path
dirfichero = os.path.realpath(os.path.dirname(__file__))
if os.path.realpath(os.path.curdir) == dirfichero:
os.chdir("..")
if ("utils" in os.listdir(os.path.curdir)
and os.path.abspath(os.path.curdir) not in sys.path):
sys.path.insert(0, ".")
from utils.googlemaps import GoogleMaps, GoogleMapsError
try:
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    # raise ImportError # XXX: Only for testing... REMOVE AFTERWARDS
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    import osmgpsmap # Third-party dependency. Not in the local tree.
from utils.mapviewer import DummyLayer, imdir
OSMGPSMAP = True
except ImportError, msg:
OSMGPSMAP = False
os.chdir(os.path.abspath(
os.path.join(dirfichero, "..", "utils", "gmapcatcher")))
import maps as gmc
os.chdir(os.path.join(dirfichero, ".."))
import gtk
APIFILENAME = "gg_api_key.txt"
class Mapa():
def __init__(self, apifile = None):
if not apifile:
mydir = os.path.dirname(os.path.abspath(__file__))
apifile = os.path.join(mydir, APIFILENAME)
fapi = open(apifile)
self.__ggapi = fapi.read()
fapi.close()
self.ggmap = GoogleMaps(self.__ggapi)
self.init_mapa()
def init_mapa(self):
if OSMGPSMAP:
self.osm = osmgpsmap.GpsMap()
self.osm.layer_add(osmgpsmap.GpsMapOsd(show_dpad = True,
show_zoom = True))
self.osm.layer_add(
DummyLayer())
self.osm.connect('button_release_event', self.map_clicked)
            self.osm.set_zoom(13) # Default zoom
else:
            logging_path = conf_path = None # This is the default configuration. See
            # utils/gmapcatcher/maps.py for more details.
gmc.mapLogging.init_logging(logging_path)
gmc.log.info("Starting %s version %s." % (gmc.NAME, gmc.VERSION))
self.gmcw = gmc.MainWindow(config_path = conf_path)
            self.gmcw.do_zoom(4) # Default zoom.
            # TODO: PORASQUI: Map between the GMC zoom scale, which -I think-
            # runs from -2 (close) to more than 10 (far), and the OSM one,
            # which goes the other way, with 13 being close. See the "constants"
            # defined in each case (MAX_ZOOM_whatever in GMC).
self.osm = self.gmcw.container
def map_clicked(self, osm, event):
if OSMGPSMAP:
lat, lon = self.osm.get_event_location(event).get_degrees()
else:
lat, lon = 0, 0 # PORASQUI
if event.button == 1:
#self.latlon_entry.set_text(
# 'Map Centre: latitude %s longitude %s' % (
# self.osm.props.latitude,
# self.osm.props.longitude
# )
#)
pass
elif event.button == 2:
if OSMGPSMAP:
self.osm.gps_add(lat, lon, heading = osmgpsmap.INVALID);
else:
pass # PORASQUI
elif event.button == 3:
if OSMGPSMAP:
pb = gtk.gdk.pixbuf_new_from_file_at_size(
os.path.join(imdir, "poi.png"), 24,24)
self.osm.image_add(lat,lon,pb)
else:
pass # PORASQUI
def centrar_mapa(self, lat, lon, zoom = None, track = True, flag = False):
"""
        @param track Whether to mark the point with a circle and the
        travelled "track".
        @param flag Whether to mark the point with a flag.
"""
if lat == None:
raise ValueError, "Mapa.centrar_mapa -> Latitud incorrecta"
if lon == None:
raise ValueError, "Mapa.centrar_mapa -> Longitud incorrecta"
if zoom is None:
if OSMGPSMAP:
self.osm.set_center(lat, lon)
else:
self.gmcw.confirm_clicked(None, None, lat, lon)
else:
if OSMGPSMAP:
self.osm.set_center_and_zoom(lat, lon, zoom)
else:
self.gmcw.confirm_clicked(None, None, lat, lon)
self.gmcw.do_zoom(zoom)
if track:
if OSMGPSMAP:
self.osm.gps_add(lat, lon, heading = osmgpsmap.INVALID);
else:
self.gmcw.confirm_clicked(None, None, lat, lon)
# PORASQUI: No support for the moment...
if flag:
if OSMGPSMAP:
pb = gtk.gdk.pixbuf_new_from_file_at_size(
os.path.join(imdir, "poi.png"), 24, 24)
self.osm.image_add(lat, lon, pb)
else:
self.gmcw.confirm_clicked(None, None, lat, lon)
# PORASQUI: No support for the moment...
def put_mapa(self, container):
#m = self.wids['mapa_container']
m = container
m.add(self.osm)
m.show_all()
        if not OSMGPSMAP: # A few widgets need to be hidden...
for w in (self.gmcw.export_panel,
self.gmcw.top_panel,
self.gmcw.status_bar):
try:
w.set_visible(False)
except AttributeError:
w.set_property("visible", False)
@property
def zoom(self):
"""Nivel actual de zoom en el mapa."""
if OSMGPSMAP:
return self.osm.props.zoom
else:
return self.gmcw.get_zoom()
def get_latlon(self, direccion):
"""
        Returns the latitude and longitude, as floats, for the given address.
        If it cannot be found on Google Maps, returns (None, None).
"""
try:
res = self.ggmap.address_to_latlng(direccion)
except GoogleMapsError:
res = (None, None)
return res
def test():
w = gtk.Window()
m = Mapa()
m.put_mapa(w)
#w.show_all()
w.connect("destroy", lambda *a, **kw: gtk.main_quit())
gtk.main()
if __name__ == "__main__":
test()
| pacoqueen/cican | utils/mapa.py | Python | gpl-3.0 | 6,423 |
#significant input and copied functions from T. Morton's VESPA code (all mistakes are my own)
#coords -- RA and DEC of target in degrees. Needed for GAIA querying.
# Degrees, 0-360 and -90 to +90. List format [RA,DEC].
import numpy as np
import pandas as pd
from scipy.integrate import quad
from scipy import stats
import astropy.constants as const
import astropy.units as u
from astropy.coordinates import SkyCoord
import subprocess as sp
import os, re
import time
AU = const.au.cgs.value
RSUN = const.R_sun.cgs.value
REARTH = const.R_earth.cgs.value
MSUN = const.M_sun.cgs.value
DAY = 86400 #seconds
G = const.G.cgs.value
import logging
def semimajor(P,mtotal=1.):
"""
Returns semimajor axis in AU given P in days, total mass in solar masses.
"""
return ((P*DAY/2/np.pi)**2*G*mtotal*MSUN)**(1./3)/AU
def eclipse_probability(R1, R2, P, M1, M2):
return (R1 + R2) *RSUN / (semimajor(P , M1 + M2)*AU)
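# Hedged usage sketch: a Jupiter-sized companion (~0.1 Rsun) on a 10-day orbit
# around a Sun-like star gives a geometric eclipse probability of roughly
#   eclipse_probability(1.0, 0.1, 10.0, 1.0, 0.001)  # ~0.056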
def centroid_PDF_source(pos,centroiddat):
cent_x, cent_y = centroiddat[0], centroiddat[1]
sig_x, sig_y = centroiddat[2], centroiddat[3]
return stats.multivariate_normal.pdf([pos[0],pos[1]],mean=[cent_x,cent_y],
cov=[[sig_x**(1/2.),0],[0,sig_y**(1/2.)]])
def bgeb_prior(centroid_val, star_density, skyarea, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_binary=0.3, f_close=0.12):
'''
Centroid val is value at source (no integration over area). This allows comparison
to planet_prior without having two planet_prior functions.
'''
return centroid_val * skyarea * star_density * f_binary * f_close * eclipse_probability(r1, r2, P, m1, m2)
def bgtp_prior(centroid_val, star_density, skyarea, P, r1=1.0, rp=1.0, m1=1.0, mp=0.0, f_planet=0.2):
'''
Centroid val is value at source (no integration over area). This allows comparison
to planet_prior without having two planet_prior functions.
'''
return centroid_val * skyarea * star_density * f_planet * eclipse_probability(r1, rp*REARTH/RSUN, P, m1, mp)
def eb_prior(centroid_val, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_binary=0.3, f_close=0.027):
'''
centroid pdf at source location
f_binary = 0.3 (moe + di stefano 2017) - valid for 0.8-1.2 Msun!
could improve to be average over all types?
f_close = 0.027 (moe + di stefano 2017) fraction of binaries with P between 3.2-32d
eclipse prob
works for defined source EBs too, just use appropriate centroid pdf value.
'''
return centroid_val * f_binary * f_close * eclipse_probability(r1, r2, P, m1, m2)
def heb_prior(centroid_val, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_triple=0.1, f_close=1.0):
'''
centroid pdf at source location
f_triple = 0.1 (moe + di stefano 2017) - valid for 0.8-1.2 Msun!
could improve to be average over all types?
f_close = 1.0 implies all triples have a close binary. May be over-generous
eclipse prob
'''
return centroid_val * f_triple * f_close * eclipse_probability(r1, r2, P, m1, m2)
def planet_prior(centroid_val, P, r1=1.0, rp=1.0, m1=1.0, mp=0.0, f_planet=0.2957):
'''
centroid pdf at source location
planet occurrence (fressin, any planet<29d)
eclipse prob
works for defined source planets too, just use appropriate centroid pdf value.
possibly needs a more general f_planet - as classifier will be using a range of planets.
should prior then be the prior of being in the whole training set, rather than the specific depth seen?
if so, need to change to 'fraction of ALL stars with planets' (i.e. including EBs etc).
Also look into default radii and masses. Precalculate mean eclipse probability for training set?
'''
return centroid_val * f_planet * eclipse_probability(r1, rp*REARTH/RSUN, P, m1, mp)
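# Hedged usage sketch: with `cval` the centroid PDF evaluated at the target
# position, planet_prior(cval, P=10.0, r1=1.0, rp=11.0) multiplies that
# positional evidence by the planet fraction and the transit probability.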
def fp_fressin(rp,dr=None):
if dr is None:
dr = rp*0.3
fp = quad(fressin_occurrence,rp-dr,rp+dr)[0]
return max(fp, 0.001) #to avoid zero
def fressin_occurrence(rp):
"""
Occurrence rates per bin from Fressin+ (2013)
"""
rp = np.atleast_1d(rp)
sq2 = np.sqrt(2)
bins = np.array([1/sq2,1,sq2,2,2*sq2,
4,4*sq2,8,8*sq2,
16,16*sq2])
rates = np.array([0,0.155,0.155,0.165,0.17,0.065,0.02,0.01,0.012,0.01,0.002,0])
return rates[np.digitize(rp,bins)]
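# Hedged example: fp_fressin(2.0) integrates these stepwise rates over
# rp = 1.4..2.6 Earth radii and comes out to roughly 0.2.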
def trilegal_density(ra,dec,kind='target',maglim=21.75,area=1.0,mapfile=None):
if kind=='interp' and mapfile is None:
print('HEALPIX map file must be passed')
return 0
if kind not in ['target','interp']:
print('kind not recognised. Setting kind=target')
kind = 'target'
if kind=='target':
basefilename = 'trilegal_'+str(ra)+'_'+str(dec)
h5filename = basefilename + '.h5'
if not os.path.exists(h5filename):
get_trilegal(basefilename,ra,dec,maglim=maglim,area=area)
else:
print('Using cached trilegal file. Sky area may be different.')
if os.path.exists(h5filename):
stars = pd.read_hdf(h5filename,'df')
with pd.HDFStore(h5filename) as store:
trilegal_args = store.get_storer('df').attrs.trilegal_args
if trilegal_args['maglim'] < maglim:
print('Re-calling trilegal with extended magnitude range')
get_trilegal(basefilename,ra,dec,maglim=maglim,area=area)
stars = pd.read_hdf(h5filename,'df')
stars = stars[stars['TESS_mag'] < maglim] #in case reading from file
#c = SkyCoord(trilegal_args['l'],trilegal_args['b'],
# unit='deg',frame='galactic')
#self.coords = c.icrs
area = trilegal_args['area']*(u.deg)**2
density = len(stars)/area
return density.value
else:
return 0
else:
import healpy as hp
#interpolate pre-calculated densities
coord = SkyCoord(ra,dec,unit='deg')
if np.abs(coord.galactic.b.value)<5:
print('Near galactic plane, Trilegal density may be inaccurate.')
#Density map will set mag limits
densitymap = hp.read_map(mapfile)
density = hp.get_interp_val(densitymap,ra,dec,lonlat=True)
return density
#maglim of 21 used following sullivan 2015
def get_trilegal(filename,ra,dec,folder='.', galactic=False,
filterset='TESS_2mass_kepler',area=1,maglim=21,binaries=False,
trilegal_version='1.6',sigma_AV=0.1,convert_h5=True):
"""Runs get_trilegal perl script; optionally saves output into .h5 file
Depends on a perl script provided by L. Girardi; calls the
web form simulation, downloads the file, and (optionally) converts
to HDF format.
Uses A_V at infinity from :func:`utils.get_AV_infinity`.
.. note::
Would be desirable to re-write the get_trilegal script
all in python.
:param filename:
Desired output filename. If extension not provided, it will
be added.
:param ra,dec:
Coordinates (ecliptic) for line-of-sight simulation.
:param folder: (optional)
Folder to which to save file. *Acknowledged, file control
in this function is a bit wonky.*
:param filterset: (optional)
Filter set for which to call TRILEGAL.
:param area: (optional)
Area of TRILEGAL simulation [sq. deg]
:param maglim: (optional)
Limiting magnitude in first mag (by default will be Kepler band)
If want to limit in different band, then you have to
got directly to the ``get_trilegal`` perl script.
:param binaries: (optional)
Whether to have TRILEGAL include binary stars. Default ``False``.
:param trilegal_version: (optional)
Default ``'1.6'``.
:param sigma_AV: (optional)
Fractional spread in A_V along the line of sight.
:param convert_h5: (optional)
If true, text file downloaded from TRILEGAL will be converted
into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'``
path.
"""
if galactic:
l, b = ra, dec
else:
try:
c = SkyCoord(ra,dec)
except:
c = SkyCoord(ra,dec,unit='deg')
l,b = (c.galactic.l.value,c.galactic.b.value)
if os.path.isabs(filename):
folder = ''
if not re.search('\.dat$',filename):
outfile = '{}/{}.dat'.format(folder,filename)
else:
outfile = '{}/{}'.format(folder,filename)
NONMAG_COLS = ['Gc','logAge', '[M/H]', 'm_ini', 'logL', 'logTe', 'logg',
'm-M0', 'Av', 'm2/m1', 'mbol', 'Mact'] #all the rest are mags
AV = get_AV_infinity(l,b,frame='galactic')
print(AV)
if AV is not None:
if AV<=1.5:
trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,outfile)
#cmd = './get_trilegal %s %f %f %f %i %.3f %.2f %s 1 %.1f %s' % (trilegal_version,l,b,
# area,binaries,AV,sigma_AV,
# filterset,maglim,outfile)
#sp.Popen(cmd,shell=True).wait()
if convert_h5:
df = pd.read_table(outfile, sep='\s+', skipfooter=1, engine='python')
df = df.rename(columns={'#Gc':'Gc'})
for col in df.columns:
if col not in NONMAG_COLS:
df.rename(columns={col:'{}_mag'.format(col)},inplace=True)
if not re.search('\.h5$', filename):
h5file = '{}/{}.h5'.format(folder,filename)
else:
h5file = '{}/{}'.format(folder,filename)
df.to_hdf(h5file,'df')
with pd.HDFStore(h5file) as store:
attrs = store.get_storer('df').attrs
attrs.trilegal_args = {'version':trilegal_version,
'ra':ra, 'dec':dec,
'l':l,'b':b,'area':area,
'AV':AV, 'sigma_AV':sigma_AV,
'filterset':filterset,
'maglim':maglim,
'binaries':binaries}
os.remove(outfile)
else:
        print('Skipping, AV > 1.5 or not found')
def trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,
outfile):
"""Calls TRILEGAL webserver and downloads results file.
:param trilegal_version:
Version of trilegal (only tested on 1.6).
:param l,b:
Coordinates (galactic) for line-of-sight simulation.
:param area:
Area of TRILEGAL simulation [sq. deg]
:param binaries:
Whether to have TRILEGAL include binary stars. Default ``False``.
:param AV:
Extinction along the line of sight.
:param sigma_AV:
Fractional spread in A_V along the line of sight.
:param filterset: (optional)
Filter set for which to call TRILEGAL.
:param maglim:
Limiting magnitude in mag (by default will be 1st band of filterset)
If want to limit in different band, then you have to
change function directly.
:param outfile:
Desired output filename.
"""
webserver = 'http://stev.oapd.inaf.it'
args = [l,b,area,AV,sigma_AV,filterset,maglim,1,binaries]
mainparams = ('imf_file=tab_imf%2Fimf_chabrier_lognormal.dat&binary_frac=0.3&'
'binary_mrinf=0.7&binary_mrsup=1&extinction_h_r=100000&extinction_h_z='
'110&extinction_kind=2&extinction_rho_sun=0.00015&extinction_infty={}&'
'extinction_sigma={}&r_sun=8700&z_sun=24.2&thindisk_h_r=2800&'
'thindisk_r_min=0&thindisk_r_max=15000&thindisk_kind=3&thindisk_h_z0='
'95&thindisk_hz_tau0=4400000000&thindisk_hz_alpha=1.6666&'
'thindisk_rho_sun=59&thindisk_file=tab_sfr%2Ffile_sfr_thindisk_mod.dat&'
'thindisk_a=0.8&thindisk_b=0&thickdisk_kind=0&thickdisk_h_r=2800&'
'thickdisk_r_min=0&thickdisk_r_max=15000&thickdisk_h_z=800&'
'thickdisk_rho_sun=0.0015&thickdisk_file=tab_sfr%2Ffile_sfr_thickdisk.dat&'
'thickdisk_a=1&thickdisk_b=0&halo_kind=2&halo_r_eff=2800&halo_q=0.65&'
'halo_rho_sun=0.00015&halo_file=tab_sfr%2Ffile_sfr_halo.dat&halo_a=1&'
'halo_b=0&bulge_kind=2&bulge_am=2500&bulge_a0=95&bulge_eta=0.68&'
'bulge_csi=0.31&bulge_phi0=15&bulge_rho_central=406.0&'
'bulge_cutoffmass=0.01&bulge_file=tab_sfr%2Ffile_sfr_bulge_zoccali_p03.dat&'
'bulge_a=1&bulge_b=-2.0e9&object_kind=0&object_mass=1280&object_dist=1658&'
'object_av=1.504&object_avkind=1&object_cutoffmass=0.8&'
'object_file=tab_sfr%2Ffile_sfr_m4.dat&object_a=1&object_b=0&'
'output_kind=1').format(AV,sigma_AV)
cmdargs = [trilegal_version,l,b,area,filterset,1,maglim,binaries,mainparams,
webserver,trilegal_version]
cmd = ("wget -o lixo -Otmpfile --post-data='submit_form=Submit&trilegal_version={}"
"&gal_coord=1&gc_l={}&gc_b={}&eq_alpha=0&eq_delta=0&field={}&photsys_file="
"tab_mag_odfnew%2Ftab_mag_{}.dat&icm_lim={}&mag_lim={}&mag_res=0.1&"
"binary_kind={}&{}' {}/cgi-bin/trilegal_{}").format(*cmdargs)
complete = False
while not complete:
notconnected = True
busy = True
print("TRILEGAL is being called with \n l={} deg, b={} deg, area={} sqrdeg\n "
"Av={} with {} fractional r.m.s. spread \n in the {} system, complete down to "
"mag={} in its {}th filter, use_binaries set to {}.".format(*args))
sp.Popen(cmd,shell=True).wait()
if os.path.exists('tmpfile') and os.path.getsize('tmpfile')>0:
notconnected = False
else:
print("No communication with {}, will retry in 2 min".format(webserver))
time.sleep(120)
if not notconnected:
with open('tmpfile','r') as f:
lines = f.readlines()
for line in lines:
if 'The results will be available after about 2 minutes' in line:
busy = False
break
sp.Popen('rm -f lixo tmpfile',shell=True)
if not busy:
filenameidx = line.find('<a href=../tmp/') +15
fileendidx = line[filenameidx:].find('.dat')
filename = line[filenameidx:filenameidx+fileendidx+4]
print("retrieving data from {} ...".format(filename))
while not complete:
time.sleep(120)
modcmd = 'wget -o lixo -O{} {}/tmp/{}'.format(filename,webserver,filename)
modcall = sp.Popen(modcmd,shell=True).wait()
if os.path.getsize(filename)>0:
with open(filename,'r') as f:
lastline = f.readlines()[-1]
if 'normally' in lastline:
complete = True
print('model downloaded!..')
if not complete:
print('still running...')
else:
print('Server busy, trying again in 2 minutes')
time.sleep(120)
sp.Popen('mv {} {}'.format(filename,outfile),shell=True).wait()
print('results copied to {}'.format(outfile))
def get_AV_infinity(ra,dec,frame='icrs'):
"""
Gets the A_V exctinction at infinity for a given line of sight.
Queries the NED database using ``curl``.
.. note::
It would be desirable to rewrite this to avoid dependence
on ``curl``.
:param ra,dec:
Desired coordinates, in degrees.
:param frame: (optional)
Frame of input coordinates (e.g., ``'icrs', 'galactic'``)
"""
coords = SkyCoord(ra,dec,unit='deg',frame=frame).transform_to('icrs')
rah,ram,ras = coords.ra.hms
decd,decm,decs = coords.dec.dms
if decd > 0:
decsign = '%2B'
else:
decsign = '%2D'
url = 'http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon='+'%i' % rah + \
'%3A'+'%i' % ram + '%3A' + '%05.2f' % ras + '&lat=%s' % decsign + '%i' % abs(decd) + '%3A' + '%i' % abs(decm) + '%3A' + '%05.2f' % abs(decs) + \
'&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0'
tmpfile = '/tmp/nedsearch%s%s.html' % (ra,dec)
cmd = 'curl -s \'%s\' -o %s' % (url,tmpfile)
sp.Popen(cmd,shell=True).wait()
AV = None
try:
with open(tmpfile, 'r') as f:
for line in f:
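                # Pick out the Galactic extinction value for the V band (0.54 micron)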
m = re.search('V \(0.54\)\s+(\S+)',line)
if m:
AV = float(m.group(1))
os.remove(tmpfile)
except:
logging.warning('Error accessing NED, url={}'.format(url))
return AV | DJArmstrong/autovet | FPPcalc/priorutils.py | Python | gpl-3.0 | 16,928 |
"""
/*
* Custom handlers for the BBB
*
*/
"""
import Adafruit_BBIO.GPIO as GPIO
GPIO.setup("P9_12", GPIO.OUT)
def alexaHandler(client, userdata, message):
print "Received payload: " + str(message.payload.decode())
    # Assume only 1 and 0 are sent here.
if message.payload == "1":
GPIO.output("P9_12", GPIO.HIGH)
print "Turned christmas tree On"
elif message.payload == "0":
GPIO.output("P9_12", GPIO.LOW)
print "Turned christmas tree Off"
def cleanUp():
GPIO.cleanup()
| Metonimie/Beaglebone | alexa-amazon/basicServer/gpio_handlers.py | Python | gpl-3.0 | 525 |
#!/usr/bin/python
# Copyright (C) 2013 rapidhere
#
# Author: rapidhere <[email protected]>
# Maintainer: rapidhere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import skapp
from optparse import OptionParser
import sys
parser = OptionParser(
usage = "%prog [options]",
description = """A simple snake game.Suggest that resize your terminal window at a property size befor playing!""",
epilog = "[email protected]",
version = "0.1"
)
parser.add_option(
"","--key-help",
action = "store_true",default = False,
help = "show game keys"
)
opts,args = parser.parse_args()
parser.destroy()
if opts.key_help:
print "'w' or 'W' or UP-Arrow up"
print "'a' or 'A' or LF-Arrow left"
print "'s' or 'S' or DW-Arrow down"
print "'d' or 'D' or RG-Arrpw right"
print "'q' or 'Q' quit"
sys.exit(0)
else:
app = skapp.SKApp()
app.run()
| rapidhere/snake_game | snake_game.py | Python | gpl-3.0 | 1,496 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
try:
import regex
_regex_available = True
except ImportError:
_regex_available = False
import phonenumbers
from six.moves import zip
from language_utilities.constant import ENGLISH_LANG
from ner_v2.detectors.base_detector import BaseDetector
from ner_v2.detectors.numeral.number.number_detection import NumberDetector
class PhoneDetector(BaseDetector):
"""
    This class detects phone numbers present in text. The phone detector takes into
consideration domestic as well as international phone numbers.
Attributes:
text(str): string provided to extract phone numbers detection
phone (list): list of detected entity values
original_phone_text (list): list to store substrings of the text detected as phone numbers
"""
def __init__(self, entity_name, language=ENGLISH_LANG, locale=None):
"""
Args:
entity_name (str): A string by which the detected numbers would be replaced with
on calling detect_entity()
language (str, optional): language code of number text, defaults to 'en'
locale(str, optional): locale of the country from which you are dialing. Ex: 'en-IN'
"""
self._supported_languages = NumberDetector.get_supported_languages()
super(PhoneDetector, self).__init__(language, locale)
self.language = language
self.locale = locale or 'en-IN'
if _regex_available:
            # Replace any type of dash (em or en) in the locale with a plain hyphen.
self.locale = regex.sub('\\p{Pd}', '-', self.locale)
self.text = ''
self.phone, self.original_phone_text = [], []
self.country_code = self.get_country_code_from_locale()
self.entity_name = entity_name
self.tag = '__' + self.entity_name + '__'
@property
def supported_languages(self):
"""
This method returns the list of languages supported by entity detectors
Return:
list: List of ISO 639 codes of languages supported by subclass/detector
"""
return self._supported_languages
def get_country_code_from_locale(self):
"""
This method sets self.country_code from given locale
"""
regex_pattern = re.compile('[-_](.*$)', re.U)
match = regex_pattern.findall(self.locale)
if match:
return match[0].upper()
else:
return 'IN'
def detect_entity(self, text, **kwargs):
"""Detects phone numbers in the text string
Args:
text: string to extract entities from
**kwargs: it can be used to send specific arguments in future.
Returns:
self.phone (list): list consisting the detected phone numbers and their country calling codes
self.original_phone_text (list): list containing their corresponding substrings in the original message.
Examples:
text = 'call +1 (408) 912-6172'
p = PhoneDetector(entity_name='phone_number', language='en', locale='en-US')
p.detect_entity(text=text)
([{'country_calling_code':'1', value':'4089126172'} ],
[u'+1 (408) 912-6172'])
text = '+९१ ९८१९९८३१३२ पर कॉल करें और संदेश ९८२०३३४४१६ पर कॉल करें'
p = PhoneDetector(entity_name='phone_number', language='hi', locale='en-IN')
p.detect_entity(text=text)
([{'country_calling_code':'91', value':'9819983132'}
,{ 'country_calling_code':'91', value:'9820334416'} ],
[u'+९१ ९८१९९८३१३२', u'+९१ ९८१९९८३१३२'])
"""
self.text = " " + text.lower().strip() + " "
self.phone, self.original_phone_text = [], []
for match in phonenumbers.PhoneNumberMatcher(self.text, self.country_code, leniency=0):
if match.number.country_code == phonenumbers.country_code_for_region(self.country_code):
self.phone.append(self.check_for_country_code(str(match.number.national_number)))
self.original_phone_text.append(self.text[match.start:match.end])
else:
# This means our detector has detected some other country code.
self.phone.append({"country_calling_code": str(match.number.country_code),
"value": str(match.number.national_number)})
self.original_phone_text.append(self.text[match.start:match.end])
self.phone, self.original_phone_text = self.check_for_alphas()
return self.phone, self.original_phone_text
def check_for_alphas(self):
"""
        Checks for leading or trailing alphanumeric characters around each detected number and drops matches embedded in a longer token.
"""
validated_phone = []
validated_original_text = []
for phone, original in zip(self.phone, self.original_phone_text):
if re.search(r'\W' + re.escape(original) + r'\W', self.text, re.UNICODE):
validated_phone.append(phone)
validated_original_text.append(original)
return validated_phone, validated_original_text
def check_for_country_code(self, phone_num):
"""
:param phone_num: the number which is to be checked for country code
:return: dict with country_code if it's in phone_num or phone_number with current country code
Examples:
phone_num = '919123456789'
countryCallingCode = 'IN'
{countryCallingCode:"91",value:"9123456789"}
"""
phone_dict = {}
if len(phone_num) > 10:
check_country_regex = re.compile(r'^({country_code})\d{length}$'.
format(country_code='911|1|011 91|91', length='{10}'), re.U)
p = check_country_regex.findall(phone_num)
if len(p) == 1:
phone_dict['country_calling_code'] = p[0]
country_code_sub_regex = re.compile(r'^{detected_code}'.format(detected_code=p[0]))
phone_dict['value'] = country_code_sub_regex.sub(string=phone_num, repl='')
else:
phone_dict['country_calling_code'] = str(phonenumbers.country_code_for_region(self.country_code))
phone_dict['value'] = phone_num
else:
phone_dict['country_calling_code'] = str(phonenumbers.country_code_for_region(self.country_code))
phone_dict['value'] = phone_num
return phone_dict
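# Minimal usage sketch, mirroring the docstring examples above (assumes a
# configured chatbot_ner environment):
#
#     detector = PhoneDetector(entity_name='phone_number', language='en', locale='en-IN')
#     values, spans = detector.detect_entity('call me at +91 9819983132')
#     # values -> [{'country_calling_code': '91', 'value': '9819983132'}]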
| hellohaptik/chatbot_ner | ner_v2/detectors/pattern/phone_number/phone_number_detection.py | Python | gpl-3.0 | 6,667 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Yann GUIBET <[email protected]>
# See LICENSE for details.
import sys, os
from gevent import select, monkey, spawn, Greenlet, GreenletExit, sleep, socket
from base64 import b64encode
from hashlib import md5
from struct import pack, unpack
from zlib import adler32
from Proto import Proto
from Index import Index
from Config import *
class Client(Proto):
def __init__(self, vpn):
self.vpn = vpn
def close(self):
try:
self.sock.close()
except:
pass
def error(self, exp):
self.close()
def connect(self, host, port, pubkey):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.handshake(pubkey)
except Exception as e:
self.error(e)
raise
def handshake(self, pubkey):
self.send_id()
myiv = self.send_iv()
iv = self.get_iv(pubkey)
self.init_cipher(pubkey, myiv, iv)
def recv_file(self):
if self.srecvall(1) != "\x01":
self.ssend("\xFF")
raise Exception, "Bad Flags (0x01 expected)"
size = self.srecvall(4)
checksum = self.srecvall(4)
if adler32(size) != unpack('!I',checksum)[0]:
self.ssend("\xFF")
raise Exception, "Bad checksum"
size = unpack('!I', size)[0]
buffer = self.srecvall(size)
hash = self.srecvall(16)
if md5(buffer).digest() != hash:
self.ssend("\xFF")
raise Exception, "Bad md5 ..."
return buffer
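    # Wire format consumed by recv_file, reconstructed from the checks above
    # (integers are big-endian, sizes in bytes):
    #   0x01 | size:4 | adler32(size):4 | payload:size | md5(payload):16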
def get_file(self, id, name):
path = os.path.join(inbox, name)
while os.path.exists(path):
name = "_"+name
path = os.path.join(inbox, name)
#raise Exception, "%s already exist ..." % path
self.ssend("\x02"+pack('!I',id))
buff = self.recv_file()
with open(path, "wb") as f:
f.write(buff)
def get_index(self, id):
index = Index(id)
buffer = index.get_xml().encode('utf-8')
hash = md5(buffer).digest()
self.ssend('\x03'+hash)
flag = self.srecvall(1)
if flag == "\x04":
buffer = self.recv_file()
index.set_xml(buffer)
elif flag == "\x05":
pass
else:
raise Exception, "Protocol Error"
| yann2192/vpyn | Client.py | Python | gpl-3.0 | 2,459 |
import shesha.config as conf
simul_name = "bench_scao_sh_16x16_8pix"
layout = "layoutDeFab_SH"
# loop
p_loop = conf.Param_loop()
p_loop.set_niter(1000)
p_loop.set_ittime(0.002) # =1/500
# geom
p_geom = conf.Param_geom()
p_geom.set_zenithangle(0.)
# tel
p_tel = conf.Param_tel()
p_tel.set_diam(4.0)
p_tel.set_cobs(0.2)
# atmos
p_atmos = conf.Param_atmos()
p_atmos.set_r0(0.16)
p_atmos.set_nscreens(1)
p_atmos.set_frac([1.0])
p_atmos.set_alt([0.0])
p_atmos.set_windspeed([10.])
p_atmos.set_winddir([45.])
p_atmos.set_L0([1.e5])
# target
p_target = conf.Param_target()
p_targets = [p_target]
# p_target.set_ntargets(1)
p_target.set_xpos(0.)
p_target.set_ypos(0.)
p_target.set_Lambda(1.65)
p_target.set_mag(10.)
# wfs
p_wfs0 = conf.Param_wfs(roket=True)
p_wfss = [p_wfs0]
p_wfs0.set_type("sh")
p_wfs0.set_nxsub(8)
p_wfs0.set_npix(8)
p_wfs0.set_pixsize(0.3)
p_wfs0.set_fracsub(0.8)
p_wfs0.set_xpos(0.)
p_wfs0.set_ypos(0.)
p_wfs0.set_Lambda(0.5)
p_wfs0.set_gsmag(8.)
p_wfs0.set_optthroughput(0.5)
p_wfs0.set_zerop(1.e11)
p_wfs0.set_noise(3.)
p_wfs0.set_atmos_seen(1)
# lgs parameters
# p_wfs0.set_gsalt(90*1.e3)
# p_wfs0.set_lltx(0)
# p_wfs0.set_llty(0)
# p_wfs0.set_laserpower(10)
# p_wfs0.set_lgsreturnperwatt(1.e3)
# p_wfs0.set_proftype("Exp")
# p_wfs0.set_beamsize(0.8)
# dm
p_dm0 = conf.Param_dm()
p_dm1 = conf.Param_dm()
p_dms = [p_dm0, p_dm1]
p_dm0.set_type("pzt")
p_dm0.set_file_influ_fits("test_custom_dm.fits")
p_dm0.set_alt(0.)
p_dm0.set_thresh(0.3)
p_dm0.set_unitpervolt(0.01)
p_dm0.set_push4imat(100.)
p_dm0.set_diam_dm_proj(4.1)
p_dm1.set_type("tt")
p_dm1.set_alt(0.)
p_dm1.set_unitpervolt(0.0005)
p_dm1.set_push4imat(10.)
# centroiders
p_centroider0 = conf.Param_centroider()
p_centroiders = [p_centroider0]
p_centroider0.set_nwfs(0)
p_centroider0.set_type("cog")
# p_centroider0.set_type("corr")
# p_centroider0.set_type_fct("model")
# controllers
p_controller0 = conf.Param_controller()
p_controllers = [p_controller0]
p_controller0.set_type("ls")
p_controller0.set_nwfs([0])
p_controller0.set_ndm([0, 1])
p_controller0.set_maxcond(1500.)
p_controller0.set_delay(1.)
p_controller0.set_gain(0.4)
p_controller0.set_modopti(0)
p_controller0.set_nrec(2048)
p_controller0.set_nmodes(216)
p_controller0.set_gmin(0.001)
p_controller0.set_gmax(0.5)
p_controller0.set_ngain(500)
| ANR-COMPASS/shesha | data/par/par4tests/test_custom_dm_diam_dm_proj.py | Python | gpl-3.0 | 2,303 |
# import libraries
import math
import random
import pygame
from pygame.locals import *
pygame.init()
pygame.mixer.init()
width, height = 800, 600
screen = pygame.display.set_mode((width, height))
keys = [False, False, False, False]
player = [100, 520]
invaders = []
bullets = []
bombs = []
rockets = []
rocketpieces = []
bgimg = pygame.image.load("g:/invaders/paragliding_2017_4_bsl-73.jpg")
invaderimg = pygame.transform.scale(pygame.image.load("g:/invaders/Space-Invaders-PNG-Clipart.png"), (64, 64))
playerimg = pygame.transform.scale(pygame.image.load("g:/invaders/space-invaders-1again.png"), (64, 64))
bulletimg = pygame.transform.scale(pygame.image.load("g:/invaders/square-rounded-512.png"), (16, 16))
# 4 - keep looping through
running = 1
exitcode = 0
invadersmv = 1
# create invaders
for i in range(0, 734, 96):
    for j in range(0, 300, 64):
invaders.append([i, j])
while running:
# 5 - clear the screen before drawing it again
movedown=False
#screen.fill(0)
# 6 - draw the screen elements
screen.blit(bgimg, (0, 0))
screen.blit(playerimg, player)
for invader in invaders:
screen.blit(invaderimg, invader)
for invader in invaders:
if invader[0] >= 736:
invadersmv = -1
movedown=True
break
if invader[0] <= 0:
invadersmv = 1
movedown=True
break
for invader in invaders:
invader[0] += invadersmv
if movedown: invader[1] += 2
for bullet in bullets:
screen.blit(bulletimg, bullet)
bullet[1] -= 1
if len(bullets) > 0 and bullets[0][1] <= -16:
bullets.pop(0)
# collision check
destroyedinvaders = []
destroyedbullets = []
for bullet in bullets:
for invader in invaders:
            # AABB overlap test between the 16x16 bullet and the 64x64 invader.
            if bullet[0] < invader[0] + 64 and bullet[0] + 16 > invader[0] and bullet[1] < invader[1] + 64 and bullet[1] + 16 > invader[1]:
destroyedbullets.append(bullet)
destroyedinvaders.append(invader)
#print('collision')
bullets = [item for item in bullets if item not in destroyedbullets]
invaders = [item for item in invaders if item not in destroyedinvaders]
# 9 - Move player
## if keys[0]:
## player[1] -= 5
## elif keys[2]:
## player[1] += 5
if keys[1] and player[0] >= 0:
player[0] -= 5
elif keys[3] and player[0] <= 736:
player[0] += 5
# 7 - update the screen
pygame.display.flip()
# 8 - check events
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_w:
keys[0] = True
elif event.key == K_a:
keys[1] = True
elif event.key == K_s:
keys[2] = True
elif event.key == K_d:
keys[3] = True
if event.type == KEYUP:
if event.key == K_w:
keys[0] = False
elif event.key == K_a:
keys[1] = False
elif event.key == K_s:
keys[2] = False
elif event.key == K_d:
keys[3] = False
if event.type == QUIT:
pygame.quit()
exit(0)
if event.type == MOUSEBUTTONDOWN:
#shoot.play()
if len(bullets) < 3: # up to three bullets
bullets.append([player[0]+32, player[1]-32])
| vlna/another-py-invaders | another-py-invaders.py | Python | gpl-3.0 | 3,451 |
""" Configuration and utilities for all the X509 unit tests """
import os
import sys
from datetime import datetime
from pytest import fixture
# We use certificates stored in the same folder as this test file
CERTDIR = os.path.join(os.path.dirname(__file__), "certs")
HOSTCERT = os.path.join(CERTDIR, "host/hostcert.pem")
HOSTKEY = os.path.join(CERTDIR, "host/hostkey.pem")
USERCERT = os.path.join(CERTDIR, "user/usercert.pem")
USERKEY = os.path.join(CERTDIR, "user/userkey.pem")
VOMSPROXY = os.path.join(CERTDIR, "voms/proxy.pem")
ENCRYPTEDKEY = os.path.join(CERTDIR, "key/encrypted_key_pass_0000.pem")
ENCRYPTEDKEYPASS = "0000"
CERTS = (HOSTCERT, USERCERT)
CERTKEYS = (HOSTKEY, USERKEY)
CERTCONTENTS = {
"HOSTCERTCONTENT": """-----BEGIN CERTIFICATE-----
MIIGQTCCBCmgAwIBAgICEAIwDQYJKoZIhvcNAQELBQAwVDEYMBYGA1UECgwPRElS
QUMgQ29tcHV0aW5nMTgwNgYDVQQDDC9ESVJBQyBDb21wdXRpbmcgU2lnbmluZyBD
ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xODA4MjIwOTE4MTdaFw0zNzEwMjEw
OTE4MTdaMDkxGDAWBgNVBAoMD0RpcmFjIENvbXB1dGluZzENMAsGA1UECgwEQ0VS
TjEOMAwGA1UEAwwFVk9Cb3gwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
AQDjV5Y6AQI61nZHy6hjr1MziFFeh/z1DdAgkPfiUnHQLxWtvXGcc4sX/tBcD6tv
NKTzJCwyFVAML0WNTD/w480TUmGILlRtg+17qfSWfeCvDygSbGNINX+la0auEqY7
u5oXtwhFAEnqBe+6pzvgfTpzh8eOtBSrqgJUwMtaI81P6LQn5urIQbJ7hg9HKh9d
AX+mR/mwxDTPpzTP6YT5oiqXE5hRaPAO6ibeGGduyphFiAwVzAV2B5UfB4tL8C/S
eyPX7+70W+paHD7ffJaHLKFQjdA9q7EHRGbm068+aPRmNCKtl1ptgbYquVmp0DiO
5qOSq+LU2v8W5/y8W75DajyqGbJuMdo4zMjCvOafOvHHabOfYrOHcI6MNJx2Z6v/
G0C7mMVwcBPcuLkqtia2uPnzwDcwxVL3wK/uJiHHw3T6odmOE/6KxYM+SJf9weBf
RFW/fCfkWYfEA1FJhncfDZPzwiJnQJTrRls367rwnNLH0VkvxDLOHY7Lhl+j1vwd
dnjONYrKVMttf1IfFN5QdMX2rRrkLX2jZXXaJ4IBeVBWWPVmWj8e892dh2FpzZV8
8XE72y17YRx+uX7x/76p3J9H3vEI0Lj/53q3lxH/W3VRGnbac7tT7kvVoqeUaXc4
AQiIF2tlR2dtjHbOAA3Sl7KCxJBvad8yq7YSm2I58sQN1wIDAQABo4IBNjCCATIw
CQYDVR0TBAIwADAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgU2Vy
dmVyIENlcnRpZmljYXRlMB0GA1UdDgQWBBTLQlHIlgopkniwA7yxCpuQ68gYgTCB
hAYDVR0jBH0we4AUBMIXrzhk4Ia/H8kAbpdvG7tOhx+hWKRWMFQxGDAWBgNVBAoM
D0RJUkFDIENvbXB1dGluZzE4MDYGA1UEAwwvRElSQUMgQ29tcHV0aW5nIFNpZ25p
bmcgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHmCCQCsvNC5K0fF2DAOBgNVHQ8BAf8E
BAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMBsGA1UdEQQUMBKC
BVZPQm94gglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggIBAB38IzhseSjULM80
ZdiG6GlYaOiBGIRglBJJqDeslhhei9upgn35yz64RqMoM4bFWSae0gFCMGNAdV5D
IXUZiTfZIRKqN35zOEZvbAU/t5Hi70ted3DPOAXM4XaghnFGg26ZTB86Z6Dph33Q
JLqNkqU8oaOfl1ET4TDoimpolQI0M82datPlhDe2EkvPjJaclNXKGZ0kX5gquZKK
pTYe+cj/4E7AG9mAQTB9M6XXpx5i/E+NLkGLjCm05QZdblhLmJ4Mjj2iCGMOL/z2
/bhncJYVyceAAFG/fTb2Yk6uXo/yDakq3SfyrOpSy5/bcy5YVcaGOlah74ppB26l
bO/cJWAOcTm6zroLzQteorJDif96EsSJj5fxGKDnSRcg+K+2sA3c+G/395FHn1qK
RRlcNm/yIWySrkUjtbSkZHChSU5vfjwlIq5acV/XtkXJpY7L4scQ0AeFDKdIhbXx
8ajVwBrU/GzyMmw7+p0PVvzNFZSn006D6zI6DRwUcPp/NRNi1oxrnzv1XVZ/MtiW
FNZgz+mnqpakOUAsCGt9YiElVFanmS7iMkqhobt54UlFXhfd+FQyRI2kSrW8kL8e
Is33dZgJZTT/KSsG8e883ISBb5zDeN47pxjU5pF/uhk2/eBY1EwEevpYdQPokY0R
Hia1xkpBKOPRY0BrSGCdEUT5+ict
-----END CERTIFICATE-----
""",
"USERCERTCONTENT": """-----BEGIN CERTIFICATE-----
MIIFszCCA5ugAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwVDEYMBYGA1UECgwPRElS
QUMgQ29tcHV0aW5nMTgwNgYDVQQDDC9ESVJBQyBDb21wdXRpbmcgU2lnbmluZyBD
ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xODA4MjIwOTE1MTRaFw0zNzEwMjEw
OTE1MTRaMDoxGDAWBgNVBAoMD0RpcmFjIENvbXB1dGluZzENMAsGA1UECgwEQ0VS
TjEPMA0GA1UEAwwGTXJVc2VyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
AgEAqfZnf9wK+a+qx8kfRlIaehzD2ix+6TKZJ+w9aBlh11b5cPfmIMOmTEXe8rD5
G6WKofOKNBiQ4vX2tEv7psYpetMwQ9R5ks67RN/YGFkzEEO7jzYFtWsS2jbsdHVf
/2wejICPhABYP1sGaQbRWtcp690fZ97cM1c7AuN/fFZ9m3mAoop5Bc6p1hqWSXyZ
ce/0J+/SjtrLeWY8yvMx4ztR+8wQG+hXEAifnT77zwxeH7pPkwj3IFpRozimTmaP
g0wpwUJXUd8LpPnF6pBeZPMybJ4b4TfoddCXSF/wT7q9UfTKptcoLayFCLp+mNJI
KkKUzm/1CBMFkhenzSP7uhjhu3Swr6SXlz1pEW7B9FFyyghLd7FMEuDIAu8ULqLA
ATFR95p5ec3GbObV4OX4G1Up9f6vDle+qhwkQ81uWxebsaVWveUo38Hsl37dqxB9
IxNOC/nTQu58l3KnLodMOweCmDnzHFrC5V96pYrKOaFj2Ijg6TO5maQHo0hfwiAC
FNIvYDb8AxNmDzOVAAZkd/Y0nbYeaO6/eNJzRiwJGKZMnXC3UpzRmIBenDTVMCjE
O1ZjsXe0hwjS0/sRytZHN1jWztnMuYftu3BLUQJQL0cmkWvPGjXKBd9kHhuYjtZu
+SEyLni+6VXJJCyR7/2kmlkq9UimB+RLA+EemW7Ik0oDI48CAwEAAaOBqDCBpTAJ
BgNVHRMEAjAAMB0GA1UdDgQWBBRKwv3rLMXxY6XyF2JDa52CbJoTJDAfBgNVHSME
GDAWgBQEwhevOGTghr8fyQBul28bu06HHzAOBgNVHQ8BAf8EBAMCBeAwEwYDVR0l
BAwwCgYIKwYBBQUHAwIwMwYJYIZIAYb4QgENBCYWJE9wZW5TU0wgR2VuZXJhdGVk
IENsaWVudCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAgEAOe2uEU17UWOU
iDsZWLDVYC820sXcC19ijco9zNDVfCkKzPMKKPlEA56dY/Kt0cWAtiklPOiWEtKy
bsM7ayZ2FEiPdBSd9P8qHYFMlbsXcyib5QXpdHebcipu9ORzp+hlFvTA1fFErDn+
nPW+xTCp19tdlrNywxDWXbB4KJZ/VxSVuT4lMZYn6wUOMFN/xj41evGqqQfJm+yT
feW3n2ClDCDbk3br/3KY8eCPLUllZfdJgnN24SWrS4S0tBuOZt+hTt7LISPSPIix
xXNsxLCXq7KsElIlzPPbMsdqDJ/lhDUoHPZZu9chi4t8F5JGkzcn1MOSmn5d74kx
SYD1QTgvX77t0A1E7G55NYiZJTSjoaIQiQwBNEak7Oz9QCh+5qHwR/Np4vo4+d4p
yuWxpzHHBuQrV6dDZ0mONBWx6gxpkFN42mt8EUd26faG7kebbeVoUt1VBTcp9HHH
DKQq9loodgGokarycFeJ8l+ZMM93YoPPVlsijG6Jmn+UrZNzwbi5JcE731qEurGY
U4kjpzpirauwCnOgSm7DwawNoilLFOSSh3/iZgDjMyhspGJ2FwXBlJm7wBWyS+0q
TnsekqTamuTDTAPJRhb2LPVFl0L8+frk1gkpw4KTCzGw4rKW++EUjS1i09sq2Dv6
/fW/ybqxpROqmyLHbqEExj0/hPxPKPw=
-----END CERTIFICATE-----
""",
}
# This is not just a copy-paste of the key file content.
# The key file is an RSA key (PKCS1), while what PyGSI and M2Crypto
# print is the PKCS8 format.
# To convert from an RSA key to a generic (PKCS8) key:
# openssl pkcs8 -topk8 -nocrypt -in privkey.pem
# Look for 'BEGIN RSA PRIVATE KEY' in the link below:
# https://tls.mbed.org/kb/cryptography/asn1-key-structures-in-der-and-pem
KEYCONTENTS_PKCS8 = {
HOSTKEY: """-----BEGIN PRIVATE KEY-----
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDjV5Y6AQI61nZH
y6hjr1MziFFeh/z1DdAgkPfiUnHQLxWtvXGcc4sX/tBcD6tvNKTzJCwyFVAML0WN
TD/w480TUmGILlRtg+17qfSWfeCvDygSbGNINX+la0auEqY7u5oXtwhFAEnqBe+6
pzvgfTpzh8eOtBSrqgJUwMtaI81P6LQn5urIQbJ7hg9HKh9dAX+mR/mwxDTPpzTP
6YT5oiqXE5hRaPAO6ibeGGduyphFiAwVzAV2B5UfB4tL8C/SeyPX7+70W+paHD7f
fJaHLKFQjdA9q7EHRGbm068+aPRmNCKtl1ptgbYquVmp0DiO5qOSq+LU2v8W5/y8
W75DajyqGbJuMdo4zMjCvOafOvHHabOfYrOHcI6MNJx2Z6v/G0C7mMVwcBPcuLkq
tia2uPnzwDcwxVL3wK/uJiHHw3T6odmOE/6KxYM+SJf9weBfRFW/fCfkWYfEA1FJ
hncfDZPzwiJnQJTrRls367rwnNLH0VkvxDLOHY7Lhl+j1vwddnjONYrKVMttf1If
FN5QdMX2rRrkLX2jZXXaJ4IBeVBWWPVmWj8e892dh2FpzZV88XE72y17YRx+uX7x
/76p3J9H3vEI0Lj/53q3lxH/W3VRGnbac7tT7kvVoqeUaXc4AQiIF2tlR2dtjHbO
AA3Sl7KCxJBvad8yq7YSm2I58sQN1wIDAQABAoICAAtXAhpQlJDkw6+fG/4k76yB
XzWs6NQ8ZSZKtOKoJB8zSgyJh5I7PTPsNO5ypaV9ZcDvC/lPkNeawAhlRkc4xbDy
CgVl8jYoP39MofOjwcJZqjEJEQa4DG7u4+6o5XvTRsNqENKISiePNj8EOntfI7xB
iJW4q9NIPqeFml8brBERVXMsFIf6pvF8ZWSyWDAmc/ySWIUVtGCrQXohds2Q5jj0
9EMTTe4gheHMK9Sd7GyDdb7cl2Ukya5rjOozx97i343U3QF5WD44bHZvW37QnhdL
i5iX6NOo+M0IwBQH3jD+5r/r7cnKj5CgADX1Oez+2iflxQHDDrhQyA2JMftg4Dev
xus6PsNUcsafhIsXlLP1Zx6dq1u3sBUw1s1TMaSP8g611tyiwrNqiaCR+WAd705Q
EGWfp4ddRcuB2BvV6NDQb8Z+A9vTqmEW+yqQdtji9VlH0XcPEu8qwjeSw6IrE7UV
dW/6HWKfRLoV+kajZwPkHHfS97/3T4jWPt3dZrEyT3T3Zno9hLbNFUXfDvvAqjOP
PkOgSMjUl/92J7SOu/fiPHjl4klxmSrG0OE79CKUU3C7a8Id81AYFKgr+3XNUvwJ
ZgjvKsHXDkoka8/y1YYeMEmH7dD4y02hd055mYfTIvYWdDIfaQcxvnCPvV7HUhpb
JMzvx7hveyxsHpRMRgk5AoIBAQD/oed5cl5mwvt3QcenBhvwgzz2uOocs42MPzvp
77RCn5cur80pBTgez1GZFnWBZEu1ygwKsu7l9ES+szlAMs37yD0LbNALTVHNQdbH
KZ7TyzY1vFQXyw730BvyKGVLnRm+/wuWnJuSDPGomATOon+ILDK5ps1NiYLpvbBR
ogAdk+llpIk/sRuoTCVlY9BYfd/XSiHyUEtVzq6CtG75Gqq7/MGEKl1xVDyXip92
6+KNr2CN6+/0lwdVUVWKCJpjrD7Yk4BwOzeGKIIsdNIaG+fl5O9UNe/njb0+joM4
177Lf1oaaBjjHwpqi9q8B78ud0/Jl+xFGB1HOBrHV7n52w8zAoIBAQDjq0TtCByO
HBdwn7Q/6JLMCU475dTs0DKBhbPfyK9GD26BTFccMcp8OuRX4S62Gkvq47s9UKAW
3R4x0ZFAkIHo+kxt9H4Sw8PPWDlVSbb6qf+rOhSPlEeW8nf6BJHreqMaWxTnzr+j
cQRY4O9GvKv3Y/fWqOe/iToQKkhjtnmtGyRVdsRVKkUrW+Ly/oxaxxe9eXOkUTOd
4UXxxSMbic6GJ57HRAfDpNYrnhbIk6JXYeuuArJeFBFmJ8vd0Nwd99y/uQ19kaxb
/F0km2zLI+2S+1j6I6p1dA7G2oA+K54er4jgGAF6guq1F/SVPO545x41YPkxGXNF
qwEz5OCyy5bNAoIBADJoP5ewGLNUwXdbrj3eM4YyqsPP5MIyGbhNA8h2buowRAR9
wAvVrqJMqT9xsUwJdfBr3gICFJ+dkiy0dJaXLgz3CCqHk2KXJYk+8VYme94xlQf1
kfN7JAFztP8EPi0x1lDWQ/e3++lJyiE/kLsaSeGVLY90N8mRUxI6SFlgg3tRnlVf
o3y+tMBz+2/JxdydPZVbVeRNNv29mqXFZJiUTJRzG8mu/OwK+0O6nwU5MFxV98kk
fBWT7mtBdYeZeLAs19unAk2fL6yxsjGH+6IQXKL1iMfnNt5HEckTGwcLa+D+xMqu
OjIW/dvSphgrwuQrvLz4yys4vRU9F/K09sQxEQcCggEBAMxFtXg/mO9hAR8KDD5z
PJNZnhpcIum//DD+d9/IPotL+UiF6Hrhqd5BMPQwlSrK+Wbtoehn2Nvq1da5Q+x8
PDN/sOfPQPcxMxVtATQnCchqk31chWo2Du2+7Cslwo9X39Qb+OvsM0JAezgLymTb
kChOR+cQca8HP1OVvJHK/e11tun/wDTx0lIPBdgk0GX60LAusrWyLe/wWkONL+zb
frQcBHih75143rkQBT0+SaDBuSbOQJ/svZe9CUwiw/0XkbdsIFCUTePS0PexhLHX
sKf6YWE+cwkjcsa08e/WTu8VbGg04c68fD60Gb11iDpulEoskimdvjG6N0AKkhma
VdkCggEBAJC5Byfjk5SMFbH4uIP2yScQAJ3lwrNsxOgnnm6C5vANWqMEsXyH+Qcs
lawDdGUmb0E/7eaYoxgsEq8OUPluZNVFgA3O9iZfyF49G36PvKGRBHtHkiE9n13n
c85Ksre6haNHO4BboojNovPMF0bqvseAoWTPaCYjktBcqB1I8Y/EzApN+zuZQWCQ
vhBLq/cZi5jOwECbR2LMebth521/4C/j2E3Ssy+5uTMlDFQh0yYZnaS8OaecQ0Hc
qRk0GL7AI33fPBBPD7b/Ptc8HHeeB0F61vzIE2ZOJEwLDtHqQr5fZs7Qn9aiN7Nc
CrerHYr0zdgIXTt+xus9RGGmZi1mfjI=
-----END PRIVATE KEY-----
""",
USERKEY: """-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCp9md/3Ar5r6rH
yR9GUhp6HMPaLH7pMpkn7D1oGWHXVvlw9+Ygw6ZMRd7ysPkbpYqh84o0GJDi9fa0
S/umxil60zBD1HmSzrtE39gYWTMQQ7uPNgW1axLaNux0dV//bB6MgI+EAFg/WwZp
BtFa1ynr3R9n3twzVzsC4398Vn2beYCiinkFzqnWGpZJfJlx7/Qn79KO2st5ZjzK
8zHjO1H7zBAb6FcQCJ+dPvvPDF4fuk+TCPcgWlGjOKZOZo+DTCnBQldR3wuk+cXq
kF5k8zJsnhvhN+h10JdIX/BPur1R9Mqm1ygtrIUIun6Y0kgqQpTOb/UIEwWSF6fN
I/u6GOG7dLCvpJeXPWkRbsH0UXLKCEt3sUwS4MgC7xQuosABMVH3mnl5zcZs5tXg
5fgbVSn1/q8OV76qHCRDzW5bF5uxpVa95SjfweyXft2rEH0jE04L+dNC7nyXcqcu
h0w7B4KYOfMcWsLlX3qliso5oWPYiODpM7mZpAejSF/CIAIU0i9gNvwDE2YPM5UA
BmR39jSdth5o7r940nNGLAkYpkydcLdSnNGYgF6cNNUwKMQ7VmOxd7SHCNLT+xHK
1kc3WNbO2cy5h+27cEtRAlAvRyaRa88aNcoF32QeG5iO1m75ITIueL7pVckkLJHv
/aSaWSr1SKYH5EsD4R6ZbsiTSgMjjwIDAQABAoICAC4S8/+/QOJq8pryNJ41h6Pu
xFESmtzQsKAX9JWRu+pKU5iCO0pKf3xRvJyBySXrfGdmw+JXfn9oOhaqOm/9bCU1
tvHMWaColi+XltcS5zrTgbbS6D1D53psRTFU2E8/mhBwkXcxOLsEC/rQtFQx29Vq
vibETWFFlmO0FE06jRZmm650Z1ZhrbyyvGbzdg1jBQcGhkffnCUux/AkeTOmUxU1
PnCyTVe1Xr+b4VtBeQqU0RmE5qlIkrTymHLMbr8jGHaha1ZwZpG0fCiYNl6bZuH3
AovNQiEeCMS/7T9P2h6rg3wy+1tWV0IEfGklKBb8saY8x2oG7g2qh/yecpECSb68
Cauh18mXJ5JsT6P8dwDoxTxR1/lImvOU2Nys7T7nEhXrls1Dc0tv6Emi37hNwihn
vAnzXYx0MwIh0N85LrdbRtVM+dis2LLpDScVt9CHS+Vl0+qO9fsgDnUKYYGONYq+
MHjtDdTMB0DhxTNjaWOU0J1RgmlAFV63lx7iWs0twH44Fbylo6DYYkAiNGOUvpKD
7GNz/aooEtrTf/3GnHoB2UBdvsmI8RZ7TSXCsoCkldQRsJJnzjo5fxTyH8ufCeEh
Umw+lmK2OFldkPSrVL8eBPV8QTECbJOyFQC8IpVy/QnJhZlDmgrOJAVtl6xjkaEf
qPV2sLruhNBqxh2zgsMhAoIBAQDfXwQBa+sf1J6oOo872ev68aQ5Zx5bZWV6Vke/
sxjab/GiZ44A33TRTUpIermdR3zJ5D0B9vh6IW6tbuU3wBgJACjs9VjWFfb5T46M
Z5FNtN3zNvxJ9YhbQ2RJc4GRzCNcGAquDecD9xUk91k9kI07UZKUIDywGA2OGKra
USRdS8LqAfpAxANu3JvinlqTQFfOxT3AZY03UWmXJI9xXtgxX1KLB+46Luy5GIWs
/BNFi1Nk12OHql19woMKpx4iw89cA3S26FjViuGX0g9domT+biatPNan96Refp4s
/jTHOFZ4HuhmWGugb1J9yhcHEZp9XreUtbrm8Xm++16f9bdJAoIBAQDCyir3lw94
X0EfNE4dMO0KlQiYXobTxQ7y0aZdL9a58t5C0Pq5nTyvX7NcIyk2KcxhMjJDJC1M
mVmQz2dvb3aJt+VKhVl2q0H/qSRI2Rp5QB5o7BlpszVkMt5CP36HZE7xz1LXZ+74
WMEsePkbn1GrRts/QsAy3iqmoBsy/fq8rqU3tXaajAzORb3KFNKkbdBX7nXnS8v+
xizWccKMTf0QuaLiC/Wcdi9vPB4UQogpa8vpAl8gM5YqaDs94eVpSv23UMhNrvAg
V3tn7FNSQNh+ugnLBwNqwam95fBMteGUh4HapnoEDlOezE7qUwGAaTswk5TnxiON
VIjpQlk2VkwXAoIBAQC1l4orGbABpZoCS/EsCCMXVKFc5V9BkDIqfcBAsXowAzfe
/u7r+L4AdiRAvjzuBzME8t9CHKSurUVMC86fPzSLBK1AzskU6rBoyGur63quQK77
ziTWf50GDMiYCiY5AEty0DzGeZjomVOARPIw4bZflhZjA74yrqs+bQFhEPxOOIxS
L59iTbg4xXKZjoE2GuYHvERSiHyAj1gXPuq6kQ+TO9pgGudqN8HNTIlIM3n7XKRE
Y/KPVUpCNgLQg0I1oxiNxmV5WXT2zbxO77/8MEyIp8Ybqk0cKnBfPfKbw2Hm3/80
EnR+171PpZDboJKN9Zqx93GpnQBARenjAHpR8rG5AoIBAH1JnbNchUXONsvET830
zzJ0Q3AFtMD3SbMi59eeUoWN0im10t6aZRMEAhBsSTCeV+fYan3HAh/3rqU20ffa
AKt6DdANz0pFwxCXEVCN27pLZIPmAD59VwUYtt5zioW5HhHoYQdNwWYZaD6bnNaI
dfYtgA3DeG3/ef1sk7ILrD+6MWiQnjWviPkP4I/fLtE2FMDKDynzFcXMX8CasSCf
dPtR+5NbT+IQHlh0mYA8funtfN1lehvzMk4adqhJ6M39vw0ut3dH4wlaW3Svi7Qn
I1j3fh8JZsg+wlfzUsl0XyCyu/IQDAEZ2e0UyllrhFa82KZY9njRd8KKsfkehNUv
UocCggEAGFGpLq8flL4lU4AnetR5Gs2BFaHBeqyGL1pWY1oPgF8jE/aNafIDs6Nq
wMBIOQmekhEOxBf9Ti9qJDaTkTNyIiPFYS3/sm+thfqJFVMZX8LKnjSntSCp/pGD
YELJ+GOYwOnqcni7psF4+cvxQmRkI1LHpIwiUOMniwcfPVCtoEHdJ5Pn0jFFkcAV
VPWLyXcPH0WpgklFGvCNvvVthRkZTuT4Zy2QXgP6dfIK/2UAUDE6Uk1odkNyAtw9
d2tkfZjxzb8djGdcmTCbVzyRdkkhRsp/grQbg+qXfmiTlAyPE3uB5VFPJYcx5gJL
oYjpqlB4Kj08eIAI5vcWnt/RcE1tLw==
-----END PRIVATE KEY-----
""",
}
# This maps the attributes of the certificates so they can be compared in the
# tests. Attributes shared by both certificates sit directly at the root;
# certificate-specific ones live in a per-certificate sub-dictionary.
CERT_ATTRS = {
# Just take the date, it is the same for both
"endDate": datetime.strptime("2037-10-21", "%Y-%m-%d").date(),
"startDate": datetime.strptime("2018-08-22", "%Y-%m-%d").date(),
"issuerDN": "/O=DIRAC Computing/CN=DIRAC Computing Signing Certification Authority",
HOSTCERT: {
"subjectDN": "/O=Dirac Computing/O=CERN/CN=VOBox",
"serial": 4098,
"availableExtensions": [
"authorityKeyIdentifier",
"basicConstraints",
"extendedKeyUsage",
"keyUsage",
"nsComment",
"subjectAltName",
"subjectKeyIdentifier",
],
"basicConstraints": "CA:FALSE",
"subjectAltName": "DNS:VOBox, DNS:localhost",
"extendedKeyUsage": "TLS Web Server Authentication, TLS Web Client Authentication",
"content": CERTCONTENTS["HOSTCERTCONTENT"],
"keyFile": HOSTKEY,
},
USERCERT: {
"subjectDN": "/O=Dirac Computing/O=CERN/CN=MrUser",
"serial": 4097,
"availableExtensions": [
"authorityKeyIdentifier",
"basicConstraints",
"extendedKeyUsage",
"keyUsage",
"nsComment",
"subjectKeyIdentifier",
],
"basicConstraints": "CA:FALSE",
"subjectAltName": "DNS:VOBox, DNS:localhost",
"extendedKeyUsage": "TLS Web Client Authentication",
"content": CERTCONTENTS["USERCERTCONTENT"],
"keyFile": USERKEY,
},
}
VOMS_PROXY_ATTR = {
"notBefore": datetime(2018, 10, 23, 9, 11, 44),
"notAfter": datetime(2024, 7, 6, 17, 11, 44),
"fqan": ["/fakevo/Role=user/Capability=NULL"],
"vo": "fakevo",
"subject": "/O=Dirac Computing/O=CERN/CN=MrUser",
"issuer": "/O=Dirac Computing/O=CERN/CN=VOBox",
}
def getCertOption(cert, optionName):
"""Return a given option of a given certificate, taken from CERT_ATTRS
:param cert: effectively, path to the certificate in question
:param optionName: name of the options
:returns: the option
"""
if optionName in CERT_ATTRS:
return CERT_ATTRS[optionName]
return CERT_ATTRS[cert][optionName]
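# For instance, with the constants defined above (values come from CERT_ATTRS):
#   getCertOption(HOSTCERT, 'serial')     -> 4098
#   getCertOption(HOSTCERT, 'startDate')  -> date(2018, 8, 22) (shared attribute)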
def deimportDIRAC():
"""clean all what has already been imported from DIRAC.
This method is extremely fragile, but hopefully, we can get ride of all these
messy tests soon, when PyGSI has gone.
"""
if len(X509CHAINTYPES) != 1 or len(X509REQUESTTYPES) != 1:
raise NotImplementedError(
"This no longer de-imports DIRAC, if we want to test another SSL wrapper "
"we will have to find another way of doing this or run a separate pytest "
"process again"
)
# for mod in list(sys.modules):
# # You should be careful with what you remove....
# if (mod == 'DIRAC' or mod.startswith('DIRAC.')) and not mod.startswith('DIRAC.Core.Security.test'):
# sys.modules.pop(mod)
X509CHAINTYPES = ("M2_X509Chain",)
# This fixture will return the M2Crypto X509Chain class
# https://docs.pytest.org/en/latest/fixture.html#automatic-grouping-of-tests-by-fixture-instances
@fixture(scope="function", params=X509CHAINTYPES)
def get_X509Chain_class(request):
"""Fixture to return either the X509Certificate class.
It also 'de-import' DIRAC before and after
"""
# Clean before
deimportDIRAC()
x509Class = request.param
if x509Class == "M2_X509Chain":
from DIRAC.Core.Security.m2crypto.X509Chain import X509Chain
else:
raise NotImplementedError()
yield X509Chain
# Clean after
deimportDIRAC()
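# Typical consumer of the fixture above (sketch; loadChainFromFile returning a
# DIRAC S_OK/S_ERROR dict is an assumption about the X509Chain API):
#
#     def test_loadChain(get_X509Chain_class):
#         chain = get_X509Chain_class()
#         result = chain.loadChainFromFile(HOSTCERT)
#         assert result['OK']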
X509REQUESTTYPES = ("M2_X509Request",)
# This fixture will return an X509Request class
# https://docs.pytest.org/en/latest/fixture.html#automatic-grouping-of-tests-by-fixture-instances
@fixture(scope="function", params=X509REQUESTTYPES)
def get_X509Request(request):
"""Fixture to return either the X509Request instance.
It also 'de-import' DIRAC before and after
"""
# Clean before
deimportDIRAC()
x509Class = request.param
if x509Class == "M2_X509Request":
from DIRAC.Core.Security.m2crypto.X509Request import X509Request
else:
raise NotImplementedError()
def _generateX509Request():
"""Instanciate the object
:returns: an X509Request instance
"""
return X509Request()
yield _generateX509Request
# Clean after
deimportDIRAC()
def get_X509Chain_from_X509Request(x509ReqObj):
"""This returns an X509Chain class from the same "type" as the X509Request
object given as param
:param x509ReqObj: instance of a X509Request object
:returns: X509Chain class
"""
# In principle, we should deimport Dirac everywhere, but I am not even sure it makes any difference
if "m2crypto" in x509ReqObj.__class__.__module__:
from DIRAC.Core.Security.m2crypto.X509Chain import X509Chain
else:
raise NotImplementedError()
return X509Chain
| DIRACGrid/DIRAC | src/DIRAC/Core/Security/test/x509TestUtilities.py | Python | gpl-3.0 | 17,182 |
# -*- coding: utf-8 -*-
# * Copyright (C) 2012-2014 Croissance Commune
# * Authors:
# * Arezki Feth <[email protected]>;
# * Miotte Julien <[email protected]>;
# * TJEBBES Gaston <[email protected]>
#
# This file is part of Autonomie : Progiciel de gestion de CAE.
#
# Autonomie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autonomie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autonomie. If not, see <http://www.gnu.org/licenses/>.
"""
Base tools for administrable options
"""
from sqlalchemy import (
Column,
Integer,
String,
Boolean,
ForeignKey,
)
from sqlalchemy.util import classproperty
from sqlalchemy.sql.expression import func
from autonomie_base.utils.ascii import camel_case_to_name
from autonomie_base.models.base import (
DBBASE,
default_table_args,
DBSESSION,
)
from autonomie.forms import (
get_hidden_field_conf,
EXCLUDED,
)
class ConfigurableOption(DBBASE):
"""
Base class for options
"""
__table_args__ = default_table_args
id = Column(
Integer,
primary_key=True,
info={'colanderalchemy': get_hidden_field_conf()}
)
label = Column(
String(100),
info={'colanderalchemy': {'title': u'Libellé'}},
nullable=False,
)
active = Column(
Boolean(),
default=True,
info={'colanderalchemy': EXCLUDED}
)
order = Column(
Integer,
default=0,
info={'colanderalchemy': get_hidden_field_conf()}
)
type_ = Column(
'type_',
String(30),
nullable=False,
info={'colanderalchemy': EXCLUDED}
)
@classproperty
def __mapper_args__(cls):
name = cls.__name__
if name == 'ConfigurableOption':
return {
'polymorphic_on': 'type_',
'polymorphic_identity': 'configurable_option'
}
else:
return {'polymorphic_identity': camel_case_to_name(name)}
@classmethod
def query(cls, *args):
query = super(ConfigurableOption, cls).query(*args)
query = query.filter(ConfigurableOption.active == True)
query = query.order_by(ConfigurableOption.order)
return query
def __json__(self, request):
return dict(
id=self.id,
label=self.label,
active=self.active,
)
def move_up(self):
"""
Move the current instance up in the category's order
"""
order = self.order
if order > 0:
new_order = order - 1
self.__class__.insert(self, new_order)
def move_down(self):
"""
Move the current instance down in the category's order
"""
order = self.order
new_order = order + 1
self.__class__.insert(self, new_order)
@classmethod
def get_next_order(cls):
"""
:returns: The next available order
:rtype: int
"""
query = DBSESSION().query(func.max(cls.order)).filter_by(active=True)
query = query.filter_by(
type_=cls.__mapper_args__['polymorphic_identity']
)
query = query.first()
if query is not None and query[0] is not None:
result = query[0] + 1
else:
result = 0
return result
@classmethod
def _query_active_items(cls):
"""
Build a query to collect active items of the current class
:rtype: :class:`sqlalchemy.Query`
"""
return DBSESSION().query(cls).filter_by(
type_=cls.__mapper_args__['polymorphic_identity']
).filter_by(active=True)
@classmethod
def insert(cls, item, new_order):
"""
Place the item at the given index
:param obj item: The item to move
:param int new_order: The new index of the item
"""
query = cls._query_active_items()
items = query.filter(cls.id != item.id).order_by(cls.order).all()
items.insert(new_order, item)
for index, item in enumerate(items):
item.order = index
DBSESSION().merge(item)
@classmethod
def reorder(cls):
"""
Regenerate order attributes
"""
items = cls._query_active_items().order_by(cls.order).all()
for index, item in enumerate(items):
item.order = index
DBSESSION().merge(item)
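# Illustration of the intended subclassing pattern (the model below is invented
# for the example and is not part of Autonomie):
#
#     class PaymentModeOption(ConfigurableOption):
#         __tablename__ = 'payment_mode_option'
#         id = get_id_foreignkey_col('configurable_option.id')
#
# The __mapper_args__ classproperty then derives the polymorphic identity
# 'payment_mode_option' from the class name via camel_case_to_name.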
def get_id_foreignkey_col(foreignkey_str):
"""
Return an id column as a foreignkey with correct colander configuration
foreignkey_str
The foreignkey our id is pointing to
"""
column = Column(
"id",
Integer,
ForeignKey(foreignkey_str),
primary_key=True,
info={'colanderalchemy': get_hidden_field_conf()},
)
return column
| CroissanceCommune/autonomie | autonomie/models/options.py | Python | gpl-3.0 | 5,322 |
import json
from mflow_nodes.processors.base import BaseProcessor
from mflow_nodes.stream_node import get_processor_function, get_receiver_function
from mflow_nodes.node_manager import NodeManager
def setup_file_writing_receiver(connect_address, output_filename):
"""
    Set up a node that writes the message headers into an output file for later inspection.
:param connect_address: Address the node connects to.
:param output_filename: Output file.
:return: Instance of ExternalProcessWrapper.
"""
# Format the output file.
with open(output_filename, 'w') as output_file:
output_file.write("[]")
def process_message(message):
with open(output_filename, 'r') as input_file:
test_data = json.load(input_file)
test_data.append(message.get_header())
with open(output_filename, 'w') as output:
output.write(json.dumps(test_data, indent=4))
processor = BaseProcessor()
processor.process_message = process_message
receiver = NodeManager(processor_function=get_processor_function(processor=processor,
connection_address=connect_address),
receiver_function=get_receiver_function(connection_address=connect_address),
processor_instance=processor)
return receiver
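# Hedged usage sketch (address and output path are placeholders; the start()
# call is an assumption about the NodeManager interface):
#
#     receiver = setup_file_writing_receiver('tcp://127.0.0.1:40000', '/tmp/headers.json')
#     receiver.start()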
| datastreaming/mflow_nodes | tests/helpers.py | Python | gpl-3.0 | 1,389 |
#!/usr/bin/env python3
# Copyright 2016, 2017 Andrew Conrad
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tool to initialize Thrustmaster racing wheels."""
import argparse
import time
import tmdrv_devices
import usb1
from importlib import import_module
from os import path
from subprocess import check_call, CalledProcessError
device_list = ['thrustmaster_t500rs', 'thrustmaster_tmx', 'thrustmaster_tx', 'thrustmaster_tsxw']
_context = usb1.USBContext()
def initialize(device_name='thrustmaster_tx'):
try:
device = import_module('tmdrv_devices.' + device_name)
except ModuleNotFoundError:
print('Device name "' + device_name + '" is invalid.')
raise
try:
device
except UnboundLocalError:
print('Device name "' + device_name + '" is invalid.')
raise
# Send all control packets for initialization
for m in device.control:
try:
_control_init(
device.idVendor, device.idProduct[m['step'] - 1],
m['request_type'],
m['request'],
m['value'],
m['index'],
m['data'],
)
except usb1.USBErrorNotFound:
print('Error getting handle for device {:0=4x}:{:0=4x} ({} Step {}).'.format(device.idVendor, device.idProduct[m['step']-1], device.name, m['step']))
raise
except usb1.USBErrorNoDevice:
# Caught when device switches modes
pass
except usb1.USBErrorPipe:
# Possibly caught when device switches modes on older libusb
pass
except usb1.USBErrorIO:
# Possibly caught when device switches modes on newer
# libusb. This still has to be investigated, there might
# be another issue going on here.
pass
# Wait for device to switch
connected = False
while not connected:
handle = _context.openByVendorIDAndProductID(
device.idVendor, device.idProduct[m['step']],
)
if handle is not None:
connected = True
# Load configuration to remove deadzones
if device.jscal is not None:
dev_path = '/dev/input/by-id/' + device.dev_by_id
# Sometimes the device symlink is not ready in time, so we wait
n = 9
while not path.islink(dev_path):
if n > 0:
time.sleep(.5)
n -= 1
else:
print('Device "{}" not found, skipping device calibration'.format(dev_path))
raise FileNotFoundError
_jscal(device.jscal, dev_path)
def _jscal(configuration, device_file):
try:
check_call(['jscal', '-s', configuration, device_file])
except FileNotFoundError:
print('jscal not found, skipping device calibration.')
except CalledProcessError as err:
        print('jscal exited with non-zero code {}; device may not be calibrated'.format(err.returncode))
def _control_init(idVendor, idProduct, request_type, request, value, index, data):
handle = _context.openByVendorIDAndProductID(
idVendor, idProduct,
)
if handle is None:
raise usb1.USBErrorNotFound('Device not found or wrong permissions')
handle.setAutoDetachKernelDriver(True)
handle.claimInterface(0)
# Send control packet that will switch modes
handle.controlWrite(
request_type,
request,
value,
index,
data,
)
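# Sketch of the device-module interface consumed by initialize(); the attribute
# names come from the accesses above, the values are purely illustrative:
#
#     name = 'Thrustmaster TX'
#     idVendor = 0x044f                      # Thrustmaster's USB vendor ID
#     idProduct = [0xb65d, 0xb669]           # one product ID per mode-switch step
#     control = [{'step': 1, 'request_type': 0x41, 'request': 83,
#                 'value': 0x0001, 'index': 0, 'data': b''}]
#     jscal = None                           # or a jscal '-s' calibration string
#     dev_by_id = 'usb-Thrustmaster_...-joystick'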
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-d', '--device', default='thrustmaster_tx',
help='Specify device to use')
parser.add_argument('-D', '--supported-devices', action='store_true',
help='List all supported devices')
args = parser.parse_args()
if args.supported_devices:
for d in device_list:
print(d)
else:
initialize(args.device)
| her001/tmdrv | tmdrv.py | Python | gpl-3.0 | 3,992 |