repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
starbt/flea_market | market/migrations/0006_auto_20161206_2033.py | 1 | 1232 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-06 12:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('market', '0005_auto_20161206_1204'),
]
operations = [
migrations.AddField(
model_name='goods',
name='discount',
field=models.IntegerField(blank=True, default=0),
),
migrations.AddField(
model_name='goods',
name='goods_phone',
field=models.IntegerField(blank=True, default=0),
),
migrations.AddField(
model_name='goods',
name='goods_qq',
field=models.IntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name='goods',
name='picture_url',
field=models.CharField(blank=True, max_length=128),
),
migrations.AlterField(
model_name='userprofile',
name='picture_url',
field=models.CharField(blank=True, default='http://ershou.u.qiniudn.com/Android_1480732854630_186265.jpg?imageView2/5/w/800/h/800', max_length=128),
),
]
| mit | -7,127,938,892,819,470,000 | 29.8 | 160 | 0.571429 | false | 3.911111 | false | false | false |
UbiCastTeam/touchwizard | touchwizard/canvas.py | 1 | 16655 | # -*- coding: utf-8 -*-
import clutter
import gobject
import easyevent
import logging
import os
import time
from touchwizard.loading import LoadingWidget
logger = logging.getLogger('touchwizard')
class Canvas(clutter.Actor, clutter.Container, easyevent.User):
"""Wizard main actor which manages the user interface and pages.
Listen for event:
- next_page (page_name)
Request for a new page identified by its name passed as content.
The current page becomes in top of the page history.
- previous_page
Request for displaying back the top of the page history. No content
expected. If the history is empty, quit the wizard.
- request_quit
        Request for quitting the wizard. Calls the prepare_quit callback
        if it exists and then launches the wizard_quit event, which should
        be handled by the user main script.
Launch the event:
- wizard_quit
Sent after prepare_quit callback to notify the main script that it
can end the process.
"""
__gtype_name__ = 'Canvas'
# infobar_height = 104
# iconbar_height = 200
def __init__(self, first_page):
import touchwizard
clutter.Actor.__init__(self)
easyevent.User.__init__(self)
self.session = touchwizard.Session()
self.background = None
self.last_page_name = None
self.last_page_timestamp = None
self.previous_page_locked = False
self.previous_page_timeout_id = None
if touchwizard.canvas_bg:
if not os.path.exists(touchwizard.canvas_bg):
logger.error('Canvas background %s not found.', touchwizard.canvas_bg)
self.background = clutter.Texture(touchwizard.canvas_bg)
self.background.set_parent(self)
self.infobar = touchwizard.InfoBar()
self.infobar.set_parent(self)
self.iconbar = touchwizard.IconBar()
self.iconbar.set_parent(self)
self.loading = LoadingWidget()
self.loading.set_parent(self)
self.loading.hide()
self.loading_padding = 10
self.home_icon = touchwizard.Icon('shutdown')
self.home_icon.build()
self.previous_icon = touchwizard.IconRef(touchwizard.Icon('previous'))
# self.previous_icon.build()
easyevent.forward_event('icon_previous_actioned', 'previous_page')
self.history = list()
self.first_page = first_page
self.available_pages = dict()
self.current_page = None
self.register_event('next_page', 'previous_page', 'refresh_page', 'clear_history')
self.register_event('request_quit')
gobject.idle_add(self.lookup_pages)
gobject.idle_add(self.display_page, first_page)
def lookup_pages(self):
import touchwizard
origin = ''
path = touchwizard.page_path
if path is None:
if self.first_page is None:
return tuple()
self.available_pages[self.first_page.name] = self.first_page
import sys
origin = sys.modules[self.first_page.__module__].__file__
path = os.path.dirname(os.path.abspath(os.path.expanduser(origin)))
import imp
for f in os.listdir(path):
if f.endswith('.py') and f != os.path.basename(origin):
try:
module = imp.load_source(f[:-3], os.path.join(path, f))
except:
import traceback
logger.error('Cannot import page %s:\n%s', f[:-3], traceback.format_exc())
if not touchwizard.tolerant_to_page_import_error:
import sys
sys.exit(1)
continue
for attr_name in dir(module):
if attr_name.startswith('__'):
continue
attribute = getattr(module, attr_name)
if isinstance(attribute, type) \
and issubclass(attribute, touchwizard.Page) \
and attribute is not touchwizard.Page:
self.available_pages[attribute.name] = attribute
logger.info('%d pages found.', len(self.available_pages))
# print self.available_pages
def display_page(self, page, icons=None):
if isinstance(page, type):
self.current_page = page(self.session)
if self.current_page.reuse:
logger.info('Storing reusable page %s in cache.', self.current_page.name)
self.available_pages[self.current_page.name] = self.current_page
else:
self.current_page = page
            logger.info('Reusing already instantiated page %s from cache.', self.current_page.name)
os.environ["TOUCHWIZARD_CURRENT_PAGE"] = self.current_page.name
os.environ.pop("TOUCHWIZARD_REQUESTED_PAGE", None)
if page.need_loading:
self.loading.hide()
self._build_iconbar(icons)
self.current_page.panel.set_parent(self)
self.current_page.panel.lower_bottom()
if hasattr(self.current_page.panel, 'prepare') and callable(self.current_page.panel.prepare):
self.current_page.panel.prepare()
self.current_page.panel.show()
self.previous_page_locked = False
self.last_page_name = page.name
def _build_iconbar(self, icons):
import touchwizard
self.iconbar.clear()
if icons is not None:
# cached icons
previous_icon = icons[0]
next_icon = icons[-1]
icons = icons[1:-1]
else:
# uninstanciated icons
icons = self.current_page.icons
previous_icon = self.current_page.previous
next_icon = self.current_page.next
# Icon "previous"
self.home_icon.unregister_all_events()
if previous_icon is None:
if self.history:
last_page, last_icons = self.history[-1]
previous_icon = last_page.my_icon
if previous_icon is None:
previous_icon = self.previous_icon
else:
self.home_icon.register_events()
previous_icon = self.home_icon
condition = True
if isinstance(previous_icon, touchwizard.IconRef):
if callable(previous_icon.condition):
condition = previous_icon.condition()
else:
condition = previous_icon.condition
previous_icon = previous_icon.get_icon()
if condition:
previous_icon.build()
self.iconbar.set_previous(previous_icon)
# Icon "next"
condition = True
if next_icon is not None:
if isinstance(next_icon, touchwizard.IconRef):
if callable(next_icon.condition):
condition = next_icon.condition()
else:
condition = next_icon.condition
next_icon = next_icon.get_icon()
if condition:
next_icon.build()
self.iconbar.set_next(next_icon)
# Other icons
for icon in icons:
if isinstance(icon, touchwizard.IconRef):
if callable(icon.condition):
condition = icon.condition()
else:
condition = icon.condition
if not condition:
continue
icon = icon.get_icon()
icon.build()
self.iconbar.append(icon)
def evt_next_page(self, event):
if self.last_page_name is None or self.last_page_name != event.content:
gobject.timeout_add(100, self.do_next_page, event, priority=gobject.PRIORITY_HIGH)
self.unregister_event('next_page')
def do_next_page(self, event):
now = time.time()
name = event.content
if not self.last_page_timestamp or (now - self.last_page_timestamp) > 0.5:
logger.info('Page %r requested.', name)
os.environ["TOUCHWIZARD_REQUESTED_PAGE"] = name
self.current_page.panel.hide()
self.current_page.panel.unparent()
icon_states = self.iconbar.get_icon_states()
self.history.append((self.current_page, icon_states))
new_page = self.available_pages[name]
self.iconbar.clear(keep_back=True)
if new_page.need_loading:
self.loading.show()
gobject.idle_add(self.display_page, new_page)
else:
logger.warning('Page %s requested too quickly twice in a row (less than 500ms), not displaying', name)
self.register_event('next_page')
self.last_page_timestamp = now
def evt_previous_page(self, event):
if not self.previous_page_locked:
self.previous_page_locked = True
if self.previous_page_timeout_id is not None:
gobject.source_remove(self.previous_page_timeout_id)
self.previous_page_timeout_id = gobject.timeout_add(300, self.do_previous_page, event, priority=gobject.PRIORITY_HIGH)
def do_previous_page(self, event):
name = None
if event.content:
name = event.content
for page, icons in self.history[::-1]:
try:
previous, icons = self.history.pop()
except IndexError:
# logger.error('Previous page requested but history is empty.')
self.evt_request_quit(event)
return
logger.info('Back to %r page.', previous.name)
os.environ["TOUCHWIZARD_REQUESTED_PAGE"] = previous.name
self.current_page.panel.hide()
gobject.idle_add(self.current_page.panel.unparent)
if previous.need_loading:
self.loading.show()
if not self.current_page.reuse:
gobject.idle_add(self.current_page.panel.destroy)
if name is None or page.name == name:
break
self.current_page = page
gobject.idle_add(self.display_page, previous, icons)
def evt_refresh_page(self, event):
gobject.idle_add(self.do_refresh_page, event)
self.unregister_event('refresh_page')
def do_refresh_page(self, event):
name = self.current_page.name
logger.info('Page %r refresh requested.', name)
self.current_page.panel.hide()
self.current_page.panel.unparent()
gobject.idle_add(self.current_page.panel.destroy)
new_page = self.available_pages[name]
self.iconbar.clear(keep_back=True)
if new_page.need_loading:
self.loading.show()
gobject.idle_add(self.display_page, new_page)
self.register_event('refresh_page')
def evt_clear_history(self, event):
for page, icons in self.history:
gobject.idle_add(page.panel.destroy)
self.history = list()
def evt_request_quit(self, event):
self.evt_request_quit = self.evt_request_quit_fake
logger.info('Quit requested.')
try:
prepare_quit = getattr(self.current_page, "prepare_quit", None)
if prepare_quit:
if not callable(prepare_quit):
prepare_quit = getattr(self.current_page.panel, prepare_quit, None)
if callable(prepare_quit):
logger.info('prepare_quit callback found')
prepare_quit()
except Exception, e:
logger.warning("Failed to call prepare_quit method in page %s: %s", self.current_page, e)
self.launch_event('wizard_quit')
def evt_request_quit_fake(self, event):
logger.error('Quit request rejected.')
def evt_request_session(self, event):
self.launch_event('dispatch_session', self.session)
def evt_update_session(self, event):
self.session.update(event)
self.launch_event('dispatch_session', self.session)
def do_remove(self, actor):
        logger.debug('Panel "%s" removed.', actor)
def do_get_preferred_width(self, for_height):
import touchwizard
width = float(touchwizard.canvas_width)
return width, width
def do_get_preferred_height(self, for_width):
import touchwizard
height = float(touchwizard.canvas_height)
return height, height
def do_allocate(self, box, flags):
canvas_width = box.x2 - box.x1
canvas_height = box.y2 - box.y1
infobar_height = round(self.infobar.get_preferred_height(canvas_width)[1])
infobar_box = clutter.ActorBox()
infobar_box.x1 = 0
infobar_box.y1 = 0
infobar_box.x2 = canvas_width
infobar_box.y2 = infobar_height
self.infobar.allocate(infobar_box, flags)
iconbar_height = round(self.iconbar.get_preferred_height(canvas_width)[1])
iconbar_box = clutter.ActorBox()
iconbar_box.x1 = 0
iconbar_box.y1 = canvas_height - iconbar_height
iconbar_box.x2 = canvas_width
iconbar_box.y2 = canvas_height
self.iconbar.allocate(iconbar_box, flags)
loading_box = clutter.ActorBox()
loading_box.x1 = self.loading_padding
loading_box.y1 = infobar_height + self.loading_padding
loading_box.x2 = canvas_width - self.loading_padding
loading_box.y2 = canvas_height - iconbar_height - self.loading_padding
self.loading.allocate(loading_box, flags)
panel_box = clutter.ActorBox()
panel_box.x1 = 0
panel_box.y1 = infobar_height
panel_box.x2 = canvas_width
panel_box.y2 = canvas_height - iconbar_height
if self.background is not None:
self.background.allocate(panel_box, flags)
if self.current_page is not None:
self.current_page.panel.allocate(panel_box, flags)
clutter.Actor.do_allocate(self, box, flags)
def do_foreach(self, func, data=None):
children = [self.infobar, self.iconbar, self.loading]
if self.background:
children.append(self.background)
if self.current_page:
children.append(self.current_page.panel)
for child in children:
func(child, data)
def do_paint(self):
if self.background:
self.background.paint()
self.iconbar.paint()
if self.current_page:
self.current_page.panel.paint()
self.infobar.paint()
self.loading.paint()
def do_pick(self, color):
self.do_paint()
def quick_launch(page, width=None, height=None, overlay=None, main_loop_run_cb=None, main_loop_stop_cb=None):
if not logging._handlers:
# Install a default log handler if none set
import sys
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)s %(message)s',
stream=sys.stderr)
logger.info('Initializing touchwizard app.')
import touchwizard
stage = clutter.Stage()
if width == None and height == None:
width = touchwizard.canvas_width
height = touchwizard.canvas_height
else:
touchwizard.canvas_width = width
touchwizard.canvas_height = height
stage.set_size(width, height)
if page is not None:
stage.set_title(page.title)
canvas = Canvas(page)
stage.add(canvas)
if overlay is not None:
logger.info('Adding overlay %s', overlay)
stage.add(overlay)
overlay.show()
stage.show()
main_loop_name = 'External'
if main_loop_run_cb is None:
main_loop_run_cb = clutter.main
main_loop_name = 'Clutter'
if main_loop_stop_cb is None:
main_loop_stop_cb = clutter.main_quit
def quit(*args):
logger.info('Quitting %s main loop by stage destroy', main_loop_name)
main_loop_stop_cb()
import sys
gobject.timeout_add_seconds(2, sys.exit)
stage.connect('destroy', quit)
class Quitter(easyevent.Listener):
def __init__(self):
easyevent.Listener.__init__(self)
self.register_event('wizard_quit')
def evt_wizard_quit(self, event):
logging.info('Quitting %s main loop by touchwizard button', main_loop_name)
main_loop_stop_cb()
import sys
gobject.timeout_add_seconds(2, sys.exit)
Quitter()
logger.info('Running %s main loop.', main_loop_name)
main_loop_run_cb()
if __name__ == '__main__':
quick_launch(None)
| gpl-3.0 | 398,874,673,450,197,570 | 36.093541 | 130 | 0.593215 | false | 3.969256 | false | false | false |
AnthillTech/python-mewa-client | examples/main.py | 1 | 1354 | '''
Created on 27 Jul 2014
@author: Krzysztof Langner
'''
from mewa.client import Connection
HOST_URL = "ws://mewa.cc:9001/ws"
# HOST_URL = "ws://localhost:9000/ws"
connection = Connection(HOST_URL)
def onConnected():
connection.getDevices()
connection.sendEvent("serviceA.event2", "78", True)
params = [{"type": "org.fi24.switch", "name": "switch2"}, {"type": "org.fi24.switch", "name": "switch1"}, {"type": "org.fi24.switch", "name": "switch0"}]
connection.sendMessage("device66", "serviceA.level", params)
def onEvent(timestamp, fromDevice, eventId, params):
print("received event %s from %s with params %s" % (eventId, fromDevice, params))
def onMessage(timestamp, fromDevice, msgId, params):
    print("%s: received message %s from %s with params %s" % (timestamp, msgId, fromDevice, params))
def onDevicesEvent(timestamp, devices):
print(timestamp + ": Found devices:")
print(devices)
def onError(reason):
print("Error: " + reason)
def onAck():
print("ACK")
if __name__ == "__main__":
connection.onConnected = onConnected
connection.onEvent = onEvent
connection.onMessage = onMessage
connection.onDevicesEvent = onDevicesEvent
connection.onError = onError
connection.onAck = onAck
connection.connect("admin.test", "python", "l631vxqa", [""])
| bsd-2-clause | 3,212,027,003,129,120,000 | 27.208333 | 157 | 0.669129 | false | 3.351485 | false | false | false |
rickerc/cinder_audit | cinder/tests/db/test_finish_migration.py | 1 | 2226 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for finish_volume_migration."""
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests import utils as testutils
class FinishVolumeMigrationTestCase(test.TestCase):
"""Test cases for finish_volume_migration."""
def setUp(self):
super(FinishVolumeMigrationTestCase, self).setUp()
def tearDown(self):
super(FinishVolumeMigrationTestCase, self).tearDown()
def test_finish_volume_migration(self):
ctxt = context.RequestContext(user_id='user_id',
project_id='project_id',
is_admin=True)
src_volume = testutils.create_volume(ctxt, host='src',
migration_status='migrating',
status='available')
dest_volume = testutils.create_volume(ctxt, host='dest',
migration_status='target:fake',
status='available')
db.finish_volume_migration(ctxt, src_volume['id'],
dest_volume['id'])
src_volume = db.volume_get(ctxt, src_volume['id'])
expected_name = 'volume-%s' % dest_volume['id']
self.assertEqual(src_volume['_name_id'], dest_volume['id'])
self.assertEqual(src_volume['name'], expected_name)
self.assertEqual(src_volume['host'], 'dest')
self.assertEqual(src_volume['status'], 'available')
self.assertEqual(src_volume['migration_status'], None)
| apache-2.0 | 1,801,730,165,042,376,000 | 41 | 78 | 0.609164 | false | 4.533605 | true | false | false |
miguelut/utmbu | mbu/api/scout.py | 1 | 1603 | from django.contrib.admin.views.decorators import staff_member_required
from django.http import JsonResponse
from django.contrib.auth.decorators import permission_required
from rest_framework.decorators import api_view
from mbu.models import Scout, ScoutCourseInstance, ScoutCourseInstanceSerializer, RegistrationStatus
__author__ = 'michael'
@permission_required('mbu.edit_scout_schedule', raise_exception=True)
@api_view(http_method_names=['GET', 'POST'])
def scout_enrollments(request, scout_id):
user = request.user
scout = Scout.objects.get(user=user)
scout_check = Scout.objects.get(pk=scout_id)
assert(scout == scout_check)
enrollments = []
if request.method == 'POST' and _reg_is_open():
for d in request.data:
enrollments.append(ScoutCourseInstance.objects.get(pk=d['id']))
scout.enrollments = enrollments
scout.save()
return JsonResponse({'data': request.data})
else:
for enrollment in scout.enrollments.all():
serializer = ScoutCourseInstanceSerializer(enrollment)
enrollments.append(serializer.data)
result = {'enrollments': enrollments}
return JsonResponse(result)
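# Illustrative note (values assumed): the POST branch above expects request.data
# to be a list of dicts carrying ScoutCourseInstance ids, e.g.
#   [{"id": 3}, {"id": 7}]
# while the GET branch answers with {"enrollments": [<serialized instances>]}.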
@staff_member_required
@api_view(http_method_names=['POST'])
def check_in_scouts(request, scout_id):
scout = Scout.objects.get(pk=scout_id)
scout.checked_in = True
scout.save()
result = {"scout": scout_id}
return JsonResponse(result)
def _reg_is_open():
status = RegistrationStatus.objects.first()
if status:
status = status.status
return status == 'OPEN'
| mit | 6,069,730,055,826,966,000 | 32.395833 | 100 | 0.69869 | false | 3.634921 | false | false | false |
EqAfrica/machinekit | nosetests/test_netcmd.py | 1 | 3441 | #!/usr/bin/env python
from nose import with_setup
from machinekit.nosetests.realtime import setup_module,teardown_module
from machinekit.nosetests.support import fnear
from machinekit import hal
import os
def test_component_creation():
global c1,c2
c1 = hal.Component("c1")
c1.newpin("s32out", hal.HAL_S32, hal.HAL_OUT, init=42)
c1.newpin("s32in", hal.HAL_S32, hal.HAL_IN)
c1.newpin("s32io", hal.HAL_S32, hal.HAL_IO)
c1.newpin("floatout", hal.HAL_FLOAT, hal.HAL_OUT, init=42)
c1.newpin("floatin", hal.HAL_FLOAT, hal.HAL_IN)
c1.newpin("floatio", hal.HAL_FLOAT, hal.HAL_IO)
c1.ready()
c2 = hal.Component("c2")
c2.newpin("s32out", hal.HAL_S32, hal.HAL_OUT, init=4711)
c2.newpin("s32in", hal.HAL_S32, hal.HAL_IN)
c2.newpin("s32io", hal.HAL_S32, hal.HAL_IO)
c2.newpin("floatout", hal.HAL_FLOAT, hal.HAL_OUT, init=4711)
c2.newpin("floatin", hal.HAL_FLOAT, hal.HAL_IN)
c2.newpin("floatio", hal.HAL_FLOAT, hal.HAL_IO)
c2.ready()
def test_net_existing_signal_with_bad_type():
hal.new_sig("f", hal.HAL_FLOAT)
try:
hal.net("f", "c1.s32out")
raise "should not happen"
except TypeError:
pass
del hal.signals["f"]
def test_net_match_nonexistant_signals():
try:
hal.net("nosuchsig", "c1.s32out","c2.s32out")
raise "should not happen"
except TypeError:
pass
def test_net_pin2pin():
try:
hal.net("c1.s32out","c2.s32out")
#TypeError: net: 'c1.s32out' is a pin - first argument must be a signal name
raise "should not happen"
except TypeError:
pass
def test_net_existing_signal():
hal.new_sig("s32", hal.HAL_S32)
assert hal.pins["c1.s32out"].linked == False
hal.net("s32", "c1.s32out")
assert hal.pins["c1.s32out"].linked == True
hal.new_sig("s32too", hal.HAL_S32)
try:
hal.net("s32too", "c1.s32out")
raise "should not happen"
except RuntimeError:
pass
del hal.signals["s32"]
def test_new_sig():
floatsig1 = hal.new_sig("floatsig1", hal.HAL_FLOAT)
try:
hal.new_sig("floatsig1", hal.HAL_FLOAT)
# RuntimeError: Failed to create signal floatsig1: HAL: ERROR: duplicate signal 'floatsig1'
raise "should not happen"
except RuntimeError:
pass
try:
hal.new_sig(32423 *32432, hal.HAL_FLOAT)
raise "should not happen"
except TypeError:
pass
try:
hal.new_sig(None, hal.HAL_FLOAT)
raise "should not happen"
except TypeError:
pass
try:
hal.new_sig("badtype", 1234)
raise "should not happen"
except TypeError:
pass
def test_check_net_args():
try:
hal.net()
except TypeError:
pass
try:
hal.net(None, "c1.s32out")
except TypeError:
pass
try:
hal.net("c1.s32out")
# TypeError: net: 'c1.s32out' is a pin - first argument must be a signal name
except TypeError:
pass
assert "noexiste" not in hal.signals
hal.net("noexiste", "c1.s32out")
assert "noexiste" in hal.signals
ne = hal.signals["noexiste"]
assert ne.writers == 1
assert ne.readers == 0
assert ne.bidirs == 0
try:
hal.net("floatsig1", "c1.s32out")
raise "should not happen"
except RuntimeError:
pass
(lambda s=__import__('signal'):
s.signal(s.SIGTERM, s.SIG_IGN))()
| lgpl-2.1 | -2,024,453,520,305,950,200 | 24.87218 | 99 | 0.610869 | false | 2.913633 | true | false | false |
daanwierstra/pybrain | pybrain/rl/learners/search/incrementalcomplexity/incrementalcomplexity.py | 1 | 1767 | __author__ = 'Tom Schaul, [email protected]'
# TODO: inheritance!
class IncrementalComplexitySearch(object):
""" Draft of an OOPS-inspired search that incrementally expands the search space
and the allocated time (to a population of search processes). """
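    # Minimal usage sketch (assuming `proc` is a search-process object exposing
    # search(), newSimilarInstance(), increaseMaxComplexity(), bestFitness and
    # evolvable, as used below):
    #
    #     search = IncrementalComplexitySearch(proc, maxPhases=5, searchSteps=20)
    #     best = search.optimize(verbose=True)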
def __init__(self, initSearchProcess, maxPhases = 10, searchSteps = 50, desiredFitness = None):
self.maxPhases = maxPhases
self.searchSteps = searchSteps
self.desiredFitness = desiredFitness
self.processes = [initSearchProcess]
self.phase = 0
def optimize(self, **args):
while self.phase <= self.maxPhases and not self.problemSolved():
self._onePhase(**args)
# increase the number of processes
for p in self.processes[:]:
self.processes.append(p.newSimilarInstance())
self.increaseSearchSpace()
self.phase += 1
# return best evolvable
best = -1e100
for p in self.processes:
if p.bestFitness > best:
best = p.bestFitness
res = p.evolvable
return res
def _onePhase(self, verbose = True, **args):
if verbose:
print 'Phase', self.phase
for p in self.processes:
p.search(self.searchSteps, **args)
if verbose:
print '', p.bestFitness, p.evolvable.weightLengths
def increaseSearchSpace(self):
for p in self.processes:
p.increaseMaxComplexity()
def problemSolved(self):
if self.desiredFitness != None:
for p in self.processes:
if p.bestFitness > self.desiredFitness:
return True
return False | bsd-3-clause | 4,677,843,574,635,908,000 | 33.666667 | 99 | 0.578947 | false | 4.330882 | false | false | false |
marteinn/wagtail-alt-generator | wagtailaltgenerator/tests/demosite/settings.py | 1 | 2042 | #!/usr/bin/env python
import os
DEBUG = False
TIME_ZONE = "Europe/Stockholm"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3"}}
SECRET_KEY = "not needed"
USE_TZ = True
LANGUAGE_CODE = "en"
INSTALLED_APPS = [
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.sites",
"django.contrib.admin",
"django.contrib.messages",
"wagtail.core",
"wagtail.sites",
"wagtail.users",
"wagtail.images",
"wagtail.documents",
"taggit",
"wagtailaltgenerator",
"wagtailaltgenerator.tests.demopages",
"wagtailaltgenerator.tests.demosite",
]
ROOT_URLCONF = "wagtailaltgenerator.tests.demosite.urls"
MIDDLEWARE = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"wagtail.core.middleware.SiteMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
)
ALT_GENERATOR_MIN_CONFIDENCE = 0
COMPUTER_VISION_API_KEY = os.environ.get("COMPUTER_VISION_API_KEY", None)
COMPUTER_VISION_REGION = "canada"
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
}
}
]
| mit | -7,871,863,058,548,776,000 | 27.760563 | 78 | 0.669931 | false | 3.816822 | false | true | false |
jparicka/twitter-tools | profiles/models.py | 1 | 1328 | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
class Profile(models.Model):
user = models.OneToOneField(User)
name = models.CharField(max_length=100, blank=True, verbose_name="name", db_index=True)
headline = models.CharField(max_length=512, blank=True, verbose_name="name", db_index=True)
secret = models.CharField(max_length=100, blank=True, verbose_name="secret_key", db_index=True)
country = models.CharField(max_length=10, blank=True, verbose_name="country", db_index=True)
language = models.CharField(max_length=10, blank=True, verbose_name="language", db_index=True)
mobile = models.CharField(max_length=20, blank=True, verbose_name="mobile_number_1")
picture = models.URLField(blank=True, verbose_name="picture")
oauth_token = models.CharField(max_length=200, blank=True)
oauth_secret = models.CharField(max_length=200, blank=True)
street_address_1 = models.CharField(max_length=100, blank=True, verbose_name="street_address_1")
street_address_2 = models.CharField(max_length=100, blank=True, verbose_name="street_address_2")
street_address_3 = models.CharField(max_length=100, blank=True, verbose_name="street_address_3")
initial_assessment = models.BooleanField(default=False)
| mit | -2,055,734,439,932,275,200 | 44.827586 | 100 | 0.730422 | false | 3.47644 | false | false | false |
rcatwood/Savu | savu/data/data_structures/data_type.py | 1 | 4089 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: data_type
:platform: Unix
:synopsis: A module containing classes for different input data types other
than hdf5.
.. moduleauthor:: Nicola Wadeson <[email protected]>
"""
import os
import numpy as np
import fabio
class DataTypes(object):
def __getitem__(self, index):
""" Override __getitem__ and map to the relevant files """
raise NotImplementedError("__getitem__ must be implemented.")
def get_shape(self):
""" Get full stiched shape of a stack of files"""
raise NotImplementedError("get_shape must be implemented.")
class FabIO(DataTypes):
""" This class loads any of the FabIO python module supported image
formats. """
def __init__(self, folder, Data, dim, shape=None):
self._data_obj = Data
self.nFrames = None
self.start_file = fabio.open(self.__get_file_name(folder))
self.frame_dim = dim
self.image_shape = (self.start_file.dim2, self.start_file.dim1)
if shape is None:
self.shape = (self.nFrames,)
else:
self.shape = shape
def __getitem__(self, index):
size = [len(np.arange(i.start, i.stop, i.step)) for i in index]
data = np.empty(size)
tiffidx = [i for i in range(len(index)) if i not in self.frame_dim]
# print "original = ", index
index, frameidx = self.__get_indices(index, size)
for i in range(len(frameidx)):
# print "amended = ", index[i]
# print frameidx[i], [index[i][n] for n in tiffidx]
data[index[i]] = \
self.start_file.getframe(self.start_no + frameidx[i])\
.data[[index[i][n] for n in tiffidx]]
return data
def __get_file_name(self, folder):
import re
files = os.listdir(folder)
self.nFrames = len(files)
fname = sorted(files)[0]
self.start_no = [int(s) for s in re.findall(r'\d+', fname)][-1]
print "start number is", self.start_no
return folder + "/" + fname
def get_shape(self):
return self.shape + self.image_shape
def __get_idx(self, dim, sl, shape):
c = int(np.prod(shape[0:dim]))
r = int(np.prod(shape[dim+1:]))
values = np.arange(sl.start, sl.stop, sl.step)
return np.ravel(np.kron(values, np.ones((r, c))))
def __get_indices(self, index, size):
""" Get the indices for the new data array and the file numbers. """
sub_idx = np.array(index)[np.array(self.frame_dim)]
sub_size = [size[i] for i in self.frame_dim]
idx_list = []
for dim in range(len(sub_idx)):
idx = self.__get_idx(dim, sub_idx[dim], sub_size)
idx_list.append(idx.astype(int))
lshape = idx_list[0].shape[0]
index = np.tile(index, (lshape, 1))
frameidx = np.zeros(lshape)
for dim in range(len(sub_idx)):
start = index[0][self.frame_dim[dim]].start
index[:, self.frame_dim[dim]] = \
[slice(i-start, i-start+1, 1) for i in idx_list[dim]]
frameidx[:] += idx_list[dim]*np.prod(self.shape[dim+1:])
return index.tolist(), frameidx.astype(int)
class Map_3d_to_4d_h5(DataTypes):
""" This class converts a 3D dataset to a 4D dataset. """
def __init__(self, backing_file, shape):
self.shape = shape
def __getitem__(self, index):
print index
def get_shape(self):
return self.shape
| gpl-3.0 | 2,109,054,481,640,022,500 | 33.361345 | 78 | 0.604304 | false | 3.518933 | false | false | false |
pablolizardo/dotfiles | inkscape/symbols/generate.py | 1 | 1934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
inkscapeSymbolGenerator: A inkscape symbol library generator
Copyright (C) 2015 Xavi Julián Olmos
See the file LICENSE for copying permission.
"""
import sys, os
import logging
from optparse import OptionParser
#### Objective
# If a folder is selected, merge all files into a single SVG and then
# if a single file is selected, clean it with SVGO
# Remove styles then
# Save
#### To do
# Compact the script format; for the ifs, see if alternatives exist.
# Use python paths - pythonic path construction.
# Use the file name.
# Find a regex to remove tags in cleanSVGStyles()
# Append content to a file.
# Migrate from OptionParser to https://docs.python.org/3/library/optparse.html
def cleanSVGStyles(file):
print('Cleaning SVG....')
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('--folder', help='Folder to generate icons from', dest='folder')
optp.add_option('--file', help='File to generate icon from', dest='file')
optp.add_option('-o', '--output', help='Output file', dest='output')
opts, args = optp.parse_args()
if opts.folder is None:
if opts.file is None:
optp.error('At list one value for file or folder is needed')
else:
if opts.output is None:
os.system('svgo ' + opts.file)
else:
os.system('svgo ' + opts.file + ' -o ' + opts.output)
else:
if opts.file is None:
if opts.output is None:
os.system('svgo ' + opts.folder)
else:
os.system('cat ' + opts.folder + '/*.svg > ' + opts.output)
os.system('svgo ' + opts.output + ' -o ' + opts.output)
cleanSVGStyles(opts.output)
else:
optp.error('File and folder cannot exist')
| gpl-2.0 | -4,980,966,128,883,437,000 | 31.2 | 84 | 0.623706 | false | 3.551471 | false | false | false |
broxtronix/distributed | distributed/diagnostics/progress.py | 1 | 9766 | from __future__ import print_function, division, absolute_import
from collections import defaultdict
import logging
import sys
import threading
import time
from timeit import default_timer
import dask
from toolz import valmap, groupby, concat
from tornado.ioloop import PeriodicCallback, IOLoop
from tornado import gen
from .plugin import SchedulerPlugin
from ..utils import sync, key_split, tokey
logger = logging.getLogger(__name__)
def dependent_keys(keys, who_has, processing, stacks, dependencies, exceptions,
complete=False):
""" All keys that need to compute for these keys to finish """
out = set()
errors = set()
stack = list(keys)
while stack:
key = stack.pop()
if key in out:
continue
if not complete and (who_has.get(key) or
key in processing or
key in stacks):
continue
if key in exceptions:
errors.add(key)
if not complete:
continue
out.add(key)
stack.extend(dependencies.get(key, []))
return out, errors
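# Illustrative example: for a graph where 'z' depends on 'y', 'y' depends on
# 'x', and only 'x' is already in memory, dependent_keys(['z'], ...) with
# complete=False returns ({'z', 'y'}, errors) -- the keys still to compute.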
class Progress(SchedulerPlugin):
""" Tracks progress of a set of keys or futures
On creation we provide a set of keys or futures that interest us as well as
a scheduler. We traverse through the scheduler's dependencies to find all
relevant keys on which our keys depend. We then plug into the scheduler to
learn when our keys become available in memory at which point we record
their completion.
State
-----
keys: set
Set of keys that are not yet computed
all_keys: set
Set of all keys that we track
This class performs no visualization. However it is used by other classes,
notably TextProgressBar and ProgressWidget, which do perform visualization.
"""
def __init__(self, keys, scheduler, minimum=0, dt=0.1, complete=False):
self.keys = {k.key if hasattr(k, 'key') else k for k in keys}
self.keys = {tokey(k) for k in self.keys}
self.scheduler = scheduler
self.complete = complete
self._minimum = minimum
self._dt = dt
self.last_duration = 0
self._start_time = default_timer()
self._running = False
self.status = None
@gen.coroutine
def setup(self):
keys = self.keys
while not keys.issubset(self.scheduler.task_state):
yield gen.sleep(0.05)
self.keys = None
self.scheduler.add_plugin(self) # subtle race condition here
self.all_keys, errors = dependent_keys(keys, self.scheduler.who_has,
self.scheduler.processing, self.scheduler.stacks,
self.scheduler.dependencies, self.scheduler.exceptions,
complete=self.complete)
if not self.complete:
self.keys = self.all_keys.copy()
else:
self.keys, _ = dependent_keys(keys, self.scheduler.who_has,
self.scheduler.processing, self.scheduler.stacks,
self.scheduler.dependencies, self.scheduler.exceptions,
complete=False)
self.all_keys.update(keys)
self.keys |= errors & self.all_keys
if not self.keys:
self.stop(exception=None, key=None)
logger.debug("Set up Progress keys")
for k in errors:
self.transition(k, None, 'erred', exception=True)
def transition(self, key, start, finish, *args, **kwargs):
if key in self.keys and start == 'processing' and finish == 'memory':
logger.debug("Progress sees key %s", key)
self.keys.remove(key)
if not self.keys:
self.stop()
if key in self.all_keys and finish == 'erred':
logger.debug("Progress sees task erred")
self.stop(exception=kwargs['exception'], key=key)
if key in self.keys and finish == 'forgotten':
logger.debug("A task was cancelled (%s), stopping progress", key)
self.stop(exception=True)
def restart(self, scheduler):
self.stop()
def stop(self, exception=None, key=None):
if self in self.scheduler.plugins:
self.scheduler.plugins.remove(self)
if exception:
self.status = 'error'
else:
self.status = 'finished'
logger.debug("Remove Progress plugin")
class MultiProgress(Progress):
""" Progress variant that keeps track of different groups of keys
See Progress for most details. This only adds a function ``func=``
that splits keys. This defaults to ``key_split`` which aligns with naming
conventions chosen in the dask project (tuples, hyphens, etc..)
State
-----
keys: dict
Maps group name to set of not-yet-complete keys for that group
all_keys: dict
Maps group name to set of all keys for that group
Examples
--------
>>> split = lambda s: s.split('-')[0]
>>> p = MultiProgress(['y-2'], func=split) # doctest: +SKIP
>>> p.keys # doctest: +SKIP
{'x': {'x-1', 'x-2', 'x-3'},
'y': {'y-1', 'y-2'}}
"""
def __init__(self, keys, scheduler=None, func=key_split, minimum=0, dt=0.1,
complete=False):
self.func = func
Progress.__init__(self, keys, scheduler, minimum=minimum, dt=dt,
complete=complete)
@gen.coroutine
def setup(self):
keys = self.keys
while not keys.issubset(self.scheduler.tasks):
yield gen.sleep(0.05)
self.keys = None
self.scheduler.add_plugin(self) # subtle race condition here
self.all_keys, errors = dependent_keys(keys, self.scheduler.who_has,
self.scheduler.processing, self.scheduler.stacks,
self.scheduler.dependencies, self.scheduler.exceptions,
complete=self.complete)
if not self.complete:
self.keys = self.all_keys.copy()
else:
self.keys, _ = dependent_keys(keys, self.scheduler.who_has,
self.scheduler.processing, self.scheduler.stacks,
self.scheduler.dependencies, self.scheduler.exceptions,
complete=False)
self.all_keys.update(keys)
self.keys |= errors & self.all_keys
if not self.keys:
self.stop(exception=None, key=None)
# Group keys by func name
self.keys = valmap(set, groupby(self.func, self.keys))
self.all_keys = valmap(set, groupby(self.func, self.all_keys))
for k in self.all_keys:
if k not in self.keys:
self.keys[k] = set()
for k in errors:
self.transition(k, None, 'erred', exception=True)
logger.debug("Set up Progress keys")
def transition(self, key, start, finish, *args, **kwargs):
if start == 'processing' and finish == 'memory':
s = self.keys.get(self.func(key), None)
if s and key in s:
s.remove(key)
if not self.keys or not any(self.keys.values()):
self.stop()
if finish == 'erred':
logger.debug("Progress sees task erred")
k = self.func(key)
if (k in self.all_keys and key in self.all_keys[k]):
self.stop(exception=kwargs.get('exception'), key=key)
if finish == 'forgotten':
k = self.func(key)
if k in self.all_keys and key in self.all_keys[k]:
logger.debug("A task was cancelled (%s), stopping progress", key)
self.stop(exception=True)
def format_time(t):
"""Format seconds into a human readable form.
>>> format_time(10.4)
'10.4s'
>>> format_time(1000.4)
'16min 40.4s'
>>> format_time(100000.4)
'27hr 46min 40.4s'
"""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return '{0:2.0f}hr {1:2.0f}min {2:4.1f}s'.format(h, m, s)
elif m:
return '{0:2.0f}min {1:4.1f}s'.format(m, s)
else:
return '{0:4.1f}s'.format(s)
class AllProgress(SchedulerPlugin):
""" Keep track of all keys, grouped by key_split """
def __init__(self, scheduler):
self.all = defaultdict(set)
self.nbytes = defaultdict(lambda: 0)
self.state = defaultdict(lambda: defaultdict(set))
self.scheduler = scheduler
for key, state in self.scheduler.task_state.items():
k = key_split(key)
self.all[k].add(key)
self.state[state][k].add(key)
if key in self.scheduler.nbytes:
self.nbytes[k] += self.scheduler.nbytes[key]
scheduler.add_plugin(self)
def transition(self, key, start, finish, *args, **kwargs):
k = key_split(key)
self.all[k].add(key)
try:
self.state[start][k].remove(key)
except KeyError: # TODO: remove me once we have a new or clean state
pass
if finish != 'forgotten':
self.state[finish][k].add(key)
else:
self.all[k].remove(key)
if not self.all[k]:
del self.all[k]
try:
del self.nbytes[k]
except KeyError:
pass
for v in self.state.values():
try:
del v[k]
except KeyError:
pass
if start == 'memory':
self.nbytes[k] -= self.scheduler.nbytes[key]
if finish == 'memory':
self.nbytes[k] += self.scheduler.nbytes[key]
def restart(self, scheduler):
self.all.clear()
self.state.clear()
| bsd-3-clause | -4,504,122,622,001,199,000 | 32.331058 | 81 | 0.574135 | false | 3.991009 | false | false | false |
iojancode/botija | plug.livolo.py | 1 | 1887 | import time
import sys
import RPi.GPIO as GPIO
off = '1242424352424342424242424242425342524342'
b0 = '12424243524243424242424242424242424242424242'
b1 = '124242435242434242424242424242534242424242'
b2 = '1242424352424342424242424242425353424242'
b3 = '124242435242434242424242424242424253424242'
b4 = '124242435242434242424242424242524342424242'
b5 = '124242435242434242424242424242425342424242'
b6 = '1242424352424342424242424242425342534242'
b7 = '124242435242434242424242424242424242534242'
b8 = '124242435242434242424242424242524243424242'
b9 = '124242435242434242424242424242425243424242'
if sys.argv[1:] == ['off']:
NUM_ATTEMPTS = 1300
else:
NUM_ATTEMPTS = 170
TRANSMIT_PIN = 17
def transmit_code(code):
'''Transmit a chosen code string using the GPIO transmitter'''
GPIO.setmode(GPIO.BCM)
GPIO.setup(TRANSMIT_PIN, GPIO.OUT)
for t in range(NUM_ATTEMPTS):
for i in code:
if i == '1':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00055);
GPIO.output(TRANSMIT_PIN, 0)
elif i == '2':
GPIO.output(TRANSMIT_PIN, 0)
time.sleep(.00011);
GPIO.output(TRANSMIT_PIN, 1)
elif i == '3':
GPIO.output(TRANSMIT_PIN, 0)
time.sleep(.000303);
GPIO.output(TRANSMIT_PIN, 1)
elif i == '4':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00011);
GPIO.output(TRANSMIT_PIN, 0)
elif i == '5':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00029);
GPIO.output(TRANSMIT_PIN, 0)
else:
continue
GPIO.output(TRANSMIT_PIN, 0)
GPIO.cleanup()
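# Usage sketch: each call replays one of the code strings defined above, e.g.
#   transmit_code(b1)   # send the button-1 code NUM_ATTEMPTS times
#   transmit_code(off)  # send the 'off' code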
if __name__ == '__main__':
for argument in sys.argv[1:]:
exec('transmit_code(' + str(argument) + ')') | gpl-3.0 | -279,637,103,844,104,030 | 32.122807 | 66 | 0.606253 | false | 2.867781 | false | false | false |
flipdazed/SoftwareDevelopment | common.py | 1 | 10121 | #!/usr/bin/env python
# encoding: utf-8
# contains the common actions
import random
from logs import *
class Card(object):
"""Creates the card objects used in game"""
def __init__(self, name, attack, money, cost, name_padding=15, num_padding=2):
self.name = name
self.cost = cost
self.attack = attack
self.money = money
self.name_padding = name_padding
self.num_padding = num_padding
self.padded_vals = (
str(self.cost).ljust(self.num_padding),
self.name.ljust(self.name_padding),
str(self.attack).ljust(self.num_padding),
str(self.money).ljust(self.num_padding),
)
def __str__(self):
"""outputs string of the card details when called as print Card()"""
s_out = "Cost: {0} ~ {1} ~ Stats ... Attack: {2}, Money: {3}".format(
*self.padded_vals)
return s_out
def get_attack(self):
return self.attack
def get_money(self):
return self.money
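# Illustrative example (card values taken from the deck_creator docstring below):
#   print Card('Archer', 3, 0, 2)
# prints something like:
#   Cost: 2  ~ Archer          ~ Stats ... Attack: 3 , Money: 0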
@wrap_all(log_me)
class CommonActions(object):
"""Contains the common actions
used by all game classes
"""
def __init__(self):
# self.art = Art()
pass
def deck_to_hand(self):
"""
Move cards from central.central deck
to active central.central deck
Container is the dictionary within the
        class that needs to be called with the
getattr()
"""
# For each index in player hand
# Refills player hand from player deck.
# If deck is empty, discard pile is shuffled
# and becomes deck
for i in xrange(0, self.hand_size):
# Shuffle deck computer.pC['hand_size times
# if length of deck = 0
# Will only be done once
if len(self.deck) == 0:
self.logger.debug("Deck length is zero!")
if len(self.discard) == 0:
self.logger.debug("Discard length is also zero!")
self.logger.debug("Exiting the deck_to_hand routine as no more cards.")
return
random.shuffle(self.discard) # Shuffle discard pile
self.logger.debug("shuffled deck")
self.deck = self.discard # Make deck the discard
self.discard = [] # empty the discard pile
self.logger.debug("Moved discard pile to deck. Discard pile set to empty.")
card = self.deck.pop()
self.hand.append(card)
self.logger.debug("Iteration #{}: Drawn {} from deck and added to hand".format(i,card.name))
pass
def print_active_cards(self, title=None, index=False):
"""Display cards in active"""
if title is None: title = "Your Played Cards"
# switch depending on player type
self.logger.debug("Actor is: {}".format(type(self).__name__))
title = self.art.make_title(title)
self.player_logger(title)
self._print_cards(self.active, index=index)
self.player_logger(self.art.underline)
pass
def deck_creator(self, deck_list):
"""Creates the deck from a list of dictionaries
_Input_
list of dicts.
dict contents:
"card" : dict containing all **kwargs for Card()
"count" : number of cards with these settings to create
_Output_
list of Card() types
Expected input example:
[{"count":1, "card":{"name":'Archer', "attack":3, "money":0, "cost":2}},
{"count":2, "card":{"name":'Baker', "attack":0, "money":0, "cost":2}}]
Expected Output example:
[Card('Archer', 3,0,2), Card('Baker', 0,0,2), Card('Baker', 0,0,2)]
"""
deck = [] # get deck ready
for card in deck_list:
for _ in xrange(card["count"]):
# passes the dictionary as a keyword arg (**kwarg)
deck.append(Card(
name_padding=self.parent.max_card_name_len,
num_padding=2,
**card["params"]
))
self.logger.debug("Created {}x{}".format(card["count"], card["params"]["name"]))
return deck
def _print_cards(self, cards, index=False):
"""Prints out the cards provided"""
# max card name length
if len(cards) == 0:
self.logger.game(self.art.index_buffer+ \
"Nothing interesting to see here...")
else:
for i, card in enumerate(cards):
num_str = "[{}] ".format(i) if index else self.art.index_buffer
self.logger.game(num_str + "{}".format(card))
pass
@wrap_all(log_me)
class CommonUserActions(object):
"""Contains actions for user and computer"""
def __init__(self):
pass
def newgame(self):
# revert to initial state
for attr, val in self.init.iteritems():
setattr(self, attr, val)
self.active = []
self.hand = []
self.discard = []
self.deck = self.deck_creator(self.deck_settings)
pass
def end_turn(self):
"""Ends the turn of the user"""
self.logger.debug("Ending Turn: {}".format(self.name))
# If player has cards in the hand add to discard pile
self.discard_hand()
        # If there are cards in active deck
# then move all cards from active to discard
self.discard_active_cards()
# Move cards from deck to hand
self.deck_to_hand()
pass
def play_all_cards(self):
"""transfer all cards from hand to active
add values in hand to current totals
should only be used by User and Computer
"""
for i in xrange(0, len(self.hand)):
card = self.hand.pop()
self.active.append(card)
self.logger.debug("Iteration #{}: Drawn {} from deck and added to active deck".format(i,card.name))
self.__add_values_to_total(card)
pass
def play_a_card(self, card_number):
"""plays a specific card...
Transfer card to active
add values in hand to current totals
"""
i=0
card_number = int(card_number)
# Transfer card to active
# add values in hand to current totals
card = self.hand.pop(card_number)
self.active.append(card)
self.logger.debug("Iteration #{}: Drawn {} from deck and added to active deck".format(i,card.name))
self.__add_values_to_total(card)
pass
def __add_values_to_total(self, card):
"""Adds money and attack to total"""
money_i = card.get_money()
attack_i = card.get_attack()
self.logger.debug("Money:{}+{} Attack:{}+{}".format(self.money, money_i, self.attack, attack_i))
self.money += money_i
self.attack += attack_i
pass
def discard_hand(self):
"""If there are cards in the hand add to discard pile"""
if (len(self.hand) > 0 ):
# Iterate through all cards in player hand
for i in xrange(0, len(self.hand)):
card = self.hand.pop()
self.logger.debug("Iteration #{}: Moving {} from hand and added to discard pile".format(i, card.name))
self.discard.append(card)
else:
self.logger.debug("Hand length is zero. No cards to discard.")
pass
def discard_active_cards(self):
"""If there cards in PC active deck
then move all cards from active to discard"""
if (len(self.active) > 0 ):
for i in xrange(0, len(self.active)):
card = self.active.pop()
self.logger.debug("Iteration #{}: Moving {} from hand and added to discard pile".format(i, card.name))
self.discard.append(card)
else:
self.logger.debug("Active Deck length is zero. No cards to discard.")
pass
def display_values(self, attack=None, money=None):
""" Display player values"""
# allows forced values
if attack is None: attack = self.attack
if money is None: money = self.money
padded_name = self.name.ljust(self.parent.max_player_name_len)
out_str = "{} Values :: ".format(padded_name)
out_str += " Attack: {} Money: {}".format(
attack, money)
self.player_logger("")
self.player_logger(out_str)
self.player_logger("")
pass
def show_health(self):
"""Shows players' health"""
# creates an attribute based on the class
padded_name = self.name.ljust(self.parent.max_player_name_len)
out_str = "{} Health : ".format(padded_name)
out_str += "{}".format(self.health)
self.player_logger(out_str)
pass
def attack_player(self, other_player):
""" Attack another player
other_player expected input is a class
that corresponds to another sibling player
an example of this from self = game.User() would be:
self.attack(self.parent.computer)
        which would attack the computer from the player
"""
self.logger.debug("{0} Attacking {1} with strength {2}".format(self.name, other_player.name, self.attack))
self.logger.debug("{0} Health before attack: {1}".format(other_player.name, other_player.health))
other_player.health -= self.attack
self.attack = 0
self.logger.debug("{0} Attack: {1}".format(self.name, self.attack))
pass
def reset_vals(self):
"""resets money and attack"""
self.logger.debug("Money and Attack set to 0 for {}".format(self.name))
self.money = 0
self.attack = 0
pass | gpl-3.0 | 8,199,571,315,852,250,000 | 35.021352 | 118 | 0.545993 | false | 4.051641 | false | false | false |
tnagorra/nspell | lib/misc.py | 1 | 2396 | # cython: language_level=3
import re
class Mreplace:
def __init__(self, mydict):
self._mydict = mydict
self._rx = re.compile('|'.join(map(re.escape, self._mydict)))
def replace(self, text):
return self._rx.sub(lambda x: self._mydict[x.group(0)], text)
class Mmatch:
def __init__(self, mylist):
self._rx = re.compile('|'.join(mylist))
def match(self, text):
return self._rx.match(text)
_matches = {
'.*[?{}(),/\\"\';+=_*&^$#@!~`|\[\]]+.*',
'.*[a-zA-Z0-9]+.*',
'[-+]?[०-९]+(\.[०-९]+)?'
}
_validator = Mmatch(_matches)
def valid(mystr):
return not _validator.match(mystr)
_replacements = {
'':'',
'-':'-',
'—':'-',
'–':'-',
' :':' : ',
'।':' । ',
'’':' " ',
'‘':' " ',
'“':' " ',
'”':' " ',
'"':' " ',
"'":' " ',
'?':' ? ',
'!':' ! ',
',':' , ',
'/':' / ',
'÷':' ÷ ',
'…':' … ',
'{':' { ',
'}':' } ',
'[':' [ ',
']':' ] ',
'(':' ( ',
')':' ) ',
'=': ' = ',
'***': ' ',
'**':' ',
'*':' ',
'~': ' ',
'`': ' ',
'#': ' ',
'...': ' ... ',
'..': ' ... ',
'.': ' . '
}
_tokenizer = Mreplace(_replacements)
def tokenize(mystr):
return _tokenizer.replace(mystr).split()
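# Illustrative example: punctuation listed in _replacements is padded with
# spaces before splitting, so e.g.
#   tokenize('राम्रो छ।')  ->  ['राम्रो', 'छ', '।']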
# FIXME tokenizer to split the non-valid words
# TODO Use regex to match for 'ं'
# Dictionary of characters that have similar phonics, normalized words
# will have zero edit distance if they differ in only _phonics
_phonics = {
'ा':'आ',
'ो':'ओ',
'ी':'इ',
'ि':'इ',
'ई':'इ',
'ू':'उ',
'ु':'उ',
'ऊ':'उ',
'े':'ए',
'्':'',
'श':'स',
'ष':'स',
'व':'ब',
'':'', # Contains a non-joiner
}
_normalizer = Mreplace(_phonics)
# Normalize a word
def normalize(word):
return _normalizer.replace(word)
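# Illustrative example: words differing only in the phonically-similar
# characters above normalize to the same string, e.g.
#   normalize('शाला') == normalize('साला')   # both become 'सआलआ'
# so the spell checker can treat them as a zero-distance match.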
_dependent = {
'ँ':'',
'ं':'',
'ः':'',
'ा':'',
'ि':'',
'ी':'',
'ु':'',
'ू':'',
'ृ':'',
'े':'',
'ै':'',
'ो':'',
'ौ':'',
'्':'',
'':'',
}
_len = Mreplace(_dependent)
def length(mystr):
x = _len.replace(mystr)
return (len(x),x)
| gpl-3.0 | 3,902,317,311,436,835,300 | 16.40458 | 70 | 0.348246 | false | 2.882427 | false | false | false |
dkkline/CanSat14-15 | feeder/feeder.py | 1 | 3082 | """
Contains a tornado-based WebSocket server in charge of supplying
connected clients with live or replay data.
"""
import tornado.ioloop
import tornado.web
import tornado.websocket
from collections import deque
from pprint import pprint
import json
from .config import CACHE_SIZE, PORT, FREQUENCY
from groundstation.config import COM_FILE, BIND_ADDRESS
from groundstation.parse import parse_line
from groundstation.exceptions import InvalidLine
from groundstation.utilities import Buffer
com_handle = open(COM_FILE, "r")
buf = Buffer(com_handle)
clients = []
cache = deque(maxlen=CACHE_SIZE)
class BaseWebSocket(tornado.websocket.WebSocketHandler):
"""
A base class for all WebSocket interfaces.
"""
def check_origin(self, origin):
return True # All clients are welcome
class LiveDataWebSocket(BaseWebSocket):
"""
Serves clients connected to the live endpoint with live data.
"""
def open(self):
"""
Called when a client opens the connection.
"""
clients.append(self)
print("A client has opened a connection.")
for data_point in cache:
self.write_message(data_point)
def on_close(self):
"""
Called when a client closes the connection.
"""
clients.remove(self)
print("A client closed its connection.")
def on_message(self, message):
"""
Called when a client sends a message.
"""
print("[WARNNING] Got message: {}".format(message))
class ReplayWebSocket(BaseWebSocket):
"""
Serves clients connected to the replay endpoint.
"""
def broadcast(message):
"""
Broadcasts a message to all the connected clients.
"""
for client in clients:
client.write_message(message)
def get_data():
"""
Called by the ioloop to get data from the listener.
"""
try:
data = parse_line(buf)
except InvalidLine:
return
rel_data = {
"NTC": data["Temp_NTC"],
"Pressure": data["Press"],
"Height": data["Height"],
"Gyroscope": data["GyrZ"] / 360 * 60, # RPM
"Latitude": data["Lat"],
"Longitude": data["Long"]
}
# import random
# if random.randint(0, 10) == 5:
# rel_data["Latitude"] = float(random.randint(0, 10))
# rel_data["Longitude"] = float(random.randint(0, 10))
# rel_data["Height"] = float(random.randint(0, 1500))
pprint(rel_data)
post_data(rel_data)
# print(line, end="")
def post_data(data):
"""
Called by ``get_data``.
Sends ``data`` to the connected clients.
"""
json_data = json.dumps(data)
broadcast(json_data)
app = tornado.web.Application([
(r"/live", LiveDataWebSocket),
(r"/replay", ReplayWebSocket)
])
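# Minimal client-side sketch (assumes the third-party `websocket-client`
# package and a server reachable on localhost at PORT):
#
#   import json
#   import websocket
#   ws = websocket.create_connection("ws://localhost:%d/live" % PORT)
#   while True:
#       print(json.loads(ws.recv()))   # e.g. {"NTC": ..., "Height": ..., ...}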
if __name__ == '__main__':
app.listen(PORT, BIND_ADDRESS)
loop = tornado.ioloop.IOLoop.instance()
getter = tornado.ioloop.PeriodicCallback(get_data, FREQUENCY,
io_loop=loop)
getter.start()
loop.start()
| mit | -8,494,810,209,948,938,000 | 21.661765 | 65 | 0.61843 | false | 3.857322 | false | false | false |
panthorstudios/Gold-Rush | oldgame.py | 1 | 10225 | from random import random
from random import randint
import pygame
from pygame.locals import *
from miner import Miner
from explosion import Explosion
class Game(object):
TITLE = "Gold Rush!"
BOARD_LEFT = 20
BOARD_TOP = 130
SQUARE_SIZE = 32
BLACK = (0,0,0)
GREEN=(128,255,128)
YELLOW=(255,255,128)
RED=(255,128,128)
FRAMES_PER_SECOND = 30
ASSAY_X = 540
ASSAY_Y = 84
CHARGES_X = 180
CASH_X = 20
CASH_OFFSET = 30
GOLD_X = 16
CHARGES_OFFSET = 32
HEALTH_X =CHARGES_X + 40
TITLE_X = 340
def display_gold(self):
scoretext='%03d' % self.gold
for i in range(len(scoretext)):
num=int(scoretext[i])*24
pos=i*24
self.screen.blit(self.digits,(self.CASH_X+self.CASH_OFFSET+(i*24),20),(num,0,24,35))
def display_charges(self):
scoretext='%02d' % self.charges
for i in range(len(scoretext)):
num=int(scoretext[i])*24
pos=i*24
self.screen.blit(self.digits,(self.CHARGES_X+self.CHARGES_OFFSET+(i*24),20),(num,0,24,35))
def display_cash(self):
scoretext='%05d' % self.cash
for i in range(len(scoretext)):
num=int(scoretext[i])*24
pos=i*24
self.screen.blit(self.digits,(self.CASH_X+self.CASH_OFFSET+(i*24),66),(num,0,24,35))
def display_health(self):
h=int(84*(self.health/100.0))
b=84-h
c=self.GREEN
if self.health<20:
c=self.RED
elif self.health<40:
c=self.YELLOW
self.screen.fill(c,(self.HEALTH_X,70,h,32))
self.screen.fill(self.BLACK,(self.HEALTH_X+h,70,b,32))
# num=int(scoretext[i])*24
# pos=i*24
# self.screen.blit(self.digits,(self.CASH_X+self.CASH_OFFSET+(i*24),66),(num,0,24,35))
def __init__(self):
pygame.mixer.pre_init(44100,-16,2,2048)
pygame.init()
self.screen=pygame.display.set_mode((680,600))
pygame.display.set_caption(self.TITLE)
self.pressedkey=None
self.bellsound=pygame.mixer.Sound('assets/sounds/bell.ogg')
self.chargesound=pygame.mixer.Sound('assets/sounds/bomb.ogg')
self.yeehawsound=pygame.mixer.Sound('assets/sounds/yeehaw.ogg')
self.kachingsound=pygame.mixer.Sound('assets/sounds/kaching.ogg')
self.board=[]
self.bgbase=pygame.image.load('assets/images/background.png')
self.bg=pygame.image.load('assets/images/background.png')
self.digits=pygame.image.load('assets/images/digits.png')
self.gamearea=pygame.Surface(self.bg.get_size())
self.is_playing=False
# currently 2 nugget images
self.nuggets=[]
self.nuggets.append(pygame.image.load('assets/images/gold01-%dpx.png' % self.SQUARE_SIZE))
self.nuggets.append(pygame.image.load('assets/images/gold02-%dpx.png' % self.SQUARE_SIZE))
self.explosion=Explosion(0,0,self.SQUARE_SIZE)
self.explosion_group=pygame.sprite.RenderPlain(self.explosion)
self.miner=Miner(0,0)
self.clock=pygame.time.Clock()
# add title
text=pygame.image.load('assets/images/text_title.png')
self.screen.blit(text,(self.TITLE_X,self.BOARD_LEFT))
# add assay office
self.office=pygame.image.load('assets/images/assayoffice.png')
self.screen.blit(self.office,(self.ASSAY_X+self.BOARD_LEFT,self.ASSAY_Y))
self.cash=0
self.gold=0
self.charges=10
self.health=100
# add "Gold"
text=pygame.image.load('assets/images/nugget.png')
self.screen.blit(text,(self.GOLD_X,self.BOARD_LEFT))
self.display_gold()
# add "Cash"
text=pygame.image.load('assets/images/text_cash.png')
self.screen.blit(text,(self.CASH_X,66))
self.display_cash()
# add "Charges"
text=pygame.image.load('assets/images/dynamite.png')
self.screen.blit(text,(self.CHARGES_X,16))
self.display_charges()
# add "Miner head"
text=pygame.image.load('assets/images/miner_head.png')
self.screen.blit(text,(self.CHARGES_X,66))
self.display_health()
self.setup()
def setup(self):
# initialize score items
self.cash=0
self.gold=0
self.charges=10
# load background image every time
self.bg=pygame.image.load('assets/images/background.png')
#redraw assay office
self.bg.blit(self.office,(self.ASSAY_X,self.ASSAY_Y-self.BOARD_TOP))
self.board=[]
# top row of empty spaces
pathsup=2
self.board.append([' ']*20)
self.board.append(['*']*20)
for y in range(2,14):
row=[]
for x in range(20):
c='*'
if random()<0.4:
# make a hole
self.bg.fill(self.BLACK,(x*self.SQUARE_SIZE,y*self.SQUARE_SIZE,self.SQUARE_SIZE,self.SQUARE_SIZE))
c=' '
if y>1:
c='G'
nugg=self.nuggets[0 if random()<0.5 else 1]
self.bg.blit(nugg,(x*self.SQUARE_SIZE,y*self.SQUARE_SIZE))
row.append(c)
self.board.append(row)
# add soil
self.gamearea.blit(self.bg,(0,0))
pygame.display.flip()
def print_board(self):
for row in self.board:
print ' '.join(row)
def mainloop(self):
deltat=self.clock.tick(self.FRAMES_PER_SECOND)
tx=self.miner.x
ty=self.miner.y
self.miner_group.clear(self.gamearea,self.bg)
self.explosion_group.clear(self.gamearea,self.bg)
pressedspace=False
for event in pygame.event.get():
#print event
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit(0)
elif event.key in (K_RIGHT,K_LEFT,K_UP,K_DOWN):
self.pressedkey= event.key
elif event.key == K_SPACE:
pressedspace = True
elif event.type == KEYUP:
if event.key in (K_RIGHT,K_LEFT,K_UP,K_DOWN):
if self.pressedkey == event.key:
self.pressedkey = None
#elif event.key == K_SPACE:
#pressedspace = False
# only draw explosion if necessary
if self.explosion.update(deltat):
self.explosion_group.update(deltat)
self.explosion_group.draw(self.gamearea)
else:
if pressedspace and self.pressedkey:
# Do explosion
pressedspace=False
bx=self.miner.x
by=self.miner.y
if self.pressedkey == K_LEFT:
bx-=1
if self.pressedkey == K_RIGHT:
bx+=1
if self.pressedkey == K_UP:
by-=1
if self.pressedkey == K_DOWN:
by+=1
if bx>=0 and bx<20 and (by>0 or (by==0 and self.pressedkey == K_DOWN)) and by<20 and self.charges>0:
self.explosion.explode(bx,by)
self.charges-=1
# print "(%d,%d)->(%d,%d) Boom! %d charges left." % (self.miner.x,self.miner.y,bx,by,self.charges)
self.board[by][bx]=' '
self.bg.fill(self.BLACK,(bx*self.SQUARE_SIZE,by*self.SQUARE_SIZE,self.SQUARE_SIZE,self.SQUARE_SIZE))
self.gamearea.blit(self.bg,(0,0))
self.display_charges()
#self.screen.blit(self.digits,(460+(i*24),20),(num,0,24,35))
self.chargesound.play()
for j in range(20):
x=randint(0,19)
y=randint(2,11)
o=self.board[y][x]
a=self.board[y-1][x]
if o==' ' and a=='*':
self.board[y][x]='*'
xpos=x*self.SQUARE_SIZE
ypos=y*self.SQUARE_SIZE
self.bg.blit(self.bgbase,(x*self.SQUARE_SIZE,y*self.SQUARE_SIZE),(xpos,ypos,self.SQUARE_SIZE,self.SQUARE_SIZE))
if self.pressedkey == K_RIGHT and self.miner.can_move():
if tx<19:
tx += 1
if self.pressedkey == K_LEFT and self.miner.can_move():
if tx>0:
tx -= 1
if self.pressedkey == K_UP and self.miner.can_move():
if ty>0:
ty -= 1
else:
if tx==17:
if self.gold!=0:
self.cash+=self.gold*self.charges
self.gold=0
self.kachingsound.play()
self.display_gold()
self.display_cash()
self.yeehawsound.play()
if self.pressedkey == K_DOWN and self.miner.can_move():
if ty<13:
ty += 1
o=self.board[ty][tx]
if (tx!=self.miner.x or ty!=self.miner.y) and o in ' G':
self.miner.set_location(tx,ty)
if o=='G':
self.board[ty][tx]=' '
self.gold += 1
self.bellsound.play()
self.bg.fill(self.BLACK,(self.miner.x*self.SQUARE_SIZE,self.miner.y*self.SQUARE_SIZE,self.SQUARE_SIZE,self.SQUARE_SIZE))
self.gamearea.blit(self.bg,(0,0))
self.display_gold()
self.miner.update_move()
self.miner_group.update(deltat)
self.miner_group.draw(self.gamearea)
if self.miner.y>0:
self.health-=0.25
if self.health<0:
self.health=0
pass
self.display_health()
else:
self.health+=1
if self.health>100:
self.health=100
self.display_health()
self.screen.blit(self.gamearea,(self.BOARD_LEFT,self.BOARD_TOP))
pygame.display.flip()
| mit | 4,087,435,778,622,428,000 | 32.52459 | 140 | 0.522836 | false | 3.374587 | false | false | false |
mpatacchiola/dissecting-reinforcement-learning | src/6/multi-armed-bandit/boltzman_agent_bandit.py | 1 | 4551 | #!/usr/bin/env python
# MIT License
# Copyright (c) 2017 Massimiliano Patacchiola
# https://mpatacchiola.github.io/blog/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#Average cumulated reward: 648.0975
#Std Cumulated Reward: 16.1566083616
#Average utility distribution: [ 0.29889418 0.49732589 0.79993241]
#Average utility RMSE: 0.0016711564118
from multi_armed_bandit import MultiArmedBandit
import numpy as np
import random
def return_rmse(predictions, targets):
"""Return the Root Mean Square error between two arrays
@param predictions an array of prediction values
@param targets an array of target values
@return the RMSE
"""
return np.sqrt(((predictions - targets)**2).mean())
def boltzmann(x, temperature):
"""Compute boltzmann distribution of array x.
@param x the input array
@param temperature
@return the boltzmann array
"""
exponent = np.true_divide(x - np.max(x), temperature)
return np.exp(exponent) / np.sum(np.exp(exponent))
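# Illustrative example (added, not part of the original experiment): the
# temperature controls how strongly the agent favours the highest-utility arm.
# With estimated utilities [0.3, 0.5, 0.8]:
#   boltzmann(np.array([0.3, 0.5, 0.8]), 1.0) -> approx. [0.26, 0.32, 0.43]
#   boltzmann(np.array([0.3, 0.5, 0.8]), 0.1) -> approx. [0.006, 0.047, 0.947]
# so annealing the temperature from 0.1 down to 0.0001 (as main() does below)
# gradually shifts the policy from exploration towards pure exploitation.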
def return_boltzmann_action(temperature, reward_counter_array):
"""Return an action using an epsilon greedy strategy
@return the action selected
"""
tot_arms = reward_counter_array.shape[0]
boltzmann_distribution = boltzmann(reward_counter_array, temperature)
return np.random.choice(tot_arms, p=boltzmann_distribution)
def main():
reward_distribution = [0.3, 0.5, 0.8]
my_bandit = MultiArmedBandit(reward_probability_list=reward_distribution)
temperature_start = 0.1
temperature_stop = 0.0001
epsilon = 0.1
tot_arms = 3
tot_episodes = 2000
tot_steps = 1000
print_every_episodes = 100
cumulated_reward_list = list()
average_utility_array = np.zeros(tot_arms)
temperature_array = np.linspace(temperature_start, temperature_stop, num=tot_steps)
print("Starting Boltzmann agent...")
for episode in range(tot_episodes):
cumulated_reward = 0
reward_counter_array = np.zeros(tot_arms)
action_counter_array = np.full(tot_arms, 1.0e-5)
for step in range(tot_steps):
temperature = temperature_array[step]
action = return_boltzmann_action(temperature, np.true_divide(reward_counter_array, action_counter_array))
reward = my_bandit.step(action)
reward_counter_array[action] += reward
action_counter_array[action] += 1
cumulated_reward += reward
# Append the cumulated reward for this episode in a list
cumulated_reward_list.append(cumulated_reward)
utility_array = np.true_divide(reward_counter_array, action_counter_array)
average_utility_array += utility_array
if episode % print_every_episodes == 0:
print("Episode: " + str(episode))
print("Cumulated Reward: " + str(cumulated_reward))
print("Reward counter: " + str(reward_counter_array))
print("Utility distribution: " + str(utility_array))
print("Utility RMSE: " + str(return_rmse(utility_array, reward_distribution)))
print("")
# Print the average cumulated reward for all the episodes
print("Average cumulated reward: " + str(np.mean(cumulated_reward_list)))
print("Std Cumulated Reward: " + str(np.std(cumulated_reward_list)))
print("Average utility distribution: " + str(average_utility_array / tot_episodes))
print("Average utility RMSE: " + str(return_rmse(average_utility_array/tot_episodes, reward_distribution)))
if __name__ == "__main__":
main()
| mit | -6,810,992,298,096,698,000 | 42.342857 | 117 | 0.702044 | false | 3.802005 | false | false | false |
oesteban/phantomas | phantomas/utils/shm.py | 1 | 6453 | """
This module contains an implementation of the real, antipodally symmetric
Spherical Harmonics basis as defined in [1]_.
References
----------
.. [1] Descoteaux, Maxime, Elaine Angelino, Shaun Fitzgibbons, and Rachid
Deriche. "Regularized, fast, and robust analytical Q-ball imaging"
Magnetic Resonance in Medicine 58, no. 3 (2007): 497-510
"""
import numpy as np
from scipy.misc import factorial
from scipy.special import lpmv, legendre, sph_harm
import hashlib
def angular_function(j, theta, phi):
"""
Returns the values of the spherical harmonics function at given
positions specified by colatitude and aximuthal angles.
Parameters
----------
j : int
The spherical harmonic index.
theta : array-like, shape (K, )
The colatitude angles.
phi : array-like, shape (K, )
The azimuth angles.
Returns
-------
f : array-like, shape (K, )
The value of the function at given positions.
"""
l = sh_degree(j)
m = sh_order(j)
# We follow here reverse convention about theta and phi w.r.t scipy.
sh = sph_harm(np.abs(m), l, phi, theta)
if m < 0:
return np.sqrt(2) * sh.real
if m == 0:
return sh.real
if m > 0:
return np.sqrt(2) * sh.imag
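# Spelled out (added for clarity), the real basis implemented above is
#   Y_j = sqrt(2) * Re(Y_l^|m|)   for m < 0,
#   Y_j = Y_l^0                   for m = 0,
#   Y_j = sqrt(2) * Im(Y_l^m)     for m > 0,
# where Y_l^m is the complex spherical harmonic returned by scipy's sph_harm
# (note the theta/phi convention is reversed with respect to scipy).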
def spherical_function(j, x, y, z):
"""
Returns the values of the spherical harmonics function at given
positions specified by Cartesian coordinates.
Parameters
----------
x, y, z : array-like, shape (K, )
Cartesian coordinates.
Returns
-------
f : array-like, shape (K, )
The value of the function at given positions.
"""
theta = np.arccos(z)
phi = np.arctan2(y, x)
return angular_function(j, theta, phi)
def dimension(order):
r"""
Returns the dimension, :math:`R`, of the real, antipodally symmetric
spherical harmonics basis for a given truncation order.
Parameters
----------
order : int
The trunction order.
Returns
-------
R : int
The dimension of the truncated spherical harmonics basis.
"""
return (order + 1) * (order + 2) / 2
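# Worked example (added for clarity): dimension() counts only even degrees, so
#   order 0 -> 1, order 2 -> 6, order 4 -> 15, order 6 -> 28, order 8 -> 45.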
def j(l, m):
r"""
Returns the flattened spherical harmonics index corresponding to degree
``l`` and order ``m``.
Parameters
----------
l : int
Degree of the spherical harmonics. Should be even.
m : int
Order of the spherical harmonics, should verify :math:`-l \leq m \leq l`
Returns
-------
j : int
The associated index of the spherical harmonic.
"""
if np.abs(m) > l:
raise NameError('SphericalHarmonics.j: m must lie in [-l, l]')
return int(l + m + (2 * np.array(range(0, l, 2)) + 1).sum())
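# Worked example (added for clarity): the flattened index runs over even
# degrees only, so j(0, 0) == 0, j(2, -2) == 1, j(2, 2) == 5 and j(4, -4) == 6,
# consistent with dimension(2) == 6 and dimension(4) == 15.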
def sh_degree(j):
"""
Returns the degree, ``l``, of the spherical harmonic associated to index
``j``.
Parameters
----------
j : int
The flattened index of the spherical harmonic.
Returns
-------
l : int
The associated even degree.
"""
l = 0
while dimension(l) - 1 < j:
l += 2
return l
def sh_order(j):
"""
Returns the order, ``m``, of the spherical harmonic associated to index
``j``.
Parameters
----------
j : int
The flattened index of the spherical harmonic.
Returns
-------
m : int
The associated order.
"""
l = sh_degree(j)
return j + l + 1 - dimension(l)
class _CachedMatrix():
"""
Returns the spherical harmonics observation matrix.
Parameters
----------
theta : array-like, shape (K, )
The colatitude angles.
phi : array-like, shape (K, )
The azimuth angles.
order : int
The spherical harmonics truncation order.
cache : bool
Whether the result should be cached or not.
Returns
-------
H : array-like, shape (K, R)
The spherical harmonics observation matrix.
"""
def __init__(self):
self._cache = {}
def __call__(self, theta, phi, order=4, cache=True):
if not cache:
return self._eval_matrix(theta, phi, order)
key1 = self._hash(theta)
key2 = self._hash(phi)
if (key1, key2, order) in self._cache:
return self._cache[(key1, key2, order)]
else:
val = self._eval_matrix(theta, phi, order)
self._cache[(key1, key2, order)] = val
return val
def _hash(self, np_array):
return hashlib.sha1(np_array).hexdigest()
def _eval_matrix(self, theta, phi, order):
N = theta.shape[0]
dim_sh = dimension(order)
ls = [l for L in range(0, order + 1, 2) for l in [L] * (2*L + 1)]
ms = [m for L in range(0, order + 1, 2) for m in range(-L, L+1)]
ls = np.asarray(ls, dtype=np.int)[np.newaxis, :]
ms = np.asarray(ms, dtype=np.int)[np.newaxis, :]
sh = sph_harm(np.abs(ms), ls,
phi[:, np.newaxis], theta[:, np.newaxis])
H = np.where(ms > 0, sh.imag, sh.real)
H[:, (ms != 0)[0]] *= np.sqrt(2)
return H
matrix = _CachedMatrix()
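# Illustrative usage (added): the module-level ``matrix`` callable caches the
# observation matrix per (theta, phi, order) triple, e.g.
#   H = matrix(theta, phi, order=4)   # shape (K, 15) for K sampled directions
# so repeated calls with the same sampling scheme reuse the cached result.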
def L(order=4):
"""Computees the Laplace-Beltrami operator matrix.
Parameters
----------
order : int
The truncation order (should be an even number).
"""
dim_sh = dimension(order)
L = np.zeros((dim_sh, dim_sh))
for j in range(dim_sh):
l = sh_degree(j)
L[j, j] = - (l * (l + 1))
return L
def P(order=4):
"""Returns the Funk-Radon operator matrix.
Parameters
----------
order : int
The truncation order (should be an even number).
"""
dim_sh = dimension(order)
    P = np.zeros((dim_sh, dim_sh))
for j in range(dim_sh):
l = sh_degree(j)
        P[j, j] = 2 * np.pi * legendre(l)(0)
return P
def convert_to_mrtrix(order):
"""
Returns the linear matrix used to convert coefficients into the mrtrix
convention for spherical harmonics.
Parameters
----------
order : int
Returns
-------
conversion_matrix : array-like, shape (dim_sh, dim_sh)
"""
dim_sh = dimension(order)
conversion_matrix = np.zeros((dim_sh, dim_sh))
for j in range(dim_sh):
l = sh_degree(j)
m = sh_order(j)
if m == 0:
conversion_matrix[j, j] = 1
else:
conversion_matrix[j, j - 2*m] = np.sqrt(2)
return conversion_matrix
| bsd-3-clause | 8,120,573,029,460,389,000 | 23.819231 | 80 | 0.561444 | false | 3.52623 | false | false | false |
sassoftware/mint | mint/db/mirror.py | 1 | 5677 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mint.lib import database
class InboundMirrorsTable(database.KeyedTable):
name = 'InboundMirrors'
key = 'inboundMirrorId'
fields = ['inboundMirrorId', 'targetProjectId', 'sourceLabels',
'sourceUrl', 'sourceAuthType', 'sourceUsername',
'sourcePassword', 'sourceEntitlement',
'mirrorOrder', 'allLabels']
def getIdByHostname(self, hostname):
cu = self.db.cursor()
cu.execute("""
SELECT MIN(inboundMirrorId) FROM InboundMirrors
JOIN Projects ON Projects.projectId = InboundMirrors.targetProjectId
WHERE Projects.fqdn = ?
""", hostname)
return cu.fetchone()[0]
class OutboundMirrorsTable(database.KeyedTable):
name = 'OutboundMirrors'
key = 'outboundMirrorId'
fields = ['outboundMirrorId', 'sourceProjectId', 'targetLabels',
'allLabels', 'recurse', 'matchStrings', 'mirrorOrder',
'useReleases',
]
def __init__(self, db, cfg):
self.cfg = cfg
database.KeyedTable.__init__(self, db)
def get(self, *args, **kwargs):
res = database.KeyedTable.get(self, *args, **kwargs)
if 'allLabels' in res:
res['allLabels'] = bool(res['allLabels'])
if 'recurse' in res:
res['recurse'] = bool(res['recurse'])
return res
def delete(self, id):
cu = self.db.transaction()
try:
cu.execute("""DELETE FROM OutboundMirrors WHERE
outboundMirrorId = ?""", id)
# Cleanup mapping table ourselves if we are using SQLite,
            # as it doesn't know about constraints.
if self.cfg.dbDriver == 'sqlite':
cu.execute("""DELETE FROM OutboundMirrorsUpdateServices WHERE
outboundMirrorId = ?""", id)
except:
self.db.rollback()
raise
else:
self.db.commit()
return True
def getOutboundMirrors(self):
cu = self.db.cursor()
cu.execute("""SELECT outboundMirrorId, sourceProjectId,
targetLabels, allLabels, recurse,
matchStrings, mirrorOrder, fullSync,
useReleases
FROM OutboundMirrors
ORDER by mirrorOrder""")
return [list(x[:3]) + [bool(x[3]), bool(x[4]), x[5].split(), \
x[6], bool(x[7]), bool(x[8])] \
for x in cu.fetchall()]
class OutboundMirrorsUpdateServicesTable(database.DatabaseTable):
name = "OutboundMirrorsUpdateServices"
fields = [ 'updateServiceId', 'outboundMirrorId' ]
def getOutboundMirrorTargets(self, outboundMirrorId):
cu = self.db.cursor()
cu.execute("""SELECT obus.updateServiceId, us.hostname,
us.mirrorUser, us.mirrorPassword, us.description
FROM OutboundMirrorsUpdateServices obus
JOIN
UpdateServices us
USING(updateServiceId)
WHERE outboundMirrorId = ?""", outboundMirrorId)
return [ list(x[:4]) + [x[4] and x[4] or ''] \
for x in cu.fetchall() ]
def setTargets(self, outboundMirrorId, updateServiceIds):
cu = self.db.transaction()
updates = [ (outboundMirrorId, x) for x in updateServiceIds ]
try:
cu.execute("""DELETE FROM OutboundMirrorsUpdateServices
WHERE outboundMirrorId = ?""", outboundMirrorId)
except:
pass # don't worry if there is nothing to do here
try:
cu.executemany("INSERT INTO OutboundMirrorsUpdateServices VALUES(?,?)",
updates)
except:
self.db.rollback()
raise
else:
self.db.commit()
return updateServiceIds
class UpdateServicesTable(database.KeyedTable):
name = 'UpdateServices'
key = 'updateServiceId'
fields = [ 'updateServiceId', 'hostname',
'mirrorUser', 'mirrorPassword', 'description' ]
def __init__(self, db, cfg):
self.cfg = cfg
database.KeyedTable.__init__(self, db)
def getUpdateServiceList(self):
cu = self.db.cursor()
cu.execute("""SELECT %s FROM UpdateServices""" % ', '.join(self.fields))
return [ list(x) for x in cu.fetchall() ]
def delete(self, id):
cu = self.db.transaction()
try:
cu.execute("""DELETE FROM UpdateServices WHERE
updateServiceId = ?""", id)
# Cleanup mapping table ourselves if we are using SQLite,
            # as it doesn't know about constraints.
if self.cfg.dbDriver == 'sqlite':
cu.execute("""DELETE FROM OutboundMirrorsUpdateServices WHERE
updateServiceId = ?""", id)
except:
self.db.rollback()
raise
else:
self.db.commit()
return True
| apache-2.0 | 475,335,617,005,576,450 | 34.93038 | 83 | 0.573014 | false | 4.31711 | false | false | false |
ikben/troposphere | examples/Route53_RoundRobin.py | 1 | 1994 | # Converted from Route53_RoundRobin.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
from troposphere import Join
from troposphere import Parameter, Ref, Template
from troposphere.route53 import RecordSet, RecordSetGroup
t = Template()
t.set_description(
"AWS CloudFormation Sample Template Route53_RoundRobin: Sample template "
"showing how to use weighted round robin (WRR) DNS entried via Amazon "
"Route 53. This contrived sample uses weighted CNAME records to "
"illustrate that the weighting influences the return records. It assumes "
" that you already have a Hosted Zone registered with Amazon Route 53. "
"**WARNING** This template creates an Amazon EC2 instance. "
"You will be billed for the AWS resources used if you create "
"a stack from this template.")
hostedzone = t.add_parameter(Parameter(
"HostedZone",
Description="The DNS name of an existing Amazon Route 53 hosted zone",
Type="String",
))
myDNSRecord = t.add_resource(RecordSetGroup(
"myDNSRecord",
HostedZoneName=Join("", [Ref(hostedzone), "."]),
Comment="Contrived example to redirect to aws.amazon.com 75% of the time "
"and www.amazon.com 25% of the time.",
RecordSets=[
RecordSet(
SetIdentifier=Join(" ", [Ref("AWS::StackName"), "AWS"]),
Name=Join("", [Ref("AWS::StackName"), ".", Ref("AWS::Region"), ".",
Ref(hostedzone), "."]),
Type="CNAME",
TTL="900",
ResourceRecords=["aws.amazon.com"],
Weight="3",
),
RecordSet(
SetIdentifier=Join(" ", [Ref("AWS::StackName"), "Amazon"]),
Name=Join("", [Ref("AWS::StackName"), ".", Ref("AWS::Region"), ".",
Ref(hostedzone), "."]),
Type="CNAME",
TTL="900",
ResourceRecords=["www.amazon.com"],
Weight="1",
),
],
))
print(t.to_json())
| bsd-2-clause | -3,320,936,269,870,272,000 | 35.254545 | 79 | 0.61334 | false | 3.894531 | false | false | false |
cloudtools/troposphere | troposphere/applicationautoscaling.py | 1 | 3322 | from . import AWSObject, AWSProperty
from .validators import boolean, double, integer, positive_integer
class ScalableTargetAction(AWSProperty):
props = {
"MaxCapacity": (integer, False),
"MinCapacity": (integer, False),
}
class ScheduledAction(AWSProperty):
props = {
"EndTime": (str, False),
"ScalableTargetAction": (ScalableTargetAction, False),
"Schedule": (str, True),
"ScheduledActionName": (str, True),
"StartTime": (str, False),
"Timezone": (str, False),
}
class SuspendedState(AWSProperty):
props = {
"DynamicScalingInSuspended": (boolean, False),
"DynamicScalingOutSuspended": (boolean, False),
"ScheduledScalingSuspended": (boolean, False),
}
class ScalableTarget(AWSObject):
resource_type = "AWS::ApplicationAutoScaling::ScalableTarget"
props = {
"MaxCapacity": (integer, True),
"MinCapacity": (integer, True),
"ResourceId": (str, True),
"RoleARN": (str, True),
"ScalableDimension": (str, True),
"ScheduledActions": ([ScheduledAction], False),
"ServiceNamespace": (str, True),
"SuspendedState": (SuspendedState, False),
}
class StepAdjustment(AWSProperty):
props = {
"MetricIntervalLowerBound": (integer, False),
"MetricIntervalUpperBound": (integer, False),
"ScalingAdjustment": (integer, True),
}
class StepScalingPolicyConfiguration(AWSProperty):
props = {
"AdjustmentType": (str, False),
"Cooldown": (integer, False),
"MetricAggregationType": (str, False),
"MinAdjustmentMagnitude": (integer, False),
"StepAdjustments": ([StepAdjustment], False),
}
class MetricDimension(AWSProperty):
props = {
"Name": (str, True),
"Value": (str, True),
}
class CustomizedMetricSpecification(AWSProperty):
props = {
"Dimensions": ([MetricDimension], False),
"MetricName": (str, False),
"Namespace": (str, False),
"Statistic": (str, False),
"Unit": (str, True),
}
class PredefinedMetricSpecification(AWSProperty):
props = {
"PredefinedMetricType": (str, True),
"ResourceLabel": (str, False),
}
class TargetTrackingScalingPolicyConfiguration(AWSProperty):
props = {
"CustomizedMetricSpecification": (CustomizedMetricSpecification, False),
"DisableScaleIn": (boolean, False),
"PredefinedMetricSpecification": (PredefinedMetricSpecification, False),
"ScaleInCooldown": (positive_integer, False),
"ScaleOutCooldown": (positive_integer, False),
"TargetValue": (double, True),
}
class ScalingPolicy(AWSObject):
resource_type = "AWS::ApplicationAutoScaling::ScalingPolicy"
props = {
"PolicyName": (str, True),
"PolicyType": (str, False),
"ResourceId": (str, False),
"ScalableDimension": (str, False),
"ServiceNamespace": (str, False),
"ScalingTargetId": (str, False),
"StepScalingPolicyConfiguration": (
StepScalingPolicyConfiguration,
False,
),
"TargetTrackingScalingPolicyConfiguration": (
TargetTrackingScalingPolicyConfiguration,
False,
),
}
| bsd-2-clause | -1,866,505,902,557,993,200 | 27.393162 | 80 | 0.617399 | false | 4.111386 | true | false | false |
sportorg/pysport | sportorg/modules/winorient/winorient_server.py | 1 | 2292 | import datetime
from socket import *
from sportorg.utils.time import time_to_hhmmss
"""
Format of WDB data package
- length is 1772 bytes
1) 36b text block at the beginning
2 4132500 0 0 3974600\n
bib - finish_time - disqual_status - 0 - start_time
2) binary part
bytes 128-131 - card number
bytes 136-139 - qty of punches
bytes 144-147 - start in card
bytes 152-155 - finish in card
starting from b172: 8b blocks * 200
- byte 1 control number
- bytes 4-7 punch time
"""
def int_to_time(value):
""" convert value from 1/100 s to time """
today = datetime.datetime.now()
ret = datetime.datetime(
today.year,
today.month,
today.day,
value // 360000 % 24,
(value % 360000) // 6000,
(value % 6000) // 100,
(value % 100) * 10000,
)
return ret
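# Worked example (added for clarity): in the sample packet header quoted above,
# "2 4132500 0 0 3974600", int_to_time(4132500) decodes the finish time to
# 11:28:45 and int_to_time(3974600) decodes the start time to 11:02:26
# (values are hundredths of a second counted from midnight of the current day).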
host = 'localhost'
port = 1212
addr = (host, port)
udp_socket = socket(AF_INET, SOCK_DGRAM)
udp_socket.bind(addr)
# main loop
while True:
print('wait data...')
# recvfrom - receiving of data
conn, addr = udp_socket.recvfrom(1772)
print('client addr: ', addr)
print('data: ', conn)
# string = ''
# for i in conn:
# string += str( hex(i)) + '-'
# print(string)
text_array = bytes(conn[0:34]).decode().split()
bib = text_array[0]
result = int_to_time(int(text_array[1]))
status = text_array[2]
start = int_to_time(int(text_array[4]))
byteorder = 'little'
punch_qty = int.from_bytes(conn[136:140], byteorder)
card_start = int_to_time(int.from_bytes(conn[144:148], byteorder))
card_finish = int_to_time(int.from_bytes(conn[152:156], byteorder))
init_offset = 172
punches = []
for i in range(punch_qty):
cp = int.from_bytes(
conn[init_offset + i * 8 : init_offset + i * 8 + 1], byteorder
)
time = int_to_time(
int.from_bytes(
conn[init_offset + i * 8 + 4 : init_offset + i * 8 + 8], byteorder
)
)
punches.append((cp, time_to_hhmmss(time)))
print('bib=' + bib + ' result=' + time_to_hhmmss(result) + ' punches=')
print(punches)
    # sendto - response
udp_socket.sendto(b'message received by the server', addr)
# udp_socket.close()
| gpl-3.0 | -1,973,854,375,864,043,800 | 23.126316 | 82 | 0.585951 | false | 3.187761 | false | false | false |
googleapis/python-compute | google/cloud/compute_v1/services/target_pools/pagers.py | 1 | 5656 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetPoolAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetPoolAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.TargetPoolAggregatedList],
request: compute.AggregatedListTargetPoolsRequest,
response: compute.TargetPoolAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListTargetPoolsRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetPoolAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.AggregatedListTargetPoolsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.TargetPoolAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[Tuple[str, compute.TargetPoolsScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.TargetPoolsScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetPoolList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetPoolList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.TargetPoolList],
request: compute.ListTargetPoolsRequest,
response: compute.TargetPoolList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListTargetPoolsRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetPoolList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListTargetPoolsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.TargetPoolList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[compute.TargetPool]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | -9,127,447,600,097,576,000 | 35.727273 | 85 | 0.650813 | false | 4.3811 | false | false | false |
keyme/visual-diff | gui.py | 1 | 4290 | #!/usr/bin/python3
from functools import partial
from math import ceil
import tkinter as tk
from zoom_map import ZoomMap
class _Context(tk.Text):
CONTEXT_COUNT = 3 # Lines to display before/after the current one
# TODO: What about files with over 99,999 lines?
LINE_NUMBER_WIDTH = 5 # Number of characters to allocate for line numbers
PRELUDE_WIDTH = LINE_NUMBER_WIDTH + 2 # Line number, colon, space
# TODO: What about files with very long lines? They currently wrap around to
# the next line and push later context out of the widget. Should we truncate
# them instead? and if so, should we change which part gets cut based on the
# location of the token within the line?
TEXT_WIDTH = 80
def __init__(self, tk_parent, data, zoom_map):
height = 2 * self.CONTEXT_COUNT + 1
width = self.PRELUDE_WIDTH + self.TEXT_WIDTH
super().__init__(tk_parent, width=width, height=height,
state=tk.DISABLED, font="TkFixedFont")
self.pack()
# TODO: Use a NamedTuple?
self._tokens, self._lines, self._boundaries = data
self._zoom_map = zoom_map
def display(self, pixel):
# The zoom level is equivalent to the number of tokens described by the
# current pixel in the map.
zoom_level = self._zoom_map.zoom_level
first_token_index = int(pixel * zoom_level)
last_token_index = min(first_token_index + ceil(zoom_level),
len(self._boundaries)) - 1
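        # Example (added for clarity): with a zoom level of 4 tokens per map
        # pixel, pixel 10 covers token indices 40..43, i.e. each map pixel
        # summarises a contiguous run of ceil(zoom_level) tokens.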
if not (0 <= first_token_index < len(self._boundaries)):
# TODO: Restrict panning so that we can't go outside the image.
return # We're out of range of the image. Skip it.
line_number = self._boundaries[first_token_index][0][0]
# Recall that line_number comes from the token module, which starts
# counting at 1 instead of 0.
start = line_number - self.CONTEXT_COUNT - 1
end = line_number + self.CONTEXT_COUNT
lines = ["{:>{}}: {}".format(i + 1, self.LINE_NUMBER_WIDTH,
self._lines[i])
if 0 <= i < len(self._lines) else ""
for i in range(start, end)]
text = "\n".join(lines)
# Update the displayed code
self.configure(state=tk.NORMAL)
self.delete("1.0", tk.END)
self.insert(tk.INSERT, text)
# Highlight the tokens of interest...
(ar, ac) = self._boundaries[first_token_index][0]
(br, bc) = self._boundaries[last_token_index][1]
self.tag_add("token",
"{}.{}".format(self.CONTEXT_COUNT + 1,
ac + self.PRELUDE_WIDTH),
"{}.{}".format(self.CONTEXT_COUNT + 1 + br - ar,
bc + self.PRELUDE_WIDTH))
self.tag_config("token", background="yellow")
# ...but don't highlight the line numbers on multi-line tokens.
for i in range(self.CONTEXT_COUNT):
line = i + self.CONTEXT_COUNT + 2
self.tag_remove("token",
"{}.{}".format(line, 0),
"{}.{}".format(line, self.PRELUDE_WIDTH))
# Remember to disable editing again when we're done, so users can't
# modify the code we're displaying!
self.configure(state=tk.DISABLED)
class _Gui(tk.Frame):
def __init__(self, matrix, data_a, data_b, root):
super().__init__(root)
self.pack(fill=tk.BOTH, expand="true")
self._map = ZoomMap(self, matrix)
self._contexts = [_Context(self, data, self._map)
for data in (data_a, data_b)]
[self._map.bind(event, self._on_motion)
for event in ["<Motion>", "<Enter>"]]
def _on_motion(self, event):
# We're using (row, col) format, so the first one changes with Y.
self._contexts[0].display(self._map.canvasy(event.y))
self._contexts[1].display(self._map.canvasx(event.x))
def launch(matrix, data_a, data_b):
root = tk.Tk()
def _quit(event):
root.destroy()
[root.bind("<Control-{}>".format(char), _quit) for char in "wWqQ"]
gui = _Gui(matrix, data_a, data_b, root)
root.mainloop()
| gpl-3.0 | -6,514,295,874,511,223,000 | 39.471698 | 80 | 0.571329 | false | 3.816726 | false | false | false |
salexkidd/restframework-definable-serializer | definable_serializer/tests/for_test/migrations/0001_initial.py | 1 | 1872 | # Generated by Django 2.0 on 2017-12-04 00:09
import definable_serializer.models.compat
import definable_serializer.models.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', definable_serializer.models.compat.YAMLField()),
('create_at', models.DateTimeField(auto_now_add=True)),
('update_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='Paper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('definition', definable_serializer.models.fields.DefinableSerializerByYAMLField()),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='answer',
name='paper',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='for_test.Paper'),
),
migrations.AddField(
model_name='answer',
name='respondent',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='answer',
unique_together={('paper', 'respondent')},
),
]
| mit | 4,701,542,029,549,797,000 | 33.036364 | 114 | 0.573184 | false | 4.425532 | false | false | false |
kamar42/merdeka-backend | merdeka/apps/mdk/views.py | 1 | 2179 | from django.shortcuts import render
from merdeka.apps.utils.func import find_model, make_response, json_response, jsonify, set_data, set_status, set_message, set_child
from .models import GoodsChilds
def api_view(request, **kwargs):
resp = make_response()
m = kwargs.get('model', None)
# drop if model is empty
if m is None:
set_status(resp, 'failed')
set_message(resp, 'Model Not Found')
return json_response(resp)
if '_' in m:
_model = ''
for _m in m.split('_'):
_model += _m[:1].upper() + _m[1:].lower()
else:
_model = m[:1].upper() + m[1:].lower()
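    # Example (added for clarity): a ``model`` kwarg of "goods_childs" becomes
    # "GoodsChilds", while a single word such as "data" becomes "Data", which
    # is then looked up among the mdk models by find_model() below.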
model = find_model('mdk', _model)
    # drop if model was not found
if model is None:
set_status(resp, 'failed')
set_message(resp, 'Model Not Found')
return json_response(resp)
q = request.GET.get('slug', None)
records = model.objects.all()
if q:
records = model.objects.filter(unique_name=q)
# filtering goods and goodschild
if _model == 'Goods':
g = request.GET.get('goods', None)
if g:
records = model.objects.filter(commodity_id=g)
# c = GoodsChilds.objects.filter(goods=records)
# set_child(resp, [dict(
# id=_c.pk,
# name=_c.name,
# slug=_c.unique_name
# ) for _c in c])
elif _model == 'GoodsChilds':
g = request.GET.get('goods', None)
if g:
records = model.objects.filter(goods_id=g)
set_message(resp, 'We found '+str(records.count())+' records.')
if _model == 'Data':
set_data(resp, [dict(
id=r.pk,
commodity=r.commodity.name,
goods=r.goods.name,
goods_child=r.goods_child.name,
price=str(r.price),
unit=r.unit.name,
venue=r.venue.name,
province=r.province.name,
city=r.city.name
) for r in records])
else:
set_data(resp, [dict(
id=r.pk,
name=r.name,
slug=r.unique_name
) for r in records])
return json_response(resp)
| mit | 2,338,286,988,612,638,700 | 29.690141 | 131 | 0.535108 | false | 3.404688 | false | false | false |
DigitalCampus/django-oppia | tests/profile/models/test_models.py | 1 | 9519 | from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from oppia.test import OppiaTestCase
from profile.models import UserProfile, CustomField, UserProfileCustomField
class ProfileCustomFieldsTest(OppiaTestCase):
fixtures = ['tests/test_user.json',
'tests/test_oppia.json',
'tests/test_quiz.json',
'tests/test_course_permissions.json']
VALUE_STR_DEFAULT = "my string"
def test_custom_field_model_name(self):
custom_field = CustomField(
id='my_cf_key',
label='String',
required=True,
type='str')
custom_field.save()
self.assertEqual(str(custom_field), 'my_cf_key')
def test_teacher_only(self):
user = self.normal_user
self.assertFalse(user.userprofile.is_teacher_only())
'''
Upload permissions
'''
def test_get_can_upload_admin(self):
profile = UserProfile.objects.get(user=self.admin_user)
self.assertEqual(profile.get_can_upload(), True)
def test_get_can_upload_staff(self):
profile = UserProfile.objects.get(user=self.staff_user)
self.assertEqual(profile.get_can_upload(), True)
def test_get_can_upload_teacher(self):
profile = UserProfile.objects.get(user=self.teacher_user)
self.assertEqual(profile.get_can_upload(), True)
def test_get_can_upload_user(self):
profile = UserProfile.objects.get(user=self.normal_user)
self.assertEqual(profile.get_can_upload(), False)
def test_get_can_upload_activity_log_admin(self):
profile = UserProfile.objects.get(user=self.admin_user)
self.assertEqual(profile.get_can_upload_activitylog(), True)
def test_get_can_upload_activity_log_staff(self):
profile = UserProfile.objects.get(user=self.staff_user)
self.assertEqual(profile.get_can_upload_activitylog(), True)
def test_get_can_upload_activity_log_teacher(self):
profile = UserProfile.objects.get(user=self.teacher_user)
self.assertEqual(profile.get_can_upload_activitylog(), False)
def test_get_can_upload_activity_log_user(self):
profile = UserProfile.objects.get(user=self.normal_user)
self.assertEqual(profile.get_can_upload_activitylog(), False)
'''
Custom fields
'''
def test_user_custom_field_model_name(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=self.VALUE_STR_DEFAULT)
upcf.save()
self.assertEqual('str: demo', str(upcf))
# test get_value string
def test_custom_field_get_value_str(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=self.VALUE_STR_DEFAULT)
upcf.save()
self.assertEqual(upcf.get_value(), self.VALUE_STR_DEFAULT)
self.assertNotEqual(upcf.get_value(), True)
self.assertNotEqual(upcf.get_value(), False)
self.assertNotEqual(upcf.get_value(), None)
self.assertNotEqual(upcf.get_value(), 123)
# test get_value int
def test_custom_field_get_value_int(self):
custom_field = CustomField(
id='int',
label='Integer',
required=True,
type='int')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_int=123)
upcf.save()
self.assertEqual(upcf.get_value(), 123)
self.assertNotEqual(upcf.get_value(), "123")
self.assertNotEqual(upcf.get_value(), True)
self.assertNotEqual(upcf.get_value(), False)
self.assertNotEqual(upcf.get_value(), None)
# get get value bool
def test_custom_field_get_value_bool(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=True)
upcf.save()
self.assertEqual(upcf.get_value(), True)
self.assertNotEqual(upcf.get_value(), "True")
self.assertNotEqual(upcf.get_value(), 123)
self.assertNotEqual(upcf.get_value(), False)
self.assertNotEqual(upcf.get_value(), None)
# test multiple rows in userprofilecustomfield
def test_custom_field_multiple_rows(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=self.VALUE_STR_DEFAULT)
upcf.save()
with self.assertRaises(IntegrityError):
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str="my other string")
upcf.save()
def test_wrong_type_bool_in_int(self):
custom_field = CustomField(
id='int',
label='Integer',
required=True,
type='int')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_int=True)
upcf.save()
self.assertEqual(True, upcf.get_value())
upcf.value_int = False
upcf.save()
self.assertEqual(False, upcf.get_value())
def test_wrong_type_bool_in_str(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=True)
upcf.save()
self.assertEqual(True, upcf.get_value())
upcf.value_str = False
upcf.save()
self.assertEqual(False, upcf.get_value())
def test_wrong_type_int_in_bool_123(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
with self.assertRaises(ValidationError):
UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=123).save()
def test_wrong_type_int_in_bool_0(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=0)
upcf.save()
self.assertEqual(0, upcf.get_value())
def test_wrong_type_int_in_bool_1(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=1)
upcf.save()
self.assertEqual(1, upcf.get_value())
def test_wrong_type_int_in_str(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=123)
upcf.save()
self.assertEqual(123, upcf.get_value())
def test_wrong_type_str_in_bool(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
with self.assertRaises(ValidationError):
UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=self.VALUE_STR_DEFAULT).save()
def test_wrong_type_str_in_int(self):
custom_field = CustomField(
id='int',
label='Integer',
required=True,
type='int')
custom_field.save()
with self.assertRaises(ValueError):
UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_int=self.VALUE_STR_DEFAULT).save()
| gpl-3.0 | 2,434,349,495,123,271,000 | 35.193916 | 76 | 0.542809 | false | 4.19709 | true | false | false |
matrix-org/synapse | synapse/config/voip.py | 1 | 2161 | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class VoipConfig(Config):
section = "voip"
def read_config(self, config, **kwargs):
self.turn_uris = config.get("turn_uris", [])
self.turn_shared_secret = config.get("turn_shared_secret")
self.turn_username = config.get("turn_username")
self.turn_password = config.get("turn_password")
self.turn_user_lifetime = self.parse_duration(
config.get("turn_user_lifetime", "1h")
)
self.turn_allow_guests = config.get("turn_allow_guests", True)
def generate_config_section(self, **kwargs):
return """\
## TURN ##
# The public URIs of the TURN server to give to clients
#
#turn_uris: []
# The shared secret used to compute passwords for the TURN server
#
#turn_shared_secret: "YOUR_SHARED_SECRET"
# The Username and password if the TURN server needs them and
# does not use a token
#
#turn_username: "TURNSERVER_USERNAME"
#turn_password: "TURNSERVER_PASSWORD"
# How long generated TURN credentials last
#
#turn_user_lifetime: 1h
# Whether guests should be allowed to use the TURN server.
# This defaults to True, otherwise VoIP will be unreliable for guests.
# However, it does introduce a slight security risk as it allows users to
# connect to arbitrary endpoints without having first signed up for a
# valid account (e.g. by passing a CAPTCHA).
#
#turn_allow_guests: true
"""
| apache-2.0 | -7,243,922,222,192,386,000 | 35.016667 | 81 | 0.652938 | false | 4.001852 | true | false | false |
cognitivefashion/cf-sdk-python | dominant_colors_product.py | 1 | 1998 | #------------------------------------------------------------------------------
# Get the dominant colors for an image in the catalog.
# GET /v1/catalog/{catalog_name}/dominant_colors/{id}/{image_id}
#------------------------------------------------------------------------------
import os
import json
import requests
from urlparse import urljoin
from pprint import pprint
from props import *
# Replace this with the custom url generated for you.
api_gateway_url = props['api_gateway_url']
# Pass the api key into the header.
headers = {'X-Api-Key': props['X-Api-Key']}
# Query parameters.
params = {}
# Optional parameters.
#params['fraction_pixels_threshold'] = 0.1
# Path parameters
catalog_name = props['catalog_name']
id ='SHRES16AWFSDR9346B'
image_id = '1'
api_endpoint = '/v1/catalog/%s/dominant_colors/%s/%s'%(catalog_name,id,image_id)
url = urljoin(api_gateway_url,api_endpoint)
response = requests.get(url,
headers=headers,
params=params)
print response.status_code
pprint(response.json())
# Human friendly response.
results = response.json()
print('[image url ] %s'%(results['image_url']))
image_location = '%s?api_key=%s'%(urljoin(api_gateway_url,results['image_location']),
props['X-Api-Key'])
print('[original image ] %s'%(image_location))
image_location = '%s&api_key=%s'%(urljoin(api_gateway_url,results['bounding_box']['image_location']),
props['X-Api-Key'])
print('[bounding box ] %s'%(image_location))
for color_info in results['dominant_colors']:
print('[dominant colors] %s - %1.2f - %s - %s - %s - %s'%(color_info['hex'],
color_info['fraction_pixels'],
color_info['name'],
color_info['entrylevel_name'],
color_info['universal_name'],
color_info['pantone_id']))
| apache-2.0 | -561,431,765,489,356,350 | 31.754098 | 101 | 0.544044 | false | 3.842308 | false | false | false |
esthermm/enco | enco_category/models/purchase_report.py | 1 | 4459 | # -*- coding: utf-8 -*-
# © 2017 Esther Martín - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, models
from openerp import tools
class PurchaseReport(models.Model):
_inherit = 'purchase.report'
purchase_categ_id = fields.Many2one(
comodel_name='crm.case.categ', readonly=True)
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
s.period_ack as period_ack,
s.purchase_categ_id as purchase_categ_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit/cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit/cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*l.price_unit/cr.rate)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor,
s.purchase_categ_id,
s.period_ack
)
""")
| gpl-3.0 | 3,419,847,582,266,955,300 | 46.414894 | 163 | 0.475656 | false | 3.845557 | false | false | false |
oubiwann/myriad-worlds | myriad/story.py | 1 | 3923 | import yaml
from myriad.character import Player
from myriad.world import Map, World
from myriad.item import Item, OpenableItem, OpenableReadableItem, ReadableItem
# XXX maybe the story object should have a map attribute assigned based on
# story type... e.g., provided ASCII map, procedurally generated map, etc.
class Story(object):
def __init__(self, filename):
self.storyFile = filename
self.stream = open(self.storyFile)
self.data = yaml.load(self.stream)
# XXX map should be an attribute of the world
self.map = Map(self.data.get("map"))
self.world = World()
self.world.setScapes(self.map.getScapes())
# XXX what form do these take when the map is procedural?
self.createItems()
self.updateScapes()
self.createCharacters()
def _getItem(self, itemName):
for item in self.data.get("items"):
if item.get("name") == itemName:
return item
def getMap(self):
return self.map.getData()
def createItems(self):
itemsData = self.data.get("items")
if not itemsData:
return
for itemData in itemsData:
self.createItem(itemData)
def updateScapes(self):
scapesData = self.data.get("scapes")
if not scapesData:
return
for scapeData in scapesData:
scape = self.world.scapes.get(scapeData.get("room-key"))
startingPlace = scapeData.get("startingPlace")
if startingPlace:
scape.startingPlace = True
self.setStartingPlace(scape)
scape.name = scapeData.get("name")
self.world.scapes[scape.name] = scape
scape.desc = scapeData.get("description")
scape.gameOver = scapeData.get("gameOver")
itemsList = scapeData.get("items")
if not itemsList:
continue
for itemName in itemsList:
self.processItem(itemName, scape)
def createItem(self, itemData):
items = []
if itemData.has_key("items"):
itemNames = itemData.pop("items")
items = [Item.items[x] for x in itemNames]
if itemData.get("isOpenable") and itemData.get("isReadable"):
itemData.pop("isReadable")
item = OpenableReadableItem(itemData.get("name"), items)
elif itemData.get("isOpenable"):
item = OpenableItem(itemData.get("name"), items)
elif itemData.get("isReadable"):
itemData.pop("isReadable")
item = ReadableItem(**itemData)
else:
item = Item(**itemData)
return item
def processItem(self, itemName, scape):
# XXX I don't like the way that all items are tracked on the Item
# object... it doesn't make sense that every item in the world would
# know about all other items in the world. Once that's fixed, we just
# use the scape object's addItem method
self.world.putItemInScape(itemName, scape)
def setStartingPlace(self, tile):
self.map.setStartingPlace(tile)
def getStartingPlace(self):
return self.map.getStartingPlace()
def createCharacters(self):
charactersData = self.data.get("characters")
if not charactersData:
return
for characterData in charactersData:
if characterData.get("isPlayer") == True:
player = Player(characterData.get("name"))
for itemName in characterData.get("inventory"):
player.take(Item.items[itemName])
self.world.placeCharacterInScape(
player, self.getStartingPlace(), isPlayer=True)
def createLayers(self):
layersData = self.data.get("layers")
if not layersData:
return
for layerData in layersData:
pass
| mit | -3,348,018,576,291,918,000 | 35.663551 | 78 | 0.604384 | false | 4.103556 | false | false | false |
xflows/clowdflows-backend | workflows/api/serializers.py | 1 | 14887 | import json
from django.contrib.auth.models import User
from django.db.models import Prefetch
from django.template.loader import render_to_string
from rest_framework import serializers
from rest_framework.reverse import reverse
from mothra.settings import STATIC_URL, MEDIA_URL
from streams.models import Stream
from workflows.models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('username',)
class AbstractOptionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = AbstractOption
fields = ('name', 'value')
read_only_fields = ('name', 'value')
class AbstractInputSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField()
options = AbstractOptionSerializer(many=True, read_only=True)
class Meta:
model = AbstractInput
fields = (
'id', 'name', 'short_name', 'description', 'variable', 'required', 'parameter', 'multi', 'default',
'parameter_type',
'order', 'options')
read_only_fields = (
'id', 'name', 'short_name', 'description', 'variable', 'required', 'parameter', 'multi', 'default',
'parameter_type',
'order', 'options')
class AbstractOutputSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField()
class Meta:
model = AbstractOutput
fields = ('id', 'name', 'short_name', 'description', 'variable', 'order')
read_only_fields = ('id', 'name', 'short_name', 'description', 'variable', 'order')
class AbstractWidgetSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField()
inputs = AbstractInputSerializer(many=True, read_only=True)
outputs = AbstractOutputSerializer(many=True, read_only=True)
cfpackage = serializers.SerializerMethodField()
visualization = serializers.SerializerMethodField()
def get_cfpackage(self, obj):
return obj.package
def get_visualization(self, obj):
return obj.visualization_view != ''
class Meta:
model = AbstractWidget
fields = ('id', 'name', 'interactive', 'visualization', 'static_image', 'order', 'outputs', 'inputs', 'cfpackage', 'description', 'always_save_results')
read_only_fields = ('id', 'name', 'interactive', 'visualization', 'static_image', 'order', 'outputs', 'inputs', 'cfpackage', 'always_save_results')
class CategorySerializer(serializers.HyperlinkedModelSerializer):
widgets = AbstractWidgetSerializer(many=True, read_only=True)
class Meta:
model = Category
fields = ('name', 'user', 'order', 'children', 'widgets')
read_only_fields = ('name', 'user', 'order', 'children', 'widgets')
CategorySerializer._declared_fields['children'] = CategorySerializer(many=True, read_only=True)
class ConnectionSerializer(serializers.HyperlinkedModelSerializer):
output_widget = serializers.SerializerMethodField()
input_widget = serializers.SerializerMethodField()
def get_output_widget(self, obj):
request = self.context['request']
return request.build_absolute_uri(reverse('widget-detail', kwargs={'pk': obj.output.widget_id}))
# return WidgetListSerializer(obj.output.widget, context=self.context).data["url"]
def get_input_widget(self, obj):
request = self.context['request']
return request.build_absolute_uri(reverse('widget-detail', kwargs={'pk': obj.input.widget_id}))
# return WidgetListSerializer(obj.input.widget, context=self.context).data["url"]
class Meta:
model = Connection
class OptionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Option
fields = ('name', 'value')
class InputSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
deserialized_value = serializers.SerializerMethodField()
options = OptionSerializer(many=True, read_only=True)
abstract_input_id = serializers.SerializerMethodField()
def get_deserialized_value(self, obj):
if obj.parameter:
try:
json.dumps(obj.value)
except:
return repr(obj.value)
else:
return obj.value
else:
return ''
def get_abstract_input_id(self, obj):
return obj.abstract_input_id
class Meta:
model = Input
exclude = ('value', 'abstract_input')
read_only_fields = ('id', 'url', 'widget')
class OutputSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
abstract_output_id = serializers.SerializerMethodField()
def get_abstract_output_id(self, obj):
return obj.abstract_output_id
class Meta:
model = Output
exclude = ('value', 'abstract_output')
read_only_fields = ('id', 'url', 'widget')
def get_workflow_preview(request, obj):
min_x = 10000
min_y = 10000
max_x = 0
max_y = 0
max_width = 300
max_height = 200
normalized_values = {}
obj.normalized_widgets = obj.widgets.all()
obj.unique_connections = []
obj.pairs = []
for widget in obj.normalized_widgets:
if widget.x > max_x:
max_x = widget.x
if widget.x < min_x:
min_x = widget.x
if widget.y > max_y:
max_y = widget.y
if widget.y < min_y:
min_y = widget.y
for widget in obj.normalized_widgets:
x = (widget.x - min_x) * 1.0
y = (widget.y - min_y) * 1.0
normalized_max_x = max_x - min_x
if x == 0:
x = 1
if y == 0:
y = 1
if normalized_max_x == 0:
normalized_max_x = x * 2
normalized_max_y = max_y - min_y
if normalized_max_y == 0:
normalized_max_y = y * 2
widget.norm_x = (x / normalized_max_x) * max_width
widget.norm_y = (y / normalized_max_y) * max_height
normalized_values[widget.id] = (widget.norm_x, widget.norm_y)
for c in obj.connections.all():
if not (c.output.widget_id, c.input.widget_id) in obj.pairs:
obj.pairs.append((c.output.widget_id, c.input.widget_id))
for pair in obj.pairs:
conn = {}
conn['x1'] = normalized_values[pair[0]][0] + 40
conn['y1'] = normalized_values[pair[0]][1] + 15
conn['x2'] = normalized_values[pair[1]][0] - 10
conn['y2'] = normalized_values[pair[1]][1] + 15
obj.unique_connections.append(conn)
base_url = request.build_absolute_uri('/')[:-1]
images_url = '{}{}'.format(base_url, STATIC_URL)
preview_html = render_to_string('preview.html', {'w': obj, 'images_url': images_url})
return preview_html
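# Worked example of the normalization above (numbers invented for
# illustration): with widget x values spanning [100, 500] and y values
# spanning [80, 280], a widget at (300, 180) gets
#   norm_x = ((300 - 100) / 400) * 300 = 150
#   norm_y = ((180 - 80) / 200) * 200 = 100
# i.e. it lands in the middle of the 300x200 preview box that preview.html
# renders; connections are then drawn between these normalized positions.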
class StreamSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
last_executed = serializers.DateTimeField(read_only=True)
period = serializers.IntegerField()
active = serializers.BooleanField(read_only=True)
class Meta:
model = Stream
class WorkflowListSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
user = UserSerializer(read_only=True)
stream = StreamSerializer()
is_subprocess = serializers.SerializerMethodField()
is_public = serializers.BooleanField(source='public')
can_be_streaming = serializers.SerializerMethodField()
def get_is_subprocess(self, obj):
        if obj.widget is None:
return False
else:
return True
def get_can_be_streaming(self, obj):
return obj.can_be_streaming()
def get_stream_active(self, obj):
return None
class Meta:
model = Workflow
exclude = ('public',)
class WorkflowPreviewSerializer(WorkflowListSerializer):
preview = serializers.SerializerMethodField()
def get_preview(self, obj):
return get_workflow_preview(self.context['request'], obj)
class WidgetSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
inputs = InputSerializer(many=True, read_only=True)
outputs = OutputSerializer(many=True, read_only=True)
description = serializers.CharField(source='abstract_widget.description', read_only=True)
icon = serializers.SerializerMethodField()
must_save = serializers.SerializerMethodField()
can_interact = serializers.SerializerMethodField()
workflow_link = serializers.HyperlinkedRelatedField(
read_only=True,
view_name='workflow-detail'
)
abstract_widget = serializers.PrimaryKeyRelatedField(queryset=AbstractWidget.objects.all(), allow_null=True)
def create(self, validated_data):
'''
Overrides the default create method to support nested creates
'''
w = Widget.objects.create(**validated_data)
aw = w.abstract_widget
input_order, param_order = 0, 0
for i in aw.inputs.all():
j = Input()
j.name = i.name
j.short_name = i.short_name
j.description = i.description
j.variable = i.variable
j.widget = w
j.required = i.required
j.parameter = i.parameter
j.value = None
j.abstract_input = i
if (i.parameter):
param_order += 1
j.order = param_order
else:
input_order += 1
j.order = input_order
if not i.multi:
j.value = i.default
j.parameter_type = i.parameter_type
if i.multi:
j.multi_id = i.id
j.save()
for k in i.options.all():
o = Option()
o.name = k.name
o.value = k.value
o.input = j
o.save()
outputOrder = 0
for i in aw.outputs.all():
j = Output()
j.name = i.name
j.short_name = i.short_name
j.description = i.description
j.variable = i.variable
j.widget = w
j.abstract_output = i
outputOrder += 1
j.order = outputOrder
j.save()
w.defered_outputs = w.outputs.defer("value").all()
w.defered_inputs = w.inputs.defer("value").all()
return w
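    # Illustrative usage only -- the field values are assumptions, not taken
    # from the project's views or tests: creating a widget through this
    # serializer clones every input, option and output of the chosen abstract
    # widget onto the new instance.
    #
    #   serializer = WidgetSerializer(
    #       data={'workflow': workflow_url, 'abstract_widget': aw.pk,
    #             'name': aw.name, 'x': 0, 'y': 0},
    #       context={'request': request})
    #   serializer.is_valid(raise_exception=True)
    #   widget = serializer.save()   # create() above adds Inputs/Outputs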
def update(self, widget, validated_data):
'''
Overrides the default update method to support nested creates
'''
# Ignore inputs and outputs on patch - we allow only nested creates
if 'inputs' in validated_data:
validated_data.pop('inputs')
if 'outputs' in validated_data:
validated_data.pop('outputs')
widget, _ = Widget.objects.update_or_create(pk=widget.pk, defaults=validated_data)
if widget.type == 'subprocess':
widget.workflow_link.name = widget.name
widget.workflow_link.save()
return widget
def get_must_save(self, widget):
'''
        Some widgets always require their inputs and outputs to be saved.
'''
must_save = False
if widget.abstract_widget:
must_save = widget.abstract_widget.interactive or widget.is_visualization() or widget.abstract_widget.always_save_results
return must_save
def get_can_interact(self, widget):
can_interact = False
if widget.abstract_widget:
can_interact = widget.abstract_widget.interactive
return can_interact
def get_icon(self, widget):
full_path_tokens = self.context['request'].build_absolute_uri().split('/')
protocol = full_path_tokens[0]
base_url = full_path_tokens[2]
icon_path = 'special_icons/question-mark.png'
static_or_media = STATIC_URL
if widget.abstract_widget:
if widget.abstract_widget.static_image:
icon_path = '{}/icons/widget/{}'.format(widget.abstract_widget.package,
widget.abstract_widget.static_image)
elif widget.abstract_widget.image:
static_or_media = MEDIA_URL
icon_path = widget.abstract_widget.image
elif widget.abstract_widget.wsdl:
icon_path = 'special_icons/ws.png'
elif hasattr(widget, 'workflow_link'):
icon_path = 'special_icons/subprocess.png'
elif widget.type == 'input':
icon_path = 'special_icons/forward-arrow.png'
elif widget.type == 'output':
icon_path = 'special_icons/forward-arrow.png'
        # NOTE: this condition repeats the 'output' branch above, so the loop
        # icon below is unreachable as written (the intended widget type is
        # unclear from this file).
        elif widget.type == 'output':
            icon_path = 'special_icons/loop.png'
icon_url = '{}//{}{}{}'.format(protocol, base_url, static_or_media, icon_path)
return icon_url
class Meta:
model = Widget
fields = (
'id', 'url', 'workflow', 'x', 'y', 'name', 'save_results', 'must_save', 'can_interact', 'abstract_widget', 'finished',
'error', 'running', 'interaction_waiting', 'description', 'icon', 'type', 'progress', 'inputs', 'outputs',
'workflow_link')
class WidgetPositionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Widget
fields = ('x', 'y')
class WidgetListSerializer(serializers.HyperlinkedModelSerializer):
abstract_widget = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = Widget
# exclude = ('abstract_widget',)
class StreamWidgetSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
class Meta:
model = Widget
fields = ('id', 'url', 'name')
class WorkflowSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
widgets = WidgetSerializer(many=True, read_only=True)
user = UserSerializer(read_only=True)
connections = ConnectionSerializer(many=True, read_only=True)
is_subprocess = serializers.SerializerMethodField()
is_public = serializers.BooleanField(source='public')
def get_is_subprocess(self, obj):
        if obj.widget is None:
return False
else:
return True
class Meta:
model = Workflow
exclude = ('public',)
class StreamDetailSerializer(StreamSerializer):
workflow = WorkflowListSerializer(read_only=True)
stream_visualization_widgets = serializers.SerializerMethodField()
def get_stream_visualization_widgets(self, obj):
widgets = obj.stream_visualization_widgets()
data = StreamWidgetSerializer(widgets, many=True, read_only=True, context={'request': self.context['request']}).data
return data
| mit | 7,936,824,907,788,334,000 | 34.70024 | 160 | 0.623363 | false | 4.083105 | false | false | false |
ai-se/george | Models/usp05.py | 1 | 7552 | """
# The USP05 Data Set
Standard header:
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
"""
@attribute ObjType {FT,PJ,RQ}
@attribute IntComplx {5.0,2.0,1.0,4.0,3.0,3.5,2.5,4.5,NULL}
@attribute DataFile {18.0,9.0,7.0,12.0,2.0,5.0,4.0,3.0,1.0,11.0,0.0,75.0,13.0,6.0,8.0,NULL,32.0}
@attribute DataEn {94.0,240.0,15.0,90.0,314.0,1.0,4.0,3.0,2.0,6.0,0.0,20.0,60.0,30.0,5.0,17.0,10.0,7.0,45.0,48.0,12.0,83.0,150.0,36.0,186.0,9.0,11.0,52.0,25.0,14.0,8.0,NULL,50.0,13.0}
@attribute DataOut {NULL,0.0,1.0,2.0,4.0,20.0,5.0,50.0,12.0,76.0,6.0,69.0,200.0,34.0,108.0,9.0,3.0,8.0,7.0,10.0,18.0,16.0,17.0,13.0,14.0,11.0}
@attribute UFP {NULL,0.0,2.0,3.0,4.0,50.0,46.0,66.0,48.0,36.0,44.0,14.0,8.0,10.0,20.0,25.0,35.0,1.0,6.0,49.0,19.0,64.0,55.0,30.0,180.0,190.0,250.0,1085.0,510.0,210.0,1714.0,11.0,5.0,7.0,17.0,27.0,34.0,154.0,18.0,321.0,90.0,75.0,60.0,40.0,95.0,29.0,23.0,15.0,32.0,31.0,26.0,37.0,12.0,16.0,224.0,22.0,235.0,59.0,147.0,153.0,166.0,137.0,33.0,56.0,57.0,76.0,104.0,105.0}
@attribute AppExpr numeric
@attribute Effort numeric
Data:
"""
def usp05(weighFeature = False,
split="median"):
vl=1;l=2;n=3;h=4;vh=5;xh=6;_=0;
FT=0;PJ=1;RQ=2;NULL=0;
return data(indep= [
# 0..6
'ObjType','IntComplx','DataFile','DataEn','DataOut','UFP','AppExpr'],
less = ['effort'],
_rows=[
[FT,5,18,94,NULL,NULL,4,2.5],
[FT,5,9,240,NULL,NULL,4,2.5],
[FT,2,9,15,0,0,4,2],
[FT,2,9,15,0,0,4,2],
[FT,2,9,15,0,0,5,3.5],
[FT,1,7,90,0,0,4,2],
[FT,2,9,90,0,0,5,2],
[FT,2,9,90,0,0,5,2],
[FT,5,12,314,0,0,5,16],
[FT,2,2,1,1,2,2,1],
[FT,1,2,4,1,0,1,2],
[FT,1,2,4,1,0,1,1],
[FT,4,2,3,1,0,3,5],
[FT,1,2,1,1,0,2,2],
[FT,1,2,1,1,0,2,2],
[FT,1,2,1,1,0,3,3],
[FT,2,5,2,2,0,2,7],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,4,4,1,0,2,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,4,4,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,4,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,4,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,3,1,3,1,1],
[FT,1,2,2,1,3,1,1],
[FT,5,3,1,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,3,1,3,1,1],
[FT,1,2,2,1,3,1,1],
[FT,5,4,1,1,0,5,1],
[FT,1,2,1,1,0,2,1],
[FT,1,4,1,1,0,2,1],
[FT,4,4,6,1,4,4,1],
[FT,1,4,4,1,0,3,1],
[FT,2,4,2,1,0,3,1],
[FT,3,3,2,1,50,2,40],
[FT,2,3,1,1,46,2,40],
[FT,3,1,2,4,66,2,20],
[FT,3,2,1,2,48,2,20],
[FT,2,2,1,1,36,2,10],
[FT,4,2,3,1,44,2,20],
[FT,2,7,3,2,14,2,8],
[FT,3,2,2,1,8,4,3],
[FT,2,2,3,1,10,1,3],
[FT,2,12,0,0,10,1,6],
[FT,4,1,20,20,20,1,10],
[FT,3,5,20,5,25,2,6],
[FT,4,11,60,50,35,1,12],
[FT,1,4,30,12,20,3,8],
[FT,1,0,0,0,1,5,0.5],
[FT,1,0,0,0,1,4,1],
[FT,2,3,2,1,6,1,24],
[FT,1,2,2,0,4,4,0.5],
[FT,1,2,2,0,4,4,0.5],
[FT,1,2,1,0,4,4,0.5],
[FT,1,2,0,2,6,4,0.5],
[FT,3,0,15,1,49,4,24],
[FT,2,0,5,1,19,4,8],
[FT,3,0,20,1,64,4,20],
[FT,2,0,17,1,55,4,4],
[FT,4,0,10,0,30,4,30],
[FT,3,0,7,1,25,4,8],
[FT,3,0,45,0,180,5,5],
[PJ,4,75,48,76,190,4,75],
[PJ,3,13,12,6,250,2,220],
[PJ,3,7,83,69,1085,3,400],
[PJ,3,12,150,200,510,2,100],
[PJ,2,5,36,34,210,4,70],
[PJ,3,12,186,108,1714,3,69],
[RQ,3,5,4,2,10,5,2.5],
[RQ,3,5,4,2,10,5,2.5],
[RQ,3,4,0,9,10,5,2],
[RQ,3,3,7,4,11,5,1.5],
[RQ,2,3,3,2,4,5,2],
[RQ,4,6,6,2,5,5,2.5],
[RQ,3,4,4,4,2,5,2.5],
[RQ,1,9,15,0,0,5,2],
[RQ,1,9,15,0,0,5,1],
[RQ,1,9,15,0,0,5,1],
[RQ,1,9,15,0,0,5,0.5],
[RQ,3,8,1,1,14,3,7],
[RQ,3,8,4,1,14,3,5],
[RQ,3,3,1,1,6,3,15],
[RQ,3,2,3,1,4,2,2],
[RQ,3,3,2,1,8,2,8],
[RQ,1,2,1,1,7,1,2],
[RQ,1,2,1,1,7,1,2],
[RQ,4,5,9,1,8,3,11],
[RQ,4,5,11,1,8,3,11],
[RQ,2,3,2,6,7,2,5],
[RQ,2,3,2,6,8,2,3],
[RQ,3,4,1,4,7,2,3],
[RQ,3,3,9,1,8,3,2],
[RQ,3,3,11,1,5,3,2],
[RQ,2,2,4,1,5,3,2],
[RQ,3,2,4,1,5,2,2],
[RQ,2,3,1,5,17,2,3],
[RQ,5,4,10,3,27,5,20],
[RQ,3,8,2,2,5,3,5],
[RQ,1,1,1,1,0,1,1],
[RQ,1,2,1,5,2,2,1],
[RQ,1,1,1,8,0,1,1],
[RQ,5,1,3,1,34,2,20],
[RQ,2,2,1,1,36,2,10],
[RQ,4,13,3,1,154,2,30],
[RQ,2,1,2,0,18,2,10],
[RQ,3.5,6,52,7,321,3.5,20],
[RQ,2.5,3,4,1,14,1,15],
[RQ,3.5,4,5,10,30,1,20],
[RQ,3.5,2,3,1,14,1,20],
[RQ,3.5,2,30,18,90,2,15],
[RQ,4,2,25,16,75,1,15],
[RQ,4.5,5,7,5,30,1,40],
[RQ,2,2,3,2,10,1,3],
[RQ,4,2,25,16,75,1,15],
[RQ,3,2,3,1,14,1,20],
[RQ,4,4,25,12,50,4,10],
[RQ,2,2,20,10,60,2,6],
[RQ,3,1,14,8,40,3,8],
[RQ,3,1,8,10,35,3,8],
[RQ,4,12,2,20,95,1,12],
[RQ,2,2,4,10,30,2,10],
[RQ,2,3,1,1,5,4,8],
[RQ,1,0,0,0,1,4,2],
[RQ,1,1,0,0,2,5,1],
[RQ,1,0,0,0,1,5,1.5],
[RQ,5,3,17,17,29,5,25],
[RQ,5,3,17,17,29,5,9],
[RQ,4,1,5,2,10,5,15],
[RQ,3,3,17,17,23,5,2],
[RQ,3,0,3,3,4,2,5],
[RQ,5,2,2,1,4,5,45],
[RQ,4,3,11,1,19,5,35],
[RQ,5,3,4,4,14,5,50],
[RQ,5,2,2,2,5,5,25],
[RQ,5,1,3,3,10,5,35],
[RQ,4,2,2,2,7,5,20],
[RQ,3,3,9,4,20,5,25],
[RQ,3,3,1,1,6,4,10],
[RQ,2,3,2,1,6,4,33],
[RQ,4,3,8,1,14,4,24],
[RQ,4,3,9,1,15,4,36],
[RQ,1,1,1,0,6,4,1],
[RQ,1,1,2,0,4,4,1],
[RQ,4,0,4,2,4,4,1],
[RQ,3,2,4,10,32,4,2],
[RQ,3,3,12,4,31,4,2],
[RQ,5,4,9,6,26,4,2],
[RQ,2,1,9,9,23,4,1],
[RQ,1,1,9,9,37,4,1],
[RQ,1,1,12,0,18,4,1],
[RQ,2,1,1,0,20,4,1],
[RQ,2,1,12,0,36,4,1],
[RQ,3,2,1,0,4,4,1],
[RQ,3,2,1,0,4,4,1],
[RQ,2,2,10,0,12,4,1],
[RQ,2,2,10,10,10,4,1],
[RQ,3,1,12,12,10,4,1],
[RQ,1,0,0,0,6,4,0.5],
[RQ,1,0,0,12,8,4,0.5],
[RQ,NULL,NULL,NULL,NULL,NULL,4,8],
[RQ,2,0,4,1,16,4,6],
[RQ,2,0,5,1,19,4,6],
[RQ,4,0,5,1,19,4,4],
[RQ,2,0,1,1,7,4,1],
[RQ,1,1,3,0,16,1,4],
[RQ,2,0,1,0,3,4,6],
[RQ,4,32,0,0,224,1,12],
[RQ,3,NULL,NULL,NULL,NULL,1,6],
[RQ,1,1,10,0,7,5,6],
[RQ,2,0,6,1,22,4,4],
[RQ,2,0,6,1,22,4,4],
[RQ,2,3,50,1,235,3,7],
[RQ,2,1,3,1,27,3,2],
[RQ,3,3,6,1,59,3,3],
[RQ,2,1,2,1,23,3,3],
[RQ,2,3,13,13,147,3,4],
[RQ,3,4,12,13,153,3,5],
[RQ,4,4,14,14,166,3,6],
[RQ,2,2,13,13,137,3,2],
[RQ,3,2,2,1,33,3,6],
[RQ,2,1,4,1,31,3,2],
[RQ,1,1,4,4,46,3,1],
[RQ,3,2,4,4,56,3,4],
[RQ,4,3,3,3,57,3,4],
[RQ,3,2,4,8,76,3,3],
[RQ,1,2,1,1,29,3,2],
[RQ,3,3,6,10,104,3,5],
[RQ,2,1,0,8,50,3,3],
[RQ,1,5,0,11,105,2,0.5]
],
_tunings =[[
# vlow low nom high vhigh xhigh
#scale factors:
'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _ ],[
'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _ ],[
'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _ ],[
'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _ ],[
'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _ ]],
weighFeature = weighFeature,
_split = split,
_isCocomo = False,
ignores=[5]
)
"""
Demo code:
"""
def _usp05(): print(usp05())
#if __name__ == '__main__': eval(todo('_nasa93()')) | mit | -1,131,610,790,422,415,400 | 28.73622 | 366 | 0.431674 | false | 1.537146 | false | false | false |
V-FEXrt/Pokemon-Spoof-Plus | CableClub/cable_club_trade_center.py | 1 | 1950 | from AI.team_manager import TeamManager
from cable_club_constants import TradeCenterState, Com
def reset():
global tradeCenterState, counter, eat_byte, ate_byte, choice_byte
tradeCenterState = TradeCenterState.CHOOSING_TRADE
counter = 416
eat_byte = False
ate_byte = 0x0
choice_byte = 0
reset()
def set_reset_callback(func):
global reset_to_init
reset_to_init = func
def choosing_trade_process(byte):
global counter, tradeCenterState, eat_byte, choice_byte
## Eat 'random' 96 byte
if byte == 96 and counter > 0:
counter = 0
return byte
if byte >= 96 and byte <= 101:
# TODO: 'seen first wait' solves this eating bytes problem better. Should use it instead
if not eat_byte:
choice_byte = TeamManager.trade_center.offerIndex(byte)
eat_byte = True
return choice_byte
if eat_byte:
tradeCenterState = TradeCenterState.CONFIRMING_TRADE
eat_byte = False
return byte
return byte
def confirming_trade_process(byte):
global tradeCenterState, eat_byte, ate_byte, counter
if byte == 97 or byte == 98:
eat_byte = True
ate_byte = byte
return byte
if eat_byte:
eat_byte = False
if ate_byte == 97:
# Cancelled by partner
tradeCenterState = TradeCenterState.CHOOSING_TRADE
print "Trade cancelled by Player"
if ate_byte == 98:
# Confirmed by partner
print "Trade confirmed by Player"
reset_to_init()
reset()
TeamManager.trade_center.trade_confirmed()
return byte
functionSwitch = [choosing_trade_process, confirming_trade_process]
def trade_center_process_byte(byte):
if (tradeCenterState >= len(functionSwitch)):
print "Warning: no function for Trade Center State"
return byte
return functionSwitch[tradeCenterState](byte)
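# Rough usage sketch. Byte meanings are inferred from the handlers above, not
# from a protocol document: while choosing, bytes 96-101 appear to carry the
# offered party slot and are swapped for our own TeamManager choice via
# offerIndex(); once an offer is pending, 97 marks a cancel and 98 a confirm.
# The caller feeds every link-cable byte through:
#
#   spoofed = trade_center_process_byte(incoming_byte)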
| mit | -6,544,293,436,902,257,000 | 25.712329 | 96 | 0.638974 | false | 3.77907 | false | false | false |
crs4/omero.biobank | test/kb/test_individual.py | 1 | 2096 | # BEGIN_COPYRIGHT
# END_COPYRIGHT
import os, unittest, logging
logging.basicConfig(level=logging.ERROR)
from bl.vl.kb import KnowledgeBase as KB
from kb_object_creator import KBObjectCreator
OME_HOST = os.getenv("OME_HOST", "localhost")
OME_USER = os.getenv("OME_USER", "root")
OME_PASS = os.getenv("OME_PASS", "romeo")
class TestKB(KBObjectCreator):
def __init__(self, name):
super(TestKB, self).__init__(name)
self.kill_list = []
def setUp(self):
self.kb = KB(driver='omero')(OME_HOST, OME_USER, OME_PASS)
def tearDown(self):
self.kill_list.reverse()
for x in self.kill_list:
self.kb.delete(x)
self.kill_list = []
def check_object(self, o, conf, otype):
try:
self.assertTrue(isinstance(o, otype))
for k in conf.keys():
v = conf[k]
# FIXME this is omero specific...
if hasattr(v, 'ome_obj'):
self.assertEqual(getattr(o, k).id, v.id)
self.assertEqual(type(getattr(o, k)), type(v))
elif hasattr(v, '_id'):
self.assertEqual(getattr(o, k)._id, v._id)
else:
self.assertEqual(getattr(o, k), v)
    except:
      # NOTE: this bare except also swallows AssertionError, so any mismatch
      # detected above is silently ignored instead of failing the test.
      pass
def test_individual(self):
conf, i = self.create_individual()
self.kill_list.append(i.save())
self.check_object(i, conf, self.kb.Individual)
def test_enrollment(self):
conf, e = self.create_enrollment()
self.kill_list.append(e.save())
self.check_object(e, conf, self.kb.Enrollment)
def test_enrollment_ops(self):
conf, e = self.create_enrollment()
e.save()
study = e.study
xe = self.kb.get_enrollment(study, conf['studyCode'])
self.assertTrue(not xe is None)
self.assertEqual(xe.id, e.id)
self.kb.delete(e)
self.assertEqual(self.kb.get_enrollment(study, conf['studyCode']), None)
def suite():
suite = unittest.TestSuite()
suite.addTest(TestKB('test_individual'))
suite.addTest(TestKB('test_enrollment'))
suite.addTest(TestKB('test_enrollment_ops'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run((suite()))
| gpl-2.0 | -3,798,257,073,794,676,700 | 25.871795 | 76 | 0.640744 | false | 3.195122 | true | false | false |
wger-project/wger | wger/exercises/tests/test_categories.py | 1 | 3817 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Django
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from django.urls import reverse
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
WgerAccessTestCase,
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
WgerTestCase,
)
from wger.exercises.models import ExerciseCategory
class ExerciseCategoryRepresentationTestCase(WgerTestCase):
"""
Test the representation of a model
"""
def test_representation(self):
"""
Test that the representation of an object is correct
"""
self.assertEqual("{0}".format(ExerciseCategory.objects.get(pk=1)), 'Category')
class CategoryOverviewTestCase(WgerAccessTestCase):
"""
Test that only admins see the edit links
"""
url = 'exercise:category:list'
anonymous_fail = True
user_success = 'admin'
user_fail = (
'manager1',
        'manager2',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5',
)
class DeleteExerciseCategoryTestCase(WgerDeleteTestCase):
"""
Exercise category delete test case
"""
object_class = ExerciseCategory
url = 'exercise:category:delete'
pk = 4
user_success = 'admin'
user_fail = 'test'
class EditExerciseCategoryTestCase(WgerEditTestCase):
"""
Tests editing an exercise category
"""
object_class = ExerciseCategory
url = 'exercise:category:edit'
pk = 3
data = {'name': 'A different name'}
class AddExerciseCategoryTestCase(WgerAddTestCase):
"""
Tests adding an exercise category
"""
object_class = ExerciseCategory
url = 'exercise:category:add'
data = {'name': 'A new category'}
class ExerciseCategoryCacheTestCase(WgerTestCase):
"""
Cache test case
"""
def test_overview_cache_update(self):
"""
Test that the template cache for the overview is correctly reseted when
performing certain operations
"""
self.client.get(reverse('exercise:exercise:overview'))
self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
old_exercise_overview = cache.get(make_template_fragment_key('exercise-overview', [2]))
category = ExerciseCategory.objects.get(pk=2)
category.name = 'Cool category'
category.save()
self.assertFalse(cache.get(make_template_fragment_key('exercise-overview', [2])))
self.client.get(reverse('exercise:exercise:overview'))
self.client.get(reverse('exercise:muscle:overview'))
self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
new_exercise_overview = cache.get(make_template_fragment_key('exercise-overview', [2]))
self.assertNotEqual(old_exercise_overview, new_exercise_overview)
class ExerciseCategoryApiTestCase(api_base_test.ApiBaseResourceTestCase):
"""
Tests the exercise category overview resource
"""
pk = 2
resource = ExerciseCategory
private_resource = False
| agpl-3.0 | 3,603,420,777,804,712,400 | 27.066176 | 95 | 0.681163 | false | 3.992678 | true | false | false |
philanthropy-u/edx-platform | pavelib/prereqs.py | 1 | 11951 | """
Install Python and Node prerequisites.
"""
from __future__ import print_function
import hashlib
import os
import re
import sys
import subprocess
import io
from distutils import sysconfig
from paver.easy import BuildFailure, sh, task
from .utils.envs import Env
from .utils.timer import timed
PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache')
NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs"
NO_PYTHON_UNINSTALL_MESSAGE = 'NO_PYTHON_UNINSTALL is set. No attempts will be made to uninstall old Python libs.'
COVERAGE_REQ_FILE = 'requirements/edx/coverage.txt'
# If you make any changes to this list you also need to make
# a corresponding change to circle.yml, which is how the python
# prerequisites are installed for builds on circleci.com
if 'TOXENV' in os.environ:
PYTHON_REQ_FILES = ['requirements/edx/testing.txt']
else:
PYTHON_REQ_FILES = ['requirements/edx/development.txt']
# Developers can have private requirements, for local copies of github repos,
# or favorite debugging tools, etc.
if 'TOXENV' in os.environ:
PRIVATE_REQS = 'requirements/philu/testing.txt'
else:
PRIVATE_REQS = 'requirements/philu/base.txt'
if os.path.exists(PRIVATE_REQS):
PYTHON_REQ_FILES.append(PRIVATE_REQS)
def str2bool(s):
s = str(s)
return s.lower() in ('yes', 'true', 't', '1')
def no_prereq_install():
"""
Determine if NO_PREREQ_INSTALL should be truthy or falsy.
"""
return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False'))
def no_python_uninstall():
""" Determine if we should run the uninstall_python_packages task. """
return str2bool(os.environ.get('NO_PYTHON_UNINSTALL', 'False'))
def create_prereqs_cache_dir():
"""Create the directory for storing the hashes, if it doesn't exist already."""
try:
os.makedirs(PREREQS_STATE_DIR)
except OSError:
if not os.path.isdir(PREREQS_STATE_DIR):
raise
def compute_fingerprint(path_list):
"""
Hash the contents of all the files and directories in `path_list`.
Returns the hex digest.
"""
hasher = hashlib.sha1()
for path_item in path_list:
# For directories, create a hash based on the modification times
# of first-level subdirectories
if os.path.isdir(path_item):
for dirname in sorted(os.listdir(path_item)):
path_name = os.path.join(path_item, dirname)
if os.path.isdir(path_name):
hasher.update(str(os.stat(path_name).st_mtime))
# For files, hash the contents of the file
if os.path.isfile(path_item):
with io.open(path_item, "rb") as file_handle:
hasher.update(file_handle.read())
return hasher.hexdigest()
def prereq_cache(cache_name, paths, install_func):
"""
Conditionally execute `install_func()` only if the files/directories
specified by `paths` have changed.
If the code executes successfully (no exceptions are thrown), the cache
is updated with the new hash.
"""
# Retrieve the old hash
cache_filename = cache_name.replace(" ", "_")
cache_file_path = os.path.join(PREREQS_STATE_DIR, "{}.sha1".format(cache_filename))
old_hash = None
if os.path.isfile(cache_file_path):
with io.open(cache_file_path, "rb") as cache_file:
old_hash = cache_file.read()
# Compare the old hash to the new hash
# If they do not match (either the cache hasn't been created, or the files have changed),
# then execute the code within the block.
new_hash = compute_fingerprint(paths)
if new_hash != old_hash:
install_func()
# Update the cache with the new hash
# If the code executed within the context fails (throws an exception),
# then this step won't get executed.
create_prereqs_cache_dir()
with io.open(cache_file_path, "wb") as cache_file:
# Since the pip requirement files are modified during the install
# process, we need to store the hash generated AFTER the installation
post_install_hash = compute_fingerprint(paths)
cache_file.write(post_install_hash)
else:
print('{cache} unchanged, skipping...'.format(cache=cache_name))
def node_prereqs_installation():
"""
Configures npm and installs Node prerequisites
"""
# NPM installs hang sporadically. Log the installation process so that we
# determine if any packages are chronic offenders.
shard_str = os.getenv('SHARD', None)
if shard_str:
npm_log_file_path = '{}/npm-install.{}.log'.format(Env.GEN_LOG_DIR, shard_str)
else:
npm_log_file_path = '{}/npm-install.log'.format(Env.GEN_LOG_DIR)
npm_log_file = io.open(npm_log_file_path, 'wb')
npm_command = 'npm install --verbose'.split()
cb_error_text = "Subprocess return code: 1"
# Error handling around a race condition that produces "cb() never called" error. This
# evinces itself as `cb_error_text` and it ought to disappear when we upgrade
# npm to 3 or higher. TODO: clean this up when we do that.
try:
# The implementation of Paver's `sh` function returns before the forked
# actually returns. Using a Popen object so that we can ensure that
# the forked process has returned
proc = subprocess.Popen(npm_command, stderr=npm_log_file)
proc.wait()
except BuildFailure, error_text:
        if cb_error_text in str(error_text):
print("npm install error detected. Retrying...")
proc = subprocess.Popen(npm_command, stderr=npm_log_file)
proc.wait()
else:
raise BuildFailure(error_text)
print("Successfully installed NPM packages. Log found at {}".format(
npm_log_file_path
))
def python_prereqs_installation():
"""
Installs Python prerequisites
"""
for req_file in PYTHON_REQ_FILES:
pip_install_req_file(req_file)
def pip_install_req_file(req_file):
"""Pip install the requirements file."""
pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w'
sh("{pip_cmd} -r {req_file}".format(pip_cmd=pip_cmd, req_file=req_file))
@task
@timed
def install_node_prereqs():
"""
Installs Node prerequisites
"""
if no_prereq_install():
print(NO_PREREQ_MESSAGE)
return
prereq_cache("Node prereqs", ["package.json"], node_prereqs_installation)
# To add a package to the uninstall list, just add it to this list! No need
# to touch any other part of this file.
PACKAGES_TO_UNINSTALL = [
"South", # Because it interferes with Django 1.8 migrations.
"edxval", # Because it was bork-installed somehow.
"django-storages",
"django-oauth2-provider", # Because now it's called edx-django-oauth2-provider.
"edx-oauth2-provider", # Because it moved from github to pypi
"i18n-tools", # Because now it's called edx-i18n-tools
]
@task
@timed
def uninstall_python_packages():
"""
Uninstall Python packages that need explicit uninstallation.
Some Python packages that we no longer want need to be explicitly
uninstalled, notably, South. Some other packages were once installed in
ways that were resistant to being upgraded, like edxval. Also uninstall
them.
"""
if no_python_uninstall():
print(NO_PYTHON_UNINSTALL_MESSAGE)
return
# So that we don't constantly uninstall things, use a hash of the packages
# to be uninstalled. Check it, and skip this if we're up to date.
hasher = hashlib.sha1()
hasher.update(repr(PACKAGES_TO_UNINSTALL))
expected_version = hasher.hexdigest()
state_file_path = os.path.join(PREREQS_STATE_DIR, "Python_uninstall.sha1")
create_prereqs_cache_dir()
if os.path.isfile(state_file_path):
with io.open(state_file_path) as state_file:
version = state_file.read()
if version == expected_version:
print('Python uninstalls unchanged, skipping...')
return
# Run pip to find the packages we need to get rid of. Believe it or not,
# edx-val is installed in a way that it is present twice, so we have a loop
# to really really get rid of it.
for _ in range(3):
uninstalled = False
frozen = sh("pip freeze", capture=True)
for package_name in PACKAGES_TO_UNINSTALL:
if package_in_frozen(package_name, frozen):
# Uninstall the pacakge
sh("pip uninstall --disable-pip-version-check -y {}".format(package_name))
uninstalled = True
if not uninstalled:
break
else:
# We tried three times and didn't manage to get rid of the pests.
print("Couldn't uninstall unwanted Python packages!")
return
# Write our version.
with io.open(state_file_path, "wb") as state_file:
state_file.write(expected_version)
def package_in_frozen(package_name, frozen_output):
"""Is this package in the output of 'pip freeze'?"""
# Look for either:
#
# PACKAGE-NAME==
#
# or:
#
# blah_blah#egg=package_name-version
#
pattern = r"(?mi)^{pkg}==|#egg={pkg_under}-".format(
pkg=re.escape(package_name),
pkg_under=re.escape(package_name.replace("-", "_")),
)
return bool(re.search(pattern, frozen_output))
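# Illustrative behaviour of package_in_frozen (the freeze lines are made up
# for the example, not taken from a real environment):
#
#   package_in_frozen("edx-val", "edx-val==0.2")                      # True
#   package_in_frozen("edx-val", "-e git+https://x#egg=edx_val-0.2")  # True
#   package_in_frozen("edx-val", "edx-val-extras==1.0")               # False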
@task
@timed
def install_coverage_prereqs():
""" Install python prereqs for measuring coverage. """
if no_prereq_install():
print(NO_PREREQ_MESSAGE)
return
pip_install_req_file(COVERAGE_REQ_FILE)
@task
@timed
def install_python_prereqs():
"""
Installs Python prerequisites.
"""
if no_prereq_install():
print(NO_PREREQ_MESSAGE)
return
uninstall_python_packages()
# Include all of the requirements files in the fingerprint.
files_to_fingerprint = list(PYTHON_REQ_FILES)
# Also fingerprint the directories where packages get installed:
# ("/edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages")
files_to_fingerprint.append(sysconfig.get_python_lib())
# In a virtualenv, "-e installs" get put in a src directory.
src_dir = os.path.join(sys.prefix, "src")
if os.path.isdir(src_dir):
files_to_fingerprint.append(src_dir)
# Also fingerprint this source file, so that if the logic for installations
# changes, we will redo the installation.
this_file = __file__
if this_file.endswith(".pyc"):
this_file = this_file[:-1] # use the .py file instead of the .pyc
files_to_fingerprint.append(this_file)
prereq_cache("Python prereqs", files_to_fingerprint, python_prereqs_installation)
@task
@timed
def install_prereqs():
"""
Installs Node and Python prerequisites
"""
if no_prereq_install():
print(NO_PREREQ_MESSAGE)
return
if not str2bool(os.environ.get('SKIP_NPM_INSTALL', 'False')):
install_node_prereqs()
install_python_prereqs()
log_installed_python_prereqs()
print_devstack_warning()
def log_installed_python_prereqs():
""" Logs output of pip freeze for debugging. """
sh("pip freeze > {}".format(Env.GEN_LOG_DIR + "/pip_freeze.log"))
return
def print_devstack_warning():
if Env.USING_DOCKER: # pragma: no cover
print("********************************************************************************")
print("* WARNING: Mac users should run this from both the lms and studio shells")
print("* in docker devstack to avoid startup errors that kill your CPU.")
print("* For more details, see:")
print("* https://github.com/edx/devstack#docker-is-using-lots-of-cpu-time-when-it-should-be-idle")
print("********************************************************************************")
| agpl-3.0 | 451,403,621,817,372,500 | 32.759887 | 114 | 0.643293 | false | 3.708036 | false | false | false |
RedHatQE/cfme_tests | cfme/automate/dialogs/dialog_tab.py | 1 | 2309 | import attr
from navmazing import NavigateToAttribute
from widgetastic.widget import Text
from . import AddTabView
from . import TabForm
from .dialog_box import BoxCollection
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.modeling.base import parent_of_type
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
class EditTabView(TabForm):
@property
def is_displayed(self):
return (
self.in_customization and
self.title.text == "Editing Dialog {} [Tab Information]".format(self.tab_label)
)
class DetailsTabView(TabForm):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_customization and self.service_dialogs.is_opened and
self.title.text == 'Dialog "{}"'.format(self.context['object'].tab_label)
)
@attr.s
class Tab(BaseEntity):
"""A class representing one Tab in the UI."""
tab_label = attr.ib()
tab_desc = attr.ib(default=None)
_collections = {'boxes': BoxCollection}
@property
def boxes(self):
return self.collections.boxes
@property
def tree_path(self):
return self.parent.tree_path + [self.tab_label]
@property
def dialog(self):
""" Returns parent object - Dialog"""
from .service_dialogs import Dialog
return parent_of_type(self, Dialog)
@attr.s
class TabCollection(BaseCollection):
ENTITY = Tab
@property
def tree_path(self):
return self.parent.tree_path
def create(self, tab_label=None, tab_desc=None):
""" Create tab method"""
view = navigate_to(self, "Add")
view.new_tab.click()
view.edit_tab.click()
view.fill({'tab_label': tab_label, 'tab_desc': tab_desc})
view.save_button.click()
return self.instantiate(tab_label=tab_label, tab_desc=tab_desc)
@navigator.register(TabCollection)
class Add(CFMENavigateStep):
VIEW = AddTabView
prerequisite = NavigateToAttribute('parent.parent', 'Add')
def step(self, *args, **kwargs):
self.prerequisite_view.create_tab.click()
| gpl-2.0 | -5,686,583,473,684,868,000 | 25.848837 | 91 | 0.673452 | false | 3.712219 | false | false | false |
pyspace/test | pySPACE/missions/nodes/meta/same_input_layer.py | 1 | 22715 | """ Combine several other nodes together in parallel
This is useful to be combined with the
:class:`~pySPACE.missions.nodes.meta.flow_node.FlowNode`.
"""
import numpy
from pySPACE.environments.chains.node_chain import NodeChainFactory
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.prediction_vector import PredictionVector
# ensemble imports
import os
import fcntl
import fnmatch
import cPickle
import logging
from collections import defaultdict
from pySPACE.missions.nodes.meta.flow_node import FlowNode
from pySPACE.tools.filesystem import locate
class SameInputLayerNode(BaseNode):
""" Encapsulates a set of other nodes that are executed in parallel in the flow.
This node was a thin wrapper around MDP's SameInputLayer node
    but is now an independent implementation.
**Parameters**
:enforce_unique_names:
When combining time series channels or feature vectors,
the node adds the index of the current node to the channel names or
feature names as a prefix to enforce unique names.
(*optional, default: True*)
**Exemplary Call**
.. code-block:: yaml
-
node : Same_Input_Layer
parameters :
enforce_unique_names : True
nodes :
-
node : Time_Domain_Features
parameters :
moving_window_length : 1
-
node : STFT_Features
parameters :
frequency_band : [2.0, 8.0]
frequency_resolution : 1.0
"""
def __init__(self, nodes,enforce_unique_names=True,
store = False, **kwargs):
self.nodes = nodes # needed to find out dimensions and trainability, ...
super(SameInputLayerNode, self).__init__(**kwargs)
self.permanent_state.pop("nodes")
self.set_permanent_attributes(output_type = None,
names = None,
unique = enforce_unique_names)
@staticmethod
def node_from_yaml(layer_spec):
""" Load the specs and initialize the layer nodes """
# This node requires one parameters, namely a list of nodes
assert("parameters" in layer_spec
and "nodes" in layer_spec["parameters"]),\
"SameInputLayerNode requires specification of a list of nodes!"
# Create all nodes that are packed together in this layer
layer_nodes = []
for node_spec in layer_spec["parameters"]["nodes"]:
node_obj = BaseNode.node_from_yaml(node_spec)
layer_nodes.append(node_obj)
layer_spec["parameters"].pop("nodes")
# Create the node object
node_obj = SameInputLayerNode(nodes = layer_nodes,**layer_spec["parameters"])
return node_obj
def reset(self):
""" Also reset internal nodes """
nodes = self.nodes
for node in nodes:
node.reset()
super(SameInputLayerNode, self).reset()
self.nodes = nodes
def register_input_node(self, input_node):
""" All sub-nodes have the same input node """
super(SameInputLayerNode, self).register_input_node(input_node)
# Register the node as the input for all internal nodes
for node in self.nodes:
node.register_input_node(input_node)
def _execute(self, data):
""" Process the data through the internal nodes """
names = []
result_array = None
result_label = []
result_predictor = []
result_prediction = []
# For all node-layers
for node_index, node in enumerate(self.nodes):
# Compute node's result
node_result = node.execute(data)
# Determine the output type of the node
if self.output_type is None:
self.output_type = type(node_result)
else:
assert (self.output_type == type(node_result)), \
"SameInputLayerNode requires that all of its layers return "\
"the same type. Types found: %s %s" \
% (self.output_type, type(node_result))
# Merge the nodes' outputs depending on the type
if self.output_type == FeatureVector:
result_array = \
self.add_feature_vector(node_result, node_index,
result_array, names)
elif self.output_type == PredictionVector:
if type(node_result.label) == list:
result_label.extend(node_result.label)
else:
# a single classification is expected here
result_label.append(node_result.label)
if type(node_result.prediction) == list:
result_prediction.extend(node_result.prediction)
else:
result_prediction.append(node_result.prediction)
if type(node_result.predictor) == list:
result_predictor.extend(node_result.predictor)
else:
result_predictor.append(node_result.predictor)
else:
assert (self.output_type == TimeSeries), \
"SameInputLayerNode can not merge data of type %s." \
% self.output_type
if self.names is None and not self.unique:
names.extend(node_result.channel_names)
elif self.names is None and self.unique:
for name in node_result.channel_names:
names.append("%i_%s" % (node_index, name))
            if result_array is None:
                result_array = node_result
                if self.dtype is None:
                    self.dtype = node_result.dtype
            else:
                result_array = numpy.concatenate((result_array,
                                                  node_result), axis=1)
# Construct output with correct type and names
if self.names is None:
self.names = names
if self.output_type == FeatureVector:
return FeatureVector(result_array, self.names)
elif self.output_type == PredictionVector:
return PredictionVector(label=result_label,
prediction=result_prediction,
predictor=result_predictor)
else:
return TimeSeries(result_array, self.names,
node_result.sampling_frequency,
node_result.start_time, node_result.end_time,
node_result.name, node_result.marker_name)
def add_feature_vector(self, data, index, result_array, names):
""" Concatenate feature vectors, ensuring unique names """
if self.names is None and self.unique:
for name in data.feature_names:
names.append("%i_%s" % (index,name))
elif self.names is None and not self.unique:
names.extend(data.feature_names)
        if result_array is None:
result_array = data
else:
result_array = numpy.concatenate((result_array,data), axis=1)
return result_array
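    # Example of the naming scheme above (feature name invented for
    # illustration): with enforce_unique_names=True, a feature "Mean_O1"
    # produced by the second internal node (node_index 1) is exposed as
    # "1_Mean_O1" in the combined FeatureVector, so identical feature names
    # from different sub-nodes cannot collide.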
def is_trainable(self):
""" Trainable if one subnode is trainable """
for node in self.nodes:
if node.is_trainable():
return True
return False
def is_supervised(self):
""" Supervised if one subnode requires supervised training """
for node in self.nodes:
if node.is_supervised():
return True
return False
#
# def train_sweep(self, use_test_data):
# """ Train all internal nodes """
# for node in self.nodes:
# node.train_sweep(use_test_data)
def _train(self, x, *args, **kwargs):
""" Perform single training step by training the internal nodes """
for node in self.nodes:
if node.is_training():
node.train(x, *args, **kwargs)
def _stop_training(self):
""" Perform single training step by training the internal nodes """
for node in self.nodes:
if node.is_training():
node.stop_training()
def store_state(self, result_dir, index=None):
""" Stores all nodes in subdirectories of *result_dir* """
for i, node in enumerate(self.nodes):
node_dir = os.path.join(result_dir, (self.__class__.__name__+str(index).split("None")[0]+str(i)))
node.store_state(node_dir, index=i)
def _inc_train(self,data,label):
""" Forward data to retrainable nodes
So the single nodes do not need to buffer or *present_labels* does not
have to be reimplemented.
"""
for node in self.nodes:
if node.is_retrainable():
node._inc_train(data, label)
def set_run_number(self, run_number):
""" Informs all subnodes about the number of the current run """
for node in self.nodes:
node.set_run_number(run_number)
super(SameInputLayerNode, self).set_run_number(run_number)
class EnsembleNotFoundException(Exception): pass
class ClassificationFlowsLoaderNode(BaseNode):
""" Combine an ensemble of pretrained node chains
This node loads all "pickled" flows whose file names match
*ensemble_pattern* and are contained in the directory tree rooted at
*ensemble_base_dir*. If the *flow_select_list* is not empty, only the
flows with indices contained in flow_select_list are used. The index "-1"
corresponds to "all flows".
**Parameters**
:ensemble_base_dir:
The root directory under which the stored flow objects which constitute
the ensemble are stored.
:ensemble_pattern:
Pickled flows must match the given pattern to be included into the
ensemble.
:flow_select_list:
This optional parameter allows to select only a subset of the flows
that are found in ensemble_base_dir. It must be a list of indices.
Only the flows with the given index are included into the ensemble.
If -1 is contained in the list, all flows are automatically added to
the ensemble.
.. note::
The order of the flows in the ensemble is potentially random or at
least hard to predict. Thus, this parameter should not be used
to select a specific flow. In contrast, this parameter can be used
to select a certain number of flows from the available flows
(where it doesn't matter which ones). This can be useful for instance
in benchmarking experiments when one is interested in
the average performance of an ensemble of a certain size.
(*optional, default: [-1]*)
:cache_dir:
If this argument is given, all results of all ensembles are remembered
and stored in a persistent cache file in the given cache_dir. These
cached results can be later reused without actually loading and
executing the ensemble.
(*optional, default: None*)
**Exemplary Call**
.. code-block:: yaml
-
node : Ensemble_Node
parameters :
ensemble_base_dir : "/tmp/" # <- insert suitable directory here
ensemble_pattern : "flow*.pickle"
flow_select_list : "eval(range(10))"
:Author: Jan Hendrik Metzen ([email protected])
:Created: 2010/05/20
"""
def __init__(self, ensemble_base_dir, ensemble_pattern,
flow_select_list=[-1], cache_dir=None, **kwargs):
super(ClassificationFlowsLoaderNode, self).__init__(**kwargs)
# Load all flow-pickle files that match the given ensemble_pattern
# in the directory tree rooted in ensemble_base_dir
flow_pathes = tuple(locate(ensemble_pattern, ensemble_base_dir))
if -1 not in flow_select_list:
# Select only flows for ensemble whose index is contained in
# flow_select_list
flow_pathes = tuple(flow_pathes[index] for index in flow_select_list)
if len(flow_pathes) == 0:
raise EnsembleNotFoundException("No ensemble found in %s for pattern %s" %
(ensemble_base_dir, ensemble_pattern))
self.feature_names = \
map(lambda s: "_".join(s.split(os.sep)[-1].split('_')[0:2]),
flow_pathes)
self.set_permanent_attributes(ensemble = None,
flow_pathes = flow_pathes,
cache_dir = cache_dir,
cache = None,
cache_updated = False,
store = True) # always store cache
def _load_cache(self):
self.cache = defaultdict(dict)
# Check if there are cached results for this ensemble
for flow_path in self.flow_pathes:
file_path = self.cache_dir + os.sep + "ensemble_cache" + os.sep \
+ "cache_%s" % hash(flow_path)
if os.path.exists(file_path):
# Load ensemble cache
self._log("Loading flow cache from %s" % file_path)
lock_file = open(file_path + ".lock", 'w')
fcntl.flock(lock_file, fcntl.LOCK_EX)
self._log("Got exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
cache_file = open(file_path, 'r')
self.cache[flow_path] = cPickle.load(cache_file)
cache_file.close()
self._log("Release exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
fcntl.flock(lock_file, fcntl.LOCK_UN)
def _load_ensemble(self):
self._log("Loading ensemble")
# Create a flow node for each flow pickle
flow_nodes = [FlowNode(subflow_path = flow_path)
for flow_path in self.flow_pathes]
# Create an SameInputLayer node that executes all flows independently
# with the same input
ensemble = SameInputLayerNode(flow_nodes, enforce_unique_names=True)
# We can now set the input dim and output dim
self.input_dim = ensemble.input_dim
self.output_dim = ensemble.output_dim
self.set_permanent_attributes(ensemble = ensemble)
def _train(self, data, label):
""" Trains the ensemble on the given data vector *data* """
        if self.ensemble is None:
# Load ensemble since data is not cached
self._load_ensemble()
return self.ensemble.train(data, label)
def _execute(self, data):
# Compute data's hash
data_hash = hash(tuple(data.flatten()))
# Load ensemble's cache
        if self.cache is None:
if self.cache_dir:
self._load_cache()
else: # Caching disabled
self.cache = defaultdict(dict)
# Try to lookup the result of this ensemble for the given data in the cache
labels = []
predictions = []
for i, flow_path in enumerate(self.flow_pathes):
if data_hash in self.cache[flow_path]:
label, prediction = self.cache[flow_path][data_hash]
else:
self.cache_updated = True
                if self.ensemble is None:
# Load ensemble since data is not cached
self._load_ensemble()
node_result = self.ensemble.nodes[i].execute(data)
label = node_result.label
prediction = node_result.prediction
self.cache[flow_path][data_hash] = (label, prediction)
labels.append(label)
predictions.append(prediction)
result = PredictionVector(label=labels,
prediction=predictions,
predictor=self)
result.dim_names = self.feature_names
return result
def store_state(self, result_dir, index=None):
""" Stores this node in the given directory *result_dir* """
# Store cache if caching is enabled and cache has changed
if self.cache_dir and self.cache_updated:
if not os.path.exists(self.cache_dir + os.sep + "ensemble_cache"):
os.makedirs(self.cache_dir + os.sep + "ensemble_cache")
for flow_path in self.flow_pathes:
file_path = self.cache_dir + os.sep + "ensemble_cache" + os.sep \
+ "cache_%s" % hash(flow_path)
if os.path.exists(file_path):
self._log("Updating flow cache %s" % file_path)
# Update existing cache persistency file
lock_file = open(file_path + ".lock", 'w')
fcntl.flock(lock_file, fcntl.LOCK_EX)
self._log("Got exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
cache_file = open(file_path, 'r')
self.cache[flow_path].update(cPickle.load(cache_file))
cache_file.close()
cache_file = open(file_path, 'w')
cPickle.dump(self.cache[flow_path], cache_file)
cache_file.close()
self._log("Release exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
fcntl.flock(lock_file, fcntl.LOCK_UN)
else:
self._log("Writing flow cache %s" % file_path)
# Create new cache persistency file
lock_file = open(file_path + ".lock", 'w')
fcntl.flock(lock_file, fcntl.LOCK_EX)
self._log("Got exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
cache_file = open(file_path, 'w')
cPickle.dump(self.cache[flow_path], cache_file)
cache_file.close()
self._log("Release exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
fcntl.flock(lock_file, fcntl.LOCK_UN)
class MultiClassLayerNode(SameInputLayerNode):
""" Wrap the one vs. rest or one vs. one scheme around the given node
The given class labels are forwarded to the internal nodes.
During training, data is relabeled.
Everything else is the same as in the base node.
Though this scheme is most important for classification it permits
other trainable algorithms to use this scheme.
**Parameters**
:class_labels:
This is the complete list of expected class labels.
It is needed to construct the necessary flows in the
initialization stage.
:node:
Specification of the wrapped node for the used scheme
As class labels , for the *1vsR* scheme,
this node has to use *REST* and *LABEL*.
*LABEL* is replaced with the different `class_labels`.
The other label should be *REST*.
For the *1vs1* scheme *LABEL1* and *LABEL2* have to be used.
:scheme:
One of *1v1* (One vs. One) or *1vR* (One vs. Rest)
        .. note:: The one-class approach is included by simply not giving
                the 'REST' label to the classifier but filtering it out.
(*optional, default:'1v1'*)
**Exemplary Call**
.. code-block:: yaml
-
node : MultiClassLayer
parameters :
class_labels : ["Target", "Standard","Artifact"]
scheme : 1vR
node :
-
node : 1SVM
parameters :
class_labels : ["LABEL","REST"]
complexity : 1
"""
@staticmethod
def node_from_yaml(layer_spec):
""" Load the specs and initialize the layer nodes """
assert("parameters" in layer_spec
and "class_labels" in layer_spec["parameters"]
and "node" in layer_spec["parameters"]),\
"Node requires specification of a node and classification labels!"
scheme = layer_spec["parameters"].pop("scheme","1vs1")
# Create all nodes that are packed together in this layer
layer_nodes = []
node_spec = layer_spec["parameters"]["node"][0]
classes = layer_spec["parameters"]["class_labels"]
if scheme=='1vR':
for label in layer_spec["parameters"]["class_labels"]:
node_obj = BaseNode.node_from_yaml(NodeChainFactory.instantiate(node_spec,{"LABEL":label}))
layer_nodes.append(node_obj)
else:
n=len(classes)
for i in range(n-1):
for j in range(i+1,n):
replace_dict = {"LABEL1":classes[i],"LABEL2":classes[j]}
node_obj = BaseNode.node_from_yaml(NodeChainFactory.instantiate(node_spec,replace_dict))
layer_nodes.append(node_obj)
layer_spec["parameters"].pop("node")
layer_spec["parameters"].pop("class_labels")
# Create the node object
node_obj = MultiClassLayerNode(nodes = layer_nodes,**layer_spec["parameters"])
return node_obj
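# Worked example for MultiClassLayerNode.node_from_yaml (labels taken from the
# docstring above): with class_labels ["Target", "Standard", "Artifact"], the
# '1vR' scheme instantiates three copies of the wrapped node with LABEL
# replaced by "Target", "Standard" and "Artifact" (each against "REST"),
# while the default one-vs-one scheme instantiates the label pairs
# (Target, Standard), (Target, Artifact) and (Standard, Artifact) through the
# LABEL1/LABEL2 substitution.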
_NODE_MAPPING = {"Ensemble_Node": ClassificationFlowsLoaderNode,
"Same_Input_Layer": SameInputLayerNode,
}
| gpl-3.0 | 4,007,869,671,616,826,000 | 41.22119 | 109 | 0.547215 | false | 4.593529 | false | false | false |
googleapis/python-game-servers | google/cloud/gaming_v1/services/realms_service/async_client.py | 1 | 28046 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.gaming_v1.services.realms_service import pagers
from google.cloud.gaming_v1.types import common
from google.cloud.gaming_v1.types import realms
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import RealmsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import RealmsServiceGrpcAsyncIOTransport
from .client import RealmsServiceClient
class RealmsServiceAsyncClient:
"""A realm is a grouping of game server clusters that are
considered interchangeable.
"""
_client: RealmsServiceClient
DEFAULT_ENDPOINT = RealmsServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = RealmsServiceClient.DEFAULT_MTLS_ENDPOINT
realm_path = staticmethod(RealmsServiceClient.realm_path)
parse_realm_path = staticmethod(RealmsServiceClient.parse_realm_path)
common_billing_account_path = staticmethod(
RealmsServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
RealmsServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(RealmsServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
RealmsServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
RealmsServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
RealmsServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(RealmsServiceClient.common_project_path)
parse_common_project_path = staticmethod(
RealmsServiceClient.parse_common_project_path
)
common_location_path = staticmethod(RealmsServiceClient.common_location_path)
parse_common_location_path = staticmethod(
RealmsServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RealmsServiceAsyncClient: The constructed client.
"""
return RealmsServiceClient.from_service_account_info.__func__(RealmsServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RealmsServiceAsyncClient: The constructed client.
"""
return RealmsServiceClient.from_service_account_file.__func__(RealmsServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> RealmsServiceTransport:
"""Returns the transport used by the client instance.
Returns:
RealmsServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(RealmsServiceClient).get_transport_class, type(RealmsServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, RealmsServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the realms service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.RealmsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = RealmsServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_realms(
self,
request: realms.ListRealmsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListRealmsAsyncPager:
r"""Lists realms in a given project and location.
Args:
request (:class:`google.cloud.gaming_v1.types.ListRealmsRequest`):
The request object. Request message for
RealmsService.ListRealms.
parent (:class:`str`):
Required. The parent resource name. Uses the form:
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.services.realms_service.pagers.ListRealmsAsyncPager:
Response message for
RealmsService.ListRealms.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.ListRealmsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_realms,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListRealmsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_realm(
self,
request: realms.GetRealmRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> realms.Realm:
r"""Gets details of a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.GetRealmRequest`):
The request object. Request message for
RealmsService.GetRealm.
name (:class:`str`):
Required. The name of the realm to retrieve. Uses the
form:
``projects/{project}/locations/{location}/realms/{realm}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.types.Realm:
A realm resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.GetRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_realm,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_realm(
self,
request: realms.CreateRealmRequest = None,
*,
parent: str = None,
realm: realms.Realm = None,
realm_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new realm in a given project and location.
Args:
request (:class:`google.cloud.gaming_v1.types.CreateRealmRequest`):
The request object. Request message for
RealmsService.CreateRealm.
parent (:class:`str`):
Required. The parent resource name. Uses the form:
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
realm (:class:`google.cloud.gaming_v1.types.Realm`):
Required. The realm resource to be
created.
This corresponds to the ``realm`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
realm_id (:class:`str`):
Required. The ID of the realm
resource to be created.
This corresponds to the ``realm_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gaming_v1.types.Realm` A realm
resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, realm, realm_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.CreateRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if realm is not None:
request.realm = realm
if realm_id is not None:
request.realm_id = realm_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
realms.Realm,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def delete_realm(
self,
request: realms.DeleteRealmRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.DeleteRealmRequest`):
The request object. Request message for
RealmsService.DeleteRealm.
name (:class:`str`):
Required. The name of the realm to delete. Uses the
form:
``projects/{project}/locations/{location}/realms/{realm}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.DeleteRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def update_realm(
self,
request: realms.UpdateRealmRequest = None,
*,
realm: realms.Realm = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Patches a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.UpdateRealmRequest`):
The request object. Request message for
RealmsService.UpdateRealm.
realm (:class:`google.cloud.gaming_v1.types.Realm`):
Required. The realm to be updated. Only fields specified
in update_mask are updated.
This corresponds to the ``realm`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
                https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gaming_v1.types.Realm` A realm
resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([realm, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.UpdateRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if realm is not None:
request.realm = realm
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("realm.name", request.realm.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
realms.Realm,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def preview_realm_update(
self,
request: realms.PreviewRealmUpdateRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> realms.PreviewRealmUpdateResponse:
r"""Previews patches to a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.PreviewRealmUpdateRequest`):
The request object. Request message for
RealmsService.PreviewRealmUpdate.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.types.PreviewRealmUpdateResponse:
Response message for
RealmsService.PreviewRealmUpdate.
"""
# Create or coerce a protobuf request object.
request = realms.PreviewRealmUpdateRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.preview_realm_update,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("realm.name", request.realm.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-game-servers",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("RealmsServiceAsyncClient",)
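# Minimal usage sketch (assumes application default credentials; the parent
# string is a placeholder project/location, and this helper is illustrative
# only, not part of the generated client):
async def _example_list_realms():
    client = RealmsServiceAsyncClient()
    pager = await client.list_realms(parent="projects/my-project/locations/global")
    async for realm in pager:
        print(realm.name)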
| apache-2.0 | -6,299,150,042,904,482,000 | 38.894737 | 171 | 0.606111 | false | 4.626526 | false | false | false |
dgzzhb/GAOthello | board.py | 1 | 10425 | #!/usr/bin/env python
""" game.py Humberto Henrique Campos Pinheiro
Game logic.
"""
from config import WHITE, BLACK, EMPTY
from copy import deepcopy
class Board:
""" Rules of the game """
def __init__ ( self ):
self.board = [ [0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0] ]
self.board[3][4] = BLACK
self.board[4][3] = BLACK
self.board[3][3] = WHITE
self.board[4][4] = WHITE
self.valid_moves = []
def __getitem__ ( self, i, j):
return self.board[i][j]
def lookup ( self, row, column, color ):
""" Returns the possible positions that there exists at least one straight
(horizontal, vertical, or diagonal) line between the piece specified by (row,
column, color) and another piece of the same color.
"""
if color == BLACK:
other = WHITE
else:
other = BLACK
places = []
if ( row < 0 or row > 7 or column < 0 or column > 7 ):
return places
# For each direction search for possible positions to put a piece.
# north
i = row - 1
if ( i >= 0 and self.board[i][column] == other ):
i = i - 1
while ( i >= 0 and self.board[i][column] == other ):
i = i - 1
if ( i >= 0 and self.board[i][column] == 0 ):
places = places + [( i, column)]
# northeast
i = row - 1
j = column + 1
if ( i >= 0 and j < 8 and self.board[i][j] == other ) :
i = i - 1
j = j + 1
while ( i >= 0 and j < 8 and self.board[i][j] == other ):
i = i - 1
j = j + 1
if ( i >= 0 and j < 8 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# east
j = column + 1
if ( j < 8 and self.board[row][j] == other ) :
j = j + 1
while ( j < 8 and self.board[row][j] == other ):
j = j + 1
if ( j < 8 and self.board[row][j] == 0 ):
places = places + [(row, j)]
# southeast
i = row + 1
j = column + 1
if ( i < 8 and j < 8 and self.board[i][j] == other ) :
i = i + 1
j = j + 1
while ( i < 8 and j < 8 and self.board[i][j] == other ):
i = i + 1
j = j + 1
if ( i < 8 and j < 8 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# south
i = row + 1
if ( i < 8 and self.board[i][column] == other ):
i = i + 1
while ( i < 8 and self.board[i][column] == other ):
i = i + 1
if ( i < 8 and self.board[i][column] == 0 ):
places = places + [(i, column)]
# southwest
i = row + 1
j = column - 1
if ( i < 8 and j >= 0 and self.board[i][j] == other ):
i = i + 1
j = j - 1
while ( i < 8 and j >= 0 and self.board[i][j] == other ):
i = i + 1
j = j - 1
if ( i < 8 and j >= 0 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# west
j = column - 1
if ( j >= 0 and self.board[row][j] == other ):
j = j - 1
while ( j >= 0 and self.board[row][j] == other ):
j = j - 1
if ( j >= 0 and self.board[row][j] == 0 ):
places = places + [(row, j)]
# northwest
i = row - 1
j = column - 1
if ( i >= 0 and j >= 0 and self.board[i][j] == other):
i = i - 1
j = j - 1
while ( i >= 0 and j >= 0 and self.board[i][j] == other):
i = i - 1
j = j - 1
if ( i >= 0 and j >= 0 and self.board[i][j] == 0 ):
places = places + [(i, j)]
return places
def get_valid_moves ( self, color ):
""" Get the avaiable positions to put a piece of the given color. For each
piece of the given color we search its neighbours, searching for pieces of the
other color to determine if is possible to make a move. This method must be
called before apply_move."""
if color == BLACK:
other = WHITE
else:
other = BLACK
places = []
for i in range ( 8 ) :
for j in range ( 8 ) :
if self.board[i][j] == color :
places = places + self.lookup ( i, j, color )
places = list( set ( places ))
self.valid_moves = places
return places
def apply_move ( self, move, color ):
""" Determine if the move is correct and apply the changes in the game.
"""
if move in self.valid_moves:
self.board[move[0]][move[1]] = color
for i in range ( 1, 9 ):
self.flip ( i, move, color )
def flip ( self, direction, position, color ):
""" Flips (capturates) the pieces of the given color in the given direction
(1=North,2=Northeast...) from position. """
if direction == 1:
# north
row_inc = -1
col_inc = 0
elif direction == 2:
# northeast
row_inc = -1
col_inc = 1
elif direction == 3:
# east
row_inc = 0
col_inc = 1
elif direction == 4:
# southeast
row_inc = 1
col_inc = 1
elif direction == 5:
# south
row_inc = 1
col_inc = 0
elif direction == 6:
# southwest
row_inc = 1
col_inc = -1
elif direction == 7:
# west
row_inc = 0
col_inc = -1
elif direction == 8:
# northwest
row_inc = -1
col_inc = -1
places = [] # pieces to flip
i = position[0] + row_inc
j = position[1] + col_inc
if color == WHITE:
other = BLACK
else:
other = WHITE
if i in range( 8 ) and j in range( 8 ) and self.board[i][j] == other:
# assures there is at least one piece to flip
places = places + [(i,j)]
i = i + row_inc
j = j + col_inc
while i in range( 8 ) and j in range( 8 ) and self.board[i][j] == other:
# search for more pieces to flip
places = places + [(i,j)]
i = i + row_inc
j = j + col_inc
if i in range( 8 ) and j in range( 8 ) and self.board[i][j] == color:
# found a piece of the right color to flip the pieces between
for pos in places:
# flips
self.board[pos[0]][pos[1]] = color
def get_changes ( self ):
""" Return black and white counters. """
whites, blacks, empty = self.count_stones()
return ( self.board, blacks, whites )
def game_ended ( self ):
""" Is the game ended? """
# board full or wipeout
whites, blacks, empty = self.count_stones()
if whites == 0 or blacks == 0 or empty == 0:
return True
# no valid moves for both players
if self.get_valid_moves( BLACK ) == [] and self.get_valid_moves( WHITE ) == []:
return True
return False
def print_board ( self ):
for i in range ( 8 ):
print i, ' |',
for j in range ( 8 ):
if self.board[i][j] == BLACK:
print 'B',
elif self.board[i][j] == WHITE:
print 'W',
else:
print ' ',
print '|',
print
def count_stones( self ):
""" Returns the number of white pieces, black pieces and empty squares, in
this order.
"""
whites = 0
blacks = 0
empty = 0
for i in range( 8 ):
for j in range( 8 ):
if self.board[i][j] == WHITE:
whites += 1
elif self.board[i][j] == BLACK:
blacks += 1
else:
empty += 1
return whites, blacks, empty
def compare( self, otherBoard ):
""" Return a board containing only the squares that are empty in one of the boards
and not empty on the other.
"""
diffBoard = Board()
diffBoard.board[3][4] = 0
diffBoard.board[3][3] = 0
diffBoard.board[4][3] = 0
diffBoard.board[4][4] = 0
for i in range( 8 ):
for j in range( 8 ):
if otherBoard.board[i][j] != self.board[i][j]:
diffBoard.board[i][j] = otherBoard.board[i][j]
        return diffBoard
def get_adjacent_count( self, color ):
""" Return how many empty squares there are on the board adjacent to the specified color."""
adjCount = 0
for x,y in [(a,b) for a in range( 8 ) for b in range( 8 ) if self.board[a][b] == color]:
for i,j in [(a,b) for a in [-1,0,1] for b in [-1,0,1]]:
if 0 <= x+i <= 7 and 0 <= y+j <= 7:
if self.board[x+i][y+j] == EMPTY:
adjCount += 1
return adjCount
def next_states( self, color ):
""" Given a player's color return all the boards resulting from moves that this player
        can do. It's implemented as an iterator.
"""
valid_moves = self.get_valid_moves( color )
for move in valid_moves:
newBoard = deepcopy( self )
newBoard.apply_move( move, color )
yield newBoard
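# Minimal usage sketch (no I/O): play the first available move for BLACK on a
# fresh board and return the resulting stone counts; illustrative only.
def _example_first_move():
    board = Board()
    moves = board.get_valid_moves(BLACK)  # must be called before apply_move
    board.apply_move(moves[0], BLACK)
    return board.count_stones()           # (whites, blacks, empty)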
| mit | -6,171,274,448,251,786,000 | 31.578125 | 100 | 0.424077 | false | 3.900112 | false | false | false |
sbg/Mitty | mitty/simulation/sequencing/syntheticsequencer.py | 1 | 1955 | """A fully synthetic read model that allows us to produce single end or paired end reads with arbitrary
read and template lengths. Its read model format is as follows
{
'model_class': 'illumina',
'model_description': '',
'paired': True/False,
'read_length': 100,
'mean_template_length': 300,
'std_template_length': 100,
'bq_mat': [],
'cum_bq_mat': []
}
"""
import pickle
import numpy as np
def create_model(
pkl,
read_length=100, mean_template_length=500, std_template_length=100, max_tlen=1000,
bq0=30, k=200, sigma=10,
comment=''):
description = """This is a synthetic read model that generates reads
with a length of {} bp, a template length of {} +/- {} bp.
The mean base quality follows the equation:
{} * (1 - exp(- {} * x**2))
where x decreases linearly from 1 at the first base of the read to 0 at the last base.
The base quality for a given base in a given read is drawn from a gaussian with standard deviation {}
{}""".format(
read_length,
mean_template_length,
std_template_length,
bq0, k, sigma,
comment)
bq = bq0 * (1 - np.exp(- k * np.linspace(1, 0, read_length) ** 2))
one_bq_mat = np.zeros((read_length, 94), dtype=float)
for n in range(read_length):
one_bq_mat[n, :] = np.exp(- 0.5 * ((np.arange(94) - bq[n]) / sigma) ** 2)
one_cum_bq_mat = one_bq_mat.cumsum(axis=1) / one_bq_mat.sum(axis=1).clip(1)[:, None]
tlen_mat = np.exp(- 0.5 * ((np.arange(max_tlen) - mean_template_length) / std_template_length) ** 2)
tlen_mat /= tlen_mat.sum()
cum_tlen = tlen_mat.cumsum() / tlen_mat.sum()
pickle.dump({
'model_class': 'illumina',
'model_description': description,
'min_mq': 0,
'bq_mat': np.array((one_bq_mat, one_bq_mat)),
'cum_bq_mat': np.array((one_cum_bq_mat, one_cum_bq_mat)),
'tlen': tlen_mat,
'cum_tlen': cum_tlen,
'mean_rlen': read_length,
'min_rlen': read_length,
'max_rlen': read_length,
'r_cnt': 1
}, open(pkl, 'wb'))
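# Minimal usage sketch (the output path is a placeholder); writes a model
# pickle using the parameters documented above. Illustrative only.
def _example_create_model():
  create_model('synthetic-read-model.pkl',
               read_length=100,
               mean_template_length=500,
               std_template_length=100,
               comment='example model')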
| apache-2.0 | 9,079,642,919,832,840,000 | 31.04918 | 103 | 0.623018 | false | 2.892012 | false | false | false |
CNS-OIST/STEPS_Example | publication_models/API_1/Anwar_J Neurosci_2013/extra/constants_hh.py | 1 | 2809 | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# constants_hh.py : provides a set of parameters and other constants for the
# Hodgkin-Huxley model in the above study.
# It is intended that this file is not altered.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import math
# # # # # # # # # # # # # # # # SIMULATION CONTROLS # # # # # # # # # # # # #
EF_DT = 1.0e-5 # The EField dt
NTIMEPOINTS = 5000
TIMECONVERTER = 1.0e-5
NITER = 1
############################ PARAMETERS ################################
init_pot = -65e-3
TEMPERATURE = 20.0
Q10 = 3
Qt = math.pow(Q10, ((TEMPERATURE-6.3)/10))
########## BULK RESISTIVITY ##########
Ra = 1.0
########## MEMBRANE CAPACITANCE ##########
memb_capac = 1.0e-2
# # # # # # # # # # # # # # # # # # CHANNELS # # # # # # # # # # # # # # # #
# Voltage range for gating kinetics in Volts
Vrange = [-100.0e-3, 50e-3, 1e-4]
# Hodgkin-Huxley gating kinetics
def a_n(V):
return ((0.01*(10-(V+65.))/(math.exp((10-(V+65.))/10.)-1)))
def b_n(V):
return ((0.125*math.exp(-(V+65.)/80.)))
def a_m(V):
return ((0.1*(25-(V+65.))/(math.exp((25-(V+65.))/10.)-1)))
def b_m(V):
return ((4.*math.exp(-(V+65.)/18.)))
def a_h(V):
return ((0.07*math.exp(-(V+65.)/20.)))
def b_h(V):
return ((1./(math.exp((30-(V+65.))/10.)+1)))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Potassium conductance = 0.036 S/cm2
# Sodium conductance = 0.120 S/cm2
# Potassium single-channel conductance
K_G = 20.0e-12 # Siemens
# Potassium channel density
K_ro = 18.0e12 # per square meter
# Potassium reversal potential
K_rev = -77e-3 # volts
# Sodium single-channel conductance
Na_G = 20.0e-12 # Siemens
# Sodium channel density
Na_ro = 60.0e12 # per square meter
# Sodium reversal potential
Na_rev = 50e-3 # volts
# Leak single-channel conductance
L_G = 1.0e-12 # Siemens
# Leak density
L_ro = 10.0e12 # per square meter
# Leak reveral potential
leak_rev = -50.0e-3 # volts
# A table of potassium channel initial population factors:
# n0, n1, n2, n3, n4
K_facs = [ 0.21768, 0.40513, 0.28093, 0.08647, 0.00979 ]
# A table of sodium channel initial population factors
# m0h0, m1h0, m2h0, m3h0, m0h1, m1h1, m2h1, m3h1:
Na_facs = [ 0.34412, 0.05733, 0.00327, 6.0e-05, \
0.50558, 0.08504, 0.00449, 0.00010 ]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
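# Minimal sketch (not part of the published parameter set): steady-state gating
# values at the initial potential, computed from the rate functions above.
# Note that the rate functions expect the membrane potential in mV.
def _steady_state_at_init():
    v_mV = init_pot * 1.0e3  # -65 mV
    n_inf = a_n(v_mV) / (a_n(v_mV) + b_n(v_mV))
    m_inf = a_m(v_mV) / (a_m(v_mV) + b_m(v_mV))
    h_inf = a_h(v_mV) / (a_h(v_mV) + b_h(v_mV))
    return n_inf, m_inf, h_inf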
| gpl-2.0 | 7,726,932,273,181,009,000 | 22.805085 | 77 | 0.533286 | false | 2.311934 | false | false | false |
ContinuumIO/ashiba | enaml/enaml/widgets/datetime_selector.py | 1 | 2100 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Bool, Str, Typed, ForwardTyped, observe, set_default
from enaml.core.declarative import d_
from .bounded_datetime import BoundedDatetime, ProxyBoundedDatetime
class ProxyDatetimeSelector(ProxyBoundedDatetime):
""" The abstract defintion of a proxy DatetimeSelector object.
"""
#: A reference to the DatetimeSelector declaration.
declaration = ForwardTyped(lambda: DatetimeSelector)
def set_datetime_format(self, format):
raise NotImplementedError
def set_calendar_popup(self, popup):
raise NotImplementedError
class DatetimeSelector(BoundedDatetime):
""" A widget to edit a Python datetime.datetime object.
This is a geometrically smaller control than what is provided by
Calendar.
"""
#: A python date format string to format the datetime. If None is
#: supplied (or is invalid) the system locale setting is used.
#: This may not be supported by all backends.
datetime_format = d_(Str())
#: Whether to use a calendar popup for selecting the date.
calendar_popup = d_(Bool(False))
#: A datetime selector expands freely in width by default
hug_width = set_default('ignore')
#: A reference to the ProxyDateSelector object.
proxy = Typed(ProxyDatetimeSelector)
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe(('datetime_format', 'calendar_popup'))
def _update_proxy(self, change):
""" An observer which updates the proxy with state change.
"""
# The superclass implementation is sufficient.
super(DatetimeSelector, self)._update_proxy(change)
| bsd-3-clause | -5,191,797,719,013,015,000 | 34.59322 | 79 | 0.605714 | false | 5.109489 | false | false | false |
CuteLemon/Learn | NewsAPI_Scraper/db_operation.py | 1 | 1050 | import pymongo as Mongo
DB_NAME = 'localhost'
DB_PORT = 27017
TEST_JSON = {'url':'http://hello.com','content':'Lemon Tree'}
class DB():
def __init__(self,db,port):
self.client = Mongo.MongoClient(db,port)
self.db = self.client.test
self.collect = self.db.test_collect
def insert(self,c):
self.collect.insert_one(c)
def find(self,k):
return self.collect.find(k)
def delete(self,k):
return self.collect.delete_many(k)
def close(self):
self.client.close()
if __name__ == '__main__':
# Client = Mongo.MongoClient(DB,PORT)
# db = Client.test
# collect = db.test_collect
# collect.insert(TEST_JSON)
# for x in collect.find({'content':'Lemon Tree'}):
# print x
# Client.close()
print 'mongodb test start:'
db = DB(DB_NAME,DB_PORT)
db.insert(TEST_JSON)
result = db.find({'content':'Lemon Tree'})
for x in result:
print x
db.delete({'content':'Lemon Tree'})
db.close()
print 'mongodb test complete!'
| gpl-3.0 | -5,302,895,376,558,269,000 | 22.333333 | 61 | 0.592381 | false | 3.322785 | true | false | false |
bbondy/brianbondy.gae | libs/werkzeug/testsuite/contrib/cache.py | 1 | 5814 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
try:
import redis
try:
from redis.exceptions import ConnectionError as RedisConnectionError
cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection')
except RedisConnectionError:
redis = None
except ImportError:
redis = None
try:
import pylibmc as memcache
except ImportError:
try:
from google.appengine.api import memcache
except ImportError:
try:
import memcache
except ImportError:
memcache = None
class CacheTestCase(WerkzeugTestCase):
make_cache = None
def test_generic_get_dict(self):
c = self.make_cache()
assert c.set('a', 'a')
assert c.set('b', 'b')
d = c.get_dict('a', 'b')
assert 'a' in d
assert 'a' == d['a']
assert 'b' in d
assert 'b' == d['b']
def test_generic_set_many(self):
c = self.make_cache()
assert c.set_many({0: 0, 1: 1, 2: 4})
assert c.get(2) == 4
assert c.set_many((i, i*i) for i in range(3))
assert c.get(2) == 4
def test_generic_set_get(self):
c = self.make_cache()
for i in range(3):
assert c.set(str(i), i * i)
for i in range(3):
result = c.get(str(i))
assert result == i * i, result
def test_generic_get_set(self):
c = self.make_cache()
assert c.set('foo', ['bar'])
assert c.get('foo') == ['bar']
def test_generic_get_many(self):
c = self.make_cache()
assert c.set('foo', ['bar'])
assert c.set('spam', 'eggs')
self.assert_equal(list(c.get_many('foo', 'spam')), [['bar'], 'eggs'])
    def test_generic_set_many_mixed(self):
c = self.make_cache()
assert c.set_many({'foo': 'bar', 'spam': ['eggs']})
assert c.get('foo') == 'bar'
assert c.get('spam') == ['eggs']
def test_generic_expire(self):
c = self.make_cache()
assert c.set('foo', 'bar', 1)
time.sleep(2)
assert c.get('foo') is None
def test_generic_add(self):
c = self.make_cache()
# sanity check that add() works like set()
assert c.add('foo', 'bar')
assert c.get('foo') == 'bar'
assert not c.add('foo', 'qux')
assert c.get('foo') == 'bar'
def test_generic_delete(self):
c = self.make_cache()
assert c.add('foo', 'bar')
assert c.get('foo') == 'bar'
assert c.delete('foo')
assert c.get('foo') is None
def test_generic_delete_many(self):
c = self.make_cache()
assert c.add('foo', 'bar')
assert c.add('spam', 'eggs')
assert c.delete_many('foo', 'spam')
assert c.get('foo') is None
assert c.get('spam') is None
def test_generic_inc_dec(self):
c = self.make_cache()
assert c.set('foo', 1)
assert c.inc('foo') == c.get('foo') == 2
assert c.dec('foo') == c.get('foo') == 1
assert c.delete('foo')
def test_generic_true_false(self):
c = self.make_cache()
assert c.set('foo', True)
assert c.get('foo') == True
assert c.set('bar', False)
assert c.get('bar') == False
class SimpleCacheTestCase(CacheTestCase):
make_cache = cache.SimpleCache
class FileSystemCacheTestCase(CacheTestCase):
tmp_dir = None
def make_cache(self, **kwargs):
if self.tmp_dir is None:
self.tmp_dir = tempfile.mkdtemp()
return cache.FileSystemCache(cache_dir=self.tmp_dir, **kwargs)
def teardown(self):
if self.tmp_dir is not None:
shutil.rmtree(self.tmp_dir)
def test_filesystemcache_prune(self):
THRESHOLD = 13
c = self.make_cache(threshold=THRESHOLD)
for i in range(2 * THRESHOLD):
assert c.set(str(i), i)
cache_files = os.listdir(self.tmp_dir)
assert len(cache_files) <= THRESHOLD
def test_filesystemcache_clear(self):
c = self.make_cache()
assert c.set('foo', 'bar')
cache_files = os.listdir(self.tmp_dir)
assert len(cache_files) == 1
assert c.clear()
cache_files = os.listdir(self.tmp_dir)
assert len(cache_files) == 0
class RedisCacheTestCase(CacheTestCase):
def make_cache(self):
return cache.RedisCache(key_prefix='werkzeug-test-case:')
def teardown(self):
self.make_cache().clear()
def test_compat(self):
c = self.make_cache()
assert c._client.set(c.key_prefix + 'foo', 'Awesome')
self.assert_equal(c.get('foo'), b'Awesome')
assert c._client.set(c.key_prefix + 'foo', '42')
self.assert_equal(c.get('foo'), 42)
class MemcachedCacheTestCase(CacheTestCase):
def make_cache(self):
return cache.MemcachedCache(key_prefix='werkzeug-test-case:')
def teardown(self):
self.make_cache().clear()
def test_compat(self):
c = self.make_cache()
assert c._client.set(c.key_prefix + b'foo', 'bar')
self.assert_equal(c.get('foo'), 'bar')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SimpleCacheTestCase))
suite.addTest(unittest.makeSuite(FileSystemCacheTestCase))
if redis is not None:
suite.addTest(unittest.makeSuite(RedisCacheTestCase))
if memcache is not None:
suite.addTest(unittest.makeSuite(MemcachedCacheTestCase))
return suite
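# Minimal sketch of the cache API exercised by these tests, using the
# in-memory SimpleCache (no external service required); not run by the suite:
def _example_simple_cache():
    c = cache.SimpleCache()
    c.set('answer', 42, timeout=60)  # expires after 60 seconds
    assert c.get('answer') == 42
    c.delete('answer')
    assert c.get('answer') is None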
| mit | 1,778,556,580,544,685,600 | 28.21608 | 91 | 0.576367 | false | 3.55814 | true | false | false |
jimgong92/allezViens | connect.py | 1 | 8842 | from models import *
from run import db
import sys
import math
import hashlib
import time
from communication import sendPickNotificationEmail
'''DATABASE INSERTION/UPDATE'''
#Adds driver to database
def addDriver(id, alias, oLat, oLon, dLat, dLon, date):
url = makeURL(id)
driver = Driver(id, alias, oLat, oLon, dLat, dLon, date, url)
db.session.add(driver)
save()
return driver
#Adds passenger to database
def addPassenger(id, alias, oLat, oLon, dLat, dLon, date):
url = makeURL(id)
passenger = Passenger(id, alias, oLat, oLon, dLat, dLon, date, url)
db.session.add(passenger)
save()
return passenger
#Adds a driver to a passenger's picks
def pickDriver(driverID, passengerID, add):
driver = getDriver(driverID)
passenger = getPassenger(passengerID)
#Toggle pick based on whether driver is already in passenger's picks
#currentPicks = findPassengerPicks(passengerID)
# if (driver in currentPicks):
# passenger.unpick(driver)
# else:
# passenger.pick(driver)
if(add):
passenger.pick(driver)
else:
passenger.unpick(driver)
save()
#Adds a passenger to a driver's picks
def pickPassenger(passengerID, driverID, add):
passenger = getPassenger(passengerID)
driver = getDriver(driverID)
# currentPicks = findDriverPicks(driverID)
# if (passenger in currentPicks):
# driver.unpick(passenger)
# else:
# driver.pick(passenger)
if(add):
driver.pick(passenger)
else:
driver.unpick(passenger)
save()
#Validates driver
def validateDriver(driverID):
driver = getDriver(driverID)
driver.validateDriver()
save()
#Validates passenger
def validatePassenger(passengerID):
passenger = getPassenger(passengerID)
passenger.validatePassenger()
save()
def updatePassenger(passengerDict):
passenger = getPassenger(passengerDict['email'])
return update(passenger,passengerDict)
def updateDriver(driverDict):
driver = getDriver(driverDict['email'])
return update(driver,driverDict)
#Updates given model
def update(model, dictionary):
if(model != ''):
model.oLat = dictionary['oLat']
model.oLon = dictionary['oLon']
model.dLat = dictionary['dLat']
model.dLon = dictionary['dLon']
model.date = dictionary['date']
model.alias = dictionary['alias']
db.session.add(model)
save()
return True
else:
return False
'''DATABASE GET'''
#Retrieve driver instance by ID
def getDriver(driverID):
try:
result = Driver.query.filter_by(email=driverID).first()
except:
result = ''
finally:
return result
#Retrieve passenger instance by ID
def getPassenger(passengerID):
try:
result = Passenger.query.filter_by(email=passengerID).first()
except:
result = ''
finally:
return result
#Returns all drivers that contain passenger route and same date
#Identifies drivers whose boundary box contains the passenger's route
#PARAMS: Passenger's origin and destination coordinates
def findMatchableDrivers(oLat, oLon, dLat, dLon, date):
drivers = Driver.query.filter(Driver.date == date).all()
res = []
for i in range(len(drivers)):
minLat, maxLat = min(drivers[i].oLat, drivers[i].dLat), max(drivers[i].oLat, drivers[i].dLat)
minLon, maxLon = min(drivers[i].oLon, drivers[i].dLon), max(drivers[i].oLon, drivers[i].dLon)
if (minLat <= oLat <= maxLat and minLat <= dLat <= maxLat):
if (minLon <= oLon <= maxLon and minLon <= dLon <= maxLon):
res.append(drivers[i])
return formatResults(res)
#Returns all passengers within given bound box and same date
#Returns passengers whose coordinates are in the driver's boundary box
#PARAMS: Driver's origin and destination coordinates
def findMatchablePassengers(oLat, oLon, dLat, dLon, date):
minLat, maxLat = min(oLat, dLat), max(oLat, dLat)
minLon, maxLon = min(oLon, dLon), max(oLon, dLon)
maxLat, minLon = makeBuffer(maxLat,minLon, 5, "NW")
minLat, maxLon = makeBuffer(minLat,maxLon, 5, "SE")
passengers = Passenger.query.filter(Passenger.date == date,
Passenger.oLat >= minLat, Passenger.oLat <= maxLat,
Passenger.dLat >= minLat, Passenger.dLat <= maxLat,
Passenger.oLon >= minLon, Passenger.oLon <= maxLon,
Passenger.dLon >= minLon, Passenger.dLon <= maxLon).all()
return formatResults(passengers)
#Returns all picks by given driver
def findDriverPicks(driverID):
return getDriver(driverID).picks
#Returns all picks by given driver
def findPassengerPicks(passengerID):
return getPassenger(passengerID).picks
#Returns object with user's email, origin, destination, and pick information
def getInfoByUrl(url):
match = Driver.query.filter_by(editURL=url).all()
if(len(match)>0):
driver = match[0]
picks = findDriverPicks(driver.email)
return 'D', objectifyWithPickInfo(driver, picks)
match = Passenger.query.filter_by(editURL=url).all()
if(len(match)>0):
passenger = match[0]
picks = findPassengerPicks(passenger.email)
return 'P', objectifyWithPickInfo(passenger, picks)
return 'NA', False
#Retrieves driver's info by email
def getDriverInfo(email):
driver = getDriver(email)
picks = findDriverPicks(driver.email)
return objectifyWithPickInfo(driver,picks)
#Retrieves passenger's info by email
def getPassengerInfo(email):
passenger = getPassenger(email)
picks = findPassengerPicks(passenger.email)
return objectifyWithPickInfo(passenger,picks)
#Validates existing urls
def urlExists(url, validate):
urlType, info = getInfoByUrl(url)
if(urlType == 'P'):
if(validate):
validatePassenger(info['email'])
return True
elif(urlType == 'D'):
if(validate):
validateDriver(info['email'])
return True
else:
return False
def sendMessage(to, sender, message, fromType):
sent = True
try:
if(fromType[0].upper()=='D'):
passenger = getPassenger(to)
url = passenger.editURL
else:
driver = getDriver(to)
url = driver.editURL
sendPickNotificationEmail(to, sender, url)
except:
sent = False
finally:
return sent
'''DATABASE DELETION'''
#Deletes driver + route from database
def deleteDriver(id):
driver = getDriver(id)
db.session.delete(driver)
save()
return ''
#Deletes passenger + route from database
def deletePassenger(id):
passenger = getPassenger(id)
db.session.delete(passenger)
save()
return ''
'''HELPER FUNCTIONS'''
#Commits db session changes
def save():
print 'save function'
for obj in db.session:
print obj
try:
db.session.commit()
except:
e = sys.exc_info()[0]
print e
print 'Error in session D:'
finally:
print 'after db.session.commit()'
#Returns JSON-friendly data from a model array
def formatResults(modelArray):
res = []
for i in range(len(modelArray)):
print 'in for loop'
res.append(objectify(modelArray[i]))
return res
#Pulls model data into JSON format
def objectify(model):
obj = {
"email": model.email,
"alias": model.alias,
"origin": [float(model.oLat), float(model.oLon)],
"destination": [float(model.dLat), float(model.dLon)],
"date": model.date
}
return obj
#Extends objectify with pick information
def objectifyWithPickInfo(model, picks):
obj = objectify(model)
obj["picks"] = parseUserPicks(model, picks)
return obj
#Takes users pick information and returns array of each pick denoting either CONFIRMED or PENDING status
def parseUserPicks(user, picks):
res = []
for pick in picks:
if (user in pick.picks):
res.append({"id": pick.email, "status": "CONFIRMED"})
else:
res.append({"id": pick.email, "status": "PENDING"})
return res
#Adds buffer around location
def makeBuffer(lat,lon,miles,direction):
#This earth radius in miles may not be entirely accurate - there are various numbers and the earth is not a perfect sphere
#for the case of a buffer though, probably doesn't really matter
earthRadiusMiles = 3959
northwest = math.radians(315)
southeast = math.radians(135)
lat = math.radians(lat)
lon = math.radians(lon)
  #cast as float or integer division rounds this tiny angular distance down to zero
angularDirection = float(miles)/float(earthRadiusMiles)
if(direction=="NW"):
bearing = northwest
if(direction=="SE"):
bearing = southeast
  newLat = math.asin(math.sin(lat)*math.cos(angularDirection) + math.cos(lat)*math.sin(angularDirection)*math.cos(bearing))
newLon = lon + math.atan2(math.sin(bearing)*math.sin(angularDirection)*math.cos(lat), math.cos(angularDirection)-math.sin(lat)*math.sin(newLat))
return math.degrees(newLat), math.degrees(newLon)
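#Quick directional sanity check for makeBuffer (hypothetical coordinates):
#pushing a box corner 5 miles to the northwest should increase the latitude
#and decrease the longitude. Illustrative helper only.
def _example_buffer_check():
  lat, lon = makeBuffer(37.0, -122.0, 5, "NW")
  return lat > 37.0 and lon < -122.0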
#Generates unique hash for trip route urls
def makeURL(id):
id = id + time.strftime("%M%S")
hash = hashlib.md5(id).hexdigest()
url = hash[0:8]
while(urlExists(url,False)):
id = id + time.strftime("%M%S")
hash = hashlib.md5(id).hexdigest()
url = hash[0:8]
return url
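#End-to-end sketch of the matching flow (illustrative only; assumes the Flask
#app and database behind models.py/run.py are configured, and that the emails,
#coordinates and date below are placeholders):
def _example_matching_flow():
  driver = addDriver('driver@example.com', 'Dana', 45.5, -122.6, 44.0, -123.1, '2015-06-01')
  addPassenger('rider@example.com', 'Pat', 45.4, -122.65, 44.05, -123.05, '2015-06-01')
  #Passengers whose origin and destination fall inside the driver's buffered box
  matches = findMatchablePassengers(45.5, -122.6, 44.0, -123.1, '2015-06-01')
  if matches:
    pickPassenger(matches[0]['email'], driver.email, True)
  return matches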
| mit | 5,327,332,219,861,663,000 | 27.990164 | 146 | 0.708776 | false | 3.272391 | false | false | false |
aldebaran/qibuild | python/qibuild/test/projects/usefoopymodule/test.py | 1 | 1167 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
"""
This is an equivalent of a C++ program trying to load a
Python module using libqi, but written in Python.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
def main():
""" Main Entry Point """
from_env = os.environ.get("QI_ADDITIONAL_SDK_PREFIXES")
if not from_env:
sys.exit("QI_ADDITIONAL_SDK_PREFIXES not set")
prefixes = from_env.split(os.path.pathsep)
found = False
for prefix in prefixes:
candidate = os.path.join(prefix, "share", "qi", "module", "foo.mod")
if os.path.exists(candidate):
found = True
with open(candidate, "r") as fp:
contents = fp.read()
if contents != "python\n":
sys.exit("Expected python\\n, got: " + contents)
if not found:
sys.exit("foo.mod not found")
import foo
if __name__ == "__main__":
main()
| bsd-3-clause | -454,405,756,292,281,000 | 29.710526 | 84 | 0.615253 | false | 3.635514 | false | false | false |
MikeDacre/fyrd | fyrd/job.py | 1 | 56470 | # -*- coding: utf-8 -*-
"""
Class and methods to handle Job submission.
This module only defines a single object: the Job class.
"""
import os as _os
import sys as _sys
from uuid import uuid4 as _uuid
from time import sleep as _sleep
from datetime import datetime as _dt
from traceback import print_tb as _tb
# Try to use dill, revert to pickle if not found
import dill as _pickle
from six import reraise as _reraise
from six import text_type as _txt
from six import string_types as _str
from six import integer_types as _int
###############################################################################
# Our functions #
###############################################################################
from . import run as _run
from . import conf as _conf
from . import queue as _queue
from . import logme as _logme
from . import script_runners as _scrpts
from . import batch_systems as _batch
from . import ClusterError as _ClusterError
from .submission_scripts import Function as _Function
_options = _batch.options
__all__ = ['Job']
###############################################################################
# The Job Class #
###############################################################################
class Job(object):
"""Information about a single job on the cluster.
Holds information about submit time, number of cores, the job script,
and more.
Below are the core attributes and methods required to use this class,
note that this is an incomplete list.
Attributes
----------
id : str
The ID number for the job, only set once the job has been submitted
name : str
The name of the job
command : str or callable
The function or shell script that will be submitted
args : list
A list of arguments to the shell script or function in command
kwargs : dict
A dictionary of keyword arguments to the function (not shell script) in
command
state : str
A slurm-style one word description of the state of the job, one of:
- Not_Submitted
- queued
- running
- completed
- failed
submitted : bool
written : bool
done : bool
running : bool
dependencies : list
A list of dependencies associated with this job
out : str
The output of the function or a copy of stdout for a script
stdout : str
Any output to STDOUT
stderr : str
Any output to STDERR
exitcode : int
The exitcode of the running processes (the script runner if the Job is
a function).
submit_time : datetime
A datetime object for the time of submission
start : datetime
A datetime object for time execution started on the remote node.
end : datetime
A datetime object for time execution ended on the remote node.
runtime : timedelta
A timedelta object containing runtime.
files : list
A list of script files associated with this job
nodes : list
A list of nodes associated with this job
modules : list
A list of modules associated with this job
clean_files : bool
If True, auto-delete script and function files on job completion
clean_outputs : bool
If True, auto-delete script outputs and error files on job completion
kwds : dict
Keyword arguments to the batch system (e.g. mem, cores, walltime), this
is initialized by taking every additional keyword argument to the Job.
e.g. Job('echo hi', profile=large, walltime='00:20:00', mem='2GB') will
result in kwds containing {walltime: '00:20:00', mem: '2GB'}. There is
**no need to alter this manually**.
submit_args : list
List of parsed submit arguments that will be passed at runtime to the
submit function. **Generated within the Job object**, no need to set
manually, use the `kwds` attribute instead.
Methods
-------
initialize()
Use attributes to prep job for running
gen_scripts()
Create script files (but do not write them)
write(overwrite=True)
Write scripts to files
submit(wait_on_max_queue=True)
Submit the job if it is ready and the queue is sufficiently open.
resubmit(wait_on_max_queue=True)
Clean all internal states with `scrub()` and then resubmit
kill(confirm=True)
Immediately kill the currently running job
clean(delete_outputs=True, get_outputs=True)
Delete any files created by this object
scrub(confirm=True)
Clean everything and reset to an unrun state.
update(fetch_info=True)
Update our status from the queue
wait()
Block until the job is done
get()
Block until the job is done and then return the output (stdout if job
is a script), by default saves all outputs to self (i.e. .out, .stdout,
.stderr) and deletes all intermediate files before returning. If `save`
argument is `False`, does not delete the output files by default.
Notes
-----
Printing or reproducing the class will display detailed job information.
Both `wait()` and `get()` will update the queue every few seconds
(defined by the queue_update item in the config) and add queue information
to the job as they go.
If the job disappears from the queue with no information, it will be listed
as 'completed'.
All jobs have a .submission attribute, which is a Script object containing
the submission script for the job and the file name, plus a 'written' bool
that checks if the file exists.
In addition, some batch systems (e.g. SLURM) have an .exec_script
attribute, which is a Script object containing the shell command to run.
This difference is due to the fact that some SLURM systems execute multiple
lines of the submission file at the same time.
Finally, if the job command is a function, this object will also contain a
`.function` attribute, which contains the script to run the function.
"""
id = None
name = None
suffix = None
submitted = False
written = False
found = False
disappeared = False
submit_time = None
state = None
kind = None
# Arguments
kwds = None
kwargs = None
submit_args = None
# Runtime
nodes = None
cores = None
modules = None
# Files
outfile = None
errfile = None
# Scripts
submission = None
exec_script = None
function = None
imports = None
# Dependencies
dependencies = None
# Pickled output file for functions
poutfile = None
# Holds queue information in torque and slurm
queue_info = None
# Output tracking
_got_out = False
_got_stdout = False
_got_stderr = False
_got_exitcode = False
_found_files = False
_out = None
_stdout = None
_stderr = None
_exitcode = None
# Time tracking
_got_times = False
start = None
end = None
# Track update status
_updating = False
# Track preparations
initialized = False
scripts_ready = False
_kwargs = None
# Auto Cleaning
clean_files = _conf.get_option('jobs', 'clean_files')
clean_outputs = _conf.get_option('jobs', 'clean_outputs')
def __init__(self, command, args=None, kwargs=None, name=None, qtype=None,
profile=None, queue=None, **kwds):
"""Initialization function arguments.
Parameters
----------
command : function/str
The command or function to execute.
args : tuple/dict, optional
Optional arguments to add to command, particularly useful for
functions.
kwargs : dict, optional
Optional keyword arguments to pass to the command, only used for
functions.
name : str, optional
Optional name of the job. If not defined, guessed. If a job of the
same name is already queued, an integer job number (not the queue
number) will be added, ie. <name>.1
qtype : str, optional
Override the default queue type
profile : str, optional
The name of a profile saved in the conf
queue : fyrd.queue.Queue, optional
An already initiated Queue class to use.
kwds
*All other keywords are parsed into cluster keywords by the options
system.* For available keywords see `fyrd.option_help()`
"""
########################
# Sanitize arguments #
########################
_logme.log('Args pre-check: {}'.format(kwds), 'debug')
kwds = _options.check_arguments(kwds)
_logme.log('Args post-check: {}'.format(kwds), 'debug')
# Create a unique short UUID for this job
self.uuid = str(_uuid()).split('-')[0]
# Path handling
[
kwds, self.runpath, self.outpath, self.scriptpath
] = _conf.get_job_paths(kwds)
# Save command
self.command = command
self.args = args
self.kwargs = kwargs
self.profile = profile
# Get environment
if not _batch.MODE:
_batch.get_cluster_environment()
if not qtype:
qtype = _batch.MODE
if queue:
if not isinstance(queue, _queue.Queue):
raise TypeError(
                    'queue must be a fyrd.queue.Queue, got {0}'.format(type(queue))
)
self.queue = queue
else:
self.queue = _queue.default_queue(qtype)
self.batch = _batch.get_batch_system(qtype)
self.qtype = qtype
self.state = 'Not_Submitted'
# Save keywords for posterity and parsing
self.kwds = kwds
self.name = self._update_name(name)
##########################################################################
# Public Methods #
##########################################################################
################
# Properties #
################
@property
def files(self):
"""Build a list of files associated with this class."""
files = [self.submission]
if self.kind == 'script':
files.append(self.exec_script)
if self.kind == 'function':
files.append(self.function)
return files
@property
def runtime(self):
"""Return the runtime."""
if not self.done:
            _logme.log('Cannot get runtime as not yet complete.', 'warn')
return None
if not self.start:
self.get_times()
return self.end-self.start
@property
def done(self):
"""Check if completed or not.
Updates the Job and Queue.
Returns
-------
done : bool
"""
# We have the same statement twice to try and avoid updating.
if self.state in _batch.DONE_STATES:
return True
if not self._updating:
self.update()
if self.state in _batch.DONE_STATES:
return True
return False
@property
def running(self):
"""Check if running or not.
Updates the Job and Queue.
Returns
-------
running : bool
"""
# We have the same statement twice to try to avoid updating.
if self.state in _batch.ACTIVE_STATES:
return True
if not self._updating:
self.update()
if self.state in _batch.ACTIVE_STATES:
return True
return False
@property
def outfiles(self):
"""A list of all outfiles associated with this Job."""
outfiles = [self.outfile, self.errfile]
if self.poutfile:
outfiles.append(self.poutfile)
return outfiles
@property
def incomplete_outfiles(self):
"""A list of all outfiles that haven't already been fetched."""
outfiles = []
if self.outfile and not self._got_stdout:
outfiles.append(self.outfile)
if self.errfile and not self._got_stderr:
outfiles.append(self.errfile)
if self.poutfile and not self._got_out:
outfiles.append(self.poutfile)
return outfiles
@property
def exitcode(self):
"""Return exitcode."""
return self.get_exitcode()
@property
def code(self):
"""Return exitcode."""
return self.get_exitcode()
@property
def out(self):
"""Return output."""
return self.get_output()
@property
def stdout(self):
"""Return output."""
return self.get_stdout()
@property
def stderr(self):
"""Return stderr."""
return self.get_stderr()
@property
def err(self):
"""Return stderr."""
return self.get_stderr()
###############################
# Core Job Handling Methods #
###############################
def initialize(self):
"""Make self runnable using set attributes."""
kwds = self.kwds
# Override autoclean state (set in config file)
if 'clean_files' in kwds:
self.clean_files = kwds.pop('clean_files')
if 'clean_outputs' in kwds:
self.clean_outputs = kwds.pop('clean_outputs')
# Set suffix
self.suffix = kwds.pop('suffix') if 'suffix' in kwds \
else _conf.get_option('jobs', 'suffix')
# Merge in profile, this includes all args from the DEFAULT profile
        # as well, ensuring that those are always set at a minimum.
profile = self.profile if self.profile else 'DEFAULT'
prof = _conf.get_profile(profile)
if not prof:
raise _ClusterError('No profile found for {}'.format(profile))
        for k, v in prof.args.items():
if k not in kwds:
kwds[k] = v
# Use the default profile as a backup if any arguments missing
default_args = _conf.DEFAULT_PROFILES['DEFAULT']
default_args.update(_conf.get_profile('DEFAULT').args)
for opt, arg in default_args.items():
if opt not in kwds:
_logme.log('{} not in kwds, adding from default: {}:{}'
.format(opt, opt, arg), 'debug')
kwds[opt] = arg
# Set modules
self.modules = kwds.pop('modules') if 'modules' in kwds else None
if self.modules:
self.modules = _run.opt_split(self.modules, (',', ';'))
# Make sure args are a tuple
if self.args:
self.args = tuple(_run.listify(self.args))
# In case cores are passed as None
if 'nodes' not in kwds:
kwds['nodes'] = default_args['nodes']
if 'cores' not in kwds:
kwds['cores'] = default_args['cores']
self.nodes = kwds['nodes']
self.cores = kwds['cores']
# Set output files
if 'outfile' in kwds:
pth, fle = _os.path.split(kwds['outfile'])
if not pth:
pth = self.outpath
kwds['outfile'] = _os.path.join(pth, fle)
else:
kwds['outfile'] = _os.path.join(
self.outpath, '.'.join([self.name, self.suffix, 'out']))
if 'errfile' in kwds:
pth, fle = _os.path.split(kwds['errfile'])
if not pth:
pth = self.outpath
kwds['errfile'] = _os.path.join(pth, fle)
else:
kwds['errfile'] = _os.path.join(
self.outpath, '.'.join([self.name, self.suffix, 'err']))
self.outfile = kwds['outfile']
self.errfile = kwds['errfile']
# Check and set dependencies
if 'depends' in kwds:
dependencies = _run.listify(kwds.pop('depends'))
self.dependencies = []
            errmsg = 'Dependencies must be a number, numeric string or Job'
for dependency in dependencies:
if not isinstance(dependency, (_str, _txt, Job)):
raise _ClusterError(errmsg)
self.dependencies.append(dependency)
# Save parsed keywords as _kwargs
self._kwargs = kwds
self.initialized = True
return self
def gen_scripts(self):
"""Create the script objects from the set parameters."""
if not self.initialized:
self.initialize()
######################################
# Command and Function Preparation #
######################################
command = self.command
args = self.args
kwargs = self.kwargs # Not self._kwargs
name = self._update_name()
kwds = self._kwargs
# Get imports
imports = kwds.pop('imports') if 'imports' in kwds else None
# Get syspaths
syspaths = kwds.pop('syspaths') if 'syspaths' in kwds else None
# Split out sys.paths from imports and set imports in self
if imports:
self.imports = []
syspaths = syspaths if syspaths else []
for i in imports:
if i.startswith('sys.path.append')\
or i.startswith('sys.path.insert'):
syspaths.append(i)
else:
self.imports.append(i)
# Function specific initialization
if callable(command):
self.kind = 'function'
script_file = _os.path.join(
self.scriptpath, '{}_func.{}.py'.format(name, self.suffix)
)
self.poutfile = self.outfile + '.func.pickle'
self.function = _Function(
file_name=script_file, function=command, args=args,
kwargs=kwargs, imports=self.imports, syspaths=syspaths,
outfile=self.poutfile
)
            # Collapse the command into a python call to the function script
executable = '#!/usr/bin/env python{}'.format(
_sys.version_info.major) if _conf.get_option(
'jobs', 'generic_python') else _sys.executable
command = '{} {}'.format(executable, self.function.file_name)
args = None
else:
self.kind = 'script'
self.poutfile = None
# Collapse args into command
            command = command + ' ' + ' '.join(args) if args else command
#####################
# Script Creation #
#####################
# Build execution wrapper with modules
modstr = ''
if self.modules:
for module in self.modules:
modstr += 'module load {}\n'.format(module)
# Add all of the keyword arguments at once
opt_string, submit_args = _options.options_to_string(kwds, self.qtype)
precmd = opt_string + '\n\n' + modstr
self.submit_args = submit_args
# Create queue-dependent scripts
self.submission, self.exec_script = self.batch.gen_scripts(
self, command, args, precmd, modstr
)
self.scripts_ready = True
return self
def write(self, overwrite=True):
"""Write all scripts.
Parameters
----------
overwrite : bool, optional
Overwrite existing files, defaults to True.
Returns
-------
self : Job
"""
if not self.scripts_ready:
self.gen_scripts()
_logme.log('Writing files, overwrite={}'.format(overwrite), 'debug')
self.submission.write(overwrite)
if self.exec_script:
self.exec_script.write(overwrite)
if self.function:
self.function.write(overwrite)
self.written = True
return self
def submit(self, wait_on_max_queue=True, additional_keywords=None,
max_jobs=None):
"""Submit this job.
To disable max_queue_len, set it to 0. None will allow override by
the default settings in the config file, and any positive integer will
        be interpreted as the maximum queue length.
Parameters
----------
wait_on_max_queue : bool, optional
Block until queue limit is below the maximum before submitting.
additional_keywords : dict, optional
Pass this dictionary to the batch system submission function,
not necessary.
max_jobs : int, optional
Override the maximum number of jobs to wait for
Returns
-------
self : Job
"""
if self.submitted:
_logme.log('Not submitting, already submitted.', 'warn')
return self
if not self.written:
self.write()
# Check dependencies
dependencies = []
if self.dependencies:
for depend in self.dependencies:
if isinstance(depend, Job):
if not depend.id:
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'has not been submitted',
'error'
)
return self
dependencies.append(str(depend.id))
else:
dependencies.append(str(depend))
# Wait on the queue if necessary
if wait_on_max_queue:
if not self._updating:
self.update()
self.queue.wait_to_submit(max_jobs)
# Only include queued or running dependencies
self.queue._update() # Force update
depends = []
for depend in dependencies:
dep_check = self.queue.check_dependencies(depend)
if dep_check == 'absent':
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'is not in the queue',
'error'
)
return self
elif dep_check == 'good':
_logme.log(
'Dependency {} is complete, skipping'
.format(depend), 'debug'
)
elif dep_check == 'bad':
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'has failed',
'error'
)
return self
elif dep_check == 'active':
if self.queue.jobs[depend].state == 'completeing':
continue
_logme.log('Dependency {} is {}, adding to deps'
.format(depend, self.queue.jobs[depend].state),
'debug')
depends.append(depend)
else:
# This shouldn't happen ever
raise _ClusterError('fyrd.queue.Queue.check_dependencies() ' +
'returned an unrecognized value {0}'
.format(dep_check))
self.id = self.batch.submit(
self.submission.file_name,
dependencies=depends,
job=self, args=self.submit_args,
kwds=additional_keywords
)
self.submitted = True
self.submit_time = _dt.now()
self.state = 'submitted'
if not self.submitted:
raise _ClusterError('Submission appears to have failed, this '
"shouldn't happen")
return self
def resubmit(self, wait_on_max_queue=True, cancel_running=None):
"""Attempt to auto resubmit, deletes prior files.
Parameters
----------
wait_on_max_queue : bool, optional
Block until queue limit is below the maximum before submitting.
cancel_running : bool or None, optional
If the job is currently running, cancel it before resubmitting.
If None (default), will ask the user.
To disable max_queue_len, set it to 0. None will allow override by
the default settings in the config file, and any positive integer will
        be interpreted as the maximum queue length.
Returns
-------
self : Job
"""
if self.running:
if cancel_running is None:
cancel_running = _run.get_yesno(
'Job currently running, cancel before resubmitting?', 'y'
)
if cancel_running:
self.kill(confirm=False)
self.scrub(confirm=False)
# Rerun
self.initialize()
self.gen_scripts()
self.write()
return self.submit(wait_on_max_queue)
def kill(self, confirm=True):
"""Kill the running job.
Parameters
----------
        confirm : bool, optional
            Ask the user for confirmation before killing the running job.
Returns
-------
self : Job
"""
if not self.submitted:
_logme.log('Job not submitted, cannot kill', 'warn')
return self
if self.done:
_logme.log('Job completed, cannot kill', 'warn')
return self
if confirm:
if not _run.get_yesno(
'This will terminate the running job, continue?', 'n'
):
return self
self.batch.kill(self.id)
return self
def clean(self, delete_outputs=None, get_outputs=True):
"""Delete all scripts created by this module, if they were written.
Parameters
----------
delete_outputs : bool, optional
also delete all output and err files, but get their contents first.
get_outputs : bool, optional
if delete_outputs, save outputs before deleting.
Returns
-------
self : Job
"""
_logme.log('Cleaning outputs, delete_outputs={}'
.format(delete_outputs), 'debug')
if not isinstance(delete_outputs, bool):
delete_outputs = self.clean_outputs
assert isinstance(delete_outputs, bool)
for jobfile in [self.submission, self.exec_script, self.function]:
if jobfile:
jobfile.clean()
if delete_outputs:
_logme.log('Deleting output files.', 'debug')
if get_outputs:
self.fetch_outputs(delete_files=True)
for f in self.outfiles:
if _os.path.isfile(f):
                    _logme.log('Deleting {}'.format(f), 'debug')
_os.remove(f)
return self
def scrub(self, confirm=True):
"""Clean everything and reset to an unrun state.
Parameters
----------
confirm : bool, optional
Get user input before proceeding
Returns
-------
self : Job
"""
msg = ("This will delete all outputs stored in this job, as well "
"as all output files, job files, and scripts. Are you sure "
"you want to do this?")
        if confirm:
            if not _run.get_yesno(msg, default='n'):
                return self
# Clean old set
self.clean(delete_outputs=True)
# Reset runtime attributes
self.initialized = False
self.scripts_ready = False
self.written = False
self.submitted = False
self.id = None
self.found = False
self.queue_info = None
self.state = 'Not_Submitted'
self._got_out = False
self._got_stdout = False
self._got_stderr = False
self._got_exitcode = False
self._out = None
self._stdout = None
self._stderr = None
self._exitcode = None
self._got_times = False
self._updating = False
self._found_files = False
self.start = None
self.end = None
return self.update()
######################
# Queue Management #
######################
def update(self, fetch_info=True):
"""Update status from the queue.
Parameters
----------
fetch_info : bool, optional
Fetch basic job info if complete.
Returns
-------
self : Job
"""
if not self._updating:
self._update(fetch_info)
else:
_logme.log('Already updating, aborting.', 'debug')
return self
def update_queue_info(self):
"""Set (and return) queue_info from the queue even if done."""
_logme.log('Updating queue_info', 'debug')
queue_info1 = self.queue[self.id]
self.queue.update()
queue_info2 = self.queue[self.id]
if queue_info2:
self.queue_info = queue_info2
elif queue_info1:
self.queue_info = queue_info1
elif self.queue_info is None and self.submitted:
_logme.log('Cannot find self in the queue and queue_info is empty',
'warn')
return self.queue_info
#################################
# Output Handling and Waiting #
#################################
def wait(self):
"""Block until job completes.
Returns
-------
success : bool or str
True if exitcode == 0, False if not, 'disappeared' if job lost from
queue.
"""
if not self.submitted:
if _conf.get_option('jobs', 'auto_submit'):
_logme.log('Auto-submitting as not submitted yet', 'debug')
self.submit()
else:
_logme.log('Cannot wait for result as job has not been ' +
'submitted', 'warn')
return False
self.update(fetch_info=False)
if not self.done:
_logme.log('Waiting for self {}'.format(self.name), 'debug')
status = self.queue.wait(self.id, return_disp=True)
if status == 'disappeared':
self.state = status
elif status is not True:
return False
else:
if not self._updating:
self.update()
if self.get_exitcode(update=False) != 0:
_logme.log('Job failed with exitcode {}'
.format(self.exitcode), 'debug')
return False
if self._wait_for_files(caution_message=False):
if not self._updating:
self.update()
if self.state == 'disappeared':
                _logme.log('Job files found for disappeared job, assuming '
'success', 'info')
return 'disappeared'
return True
else:
if self.state == 'disappeared':
_logme.log('Disappeared job has no output files, assuming '
'failure', 'error')
return False
def get(self, save=True, cleanup=None, delete_outfiles=None,
del_no_save=None, raise_on_error=True):
"""Block until job completed and return output of script/function.
By default saves all outputs to this class and deletes all intermediate
files.
Parameters
----------
save : bool, optional
Save all outputs to the class also (advised)
cleanup : bool, optional
Clean all intermediate files after job completes.
delete_outfiles : bool, optional
Clean output files after job completes.
del_no_save : bool, optional
Delete output files even if `save` is `False`
raise_on_error : bool, optional
If the returned output is an Exception, raise it.
Returns
-------
str
Function output if Function, else STDOUT
"""
_logme.log(('Getting outputs, cleanup={}, autoclean={}, '
'delete_outfiles={}').format(
cleanup, self.clean_files, delete_outfiles
), 'debug')
# Wait for queue
status = self.wait()
if status is not True:
if status == 'disappeared':
msg = 'Job disappeared from queue'
_logme.log(msg + ', attempting to get '
'outputs', 'debug')
else:
msg = 'Wait failed'
_logme.log(msg + ', attempting to get outputs anyway',
'debug')
try:
self.fetch_outputs(save=save, delete_files=False,
get_stats=False)
except IOError:
_logme.log(msg + ' and files could not be found, job must '
'have failed', 'error')
if raise_on_error:
raise
return
if status != 'disappeared':
return
else:
# Get output
_logme.log('Wait complete, fetching outputs', 'debug')
self.fetch_outputs(save=save, delete_files=False)
out = self.out if save else self.get_output(save=save, update=False)
if isinstance(out, tuple) and issubclass(out[0], Exception):
if raise_on_error:
_reraise(*out)
else:
_logme.log('Job failed with exception {}'.format(out))
print(_tb(out[2]))
return out
# Cleanup
if cleanup is None:
cleanup = self.clean_files
else:
assert isinstance(cleanup, bool)
if delete_outfiles is None:
delete_outfiles = self.clean_outputs
if save is False:
delete_outfiles = del_no_save if del_no_save is not None else False
if cleanup:
self.clean(delete_outputs=delete_outfiles)
return out
def get_output(self, save=True, delete_file=None, update=True,
raise_on_error=True):
"""Get output of function or script.
This is the same as stdout for a script, or the function output for
a function.
By default, output file is kept unless delete_file is True or
self.clean_files is True.
Parameters
----------
save : bool, optional
Save the output to self.out, default True. Would be a good idea to
set to False if the output is huge.
delete_file : bool, optional
Delete the output file when getting
update : bool, optional
Update job info from queue first.
raise_on_error : bool, optional
If the returned output is an Exception, raise it.
Returns
-------
output : anything
The output of the script or function. Always a string if script.
"""
_logme.log(('Getting output, save={}, clean_files={}, '
'delete_file={}').format(
save, self.clean_files, delete_file
), 'debug')
if delete_file is None:
delete_file = self.clean_outputs
if self.kind == 'script':
return self.get_stdout(save=save, delete_file=delete_file,
update=update)
if self.done and self._got_out:
_logme.log('Getting output from _out', 'debug')
return self._out
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Cannot get pickled output before job completes',
'warn')
return None
_logme.log('Getting output from {}'.format(self.poutfile), 'debug')
if _os.path.isfile(self.poutfile):
with open(self.poutfile, 'rb') as fin:
out = _pickle.load(fin)
if delete_file is True or self.clean_files is True:
_logme.log('Deleting {}'.format(self.poutfile),
'debug')
_os.remove(self.poutfile)
if save:
self._out = out
self._got_out = True
if _run.is_exc(out):
_logme.log('{} failed with exception {}'.format(self, out[1]),
'error')
if raise_on_error:
_reraise(*out)
return out
else:
_logme.log('No file at {} even though job has completed!'
.format(self.poutfile), 'critical')
raise IOError('File not found: {}'.format(self.poutfile))
def get_stdout(self, save=True, delete_file=None, update=True):
"""Get stdout of function or script, same for both.
By default, output file is kept unless delete_file is True or
self.clean_files is True.
Also sets self.start and self.end from the contents of STDOUT if
possible.
        Parameters
        ----------
save : bool, optional
Save the output to self.stdout, default True. Would be a good idea
to set to False if the output is huge.
delete_file : bool, optional
Delete the stdout file when getting
update : bool, optional
Update job info from queue first.
Returns
-------
str
The contents of STDOUT, with runtime info and trailing newline
removed.
"""
if delete_file is None:
delete_file = self.clean_outputs
_logme.log(('Getting stdout, save={}, clean_files={}, '
'delete_file={}').format(
save, self.clean_files, delete_file
), 'debug')
if self.done and self._got_stdout:
_logme.log('Getting stdout from _stdout', 'debug')
return self._stdout
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Job not done, attempting to get current STDOUT ' +
'anyway', 'info')
_logme.log('Getting stdout from {}'.format(self._kwargs['outfile']),
'debug')
if _os.path.isfile(self._kwargs['outfile']):
with open(self._kwargs['outfile']) as fin:
stdout = fin.read()
if stdout:
stdouts = stdout.strip().split('\n')
if len(stdouts) < 3 or stdouts[-3] != 'Done':
_logme.log('STDOUT incomplete, returning as is', 'info')
return stdout
if self.done:
self.get_times(update=False, stdout=stdout)
self.get_exitcode(update=False, stdout=stdout)
stdout = '\n'.join(stdouts[2:-3]) + '\n'
if delete_file is True or self.clean_files is True:
_logme.log('Deleting {}'.format(self._kwargs['outfile']),
'debug')
_os.remove(self._kwargs['outfile'])
if save:
self._stdout = stdout
if self.done:
self._got_stdout = True
return stdout
else:
_logme.log('No file at {}, cannot get stdout'
.format(self._kwargs['outfile']), 'warn')
return None
def get_stderr(self, save=True, delete_file=None, update=True):
"""Get stderr of function or script, same for both.
By default, output file is kept unless delete_file is True or
self.clean_files is True.
Parameters
----------
save : bool, optional
Save the output to self.stdout, default True. Would be a good idea
to set to False if the output is huge.
delete_file : bool, optional
Delete the stdout file when getting
update : bool, optional
Update job info from queue first.
Returns
-------
str
The contents of STDERR, with trailing newline removed.
"""
if delete_file is None:
delete_file = self.clean_outputs
_logme.log(('Getting stderr, save={}, clean_files={}, '
'delete_file={}').format(
save, self.clean_files, delete_file
), 'debug')
if self.done and self._got_stderr:
_logme.log('Getting stderr from _stderr', 'debug')
return self._stderr
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Job not done, attempting to get current STDERR ' +
'anyway', 'info')
_logme.log('Getting stderr from {}'.format(self._kwargs['errfile']),
'debug')
if _os.path.isfile(self._kwargs['errfile']):
with open(self._kwargs['errfile']) as fin:
stderr = fin.read()
if delete_file is True or self.clean_files is True:
_logme.log('Deleting {}'.format(self._kwargs['errfile']),
'debug')
_os.remove(self._kwargs['errfile'])
if save:
self._stderr = stderr
if self.done:
self._got_stderr = True
return stderr
else:
_logme.log('No file at {}, cannot get stderr'
.format(self._kwargs['errfile']), 'warn')
return None
def get_times(self, update=True, stdout=None):
"""Get stdout of function or script, same for both.
Sets self.start and self.end from the contents of STDOUT if
possible.
Parameters
----------
update : bool, optional
Update job info from queue first.
stdout : str, optional
Pass existing stdout for use
Returns
-------
start : datetime.datetime
end : datetime.datetime
"""
_logme.log('Getting times', 'debug')
if self.done and self._got_times:
_logme.log('Getting times from self.start, self.end', 'debug')
return self.start, self.end
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Cannot get times until job is complete.', 'warn')
return None, None
_logme.log('Getting times from {}'.format(self._kwargs['outfile']),
'debug')
if not stdout:
if _os.path.isfile(self._kwargs['outfile']):
with open(self._kwargs['outfile']) as fin:
stdout = fin.read()
else:
_logme.log('No file at {}, cannot get times'
.format(self._kwargs['outfile']), 'warn')
return None
stdouts = stdout.strip().split('\n')
if len(stdouts) < 3 or stdouts[-3] != 'Done':
_logme.log('STDOUT incomplete, cannot get times', 'warn')
return None
# Get times
timefmt = '%y-%m-%d-%H:%M:%S'
try:
self.start = _dt.strptime(stdouts[0], timefmt)
self.end = _dt.strptime(stdouts[-1], timefmt)
except ValueError as err:
_logme.log('Time parsing failed with value error; ' +
'{}. '.format(err) + 'This may be because you ' +
                       'are running a script that does not ' +
'include time tracking', 'debug')
self._got_times = True
return self.start, self.end
def get_exitcode(self, update=True, stdout=None):
"""Try to get the exitcode.
Parameters
----------
update : bool, optional
Update job info from queue first.
stdout : str, optional
Pass existing stdout for use
Returns
-------
exitcode : int
"""
_logme.log('Getting exitcode', 'debug')
if self.done and self._got_exitcode:
_logme.log('Getting exitcode from _exitcode', 'debug')
return self._exitcode
if update and not self._updating and not self.done:
self.update()
if not self.done:
_logme.log('Job is not complete, no exit code yet', 'info')
return None
if self.state == 'disappeared':
_logme.log('Cannot get exitcode for disappeared job', 'debug')
return 0
code = None
if not stdout and _os.path.isfile(self._kwargs['outfile']):
with open(self._kwargs['outfile']) as fin:
stdout = fin.read()
if stdout:
stdouts = stdout.strip().split('\n')
if len(stdouts) > 3 and stdouts[-3] == 'Done':
if stdouts[-2].startswith('Code: '):
code = int(stdouts[-2].split(':')[-1].strip())
if code is None:
_logme.log('Getting exitcode from queue', 'debug')
if not self.queue_info:
self.queue_info = self.queue[self.id]
if hasattr(self.queue_info, 'exitcode'):
code = self.queue_info.exitcode
if code is None:
_logme.log('Failed to get exitcode for job', 'warn')
return None
self._exitcode = code
self._got_exitcode = True
if code != 0:
self.state = 'failed'
_logme.log('Job {} failed with exitcode {}'
.format(self.name, code), 'error')
return code
def fetch_outputs(self, save=True, delete_files=None, get_stats=True):
"""Save all outputs in their current state. No return value.
This method does not wait for job completion, but merely gets the
outputs. To wait for job completion, use `get()` instead.
Parameters
----------
save : bool, optional
Save all outputs to the class also (advised)
delete_files : bool, optional
Delete the output files when getting, only used if save is True
get_stats : bool, optional
Try to get exitcode.
"""
_logme.log('Saving outputs to self, delete_files={}'
.format(delete_files), 'debug')
if not self._updating:
self.update()
if delete_files is None:
delete_files = self.clean_outputs
if not self._got_exitcode and get_stats:
self.get_exitcode(update=False)
if not self._got_times:
self.get_times(update=False)
if save:
self.get_output(save=True, delete_file=delete_files, update=False)
self.get_stdout(save=True, delete_file=delete_files, update=False)
self.get_stderr(save=True, delete_file=delete_files, update=False)
##############################
# Minor management methods #
##############################
def get_keywords(self):
"""Return a list of the keyword arguments used to make the job."""
return self.kwds
def set_keywords(self, kwds, replace=False):
"""Set the job keywords, just updates self.kwds.
Parameters
----------
kwds : dict
Set of valid arguments.
replace : bool, optional
            Overwrite the keyword arguments instead of updating.
"""
kwds = _options.check_arguments(kwds)
if replace:
self.kwds = kwds
else:
for key, value in kwds.items():
self.kwds[key] = value
###############
# Internals #
###############
def _update(self, fetch_info=True):
"""Update status from the queue.
Parameters
----------
fetch_info : bool, optional
Fetch basic job info if complete.
"""
_logme.log('Updating job.', 'debug')
self._updating = True
if self.done or not self.submitted:
self._updating = False
return
self.queue.update()
if self.submitted and self.id:
queue_info = self.queue[self.id]
if queue_info:
assert self.id == queue_info.id
self.found = True
self.queue_info = queue_info
self.state = self.queue_info.state
elif self.found:
_logme.log('Job appears to have disappeared, waiting for '
'reappearance, this may take a while', 'warn')
status = self.wait()
if status == 'disappeared':
_logme.log('Job disappeared, but the output files are '
                               'present, assuming completion', 'info')
self.state = 'completed'
self.disappeared = True
elif not status:
_logme.log('Job appears to have failed and disappeared',
'error')
        # If the job never appears in the queue after a grace period, assume
        # trouble and check the output files for completion
elif self.submitted and (_dt.now()-self.submit_time).seconds > 360:
if self._wait_for_files(btme=4, caution_message=False):
self.state = 'completed'
self.disappeared = True
_logme.log('Job never appeared in the queue, but '
'outfiles still exist, assuming completion.',
'warn')
else:
self.state = 'failed'
self.disappeared = True
s = (_dt.now()-self.submit_time).seconds
_logme.log('Job not in queue after {} seconds '.format(s) +
'of searching and no outputs found, assuming '
'failure.', 'error')
elif self.submitted and (_dt.now()-self.submit_time).seconds > 30:
if self._wait_for_files(btme=1, caution_message=False):
self.state = 'completed'
self.disappeared = True
_logme.log('Job never appeared in the queue, but '
'outfiles still exist, assuming completion.',
'warn')
if self.done and fetch_info:
if self._wait_for_files(btme=1, caution_message=False):
if not self._got_exitcode:
self.get_exitcode(update=False)
if not self._got_times:
self.get_times(update=False)
self._updating = False
def _wait_for_files(self, btme=None, caution_message=False):
"""Block until files appear up to 'file_block_time' in config file.
Aborts after 2 seconds if job exit code is not 0.
Parameters
----------
btme : int, optional
Number of seconds to try for before giving up, default set in
config file.
caution_message : bool, optional
Display a message if this is taking a while.
Returns
-------
bool
True if files found
"""
if self._found_files:
_logme.log('Already found files, not waiting again', 'debug')
return True
wait_time = 0.1 # seconds
if btme:
lvl = 'debug'
else:
lvl = 'warn'
btme = _conf.get_option('jobs', 'file_block_time', 30)
start = _dt.now()
dsp = False
_logme.log('Checking for output files', 'debug')
while True:
runtime = (_dt.now() - start).seconds
if caution_message and runtime > 1:
_logme.log('Job complete.', 'info')
_logme.log('Waiting for output files to appear.', 'info')
caution_message = False
if not dsp and runtime > 20:
_logme.log('Still waiting for output files to appear',
'info')
dsp = True
count = 0
outfiles = self.incomplete_outfiles
tlen = len(outfiles)
if not outfiles:
_logme.log('No incomplete outfiles, assuming all found in ' +
'{} seconds'.format(runtime), 'debug')
break
for i in outfiles:
if _os.path.isfile(i):
count += 1
if count == tlen:
_logme.log('All output files found in {} seconds'
.format(runtime), 'debug')
break
_sleep(wait_time)
if runtime > btme:
_logme.log('Job files have not appeared for ' +
'>{} seconds'.format(btme), lvl)
return False
if not self._updating:
self.update()
if runtime > 2 and self.get_exitcode(update=False) != 0:
_logme.log('Job failed with exit code {}.'
.format(self.exitcode) + ' Cannot find files.',
'error')
return False
self._found_files = True
return True
def _update_name(self, name=None):
"""Make sure the job name is unique.
Sets
----
self.name
Parameters
----------
name : str, optional
            A name override; if not provided, self.name is used
Returns
-------
name : str
"""
# Set name
name = name if name else self.name
if not name:
if callable(self.command):
strcmd = str(self.command).strip('<>')
parts = strcmd.split(' ')
if parts[0] == 'bound':
name = '_'.join(parts[2:3])
else:
parts.remove('function')
try:
parts.remove('built-in')
except ValueError:
pass
name = parts[0]
else:
name = self.command.split(' ')[0].split('/')[-1]
# Make sure name not in queue
if '.' not in name or not name.split('.')[-1] == self.uuid:
name = '{}.{}'.format(name, self.uuid)
self.name = name
return name
def __repr__(self):
"""Return simple job information."""
if not self.initialized:
self.initialize()
self.update()
outstr = "Job:{name}<{mode}:{qtype}".format(
name=self.name, mode=self.kind, qtype=self.qtype)
if self.submitted:
outstr += ':{}'.format(self.id)
outstr += "(command:{cmnd})".format(cmnd=self.command)
if self.submitted or self.done:
outstr += self.state.upper()
elif self.written:
outstr += "WRITTEN"
else:
outstr += "NOT_SUBMITTED"
outstr += ">"
return outstr
def __str__(self):
"""Print job name and ID + status."""
if not self._updating:
self.update()
return "Job: {name} ID: {id}, state: {state}".format(
name=self.name, id=self.id, state=self.state)
def __int__(self):
"""Return integer of ID."""
if self.id:
            if str(self.id).isdigit():
                return int(self.id)
_logme.log('No ID yet.', 'error')
return 0
| mit | 1,620,173,737,660,842,800 | 34.249688 | 79 | 0.518399 | false | 4.58621 | false | false | false |
hagabbar/pycbc_copy | pycbc/io/hdf.py | 1 | 31952 | # convenience classes for accessing hdf5 trigger files
# the 'get_column()' method is implemented parallel to
# the existing pylal.SnglInspiralUtils functions
import h5py
import numpy as np
import logging
import inspect
from lal import LIGOTimeGPS, YRJUL_SI
from pycbc_glue.ligolw import ligolw
from pycbc_glue.ligolw import table
from pycbc_glue.ligolw import lsctables
from pycbc_glue.ligolw import ilwd
from pycbc_glue.ligolw import utils as ligolw_utils
from pycbc_glue.ligolw.utils import process as ligolw_process
from pycbc import version as pycbc_version
from pycbc.tmpltbank import return_search_summary
from pycbc.tmpltbank import return_empty_sngl
from pycbc import events, conversions, pnutils
class HFile(h5py.File):
""" Low level extensions to the capabilities of reading an hdf5 File
"""
def select(self, fcn, *args, **kwds):
""" Return arrays from an hdf5 file that satisfy the given function
Parameters
----------
fcn : a function
A function that accepts the same number of argument as keys given
and returns a boolean array of the same length.
args : strings
A variable number of strings that are keys into the hdf5. These must
refer to arrays of equal length.
chunksize : {1e6, int}, optional
Number of elements to read and process at a time.
return_indices : bool, optional
If True, also return the indices of elements passing the function.
Returns
-------
values : np.ndarrays
A variable number of arrays depending on the number of keys into
the hdf5 file that are given. If return_indices is True, the first
element is an array of indices of elements passing the function.
>>> f = HFile(filename)
>>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
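        A sketch of the optional keywords; the 'H1/chisq' dataset name here is
        an assumption for illustration only:
        >>> idx, snr, chisq = f.select(lambda s, c: (s > 6) & (c < 10),
        ...                            'H1/snr', 'H1/chisq',
        ...                            return_indices=True, chunksize=2**20)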
"""
# get references to each array
refs = {}
data = {}
for arg in args:
refs[arg] = self[arg]
data[arg] = []
return_indices = kwds.get('return_indices', False)
indices = np.array([], dtype=np.uint64)
# To conserve memory read the array in chunks
chunksize = kwds.get('chunksize', int(1e6))
size = len(refs[arg])
i = 0
while i < size:
r = i + chunksize if i + chunksize < size else size
            # Read each chunk's worth of data and find where it passes the function
partial = [refs[arg][i:r] for arg in args]
keep = fcn(*partial)
if return_indices:
indices = np.concatenate([indices, np.flatnonzero(keep) + i])
            # store only the results that pass the function
for arg, part in zip(args, partial):
data[arg].append(part[keep])
i += chunksize
# Combine the partial results into full arrays
if len(args) == 1:
res = np.concatenate(data[args[0]])
if return_indices:
return indices, res
else:
return res
else:
res = tuple(np.concatenate(data[arg]) for arg in args)
if return_indices:
return (indices,) + res
else:
return res
class DictArray(object):
""" Utility for organizing sets of arrays of equal length.
Manages a dictionary of arrays of equal length. This can also
be instantiated with a set of hdf5 files and the key values. The full
data is always in memory and all operations create new instances of the
DictArray.
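    Example (the column names and values are illustrative only):
    >>> da = DictArray(data={'snr': np.array([5., 6.]),
    ...                      'time': np.array([10., 20.])})
    >>> combined = da + da                  # concatenates each column
    >>> first_two = combined.select([0, 1])
    >>> da.snr                              # columns are also set as attributes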
"""
def __init__(self, data=None, files=None, groups=None):
""" Create a DictArray
Parameters
----------
data: dict, optional
Dictionary of equal length numpy arrays
files: list of filenames, optional
            List of hdf5 file filenames. Incompatible with the `data` option.
        groups: list of strings
            List of keys into each file. Required by the `files` option.
"""
self.data = data
if files:
self.data = {}
for g in groups:
self.data[g] = []
for f in files:
d = HFile(f)
for g in groups:
if g in d:
self.data[g].append(d[g][:])
d.close()
for k in self.data:
if not len(self.data[k]) == 0:
self.data[k] = np.concatenate(self.data[k])
for k in self.data:
setattr(self, k, self.data[k])
def _return(self, data):
return self.__class__(data=data)
def __len__(self):
return len(self.data[self.data.keys()[0]])
def __add__(self, other):
data = {}
for k in self.data:
data[k] = np.concatenate([self.data[k], other.data[k]])
return self._return(data=data)
def select(self, idx):
""" Return a new DictArray containing only the indexed values
"""
data = {}
for k in self.data:
data[k] = self.data[k][idx]
return self._return(data=data)
def remove(self, idx):
""" Return a new DictArray that does not contain the indexed values
"""
data = {}
for k in self.data:
data[k] = np.delete(self.data[k], idx)
return self._return(data=data)
class StatmapData(DictArray):
def __init__(self, data=None, seg=None, attrs=None,
files=None):
groups = ['stat', 'time1', 'time2', 'trigger_id1', 'trigger_id2',
'template_id', 'decimation_factor', 'timeslide_id']
super(StatmapData, self).__init__(data=data, files=files, groups=groups)
if data:
self.seg=seg
self.attrs=attrs
elif files:
f = HFile(files[0], "r")
self.seg = f['segments']
self.attrs = f.attrs
def _return(self, data):
return self.__class__(data=data, attrs=self.attrs, seg=self.seg)
def cluster(self, window):
""" Cluster the dict array, assuming it has the relevant Coinc colums,
time1, time2, stat, and timeslide_id
"""
# If no events, do nothing
if len(self.time1) == 0 or len(self.time2) == 0:
return self
from pycbc.events import cluster_coincs
interval = self.attrs['timeslide_interval']
cid = cluster_coincs(self.stat, self.time1, self.time2,
self.timeslide_id, interval, window)
return self.select(cid)
def save(self, outname):
f = HFile(outname, "w")
for k in self.attrs:
f.attrs[k] = self.attrs[k]
for k in self.data:
f.create_dataset(k, data=self.data[k],
compression='gzip',
compression_opts=9,
shuffle=True)
for key in self.seg.keys():
f['segments/%s/start' % key] = self.seg[key]['start'][:]
f['segments/%s/end' % key] = self.seg[key]['end'][:]
f.close()
class FileData(object):
def __init__(self, fname, group=None, columnlist=None, filter_func=None):
"""
Parameters
----------
group : string
Name of group to be read from the file
columnlist : list of strings
Names of columns to be read; if None, use all existing columns
filter_func : string
String should evaluate to a Boolean expression using attributes
of the class instance derived from columns: ex. 'self.snr < 6.5'
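        Example; the file, group and filter values below are illustrative:
        >>> d = FileData('TRIGGERS.hdf', group='H1',
        ...              filter_func='self.snr < 6.5')
        >>> ends = d.get_column('end_time')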
"""
if not fname: raise RuntimeError("Didn't get a file!")
self.fname = fname
self.h5file = HFile(fname, "r")
if group is None:
if len(self.h5file.keys()) == 1:
group = self.h5file.keys()[0]
else:
raise RuntimeError("Didn't get a group!")
self.group_key = group
self.group = self.h5file[group]
self.columns = columnlist if columnlist is not None \
else self.group.keys()
self.filter_func = filter_func
self._mask = None
def close(self):
self.h5file.close()
@property
def mask(self):
"""
Create a mask implementing the requested filter on the datasets
Returns
-------
array of Boolean
True for dataset indices to be returned by the get_column method
"""
if self.filter_func is None:
raise RuntimeError("Can't get a mask without a filter function!")
else:
# only evaluate if no previous calculation was done
if self._mask is None:
# get required columns into the namespace as numpy arrays
for column in self.columns:
if column in self.filter_func:
setattr(self, column, self.group[column][:])
self._mask = eval(self.filter_func)
return self._mask
def get_column(self, col):
"""
Parameters
----------
col : string
Name of the dataset to be returned
Returns
-------
numpy array
Values from the dataset, filtered if requested
"""
# catch corner case with an empty file (group with no datasets)
if not len(self.group.keys()):
return np.array([])
vals = self.group[col]
if self.filter_func:
return vals[self.mask]
else:
return vals[:]
class DataFromFiles(object):
def __init__(self, filelist, group=None, columnlist=None, filter_func=None):
self.files = filelist
self.group = group
self.columns = columnlist
self.filter_func = filter_func
def get_column(self, col):
"""
Loop over files getting the requested dataset values from each
Parameters
----------
col : string
Name of the dataset to be returned
Returns
-------
numpy array
Values from the dataset, filtered if requested and
concatenated in order of file list
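        Example (the file list and column name are illustrative):
        >>> dff = DataFromFiles(['a.hdf', 'b.hdf'], group='H1')
        >>> snr = dff.get_column('snr')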
"""
logging.info('getting %s' % col)
vals = []
for f in self.files:
d = FileData(f, group=self.group, columnlist=self.columns,
filter_func=self.filter_func)
vals.append(d.get_column(col))
# Close each file since h5py has an upper limit on the number of
# open file objects (approx. 1000)
d.close()
logging.info('- got %i values' % sum(len(v) for v in vals))
return np.concatenate(vals)
class SingleDetTriggers(object):
"""
Provides easy access to the parameters of single-detector CBC triggers.
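    Example; the trigger and bank file names below are illustrative only:
    >>> trigs = SingleDetTriggers('H1-TRIGGERS.hdf', 'BANK.hdf', None, None,
    ...                           None, 'H1')
    >>> rw_snr = trigs.newsnr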
"""
# FIXME: Some of these are optional and should be kwargs.
def __init__(self, trig_file, bank_file, veto_file, segment_name, filter_func, detector):
logging.info('Loading triggers')
self.trigs_f = HFile(trig_file, 'r')
self.trigs = self.trigs_f[detector]
if bank_file:
logging.info('Loading bank')
self.bank = HFile(bank_file, 'r')
else:
logging.info('No bank file given')
# empty dict in place of non-existent hdf file
self.bank = {}
if veto_file:
logging.info('Applying veto segments')
# veto_mask is an array of indices into the trigger arrays
# giving the surviving triggers
logging.info('%i triggers before vetoes',
len(self.trigs['end_time'][:]))
self.veto_mask, _ = events.veto.indices_outside_segments(
self.trigs['end_time'][:], [veto_file],
ifo=detector, segment_name=segment_name)
logging.info('%i triggers remain after vetoes',
len(self.veto_mask))
else:
self.veto_mask = np.arange(len(self.trigs['end_time']))
if filter_func:
# get required columns into the namespace with dummy attribute
# names to avoid confusion with other class properties
for c in self.trigs.keys():
if c in filter_func:
setattr(self, '_'+c, self.trigs[c][:])
for c in self.bank.keys():
if c in filter_func:
# get template parameters corresponding to triggers
setattr(self, '_'+c,
np.array(self.bank[c])[self.trigs['template_id'][:]])
self.filter_mask = eval(filter_func.replace('self.', 'self._'))
# remove the dummy attributes
for c in self.trigs.keys() + self.bank.keys():
if c in filter_func: delattr(self, '_'+c)
self.boolean_veto = np.in1d(np.arange(len(self.trigs['end_time'])),
self.veto_mask, assume_unique=True)
self.mask = np.logical_and(self.boolean_veto, self.filter_mask)
logging.info('%i triggers remain after cut on %s',
len(self.trigs['end_time'][self.mask]), filter_func)
else:
self.mask = self.veto_mask
def checkbank(self, param):
if self.bank == {}:
return RuntimeError("Can't get %s values without a bank file"
% param)
@classmethod
def get_param_names(cls):
"""Returns a list of plottable CBC parameter variables"""
return [m[0] for m in inspect.getmembers(cls) \
if type(m[1]) == property]
def mask_to_n_loudest_clustered_events(self, n_loudest=10,
ranking_statistic="newsnr",
cluster_window=10):
"""Edits the mask property of the class to point to the N loudest
single detector events as ranked by ranking statistic. Events are
clustered so that no more than 1 event within +/- cluster-window will
be considered."""
# If this becomes memory intensive we can optimize
if ranking_statistic == "newsnr":
stat = self.newsnr
# newsnr doesn't return an array if len(stat) == 1
if len(self.snr) == 1:
stat = np.array([stat])
self.stat_name = "Reweighted SNR"
elif ranking_statistic == "newsnr_sgveto":
stat = self.newsnr_sgveto
# newsnr doesn't return an array if len(stat) == 1
if len(self.snr) == 1:
stat = np.array([stat])
self.stat_name = "Reweighted SNR (+sgveto)"
elif ranking_statistic == "snr":
stat = self.snr
self.stat_name = "SNR"
else:
err_msg = "Don't recognize statistic %s." % (ranking_statistic)
raise ValueError(err_msg)
times = self.end_time
index = stat.argsort()[::-1]
new_times = []
new_index = []
for curr_idx in index:
curr_time = times[curr_idx]
for time in new_times:
if abs(curr_time - time) < cluster_window:
break
else:
# Only get here if no other triggers within cluster window
new_index.append(curr_idx)
new_times.append(curr_time)
if len(new_index) >= n_loudest:
break
index = np.array(new_index)
self.stat = stat[index]
if self.mask.dtype == 'bool':
orig_indices = self.mask.nonzero()[0][index]
self.mask = np.in1d(np.arange(len(self.mask)), orig_indices,
assume_unique=True)
else:
self.mask = self.mask[index]
@property
def template_id(self):
return np.array(self.trigs['template_id'])[self.mask]
@property
def mass1(self):
self.checkbank('mass1')
return np.array(self.bank['mass1'])[self.template_id]
@property
def mass2(self):
self.checkbank('mass2')
return np.array(self.bank['mass2'])[self.template_id]
@property
def spin1z(self):
self.checkbank('spin1z')
return np.array(self.bank['spin1z'])[self.template_id]
@property
def spin2z(self):
self.checkbank('spin2z')
return np.array(self.bank['spin2z'])[self.template_id]
@property
def spin2x(self):
self.checkbank('spin2x')
return np.array(self.bank['spin2x'])[self.template_id]
@property
def spin2y(self):
self.checkbank('spin2y')
return np.array(self.bank['spin2y'])[self.template_id]
@property
def spin1x(self):
self.checkbank('spin1x')
return np.array(self.bank['spin1x'])[self.template_id]
@property
def spin1y(self):
self.checkbank('spin1y')
return np.array(self.bank['spin1y'])[self.template_id]
@property
def inclination(self):
self.checkbank('inclination')
return np.array(self.bank['inclination'])[self.template_id]
@property
def f_lower(self):
self.checkbank('f_lower')
return np.array(self.bank['f_lower'])[self.template_id]
@property
def mtotal(self):
return self.mass1 + self.mass2
@property
def mchirp(self):
return conversions.mchirp_from_mass1_mass2(self.mass1, self.mass2)
@property
def eta(self):
return conversions.eta_from_mass1_mass2(self.mass1, self.mass2)
@property
def effective_spin(self):
# FIXME assumes aligned spins
return conversions.chi_eff(self.mass1, self.mass2,
self.spin1z, self.spin2z)
# IMPROVEME: would like to have a way to access all get_freq and/or
# other pnutils.* names rather than hard-coding each one
# - eg make this part of a fancy interface to the bank file ?
@property
def f_seobnrv2_peak(self):
return pnutils.get_freq('fSEOBNRv2Peak', self.mass1, self.mass2,
self.spin1z, self.spin2z)
@property
def f_seobnrv4_peak(self):
return pnutils.get_freq('fSEOBNRv4Peak', self.mass1, self.mass2,
self.spin1z, self.spin2z)
@property
def end_time(self):
return np.array(self.trigs['end_time'])[self.mask]
@property
def template_duration(self):
return np.array(self.trigs['template_duration'])[self.mask]
@property
def snr(self):
return np.array(self.trigs['snr'])[self.mask]
@property
def sgchisq(self):
return np.array(self.trigs['sg_chisq'])[self.mask]
@property
def u_vals(self):
return np.array(self.trigs['u_vals'])[self.mask]
@property
def rchisq(self):
return np.array(self.trigs['chisq'])[self.mask] \
/ (np.array(self.trigs['chisq_dof'])[self.mask] * 2 - 2)
@property
def newsnr(self):
return events.newsnr(self.snr, self.rchisq)
@property
def newsnr_sgveto(self):
return events.newsnr_sgveto(self.snr, self.rchisq, self.sgchisq)
def get_column(self, cname):
if hasattr(self, cname):
return getattr(self, cname)
else:
return np.array(self.trigs[cname])[self.mask]
class ForegroundTriggers(object):
# FIXME: A lot of this is hardcoded to expect two ifos
def __init__(self, coinc_file, bank_file, sngl_files=None, n_loudest=None,
group='foreground'):
self.coinc_file = FileData(coinc_file, group=group)
self.sngl_files = {}
if sngl_files is not None:
for file in sngl_files:
curr_dat = FileData(file)
curr_ifo = curr_dat.group_key
self.sngl_files[curr_ifo] = curr_dat
self.bank_file = HFile(bank_file, "r")
self.n_loudest = n_loudest
self._sort_arr = None
self._template_id = None
self._trig_ids = None
@property
def sort_arr(self):
if self._sort_arr is None:
ifar = self.coinc_file.get_column('ifar')
sorting = ifar.argsort()[::-1]
if self.n_loudest:
sorting = sorting[:self.n_loudest]
self._sort_arr = sorting
return self._sort_arr
@property
def template_id(self):
if self._template_id is None:
template_id = self.get_coincfile_array('template_id')
self._template_id = template_id
return self._template_id
@property
def trig_id(self):
if self._trig_ids is not None:
return self._trig_ids
self._trig_ids = {}
# FIXME: There is no clear mapping from trig_id to ifo. This is bad!!!
# for now a hack is in place.
ifo1 = self.coinc_file.h5file.attrs['detector_1']
ifo2 = self.coinc_file.h5file.attrs['detector_2']
trigid1 = self.get_coincfile_array('trigger_id1')
trigid2 = self.get_coincfile_array('trigger_id2')
self._trig_ids[ifo1] = trigid1
self._trig_ids[ifo2] = trigid2
return self._trig_ids
def get_coincfile_array(self, variable):
return self.coinc_file.get_column(variable)[self.sort_arr]
def get_bankfile_array(self, variable):
try:
return self.bank_file[variable][:][self.template_id]
except IndexError:
if len(self.template_id) == 0:
return np.array([])
raise
def get_snglfile_array_dict(self, variable):
return_dict = {}
for ifo in self.sngl_files.keys():
try:
curr = self.sngl_files[ifo].get_column(variable)[\
self.trig_id[ifo]]
except IndexError:
if len(self.trig_id[ifo]) == 0:
curr = np.array([])
else:
raise
return_dict[ifo] = curr
return return_dict
def to_coinc_xml_object(self, file_name):
# FIXME: This function will only work with two ifos!!
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
ifos = [ifo for ifo in self.sngl_files.keys()]
proc_id = ligolw_process.register_to_xmldoc(outdoc, 'pycbc',
{}, ifos=ifos, comment='', version=pycbc_version.git_hash,
cvs_repository='pycbc/'+pycbc_version.git_branch,
cvs_entry_time=pycbc_version.date).process_id
search_summ_table = lsctables.New(lsctables.SearchSummaryTable)
coinc_h5file = self.coinc_file.h5file
start_time = coinc_h5file['segments']['coinc']['start'][:].min()
end_time = coinc_h5file['segments']['coinc']['end'][:].max()
num_trigs = len(self.sort_arr)
search_summary = return_search_summary(start_time, end_time,
num_trigs, ifos)
search_summ_table.append(search_summary)
outdoc.childNodes[0].appendChild(search_summ_table)
sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
coinc_def_table = lsctables.New(lsctables.CoincDefTable)
coinc_event_table = lsctables.New(lsctables.CoincTable)
coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
time_slide_table = lsctables.New(lsctables.TimeSlideTable)
# Set up time_slide table
time_slide_id = lsctables.TimeSlideID(0)
for ifo in ifos:
time_slide_row = lsctables.TimeSlide()
time_slide_row.instrument = ifo
time_slide_row.time_slide_id = time_slide_id
time_slide_row.offset = 0
time_slide_row.process_id = proc_id
time_slide_table.append(time_slide_row)
# Set up coinc_definer table
coinc_def_id = lsctables.CoincDefID(0)
coinc_def_row = lsctables.CoincDef()
coinc_def_row.search = "inspiral"
coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincidences"
coinc_def_row.coinc_def_id = coinc_def_id
coinc_def_row.search_coinc_type = 0
coinc_def_table.append(coinc_def_row)
bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z']
bank_col_vals = {}
for name in bank_col_names:
bank_col_vals[name] = self.get_bankfile_array(name)
coinc_event_names = ['ifar', 'time1', 'fap', 'stat']
coinc_event_vals = {}
for name in coinc_event_names:
coinc_event_vals[name] = self.get_coincfile_array(name)
sngl_col_names = ['snr', 'chisq', 'chisq_dof', 'bank_chisq',
'bank_chisq_dof', 'cont_chisq', 'cont_chisq_dof',
'end_time', 'template_duration', 'coa_phase',
'sigmasq']
sngl_col_vals = {}
for name in sngl_col_names:
sngl_col_vals[name] = self.get_snglfile_array_dict(name)
for idx in xrange(len(self.sort_arr)):
# Set up IDs and mapping values
coinc_id = lsctables.CoincID(idx)
# Set up sngls
# FIXME: As two-ifo is hardcoded loop over all ifos
sngl_combined_mchirp = 0
sngl_combined_mtot = 0
for ifo in ifos:
sngl_id = self.trig_id[ifo][idx]
event_id = lsctables.SnglInspiralID(sngl_id)
sngl = return_empty_sngl()
sngl.event_id = event_id
sngl.ifo = ifo
for name in sngl_col_names:
val = sngl_col_vals[name][ifo][idx]
if name == 'end_time':
sngl.set_end(LIGOTimeGPS(val))
else:
setattr(sngl, name, val)
for name in bank_col_names:
val = bank_col_vals[name][idx]
setattr(sngl, name, val)
sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
sngl.mass1, sngl.mass2)
sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
sngl.mass1, sngl.mass2)
sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
sngl_combined_mchirp += sngl.mchirp
sngl_combined_mtot += sngl.mtotal
sngl_inspiral_table.append(sngl)
# Set up coinc_map entry
coinc_map_row = lsctables.CoincMap()
coinc_map_row.table_name = 'sngl_inspiral'
coinc_map_row.coinc_event_id = coinc_id
coinc_map_row.event_id = event_id
coinc_event_map_table.append(coinc_map_row)
sngl_combined_mchirp = sngl_combined_mchirp / len(ifos)
sngl_combined_mtot = sngl_combined_mtot / len(ifos)
# Set up coinc inspiral and coinc event tables
coinc_event_row = lsctables.Coinc()
coinc_inspiral_row = lsctables.CoincInspiral()
coinc_event_row.coinc_def_id = coinc_def_id
coinc_event_row.nevents = len(ifos)
coinc_event_row.instruments = ','.join(ifos)
coinc_inspiral_row.set_ifos(ifos)
coinc_event_row.time_slide_id = time_slide_id
coinc_event_row.process_id = proc_id
coinc_event_row.coinc_event_id = coinc_id
coinc_inspiral_row.coinc_event_id = coinc_id
coinc_inspiral_row.mchirp = sngl_combined_mchirp
coinc_inspiral_row.mass = sngl_combined_mtot
coinc_inspiral_row.set_end(\
LIGOTimeGPS(coinc_event_vals['time1'][idx]))
coinc_inspiral_row.snr = coinc_event_vals['stat'][idx]
coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx]
coinc_inspiral_row.combined_far = 1./coinc_event_vals['ifar'][idx]
# Transform to Hz
coinc_inspiral_row.combined_far = \
coinc_inspiral_row.combined_far / YRJUL_SI
coinc_event_row.likelihood = 0.
coinc_inspiral_row.minimum_duration = 0.
coinc_event_table.append(coinc_event_row)
coinc_inspiral_table.append(coinc_inspiral_row)
outdoc.childNodes[0].appendChild(coinc_def_table)
outdoc.childNodes[0].appendChild(coinc_event_table)
outdoc.childNodes[0].appendChild(coinc_event_map_table)
outdoc.childNodes[0].appendChild(time_slide_table)
outdoc.childNodes[0].appendChild(coinc_inspiral_table)
outdoc.childNodes[0].appendChild(sngl_inspiral_table)
ligolw_utils.write_filename(outdoc, file_name)
chisq_choices = ['traditional', 'cont', 'bank', 'max_cont_trad', 'sg',
'max_bank_cont', 'max_bank_trad', 'max_bank_cont_trad']
def get_chisq_from_file_choice(hdfile, chisq_choice):
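    """Return the reduced chisq array implied by `chisq_choice`.
    `hdfile` is an open single-detector trigger hdf file (or group) and
    `chisq_choice` must be one of the strings in `chisq_choices` above; the
    'max_*' choices combine the individual reduced chisq values with
    numpy.maximum.
    """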
f = hdfile
if chisq_choice in ['traditional','max_cont_trad', 'max_bank_trad',
'max_bank_cont_trad']:
trad_chisq = f['chisq'][:]
# We now need to handle the case where chisq is not actually calculated
# 0 is used as a sentinel value
trad_chisq_dof = f['chisq_dof'][:]
trad_chisq /= (trad_chisq_dof * 2 - 2)
if chisq_choice in ['cont', 'max_cont_trad', 'max_bank_cont',
'max_bank_cont_trad']:
cont_chisq = f['cont_chisq'][:]
cont_chisq_dof = f['cont_chisq_dof'][:]
cont_chisq /= cont_chisq_dof
if chisq_choice in ['bank', 'max_bank_cont', 'max_bank_trad',
'max_bank_cont_trad']:
bank_chisq = f['bank_chisq'][:]
bank_chisq_dof = f['bank_chisq_dof'][:]
bank_chisq /= bank_chisq_dof
if chisq_choice == 'sg':
chisq = f['sg_chisq'][:]
elif chisq_choice == 'traditional':
chisq = trad_chisq
elif chisq_choice == 'cont':
chisq = cont_chisq
elif chisq_choice == 'bank':
chisq = bank_chisq
elif chisq_choice == 'max_cont_trad':
chisq = np.maximum(trad_chisq, cont_chisq)
elif chisq_choice == 'max_bank_cont':
chisq = np.maximum(bank_chisq, cont_chisq)
elif chisq_choice == 'max_bank_trad':
chisq = np.maximum(bank_chisq, trad_chisq)
elif chisq_choice == 'max_bank_cont_trad':
chisq = np.maximum(np.maximum(bank_chisq, cont_chisq), trad_chisq)
else:
err_msg="Do not recognized --chisq-choice %s" % chisq_choice
raise ValueError(err_msg)
return chisq
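# --- Added usage sketch (not part of the original module) ---
# Minimal illustration of get_chisq_from_file_choice(); assumes `trig_file` is an
# open h5py.File laid out with the datasets referenced above ('chisq', 'chisq_dof',
# 'cont_chisq', 'bank_chisq', 'sg_chisq', ...). The choice string is just an example.
def _example_chisq_usage(trig_file):
    # Pick the strictest of the available chi-squared vetoes for each trigger.
    return get_chisq_from_file_choice(trig_file, 'max_bank_cont_trad')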
def save_dict_to_hdf5(dic, filename):
"""
Parameters
----------
dic:
python dictionary to be converted to hdf5 format
filename:
desired name of hdf5 file
"""
with h5py.File(filename, 'w') as h5file:
recursively_save_dict_contents_to_group(h5file, '/', dic)
def recursively_save_dict_contents_to_group(h5file, path, dic):
"""
Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format
"""
for key, item in dic.items():
if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes, tuple, list)):
h5file[path + str(key)] = item
elif isinstance(item, dict):
recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
else:
raise ValueError('Cannot save %s type'%type(item))
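# --- Added usage sketch (not part of the original module) ---
# Hypothetical round trip for save_dict_to_hdf5(): nested dicts become HDF5 groups
# and leaves (arrays, scalars, strings) become datasets. The file name is made up.
def _example_save_dict():
    data = {'run1': {'snr': np.array([5.0, 6.2]), 'ifo': 'H1'},
            'config': {'threshold': np.float64(5.5)}}
    save_dict_to_hdf5(data, 'example_output.hdf5')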
| gpl-3.0 | -1,520,289,452,508,774,400 | 35.981481 | 93 | 0.559339 | false | 3.6338 | false | false | false |
zbyte64/django-dockitcms | dockitcms/widgetblock/fields.py | 1 | 1521 | from django import forms
from dockit import schema
from dockitcms.fields import BaseFieldEntry, ListFieldMixin
from dockitcms.widgetblock.models import Widget
class WidgetField(BaseFieldEntry):
field_class = schema.SchemaField
def get_field_kwargs(self):
kwargs = dict(super(WidgetField, self).get_field_kwargs())
kwargs['schema'] = Widget
return kwargs
class Meta:
typed_key = 'WidgetField'
class ListWidgetField(ListFieldMixin, WidgetField):
def get_list_field_kwargs(self):
subfield = WidgetField.create_field(self)
return {'subfield': subfield}
class Meta:
typed_key = 'ListWidgetField'
class VisibleSchemaTypeField(schema.SchemaTypeField):
form_field_class = forms.ChoiceField
form_widget_class = forms.Select
def formfield_kwargs(self, **kwargs):
kwargs = super(VisibleSchemaTypeField, self).formfield_kwargs(**kwargs)
kwargs['choices'] = self.get_choices()
return kwargs
class TypedWidgetField(BaseFieldEntry):
widget_type = VisibleSchemaTypeField(schemas=Widget._meta.fields['widget_type'].schemas)
field_class = schema.SchemaField
def get_field_kwargs(self):
kwargs = dict(super(TypedWidgetField, self).get_field_kwargs())
kwargs.pop('widget_type', None)
kwargs['schema'] = Widget._meta.fields['widget_type'].schemas.get(self.widget_type, Widget)
return kwargs
class Meta:
typed_key = 'TypedWidgetField'
| bsd-3-clause | 1,446,314,102,255,688,400 | 28.823529 | 99 | 0.687048 | false | 3.981675 | false | false | false |
BackupTheBerlios/espressopp | examples/hadress/hadressFEC/hadressDensityFEC.py | 1 | 8414 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# relevant imports
import sys
import time
import espresso
import mpi4py.MPI as MPI
import Tetracryst # Preparation of tetrahedral crystal and construction of bonds in tetrahedral liquid
from espresso import Real3D, Int3D
from espresso.tools import decomp
from espresso.tools import timers
# integration steps, cutoff, skin, AdResS specifications
steps = 1000
timestep = 0.0005
intervals = 100
rc = 4.5 # cutoff coarse-grained potential
rca = 1.122462048309373 # cutoff atomistic potential (cutoff (2^(1/6)), WCA)
skin = 0.4
# Parameters for the thermostat
#gamma = 2.0
#temp = 1.0
# Parameters for size of AdResS dimensions
ex_size = 5.0
hy_size = 5.0
# read equilibrated configuration file
pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = espresso.tools.readxyz("equilibrated_conf.xyz")
# Table for coarse-grained potential
tabCG = "table_potential.dat"
# FEC compensation table
tabFEC = "table_FEC_Gibbs.dat"
# number of CG particles
num_particlesCG = len(x)/4
# number of AT particles
num_particles = len(x)
# set up the system
sys.stdout.write('Setting up simulation ...\n')
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
system = espresso.System()
system.rng = espresso.esutil.RNG()
system.bc = espresso.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
# (H-)AdResS domain decomposition
system.storage = espresso.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
# prepare AT particles
allParticlesAT = []
allParticles = []
tuples = []
for pidAT in range(num_particles):
allParticlesAT.append([pidAT, # add here these particles just temporarily
Real3D(x[pidAT], y[pidAT], z[pidAT]), # position
Real3D(vx[pidAT], vy[pidAT], vz[pidAT]), # velocity
Real3D(0, 0, 0), # force
1, 1.0, 1]) # type, mass, is AT particle
# create CG particles
for pidCG in range(num_particlesCG):
# we put CG molecule in first atom, later CG molecules will be positioned in the center
cmp = espresso.tools.AdressSetCG(4, pidCG, allParticlesAT)
# Preparation of tuples (tuples define, which atoms belong to which CG molecules)
tmptuple = [pidCG+num_particles]
for pidAT2 in range(4):
pid = pidCG*4+pidAT2
tmptuple.append(pid)
# append CG particles
allParticles.append([pidCG+num_particles, # CG particle has to be added first!
Real3D(cmp[0], cmp[1], cmp[2]), # pos
Real3D(0, 0, 0), # vel
Real3D(0, 0, 0), # force
0, 4.0, 0]) # type, mass, is not AT particle
# append AT particles
for pidAT in range(4):
pid = pidCG*4+pidAT
allParticles.append([pid, # now the AT particles can be added
(allParticlesAT[pid])[1], # pos
(allParticlesAT[pid])[2], # vel
(allParticlesAT[pid])[3], # force
(allParticlesAT[pid])[4], # type
(allParticlesAT[pid])[5], # mass
(allParticlesAT[pid])[6]]) # is AT particle
# append tuple to tuplelist
tuples.append(tmptuple)
# add particles to system
system.storage.addParticles(allParticles, "id", "pos", "v", "f", "type", "mass", "adrat")
# create FixedTupleList object
ftpl = espresso.FixedTupleListAdress(system.storage)
# and add the tuples
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
# add bonds between AT particles
fpl = espresso.FixedPairListAdress(system.storage, ftpl)
bonds = Tetracryst.makebonds(len(x))
fpl.addBonds(bonds)
# decompose after adding tuples and bonds
print "Added tuples and bonds, decomposing now ..."
system.storage.decompose()
print "done decomposing"
# AdResS Verlet list
vl = espresso.VerletListAdress(system, cutoff=rc, adrcut=rc,
dEx=ex_size, dHy=hy_size,
adrCenter=[Lx/2, Ly/2, Lz/2])
# non-bonded potentials
# LJ Capped WCA between AT and tabulated potential between CG particles
interNB = espresso.interaction.VerletListHadressLennardJones(vl, ftpl) # Here we need specific (H-)AdResS interaction type
potWCA = espresso.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=rca)
potCG = espresso.interaction.Tabulated(itype=3, filename=tabCG, cutoff=rc) # CG
interNB.setPotentialAT(type1=1, type2=1, potential=potWCA) # AT
interNB.setPotentialCG(type1=0, type2=0, potential=potCG) # CG
system.addInteraction(interNB)
# bonded potentials
# Quartic potential between AT particles
potQuartic = espresso.interaction.Quartic(K=75.0, r0=1.0)
interQuartic = espresso.interaction.FixedPairListQuartic(system, fpl, potQuartic)
system.addInteraction(interQuartic)
# VelocityVerlet integrator
integrator = espresso.integrator.VelocityVerlet(system)
integrator.dt = timestep
# add AdResS extension
adress = espresso.integrator.Adress(system, vl, ftpl)
integrator.addExtension(adress)
# add Langevin thermostat extension
#langevin = espresso.integrator.LangevinThermostat(system)
#langevin.gamma = gamma
#langevin.temperature = temp
#langevin.adress = True # enable AdResS!
#integrator.addExtension(langevin)
# add Free Energy Compensation (FEC) extension
fec = espresso.integrator.FreeEnergyCompensation(system, center=[Lx/2, Ly/2, Lz/2])
fec.addForce(itype=3, filename=tabFEC, type=0)
integrator.addExtension(fec)
# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
espresso.tools.AdressDecomp(system, integrator)
# system information
print ''
print 'AdResS Center =', [Lx/2, Ly/2, Lz/2]
print 'number of AT particles =', num_particles
print 'number of CG particles =', num_particlesCG
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''
# analysis
temperature = espresso.analysis.Temperature(system)
fmt = '%5d %8.4f %12.3f %12.3f %12.3f %12.3f %12.3f\n'
T = temperature.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interNB.computeEnergy()
Eb = interQuartic.computeEnergy()
Ecorr = fec.computeCompEnergy()
sys.stdout.write(' step Temp etotal enonbonded ebonded ekinetic ecorrection\n')
sys.stdout.write(fmt % (0, T, Ek + Ep + Eb + Ecorr, Ep, Eb, Ek, Ecorr))
# Density profile preparation
density_array_total = []
Adds = 0.0
densityprofilegrid = 100
# Timer, Steps
start_time = time.clock()
nsteps = steps / intervals
# integration and on the fly analysis
for s in range(1, intervals + 1):
integrator.run(nsteps)
step = nsteps * s
T = temperature.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interNB.computeEnergy()
Eb = interQuartic.computeEnergy()
Ecorr = fec.computeCompEnergy()
# calculate density profile
if s > 10:
densityprofile = espresso.analysis.XDensity(system)
density_array = densityprofile.compute(densityprofilegrid)
for i in range(len(density_array)):
if(i>=len(density_array_total)):
density_array_total.append(density_array[i])
else:
density_array_total[i] += density_array[i]
Adds += 1.0
sys.stdout.write(fmt % (step, T, Ek + Ep + Eb + Ecorr, Ep, Eb, Ek, Ecorr))
# correct the density profile according to number of samples
for i in range(len(density_array_total)):
density_array_total[i] /= Adds
# printing density profile
nameFile = 'density_profile_Gibbs.dat'
print ''
print "Printing the density profile to %s\n" %nameFile
tempFile = open (nameFile, 'w')
fmt = ' %12.8f %12.8f\n'
dr = Lx / float(densityprofilegrid)
for i in range( len(density_array_total) ):
tempFile.write(fmt % ( (i+0.5)*dr, density_array_total[i] ))
tempFile.close()
# simulation information
end_time = time.clock()
timers.show(integrator.getTimers(), precision=3)
sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize())
sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(num_particles)))
sys.stdout.write('Neighbor list builds = %d\n' % vl.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
| gpl-3.0 | 402,461,459,966,892,200 | 32.388889 | 122 | 0.686475 | false | 3.019017 | false | false | false |
Rudd-O/cloud-tool | cloudapis/cloud.py | 1 | 2976 | '''Implements the Cloud.com API'''
from cloudtool.utils import describe
import urllib
import urllib2
import os
import xml.dom.minidom
class CloudAPI:
@describe("server", "Management Server host name or address")
@describe("responseformat", "Response format: xml or json")
def __init__(self,
server="127.0.0.1:8096",
responseformat="xml",
):
self.__dict__.update(locals())
def _make_request(self,command,parameters=None):
'''Command is a string, parameters is a dictionary'''
if ":" in self.server:
host,port = self.server.split(":")
port = int(port)
else:
host = self.server
port = 8096
url = "http://" + self.server + "/?"
if not parameters: parameters = {}
parameters["command"] = command
parameters["response"] = self.responseformat
querystring = urllib.urlencode(parameters)
url += querystring
f = urllib2.urlopen(url)
data = f.read()
return data
def load_dynamic_methods():
'''creates smart function objects for every method in the commands.xml file'''
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE: rc.append(node.data)
return ''.join(rc)
# FIXME figure out installation and packaging
xmlfile = os.path.join(os.path.dirname(__file__),"commands.xml")
dom = xml.dom.minidom.parse(xmlfile)
for cmd in dom.getElementsByTagName("command"):
name = getText(cmd.getElementsByTagName('name')[0].childNodes).strip()
assert name
description = cmd.getElementsByTagName('name')[0].getAttribute("description")
if description: description = '"""%s"""' % description
else: description = ''
arguments = []
options = []
descriptions = []
for param in cmd.getElementsByTagName('arg'):
argname = getText(param.childNodes).strip()
assert argname
required = param.getAttribute("required").strip()
if required == 'true': required = True
elif required == 'false': required = False
else: raise AssertionError, "Not reached"
if required: arguments.append(argname)
options.append(argname)
description = param.getAttribute("description").strip()
if description: descriptions.append( (argname,description) )
funcparams = ["self"] + [ "%s=None"%o for o in options ]
funcparams = ", ".join(funcparams)
code = """
def %s(%s):
%s
parms = locals()
del parms["self"]
for arg in %r:
if locals()[arg] is None:
raise TypeError, "%%s is a required option"%%arg
for k,v in parms.items():
if v is None: del parms[k]
output = self._make_request("%s",parms)
print output
"""%(name,funcparams,description,arguments,name)
namespace = {}
exec code.strip() in namespace
func = namespace[name]
for argname,description in descriptions:
func = describe(argname,description)(func)
yield (name,func)
for name,meth in load_dynamic_methods(): setattr(CloudAPI,name,meth)
implementor = CloudAPI
del name,meth,describe,load_dynamic_methods
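# --- Added usage sketch (not part of the original module) ---
# The generated methods mirror whatever commands.xml ships with the tool, so the
# command name and parameter below are assumptions for illustration only.
def _example_api_call():
    api = CloudAPI(server="127.0.0.1:8096", responseformat="xml")
    return api._make_request("listZones", {"available": "true"})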
| gpl-3.0 | -2,088,494,372,221,547,500 | 25.336283 | 79 | 0.673387 | false | 3.472579 | false | false | false |
99cloud/keystone_register | openstack_dashboard/api/nova.py | 1 | 18812 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Openstack, LLC
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import ugettext as _
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1 import security_group_rules as nova_rules
from novaclient.v1_1.security_groups import SecurityGroup as NovaSecurityGroup
from novaclient.v1_1.servers import REBOOT_HARD, REBOOT_SOFT
from horizon.conf import HORIZON_CONFIG
from horizon.utils.memoized import memoized
from openstack_dashboard.api.base import (APIResourceWrapper, QuotaSet,
APIDictWrapper, url_for)
from openstack_dashboard.api import network
LOG = logging.getLogger(__name__)
# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'
VOLUME_STATE_AVAILABLE = "available"
class VNCConsole(APIDictWrapper):
"""Wrapper for the "console" dictionary returned by the
novaclient.servers.get_vnc_console method.
"""
_attrs = ['url', 'type']
class SPICEConsole(APIDictWrapper):
"""Wrapper for the "console" dictionary returned by the
novaclient.servers.get_spice_console method.
"""
_attrs = ['url', 'type']
class Server(APIResourceWrapper):
"""Simple wrapper around novaclient.server.Server
Preserves the request info so image name can later be retrieved
"""
_attrs = ['addresses', 'attrs', 'id', 'image', 'links',
'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
'image_name', 'VirtualInterfaces', 'flavor', 'key_name',
'tenant_id', 'user_id', 'OS-EXT-STS:power_state',
'OS-EXT-STS:task_state', 'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:host']
def __init__(self, apiresource, request):
super(Server, self).__init__(apiresource)
self.request = request
@property
def image_name(self):
import glanceclient.exc as glance_exceptions
from openstack_dashboard.api import glance
try:
image = glance.image_get(self.request, self.image['id'])
return image.name
except glance_exceptions.ClientException:
return "(not found)"
@property
def internal_name(self):
return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")
def reboot(self, hardness=REBOOT_HARD):
novaclient(self.request).servers.reboot(self.id, hardness)
class NovaUsage(APIResourceWrapper):
"""Simple wrapper around contrib/simple_usage.py."""
_attrs = ['start', 'server_usages', 'stop', 'tenant_id',
'total_local_gb_usage', 'total_memory_mb_usage',
'total_vcpus_usage', 'total_hours']
def get_summary(self):
return {'instances': self.total_active_instances,
'memory_mb': self.memory_mb,
'vcpus': getattr(self, "total_vcpus_usage", 0),
'vcpu_hours': self.vcpu_hours,
'local_gb': self.local_gb,
'disk_gb_hours': self.disk_gb_hours}
@property
def total_active_instances(self):
return sum(1 for s in self.server_usages if s['ended_at'] is None)
@property
def vcpus(self):
return sum(s['vcpus'] for s in self.server_usages
if s['ended_at'] is None)
@property
def vcpu_hours(self):
return getattr(self, "total_hours", 0)
@property
def local_gb(self):
return sum(s['local_gb'] for s in self.server_usages
if s['ended_at'] is None)
@property
def memory_mb(self):
return sum(s['memory_mb'] for s in self.server_usages
if s['ended_at'] is None)
@property
def disk_gb_hours(self):
return getattr(self, "total_local_gb_usage", 0)
class SecurityGroup(APIResourceWrapper):
"""Wrapper around novaclient.security_groups.SecurityGroup which wraps its
rules in SecurityGroupRule objects and allows access to them.
"""
_attrs = ['id', 'name', 'description', 'tenant_id']
@property
def rules(self):
"""Wraps transmitted rule info in the novaclient rule class."""
if "_rules" not in self.__dict__:
manager = nova_rules.SecurityGroupRuleManager
self._rules = [nova_rules.SecurityGroupRule(manager, rule)
for rule in self._apiresource.rules]
return self.__dict__['_rules']
@rules.setter
def rules(self, value):
self._rules = value
class SecurityGroupRule(APIResourceWrapper):
""" Wrapper for individual rules in a SecurityGroup. """
_attrs = ['id', 'ip_protocol', 'from_port', 'to_port', 'ip_range', 'group']
def __unicode__(self):
if 'name' in self.group:
vals = {'from': self.from_port,
'to': self.to_port,
'group': self.group['name']}
return _('ALLOW %(from)s:%(to)s from %(group)s') % vals
else:
vals = {'from': self.from_port,
'to': self.to_port,
'cidr': self.ip_range['cidr']}
return _('ALLOW %(from)s:%(to)s from %(cidr)s') % vals
class FlavorExtraSpec(object):
def __init__(self, flavor_id, key, val):
self.flavor_id = flavor_id
self.id = key
self.key = key
self.value = val
class FloatingIp(APIResourceWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id', 'pool']
def __init__(self, fip):
fip.__setattr__('port_id', fip.instance_id)
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(APIDictWrapper):
def __init__(self, pool):
pool_dict = {'id': pool.name,
'name': pool.name}
super(FloatingIpPool, self).__init__(pool_dict)
class FloatingIpTarget(APIDictWrapper):
def __init__(self, server):
server_dict = {'name': '%s (%s)' % (server.name, server.id),
'id': server.id}
super(FloatingIpTarget, self).__init__(server_dict)
class FloatingIpManager(network.FloatingIpManager):
def __init__(self, request):
self.request = request
self.client = novaclient(request)
def list_pools(self):
return [FloatingIpPool(pool)
for pool in self.client.floating_ip_pools.list()]
def list(self):
return [FloatingIp(fip)
for fip in self.client.floating_ips.list()]
def get(self, floating_ip_id):
return FloatingIp(self.client.floating_ips.get(floating_ip_id))
def allocate(self, pool):
return FloatingIp(self.client.floating_ips.create(pool=pool))
def release(self, floating_ip_id):
self.client.floating_ips.delete(floating_ip_id)
def associate(self, floating_ip_id, port_id):
# In Nova implied port_id is instance_id
server = self.client.servers.get(port_id)
fip = self.client.floating_ips.get(floating_ip_id)
self.client.servers.add_floating_ip(server.id, fip.ip)
def disassociate(self, floating_ip_id, port_id):
fip = self.client.floating_ips.get(floating_ip_id)
server = self.client.servers.get(fip.instance_id)
self.client.servers.remove_floating_ip(server.id, fip.ip)
def list_targets(self):
return [FloatingIpTarget(s) for s in self.client.servers.list()]
def get_target_id_by_instance(self, instance_id):
return instance_id
def is_simple_associate_supported(self):
return HORIZON_CONFIG["simple_ip_management"]
def novaclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
LOG.debug('novaclient connection created using token "%s" and url "%s"' %
(request.user.token.id, url_for(request, 'compute')))
c = nova_client.Client(request.user.username,
request.user.token.id,
project_id=request.user.tenant_id,
auth_url=url_for(request, 'compute'),
insecure=insecure,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = url_for(request, 'compute')
return c
def server_vnc_console(request, instance_id, console_type='novnc'):
return VNCConsole(novaclient(request).servers.get_vnc_console(instance_id,
console_type)['console'])
def server_spice_console(request, instance_id, console_type='spice-html5'):
return SPICEConsole(novaclient(request).servers.get_spice_console(
instance_id, console_type)['console'])
def flavor_create(request, name, memory, vcpu, disk, ephemeral=0, swap=0,
metadata=None):
flavor = novaclient(request).flavors.create(name, memory, vcpu, disk,
ephemeral=ephemeral,
swap=swap)
if (metadata):
flavor_extra_set(request, flavor.id, metadata)
return flavor
def flavor_delete(request, flavor_id):
novaclient(request).flavors.delete(flavor_id)
def flavor_get(request, flavor_id):
return novaclient(request).flavors.get(flavor_id)
@memoized
def flavor_list(request):
"""Get the list of available instance sizes (flavors)."""
return novaclient(request).flavors.list()
def flavor_get_extras(request, flavor_id, raw=False):
"""Get flavor extra specs."""
flavor = novaclient(request).flavors.get(flavor_id)
extras = flavor.get_keys()
if raw:
return extras
return [FlavorExtraSpec(flavor_id, key, value) for
key, value in extras.items()]
def flavor_extra_delete(request, flavor_id, keys):
"""Unset the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
return flavor.unset_keys(keys)
def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
if (not metadata): # not a way to delete keys
return None
return flavor.set_keys(metadata)
def snapshot_create(request, instance_id, name):
return novaclient(request).servers.create_image(instance_id, name)
def keypair_create(request, name):
return novaclient(request).keypairs.create(name)
def keypair_import(request, name, public_key):
return novaclient(request).keypairs.create(name, public_key)
def keypair_delete(request, keypair_id):
novaclient(request).keypairs.delete(keypair_id)
def keypair_list(request):
return novaclient(request).keypairs.list()
def server_create(request, name, image, flavor, key_name, user_data,
security_groups, block_device_mapping, nics=None,
instance_count=1):
return Server(novaclient(request).servers.create(
name, image, flavor, userdata=user_data,
security_groups=security_groups,
key_name=key_name, block_device_mapping=block_device_mapping,
nics=nics,
min_count=instance_count), request)
def server_delete(request, instance):
novaclient(request).servers.delete(instance)
def server_get(request, instance_id):
return Server(novaclient(request).servers.get(instance_id), request)
def server_list(request, search_opts=None, all_tenants=False):
if search_opts is None:
search_opts = {}
if all_tenants:
search_opts['all_tenants'] = True
else:
search_opts['project_id'] = request.user.tenant_id
return [Server(s, request)
for s in novaclient(request).servers.list(True, search_opts)]
def server_console_output(request, instance_id, tail_length=None):
"""Gets console output of an instance."""
return novaclient(request).servers.get_console_output(instance_id,
length=tail_length)
def server_security_groups(request, instance_id):
"""Gets security groups of an instance."""
# TODO(gabriel): This needs to be moved up to novaclient, and should
# be removed once novaclient supports this call.
security_groups = []
nclient = novaclient(request)
resp, body = nclient.client.get('/servers/%s/os-security-groups'
% instance_id)
if body:
# Wrap data in SG objects as novaclient would.
sg_objs = [NovaSecurityGroup(nclient.security_groups, sg, loaded=True)
for sg in body.get('security_groups', [])]
# Then wrap novaclient's object with our own. Yes, sadly wrapping
# with two layers of objects is necessary.
security_groups = [SecurityGroup(sg) for sg in sg_objs]
# Package up the rules, as well.
for sg in security_groups:
rule_objects = [SecurityGroupRule(rule) for rule in sg.rules]
sg.rules = rule_objects
return security_groups
def server_add_security_group(request, instance_id, security_group_name):
return novaclient(request).servers.add_security_group(instance_id,
security_group_name)
def server_remove_security_group(request, instance_id, security_group_name):
return novaclient(request).servers.remove_security_group(
instance_id,
security_group_name)
def server_pause(request, instance_id):
novaclient(request).servers.pause(instance_id)
def server_unpause(request, instance_id):
novaclient(request).servers.unpause(instance_id)
def server_suspend(request, instance_id):
novaclient(request).servers.suspend(instance_id)
def server_resume(request, instance_id):
novaclient(request).servers.resume(instance_id)
def server_reboot(request, instance_id, hardness=REBOOT_HARD):
server = server_get(request, instance_id)
server.reboot(hardness)
def server_update(request, instance_id, name):
response = novaclient(request).servers.update(instance_id, name=name)
# TODO(gabriel): servers.update method doesn't return anything. :-(
if response is None:
return True
else:
return response
def server_migrate(request, instance_id):
novaclient(request).servers.migrate(instance_id)
def server_confirm_resize(request, instance_id):
novaclient(request).servers.confirm_resize(instance_id)
def server_revert_resize(request, instance_id):
novaclient(request).servers.revert_resize(instance_id)
def tenant_quota_get(request, tenant_id):
return QuotaSet(novaclient(request).quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
novaclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
return QuotaSet(novaclient(request).quotas.defaults(tenant_id))
def usage_get(request, tenant_id, start, end):
return NovaUsage(novaclient(request).usage.get(tenant_id, start, end))
def usage_list(request, start, end):
return [NovaUsage(u) for u in
novaclient(request).usage.list(start, end, True)]
def security_group_list(request):
return [SecurityGroup(g) for g
in novaclient(request).security_groups.list()]
def security_group_get(request, sg_id):
return SecurityGroup(novaclient(request).security_groups.get(sg_id))
def security_group_create(request, name, desc):
return SecurityGroup(novaclient(request).security_groups.create(name,
desc))
def security_group_delete(request, security_group_id):
novaclient(request).security_groups.delete(security_group_id)
def security_group_rule_create(request, parent_group_id, ip_protocol=None,
from_port=None, to_port=None, cidr=None,
group_id=None):
sg = novaclient(request).security_group_rules.create(parent_group_id,
ip_protocol,
from_port,
to_port,
cidr,
group_id)
return SecurityGroupRule(sg)
def security_group_rule_delete(request, security_group_rule_id):
novaclient(request).security_group_rules.delete(security_group_rule_id)
def virtual_interfaces_list(request, instance_id):
return novaclient(request).virtual_interfaces.list(instance_id)
def get_x509_credentials(request):
return novaclient(request).certs.create()
def get_x509_root_certificate(request):
return novaclient(request).certs.get()
def instance_volume_attach(request, volume_id, instance_id, device):
return novaclient(request).volumes.create_server_volume(instance_id,
volume_id,
device)
def instance_volume_detach(request, instance_id, att_id):
return novaclient(request).volumes.delete_server_volume(instance_id,
att_id)
def instance_volumes_list(request, instance_id):
from openstack_dashboard.api.cinder import cinderclient
volumes = novaclient(request).volumes.get_server_volumes(instance_id)
for volume in volumes:
volume_data = cinderclient(request).volumes.get(volume.id)
volume.name = volume_data.display_name
return volumes
def tenant_absolute_limits(request, reserved=False):
limits = novaclient(request).limits.get(reserved=reserved).absolute
limits_dict = {}
for limit in limits:
# -1 is used to represent unlimited quotas
if limit.value == -1:
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
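# --- Added usage sketch (not part of the original module) ---
# Hedged example of the typical call pattern from a Django view; `request` must be
# an authenticated request carrying an OpenStack token, as Horizon normally supplies.
def _example_list_servers(request):
    flavors = flavor_list(request)
    servers = server_list(request, search_opts={'status': 'ACTIVE'})
    return flavors, servers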
| apache-2.0 | 3,814,561,238,036,676,000 | 32.895495 | 79 | 0.633638 | false | 3.832926 | false | false | false |
hychen/boliau | boliau/plugins/lp_cli/actionlib.py | 1 | 6379 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: lp_cli.py
#
# Copyright (C) 2012 Hsin-Yi Chen (hychen)
# Author(s): Hsin-Yi Chen (hychen) <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import logging
from boliau import actionlib
from launchpadlib.launchpad import Launchpad
# -----------------------------------------------------------------------
# Global Variables
# -----------------------------------------------------------------------
LP_VALIDATE_BUGTASK_STATUS={
'In Progress': 100,
'Triaged': 90,
'Confirmed': 80,
'New': 70,
'Incomplete (with response)': 60,
'Incomplete (without response)': 50,
'Incomplete': 40,
'Fix Committed': 30,
'Fix Released': 20,
'Won\'t Fix': 10,
'Invalid': 0,
'Opinion': 0}
LP_VALIDATE_BUGTASK_IMPORTANCE={
'Critical': 5,
'High': 4,
'Medium': 3,
'Low': 2,
'Wishlist': 1,
'Undecided': 0}
LP_VALIDATE_BRANCH_STATUS=(
'Experimental',
'Development',
'Mature',
'Merged',
'Abandoned')
class LaunchpadDatabase(object):
lp = None
LP_VALIDATE_BUGTASK_STATUS = LP_VALIDATE_BUGTASK_STATUS
LP_VALIDATE_BUGTASK_IMPORTANCE = LP_VALIDATE_BUGTASK_IMPORTANCE
def connect(self):
if not self.lp:
system = os.getenv('LPSYSTEM') or 'production'
cachedir = os.path.expanduser("~/.launchpadlib/cache")
self.lp = Launchpad.login_with('lp-cli', system, cachedir)
return self.lp
def get(self, entry_type, entry_id):
self.connect()
if entry_type != 'people':
entry_type = entry_type+'s'
try:
return getattr(self.lp, entry_type)[entry_id]
except KeyError as e:
logging.debug(e)
return None
def load_lp_objects(self, opts):
if opts.get('assignee'):
opts['assignee'] = self.get('people', opts['assignee'])
return opts
class _StartAction(object):
def __init__(self):
self.db = LaunchpadDatabase()
self.acc = actionlib.Mission(self.db)
# -----------------------------------------------------------------------
# Action Classes
# -----------------------------------------------------------------------
class Get(_StartAction):
desc = """
Get a Launchpad Entry.
"""
link_type = 'None -> Mission'
data_type = 'Any -> Any'
def __call__(self, **opts):
entry_type = opts.pop('entry_type')
entry_id = opts.pop('entry_id')
self.acc.add_task(repr(self.__class__),
self.maintask,
entry_type, entry_id,
**opts)
return self.acc
def maintask(db, entry_type, entry_id, **opts):
return db.get(entry_type, entry_id)
class FindBugTasks(_StartAction):
desc = """
Search Bug Tasks of the entry.
"""
link_type = 'None -> Mission'
data_type = 'Any -> Any'
def __call__(self, **opts):
entry_type = opts.pop('entry_type')
entry_id = opts.pop('entry_id')
self.acc.add_task(repr(self.__class__),
self.maintask,
entry_type, entry_id,
**opts)
return self.acc
def maintask(db, entry_type, entry_id, **opts):
entry = db.get(entry_type, entry_id)
# handling milestone.
if entry and entry_type == 'project' and opts.get('milestone'):
opts['milestone'] = entry.getMilestone(name=opts['milestone'])
# handling status.
if 'Todo' in opts['status'] and 'All' in opts['status']:
raise Exception("Todo and All are confilict.")
if 'All' in opts['status']:
opts['status'] = db.LP_VALIDATE_BUGTASK_STATUS.keys()
elif 'Todo' in opts['status']:
opts['status'] = filter(lambda e: e not in ('Invalid',
'Won\'t Fix',
'Fix Committed',
'Fix Released',
'Opinion',),
db.LP_VALIDATE_BUGTASK_STATUS.keys())
opts = db.load_lp_objects(opts)
return entry.searchTasks(**opts)
class FindPackages(_StartAction):
desc = 'Find packages'
link_type = 'None -> Mission'
data_type = 'Any -> Any'
def __call__(self, **opts):
ppa = opts.pop('ppa').replace('ppa:', '')
ppa_owner, ppa_name = ppa.split('/')
self.acc.add_task(repr(self.__class__),
self.maintask,
ppa_owner, ppa_name,
**opts)
return self.acc
def maintask(db, ppa_onwer, ppa_name, **opts):
people = db.get('people', ppa_onwer)
if not people:
people = db.get('team', ppa_onwer)
archive = people.getPPAByName(name=ppa_name)
return archive.getPublishedSources(status='Published')
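# --- Added usage sketch (not part of the original module) ---
# Actions are callables that queue work on a Mission accumulator. The entry type and
# id below are assumptions; running the returned mission needs Launchpad credentials.
def _example_get_bug():
    action = Get()
    return action(entry_type='bug', entry_id=1)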
| mit | 4,258,922,284,468,343,000 | 33.112299 | 77 | 0.521555 | false | 4.134154 | false | false | false |
rangertaha/salt-manager | salt-manager/webapp/apps/management/commands/bshell.py | 1 | 3463 | #!/usr/bin/env python
"""
"""
import os
from optparse import make_option
from django.core.management.base import NoArgsCommand
def starting_imports():
from django.db.models.loading import get_models
for m in get_models():
exec "from %s import %s" % (m.__module__, m.__name__)
from datetime import datetime, timedelta
sdt = datetime.today().date()
edt = sdt + timedelta(days=1)
return locals()
def start_plain_shell(use_plain):
import code
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
# See ticket 5082.
imported_objects = {}
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if not use_plain:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
try:
execfile(pythonrc)
except NameError:
pass
# This will import .pythonrc.py as a side-effect
import user
code.interact(local=imported_objects)
def start_ipython_shell():
from IPython.Shell import IPShell
import IPython
# Explicitly pass an empty list as arguments, because otherwise IPython
# would use sys.argv from this script.
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
def start_bpython_shell():
from bpython import cli
cli.main(args=[], locals_=starting_imports())
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not IPython.'),
make_option('--ipython', action='store_true', dest='ipython',
help='Tells Django to use ipython.'),
make_option('--bpython', action='store_true', dest='bpython',
help='Tells Django to use bpython.'),
)
help = "Runs a Python interactive interpreter. Tries to use bPython, if it's available."
requires_model_validation = False
def handle_noargs(self, **options):
# XXX: (Temporary) workaround for ticket #1796: force early loading of all
# models from installed apps.
from django.db.models.loading import get_models
loaded_models = get_models()
use_plain = options.get('plain', False)
use_ipython = options.get('ipython', False)
use_bpython = options.get('bpython', False)
try:
if use_plain:
# Don't bother loading IPython, because the user wants plain Python.
raise ImportError
elif use_ipython:
start_ipython_shell()
elif use_bpython:
start_bpython_shell()
else:
start_bpython_shell()
except ImportError:
# fallback to plain shell if we encounter an ImportError
start_plain_shell(use_plain)
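# --- Added usage notes (not part of the original module) ---
# Typical invocations, assuming this app is listed in INSTALLED_APPS:
# python manage.py bshell # bpython shell with all models pre-imported
# python manage.py bshell --ipython # force IPython
# python manage.py bshell --plain # plain Python REPL honouring $PYTHONSTARTUP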
| mit | -6,263,867,662,607,562,000 | 34.336735 | 92 | 0.6324 | false | 4.24908 | false | false | false |
nall/pythonista-tradervue | utils.py | 1 | 5565 | # vim: ft=python tabstop=2 shiftwidth=2 expandtab
# Copyright (c) 2015, Jon Nall
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pythonista-tradervue nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import clipboard
import console
import keychain
import logging
import os
import re
import sys
from datetime import datetime, date
sys.path.insert(0, os.path.realpath(os.path.join(os.getcwd(), 'tradervue')))
from tradervue import Tradervue, TradervueLogFormatter
LOG = None
DEBUG = 0 # 1 for normal debug, 2 for HTTP debug as well
KEYCHAIN_ID = 'tradervue'
USER_AGENT = "Pythonista Tradervue ([email protected])"
def get_args(argv):
args = { 'action': 'set_password',
'user': None,
'text': clipboard.get(),
'date': date.today().strftime('%Y%m%d'),
'overwrite': "0" }
for a in argv:
pairs = a.split(':')
for p in pairs:
(k, v) = p.split('=', 2)
if k not in args:
raise ValueError("Invalid argument '%s'" % (k))
args[k] = v
if args['user'] is None:
args['user'] = console.input_alert("Tradervue Username")
if not re.match(r'^\d{8}$', args['date']):
raise ValueError("Invalid date format '%s'. Must be YYYYMMDD" % (args['date']))
if int(args['overwrite']) == 0:
args['overwrite'] = False
else:
args['overwrite'] = True
args['date'] = datetime.strptime(args['date'], '%Y%m%d')
return args
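# --- Added usage sketch (not part of the original script) ---
# Arguments arrive as colon-separated key=value pairs; the values below are
# placeholders for illustration only.
def _example_parse():
  return get_args(['action=update_journal:user=someuser:date=20150102:overwrite=1'])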
def set_password(args):
p = console.password_alert("Tradervue Credentials", args['user'])
keychain.set_password(KEYCHAIN_ID, args['user'], p)
return True
def delete_password(args):
if keychain.get_password(KEYCHAIN_ID, args['user']) is None:
LOG.error("No password was set for %s" % (args['user']))
return False
else:
keychain.delete_password(KEYCHAIN_ID, args['user'])
LOG.info("Deleted credentials for %s" % (args['user']))
return True
def new_note(args, tv):
note_id = tv.create_note(args['text'])
if note_id is None:
LOG.error("Failed to create new note")
return False
else:
LOG.info("Created new note with ID %s" % (note_id))
return True
def update_journal(args, tv):
datestring = args['date'].strftime('%Y-%m-%d')
# Check if we have an existing entry on the date. If not, just create it
# Otherwise overwrite it if args['overwrite'] is set or append to it if not
#
journal = tv.get_journal(date = args['date'])
if journal is None:
journal_id = tv.create_journal(args['date'], args['text'])
if journal_id is None:
LOG.error("Failed to create journal on %s" % (datestring))
return False
else:
LOG.info("Created new journal on %s with ID %s" % (datestring, journal_id))
return True
else:
verb = 'Appended'
text = journal['notes']
if args['overwrite']:
verb = 'Overwrote'
text = ''
text += "\n%s" % (args['text'])
print text
if tv.update_journal(journal['id'], text):
LOG.info("%s journal on %s (ID %s)" % (verb, journal['id'], datestring))
return True
else:
LOG.error("Failed to update journal on %s (ID %s)" % (datestring, journal['id']))
return False
def main():
global LOG
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
if DEBUG > 1:
LOG.setLevel(logging.DEBUG)
c = logging.StreamHandler()
c.setFormatter(TradervueLogFormatter())
LOG.addHandler(c)
args = get_args(sys.argv[1:])
actions = { 'set_password': set_password,
'delete_password': delete_password,
'new_note': new_note,
'update_journal': update_journal }
ok = False
if args['action'] not in actions:
raise ValueError("Invalid action '%s'" % (args['action']))
elif args['action'].endswith('_password'):
ok = actions[args['action']](args)
else:
p = keychain.get_password(KEYCHAIN_ID, args['user'])
if p is None:
# Request one from the user
p = console.password_alert("Tradervue Credentials", args['user'])
tv = Tradervue(args['user'], p, USER_AGENT, verbose_http = True if DEBUG > 1 else False)
ok = actions[args['action']](args, tv)
return 0 if ok else 1
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | -2,197,524,809,621,453,300 | 32.727273 | 94 | 0.669003 | false | 3.646789 | false | false | false |
smcoll/stormpath-django | django_stormpath/social.py | 1 | 7183 | from django.contrib.auth import login as django_login
from django.shortcuts import resolve_url
from django.core.urlresolvers import reverse
from django.conf import settings
from stormpath.error import Error as StormpathError
from stormpath.resources.provider import Provider
from requests_oauthlib import OAuth2Session
from .models import CLIENT, APPLICATION
from .backends import StormpathSocialBackend
SOCIAL_AUTH_BACKEND = 'django_stormpath.backends.StormpathSocialBackend'
GITHUB_AUTHORIZATION_BASE_URL = 'https://github.com/login/oauth/authorize'
GITHUB_TOKEN_URL = 'https://github.com/login/oauth/access_token'
GOOGLE_AUTHORIZATION_BASE_URL = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
FACEBOOK_AUTHORIZATION_BASE_URL = 'https://www.facebook.com/dialog/oauth'
FACEBOOK_TOKEN_URL = 'https://graph.facebook.com/oauth/access_token'
LINKEDIN_AUTHORIZATION_BASE_URL = 'https://www.linkedin.com/uas/oauth2/authorization'
LINKEDIN_TOKEN_URL = 'https://www.linkedin.com/uas/oauth2/accessToken'
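# --- Added configuration sketch (not part of the original module) ---
# Shape of the Django setting this module reads; the literal values are placeholders.
# STORMPATH_SOCIAL = {
#     'GOOGLE': {'client_id': '...', 'client_secret': '...'},
#     'FACEBOOK': {'client_id': '...', 'client_secret': '...'},
#     'GITHUB': {'client_id': '...', 'client_secret': '...'},
#     'LINKEDIN': {'client_id': '...', 'client_secret': '...'},
# }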
def _get_django_user(account):
backend = StormpathSocialBackend()
return backend.authenticate(account=account)
def get_access_token(provider, authorization_response, redirect_uri):
if provider == Provider.GOOGLE:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['GOOGLE']['client_id'],
redirect_uri=redirect_uri
)
ret = p.fetch_token(GOOGLE_TOKEN_URL,
client_secret=settings.STORMPATH_SOCIAL['GOOGLE']['client_secret'],
authorization_response=authorization_response)
return ret['access_token']
elif provider == Provider.FACEBOOK:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['FACEBOOK']['client_id'],
redirect_uri=redirect_uri
)
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
p = facebook_compliance_fix(p)
ret = p.fetch_token(FACEBOOK_TOKEN_URL,
client_secret=settings.STORMPATH_SOCIAL['FACEBOOK']['client_secret'],
authorization_response=authorization_response)
return ret['access_token']
elif provider == Provider.GITHUB or provider.upper() == Provider.GITHUB:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['GITHUB']['client_id'],
)
ret = p.fetch_token(GITHUB_TOKEN_URL,
client_secret=settings.STORMPATH_SOCIAL['GITHUB']['client_secret'],
authorization_response=authorization_response)
return ret['access_token']
elif provider == Provider.LINKEDIN:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['LINKEDIN']['client_id'],
redirect_uri=redirect_uri
)
from requests_oauthlib.compliance_fixes import linkedin_compliance_fix
p = linkedin_compliance_fix(p)
ret = p.fetch_token(LINKEDIN_TOKEN_URL,
client_secret=settings.STORMPATH_SOCIAL['LINKEDIN']['client_secret'],
authorization_response=authorization_response)
return ret['access_token']
else:
return None
def handle_social_callback(request, provider):
provider_redirect_url = 'stormpath_' + provider.lower() + '_login_callback'
abs_redirect_uri = request.build_absolute_uri(
reverse(provider_redirect_url, kwargs={'provider': provider}))
access_token = get_access_token(
provider,
request.build_absolute_uri(),
abs_redirect_uri)
if not access_token:
raise RuntimeError('Error communicating with Authentication Provider: %s' % provider)
params = {'provider': provider, 'access_token': access_token}
try:
account = APPLICATION.get_provider_account(**params)
except StormpathError as e:
# We might be missing a social directory
# First we look for one and see if it's already there
# and just error out
for asm in APPLICATION.account_store_mappings:
if (getattr(asm.account_store, 'provider') and
asm.account_store.provider.provider_id == provider):
raise e
# Or if we couldn't find one we create it for the user
# map it to the current application
# and try authenticate again
create_provider_directory(provider, abs_redirect_uri)
account = APPLICATION.get_provider_account(**params)
user = _get_django_user(account)
user.backend = SOCIAL_AUTH_BACKEND
django_login(request, user)
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to
def create_provider_directory(provider, redirect_uri):
"""Helper function for creating a provider directory"""
dir = CLIENT.directories.create({
'name': APPLICATION.name + '-' + provider,
'provider': {
'client_id': settings.STORMPATH_SOCIAL[provider.upper()]['client_id'],
'client_secret': settings.STORMPATH_SOCIAL[provider.upper()]['client_secret'],
'redirect_uri': redirect_uri,
'provider_id': provider,
},
})
APPLICATION.account_store_mappings.create({
'application': APPLICATION,
'account_store': dir,
'list_index': 99,
'is_default_account_store': False,
'is_default_group_store': False,
})
def get_authorization_url(provider, redirect_uri):
if provider == Provider.GOOGLE:
scope = [
"email",
"profile"
]
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['GOOGLE']['client_id'],
scope=scope,
redirect_uri=redirect_uri
)
authorization_url, state = p.authorization_url(GOOGLE_AUTHORIZATION_BASE_URL)
return authorization_url, state
elif provider == Provider.FACEBOOK:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['FACEBOOK']['client_id'],
redirect_uri=redirect_uri
)
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
p = facebook_compliance_fix(p)
authorization_url, state = p.authorization_url(FACEBOOK_AUTHORIZATION_BASE_URL)
return authorization_url, state
elif provider == Provider.GITHUB or provider.upper() == Provider.GITHUB:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['GITHUB']['client_id'],
)
authorization_url, state = p.authorization_url(GITHUB_AUTHORIZATION_BASE_URL)
return authorization_url, state
elif provider == Provider.LINKEDIN:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['LINKEDIN']['client_id'],
redirect_uri=redirect_uri
)
from requests_oauthlib.compliance_fixes import linkedin_compliance_fix
p = linkedin_compliance_fix(p)
authorization_url, state = p.authorization_url(LINKEDIN_AUTHORIZATION_BASE_URL)
return authorization_url, state
else:
raise RuntimeError('Invalid Provider %s' % provider)
| apache-2.0 | 732,327,008,245,947,000 | 38.905556 | 92 | 0.655019 | false | 3.972898 | false | false | false |
jakirkham/lazyflow | lazyflow/operators/opFeatureMatrixCache.py | 1 | 12534 | from functools import partial
import logging
logger = logging.getLogger(__name__)
import numpy
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.request import RequestLock, Request, RequestPool
from lazyflow.utility import OrderedSignal
from lazyflow.roi import getBlockBounds, getIntersectingBlocks, determineBlockShape
class OpFeatureMatrixCache(Operator):
"""
- Request features and labels in blocks
- For nonzero label pixels in each block, extract the label image
- Cache the feature matrix for each block separately
- Output the concatenation of all feature matrices
Note: This operator does not currently use the NonZeroLabelBlocks slot.
Instead, it only requests labels for blocks that have been
marked dirty via dirty notifications from the LabelImage slot.
As a result, you MUST connect/configure this operator before you
load your upstream label cache with values.
This operator must already be "watching" when when the label operator
is initialized with its first labels.
"""
FeatureImage = InputSlot()
LabelImage = InputSlot()
NonZeroLabelBlocks = InputSlot() # TODO: Eliminate this slot. It isn't used...
# Output is a single 'value', which is a 2D ndarray.
# The first row is labels, the rest are the features.
# (As a consequence of this, labels are converted to float)
LabelAndFeatureMatrix = OutputSlot()
ProgressSignal = OutputSlot() # For convenience of passing several progress signals
# to a downstream operator (such as OpConcatenateFeatureMatrices),
# we provide the progressSignal member as an output slot.
MAX_BLOCK_PIXELS = 1e6
def __init__(self, *args, **kwargs):
super(OpFeatureMatrixCache, self).__init__(*args, **kwargs)
self._lock = RequestLock()
self.progressSignal = OrderedSignal()
self._progress_lock = RequestLock()
self._blockshape = None
self._dirty_blocks = set()
self._blockwise_feature_matrices = {}
self._block_locks = {} # One lock per stored block
self._init_blocks(None, None)
def _init_blocks(self, input_shape, new_blockshape):
old_blockshape = self._blockshape
if new_blockshape == old_blockshape:
# Nothing to do
return
if ( len(self._dirty_blocks) != 0
or len(self._blockwise_feature_matrices) != 0):
raise RuntimeError("It's too late to change the dimensionality of your data after you've already started training.\n"
"Delete all your labels and try again.")
# In these set/dict members, the block id (dict key)
# is simply the block's start coordinate (as a tuple)
self._blockshape = new_blockshape
logger.debug("Initialized with blockshape: {}".format(new_blockshape))
def setupOutputs(self):
# We assume that channel the last axis
assert self.FeatureImage.meta.getAxisKeys()[-1] == 'c'
assert self.LabelImage.meta.getAxisKeys()[-1] == 'c'
assert self.LabelImage.meta.shape[-1] == 1
# For now, we assume that the two input images have the same shape (except channel)
# This constraint could be relaxed in the future if necessary
assert self.FeatureImage.meta.shape[:-1] == self.LabelImage.meta.shape[:-1],\
"FeatureImage and LabelImage shapes do not match: {} vs {}"\
"".format( self.FeatureImage.meta.shape, self.LabelImage.meta.shape )
self.LabelAndFeatureMatrix.meta.shape = (1,)
self.LabelAndFeatureMatrix.meta.dtype = object
self.LabelAndFeatureMatrix.meta.channel_names = self.FeatureImage.meta.channel_names
num_feature_channels = self.FeatureImage.meta.shape[-1]
if num_feature_channels != self.LabelAndFeatureMatrix.meta.num_feature_channels:
self.LabelAndFeatureMatrix.meta.num_feature_channels = num_feature_channels
self.LabelAndFeatureMatrix.setDirty()
self.ProgressSignal.meta.shape = (1,)
self.ProgressSignal.meta.dtype = object
self.ProgressSignal.setValue( self.progressSignal )
# Auto-choose a blockshape
tagged_shape = self.LabelImage.meta.getTaggedShape()
if 't' in tagged_shape:
# A block should never span multiple time slices.
# For txy volumes, that could lead to lots of extra features being computed.
tagged_shape['t'] = 1
blockshape = determineBlockShape( tagged_shape.values(), OpFeatureMatrixCache.MAX_BLOCK_PIXELS )
# Don't span more than 256 px along any axis
blockshape = tuple(min(x, 256) for x in blockshape)
self._init_blocks(self.LabelImage.meta.shape, blockshape)
def execute(self, slot, subindex, roi, result):
assert slot == self.LabelAndFeatureMatrix
self.progressSignal(0.0)
# Technically, this could result in strange progress reporting if execute()
# is called by multiple threads in parallel.
# This could be fixed with some fancier progress state, but
# (1) We don't expect that to by typical, and
# (2) progress reporting is merely informational.
num_dirty_blocks = len( self._dirty_blocks )
remaining_dirty = [num_dirty_blocks]
def update_progress( result ):
remaining_dirty[0] -= 1
percent_complete = 95.0*(num_dirty_blocks - remaining_dirty[0])/num_dirty_blocks
self.progressSignal( percent_complete )
# Update all dirty blocks in the cache
logger.debug( "Updating {} dirty blocks".format(num_dirty_blocks) )
# Before updating the blocks, ensure that the necessary block locks exist
# It's better to do this now instead of inside each request
# to avoid contention over self._lock
with self._lock:
for block_start in self._dirty_blocks:
if block_start not in self._block_locks:
self._block_locks[block_start] = RequestLock()
# Update each block in its own request.
pool = RequestPool()
reqs = {}
for block_start in self._dirty_blocks:
req = Request( partial(self._get_features_for_block, block_start ) )
req.notify_finished( update_progress )
reqs[block_start] = req
pool.add( req )
pool.wait()
# Now store the results we got.
# It's better to store the blocks here -- rather than within each request -- to
# avoid contention over self._lock from within every block's request.
with self._lock:
for block_start, req in reqs.items():
if req.result is None:
# 'None' means the block wasn't dirty. No need to update.
continue
labels_and_features_matrix = req.result
self._dirty_blocks.remove(block_start)
if labels_and_features_matrix.shape[0] > 0:
# Update the block entry with the new matrix.
self._blockwise_feature_matrices[block_start] = labels_and_features_matrix
else:
# All labels were removed from the block,
# So the new feature matrix is empty.
# Just delete its entry from our list.
try:
del self._blockwise_feature_matrices[block_start]
except KeyError:
pass
# Concatenate the all blockwise results
if self._blockwise_feature_matrices:
total_feature_matrix = numpy.concatenate( self._blockwise_feature_matrices.values(), axis=0 )
else:
# No label points at all.
# Return an empty label&feature matrix (of the correct shape)
num_feature_channels = self.FeatureImage.meta.shape[-1]
total_feature_matrix = numpy.ndarray( shape=(0, 1 + num_feature_channels), dtype=numpy.float32 )
self.progressSignal(100.0)
logger.debug( "After update, there are {} clean blocks".format( len(self._blockwise_feature_matrices) ) )
result[0] = total_feature_matrix
def propagateDirty(self, slot, subindex, roi):
if slot == self.NonZeroLabelBlocks:
# Label changes will be handled via labelimage dirtyness propagation
return
assert slot == self.FeatureImage or slot == self.LabelImage
# Our blocks are tracked by label roi (1 channel)
roi = roi.copy()
roi.start[-1] = 0
roi.stop[-1] = 1
# Bookkeeping: Track the dirty blocks
block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
block_starts = map( tuple, block_starts )
#
# If the features were dirty (not labels), we only really care about
# the blocks that are actually stored already
# For big dirty rois (e.g. the entire image),
# we avoid a lot of unnecessary entries in self._dirty_blocks
if slot == self.FeatureImage:
block_starts = set( block_starts ).intersection( self._blockwise_feature_matrices.keys() )
with self._lock:
self._dirty_blocks.update( block_starts )
# Output has no notion of roi. It's all dirty.
self.LabelAndFeatureMatrix.setDirty()
def _get_features_for_block(self, block_start):
"""
Computes the feature matrix for the given block IFF the block is dirty.
Otherwise, returns None.
"""
# Caller must ensure that the lock for this block already exists!
with self._block_locks[block_start]:
if block_start not in self._dirty_blocks:
# Nothing to do if this block isn't actually dirty
                # (For parallel requests, it's theoretically possible.)
return None
block_roi = getBlockBounds( self.LabelImage.meta.shape, self._blockshape, block_start )
# TODO: Shrink the requested roi using the nonzero blocks slot...
# ...or just get rid of the nonzero blocks slot...
labels_and_features_matrix = self._extract_feature_matrix(block_roi)
return labels_and_features_matrix
def _extract_feature_matrix(self, label_block_roi):
num_feature_channels = self.FeatureImage.meta.shape[-1]
labels = self.LabelImage(label_block_roi[0], label_block_roi[1]).wait()
label_block_positions = numpy.nonzero(labels[...,0].view(numpy.ndarray))
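        # numpy.nonzero returns a tuple of coordinate arrays (one per axis),
        # which can be used directly for fancy indexing below.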
labels_matrix = labels[label_block_positions].astype(numpy.float32).view(numpy.ndarray)
if len(label_block_positions) == 0 or len(label_block_positions[0]) == 0:
# No label points in this roi.
# Return an empty label&feature matrix (of the correct shape)
return numpy.ndarray( shape=(0, 1 + num_feature_channels), dtype=numpy.float32 )
# Shrink the roi to the bounding box of nonzero labels
block_bounding_box_start = numpy.array( map( numpy.min, label_block_positions ) )
block_bounding_box_stop = 1 + numpy.array( map( numpy.max, label_block_positions ) )
global_bounding_box_start = block_bounding_box_start + label_block_roi[0][:-1]
global_bounding_box_stop = block_bounding_box_stop + label_block_roi[0][:-1]
# Since we're just requesting the bounding box, offset the feature positions by the box start
bounding_box_positions = numpy.transpose( numpy.transpose(label_block_positions) - numpy.array(block_bounding_box_start) )
bounding_box_positions = tuple(bounding_box_positions)
# Append channel roi (all feature channels)
feature_roi_start = list(global_bounding_box_start) + [0]
feature_roi_stop = list(global_bounding_box_stop) + [num_feature_channels]
# Request features (bounding box only)
features = self.FeatureImage(feature_roi_start, feature_roi_stop).wait()
# Cast as plain ndarray (not VigraArray), since we don't need/want axistags
features_matrix = features[bounding_box_positions].view(numpy.ndarray)
return numpy.concatenate( (labels_matrix, features_matrix), axis=1)
| lgpl-3.0 | -5,391,571,419,154,124,000 | 47.211538 | 130 | 0.631961 | false | 4.311662 | false | false | false |
hoaibang07/Webscrap | transcripture/sources/crawler_data.py | 1 | 2804 | # -*- encoding: utf-8 -*-
import io
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import urllib2
import urlparse
def _remove_div_vdx(soup):
for div in soup.find_all('div', class_='vidx'):
div.extract()
return soup
def get_data(urlchuong_list, i):
filename = 'urlsach/data/sach' + str(i) + '.txt'
ftmp = io.open(filename, 'w', encoding='utf-8')
try:
hdrs = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Connection': 'keep-alive',
'Cookie': 'ipq_lip=20376774; ipq_set=1453874029; __atuvc=2%7C4; __utma=126044488.676620502.1453787537.1453787537.1453787537.1; __utmz=126044488.1453787537.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); PHPSESSID=ed3f4874b92a29b6ed036adfa5ad6fb3; ipcountry=us',
'Host': 'www.transcripture.com',
'Referer': 'http://www.transcripture.com/vietnamese-spanish-genesis-1.html',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:43.0) Gecko/20100101 Firefox/43.0'
}
for urlchuong in urlchuong_list:
# urlchuong = 'http://www.transcripture.com/vietnamese-chinese-revelation-3.html'
print urlchuong
# create request
req = urllib2.Request(urlchuong, headers=hdrs)
# get response
response = urllib2.urlopen(req)
soup = BeautifulSoup(response.read())
soup = _remove_div_vdx(soup)
# print soup
table_tag = soup.find_all('table', attrs={'width':'100%', 'cellspacing':'0'})[0]
tr_tags = table_tag.find_all('tr')
_len = len(tr_tags)
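            # Output format: the first line holds the two chapter headings
            # separated by '|'; every following line is one table row with
            # its text fields joined by '|'.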
# in first tr tag:
h2_class = tr_tags[0].find_all('h2', class_='cphd')
ftmp.write(u'' + h2_class[0].get_text() + '|')
ftmp.write(u'' + h2_class[1].get_text() + '\n')
# print table_tag
for x in xrange(1,_len):
data = tr_tags[x].get_text('|')
# print data
# url_ec = url.encode('unicode','utf-8')
ftmp.write(u'' + data + '\n')
except Exception, e:
print e
# close file
ftmp.close()
def main():
for x in xrange(1,67):
print('Dang get data sach %d'%x)
urlchuong_list = []
filename = 'urlsach/sach' + str(x) + '.txt'
urlchuong_file = open(filename, 'r')
for line in urlchuong_file:
# print(line)
urlchuong_list.append(line.rstrip())
get_data(urlchuong_list, x)
urlchuong_file.close()
if __name__ == '__main__':
main()
# urlchuong_list = ['http://www.transcripture.com/vietnamese-chinese-revelation-3.html']
# get_data(urlchuong_list, 1) | gpl-2.0 | 4,626,698,484,316,223,000 | 34.506329 | 280 | 0.557418 | false | 3.074561 | false | false | false |
maheshp/novatest | nova/virt/xenapi/vmops.py | 1 | 74575 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import functools
import itertools
import time
from eventlet import greenthread
import netaddr
from oslo.config import cfg
from nova import block_device
from nova.compute import api as compute
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.StrOpt('xenapi_image_upload_handler',
default='nova.virt.xenapi.imageupload.glance.GlanceStore',
help='Object Store Driver used to handle image uploads.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '3'
DEVICE_CD = '4'
DEVICE_CONFIGDRIVE = '5'
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
# Compare each individual portion of both version strings
for va, vb in zip(a, b):
ret = int(va) - int(vb)
if ret:
return ret
# Fallback to comparing length last
return len(a) - len(b)
def make_step_decorator(context, instance, instance_update):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
...
@step
def step2():
...
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
the current-step-count by 1, so after::
step1()
the current-step-count would be 1 giving a progress of ``1 / 2 *
100`` or 50%.
"""
step_info = dict(total=0, current=0)
def bump_progress():
step_info['current'] += 1
progress = round(float(step_info['current']) /
step_info['total'] * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
instance_update(context, instance['uuid'], {'progress': progress})
def step_decorator(f):
step_info['total'] += 1
@functools.wraps(f)
def inner(*args, **kwargs):
rv = f(*args, **kwargs)
bump_progress()
return rv
return inner
return step_decorator
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
msg = _("Importing image upload handler: %s")
LOG.debug(msg % CONF.xenapi_image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenapi_image_upload_handler)
@property
def agent_enabled(self):
return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def list_instances(self):
"""List VM instances."""
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls..
name_labels = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
def confirm_migration(self, migration, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info=network_info)
def _attach_mapped_block_devices(self, instance, block_device_info):
# We are attaching these volumes before start (no hotplugging)
# because some guests (windows) don't load PV drivers quickly
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'],
mount_device,
hotplug=False)
def finish_revert_migration(self, instance, block_device_info=None):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
# NOTE(danms): if we're reverting migration in the failure case,
# make sure we don't have a conflicting vm still running here,
# as might be the case in a failed migrate-to-same-host situation
new_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is not None:
if new_ref is not None:
self._destroy(instance, new_ref)
# Remove the '-orig' suffix (which was added in case the
# resized VM ends up on the source host, common during
# testing)
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
self._attach_mapped_block_devices(instance, block_device_info)
elif new_ref is not None:
# We crashed before the -orig backup was made
vm_ref = new_ref
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
root_vdi = vm_utils.move_disks(self._session, instance, disk_info)
if resize_instance:
self._resize_instance(instance, root_vdi)
# Check if kernel and ramdisk are external
kernel_file = None
ramdisk_file = None
name_label = instance['name']
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
disk_image_type = vm_utils.determine_disk_image_type(image_meta)
vm_ref = self._create_vm(context, instance, instance['name'],
{'root': root_vdi},
disk_image_type, network_info, kernel_file,
ramdisk_file)
self._attach_mapped_block_devices(instance, block_device_info)
# 5. Start VM
self._start(instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
# Attached volumes that have become non-responsive will prevent a VM
# from starting, so scan for these before attempting to start
#
# In order to make sure this detach is consistent (virt, BDM, cinder),
# we only detach in the virt-layer if a callback is provided.
if bad_volumes_callback:
bad_devices = self._volumeops.find_bad_volumes(vm_ref)
for device_name in bad_devices:
self._volumeops.detach_volume(
None, instance['name'], device_name)
self._session.call_xenapi('VM.start_on', vm_ref,
self._session.get_xenapi_host(),
False, False)
# Allow higher-layers a chance to detach bad-volumes as well (in order
# to cleanup BDM entries and detach in Cinder)
if bad_volumes_callback and bad_devices:
bad_volumes_callback(bad_devices)
def _create_disks(self, context, instance, name_label, disk_image_type,
image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
image_meta.get('id'),
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
for vdi in vdis.itervalues():
vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
vdi['uuid'])
root_vdi = vdis.get('root')
if root_vdi:
self._resize_instance(instance, root_vdi)
return vdis
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
name_label=None, rescue=False):
if name_label is None:
name_label = instance['name']
step = make_step_decorator(context, instance,
self._virtapi.instance_update)
@step
def determine_disk_image_type_step(undo_mgr):
return vm_utils.determine_disk_image_type(image_meta)
@step
def create_disks_step(undo_mgr, disk_image_type, image_meta):
vdis = self._create_disks(context, instance, name_label,
disk_image_type, image_meta,
block_device_info=block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
if not vdi.get('osvol')]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
@step
def create_kernel_ramdisk_step(undo_mgr):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
def undo_create_kernel_ramdisk():
if kernel_file or ramdisk_file:
LOG.debug(_("Removing kernel/ramdisk files from dom0"),
instance=instance)
vm_utils.destroy_kernel_ramdisk(
self._session, kernel_file, ramdisk_file)
undo_mgr.undo_with(undo_create_kernel_ramdisk)
return kernel_file, ramdisk_file
@step
def create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file):
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
def undo_create_vm():
self._destroy(instance, vm_ref, network_info=network_info)
undo_mgr.undo_with(undo_create_vm)
return vm_ref
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type, admin_password,
injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
# booting the VM, since we can't hotplug block devices
# on non-PV guests
@step
def attach_root_disk_step(undo_mgr, vm_ref):
orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
DEVICE_RESCUE, bootable=False)
@step
def setup_network_step(undo_mgr, vm_ref, vdis):
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
@step
def inject_metadata_step(undo_mgr, vm_ref):
self.inject_instance_metadata(instance, vm_ref)
@step
def prepare_security_group_filters_step(undo_mgr):
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
# NOTE(salvatore-orlando): setup_basic_filtering might be
# empty or not implemented at all, as basic filter could
# be implemented with VIF rules created by xapi plugin
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
@step
def boot_instance_step(undo_mgr, vm_ref):
self._boot_new_instance(instance, vm_ref, injected_files,
admin_password)
@step
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
@step
def bdev_set_default_root(undo_mgr):
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
undo_mgr = utils.UndoManager()
try:
# NOTE(sirp): The create_disks() step will potentially take a
# *very* long time to complete since it has to fetch the image
# over the network and images can be several gigs in size. To
# avoid progress remaining at 0% for too long, make sure the
# first step is something that completes rather quickly.
bdev_set_default_root(undo_mgr)
disk_image_type = determine_disk_image_type_step(undo_mgr)
vdis = create_disks_step(undo_mgr, disk_image_type, image_meta)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type)
setup_network_step(undo_mgr, vm_ref, vdis)
inject_metadata_step(undo_mgr, vm_ref)
prepare_security_group_filters_step(undo_mgr)
if rescue:
attach_root_disk_step(undo_mgr, vm_ref)
boot_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
except Exception:
msg = _("Failed to spawn, rolling back")
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _create_vm(self, context, instance, name_label, vdis,
disk_image_type, network_info, kernel_file=None,
ramdisk_file=None, rescue=False):
"""Create VM instance."""
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
# NOTE(mikal): file injection only happens if we are _not_ using a
# configdrive.
if not configdrive.required_by(instance):
self.inject_instance_metadata(instance, vm_ref)
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
rescue):
# Alter the image before VM start for network injection.
if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
self._create_vifs(vm_ref, instance, network_info)
self.inject_network_info(instance, network_info, vm_ref)
hostname = instance['hostname']
if rescue:
hostname = 'RESCUE-%s' % hostname
self.inject_hostname(instance, vm_ref, hostname)
def _create_vm_record(self, context, instance, name_label, vdis,
disk_image_type, kernel_file, ramdisk_file):
"""Create the VM record in Xen, making sure that we do not create
a duplicate name-label. Also do a rough sanity check on memory
to try to short-circuit a potential failure later. (The memory
check only accounts for running VMs, so it can miss other builds
that are in progress.)
"""
vm_ref = vm_utils.lookup(self._session, name_label)
if vm_ref is not None:
raise exception.InstanceExists(name=name_label)
# Ensure enough free memory is available
if not vm_utils.ensure_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
mode = vm_mode.get_from_instance(instance)
if mode == vm_mode.XEN:
use_pv_kernel = True
elif mode == vm_mode.HVM:
use_pv_kernel = False
else:
use_pv_kernel = vm_utils.determine_is_pv(self._session,
vdis['root']['ref'], disk_image_type, instance['os_type'])
mode = use_pv_kernel and vm_mode.XEN or vm_mode.HVM
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
self._virtapi.instance_update(context,
instance['uuid'], {'vm_mode': mode})
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
return vm_ref
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = instance_types.extract_instance_type(instance)
# Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
LOG.debug(_("Detected ISO image type, creating blank VM "
"for install"), instance=instance)
cd_vdi = vdis.pop('root')
root_vdi = vm_utils.fetch_blank_disk(self._session,
instance_type['id'])
vdis['root'] = root_vdi
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=False)
vm_utils.create_vbd(self._session, vm_ref, cd_vdi['ref'],
DEVICE_CD, vbd_type='CD', bootable=True)
else:
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
vm_utils.auto_configure_disk(self._session,
root_vdi['ref'],
instance_type['root_gb'])
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
# Attach (optional) additional block-devices
for type_, vdi_info in vdis.items():
# Additional block-devices for boot use their device-name as the
# type.
if not type_.startswith('/dev'):
continue
# Convert device name to userdevice number, e.g. /dev/xvdb -> 1
userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
userdevice, bootable=False,
osvol=vdi_info.get('osvol'))
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
vm_utils.generate_swap(self._session, instance, vm_ref,
DEVICE_SWAP, name_label, swap_mb)
# Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
vm_utils.generate_ephemeral(self._session, instance, vm_ref,
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
# Attach (optional) configdrive v2 disk
if configdrive.required_by(instance):
vm_utils.generate_configdrive(self._session, instance, vm_ref,
DEVICE_CONFIGDRIVE,
admin_password=admin_password,
files=files)
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password):
"""Boot a new instance and configure it."""
LOG.debug(_('Starting VM'), instance=instance)
self._start(instance, vm_ref)
ctx = nova_context.get_admin_context()
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
if self.agent_enabled:
agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
else:
LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
'hypervisor': 'xen',
'os': instance['os_type'],
'architecture': instance['architecture']})
# Update agent, if necessary
# This also waits until the agent starts
agent = self._get_agent(instance, vm_ref)
version = agent.get_agent_version()
if version:
LOG.info(_('Instance agent version: %s'), version,
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
agent.agent_update(agent_build)
# if the guest agent is not available, configure the
# instance, but skip the admin password configuration
no_agent = version is None
# Inject ssh key.
agent.inject_ssh_key()
# Inject files, if necessary
if injected_files:
# Inject any files, if specified
for path, contents in injected_files:
agent.inject_file(path, contents)
# Set admin password, if necessary
if admin_password and not no_agent:
agent.set_admin_password(admin_password)
# Reset network config
agent.resetnetwork()
# Set VCPU weight
instance_type = instance_types.extract_instance_type(instance)
vcpu_weight = instance_type['vcpu_weight']
if vcpu_weight is not None:
LOG.debug(_("Setting VCPU weight"), instance=instance)
self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
'weight', str(vcpu_weight))
def _get_vm_opaque_ref(self, instance):
"""Get xapi OpaqueRef from a db record."""
vm_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is None:
raise exception.NotFound(_('Could not find VM with name %s') %
instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
           coalesce together, so we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-data-store: Once coalesced, we call a plugin on the
XenServer that will bundle the VHDs together and then push the
bundle. Depending on the configured value of
'xenapi_image_upload_handler', image data may be pushed to
Glance or the specified data store.
"""
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label,
update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self.image_upload_handler.upload_image(context,
self._session,
instance,
vdi_uuids,
image_id)
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
locals(), instance=instance)
instance_uuid = instance['uuid']
try:
self._session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=instance_uuid, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except self._session.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the _create_disks step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
# 1. NOOP since we're not transmitting the base-copy separately
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
vdi_uuid = vm_vdi_rec['uuid']
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
LOG.debug(_("Resizing down VDI %(vdi_uuid)s from "
"%(old_gb)dGB to %(new_gb)dGB"), locals(),
instance=instance)
# 2. Power down the instance before resizing
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Copy VDI, resize partition and filesystem, forget VDI,
# truncate VHD
new_ref, new_uuid = vm_utils.resize_disk(self._session,
instance,
vdi_ref,
instance_type)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the new VHD
self._migrate_vhd(instance, new_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_ref)
def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
sr_path):
# 1. Create Snapshot
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label) as vdi_uuids:
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Transfer the immutable VHDs (base-copies)
#
# The first VHD will be the leaf (aka COW) that is being used by
# the VM. For this step, we're only interested in the immutable
# VHDs which are all of the parents of the leaf VHD.
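            # islice(..., 1, None) skips index 0, i.e. the leaf/COW VHD.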
for seq_num, vdi_uuid in itertools.islice(
enumerate(vdi_uuids), 1, None):
self._migrate_vhd(instance, vdi_uuid, dest, sr_path, seq_num)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the COW VHD
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid']
self._migrate_vhd(instance, cow_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type):
"""Copies a VHD from one host machine to another, possibly
        resizing the filesystem beforehand.
:param instance: the instance that owns the VHD in question.
:param dest: the destination host machine.
:param instance_type: instance_type to resize to
"""
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
resize_down = instance['root_gb'] > instance_type['root_gb']
if resize_down and not instance['auto_disk_config']:
reason = _('Resize down not allowed without auto_disk_config')
raise exception.ResizeError(reason=reason)
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
        # NOTE(sirp): in case we're resizing to the same host (for dev
        # purposes), apply a suffix to the name-label so the two VM records,
        # which both exist until confirm_resize, don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
if resize_down:
self._migrate_disk_resizing_down(
context, instance, dest, instance_type, vm_ref, sr_path)
else:
self._migrate_disk_resizing_up(
context, instance, dest, vm_ref, sr_path)
# NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
# uses a staging-area (/images/instance<uuid>) and sequence-numbered
# VHDs to figure out how to reconstruct the VDI chain after syncing
disk_info = {}
return disk_info
def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
if not new_disk_size:
return
# Get current size of VDI
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
new_gb = instance['root_gb']
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"), locals(), instance=instance)
resize_func_name = self.check_resize_func_name()
self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
def check_resize_func_name(self):
"""Check the function name used to resize an instance based
on product_brand and product_version."""
brand = self._session.product_brand
version = self._session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if bool(version) and bool(brand):
xcp = brand == 'XCP'
r1_2_or_above = (
(
version[0] == 1
and version[1] > 1
)
or version[0] > 1)
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
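            # Older releases (XCP < 1.2, XenServer < 6.0) need the online
            # variant of the resize call.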
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
def reboot(self, instance, reboot_type, bad_volumes_callback=None):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
# remove existing filters
vm_ref = self._get_vm_opaque_ref(instance)
try:
if reboot_type == "HARD":
self._session.call_xenapi('VM.hard_reboot', vm_ref)
else:
self._session.call_xenapi('VM.clean_reboot', vm_ref)
except self._session.XenAPI.Failure, exc:
details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
" volumes and starting halted instance"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
else:
raise
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.set_admin_password(new_pass)
else:
raise NotImplementedError()
def inject_file(self, instance, path, contents):
"""Write a file to the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.inject_file(path, contents)
else:
raise NotImplementedError()
@staticmethod
def _sanitize_xenstore_key(key):
"""
Xenstore only allows the following characters as keys:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789-/_@
So convert the others to _
Also convert / to _, because that is somewhat like a path
separator.
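        For example, 'my key/name' becomes 'my_key_name'.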
"""
allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_@")
return ''.join([x in allowed_chars and x or '_' for x in key])
def inject_instance_metadata(self, instance, vm_ref):
"""Inject instance metadata into xenstore."""
def store_meta(topdir, data_list):
for item in data_list:
key = self._sanitize_xenstore_key(item['key'])
value = item['value'] or ''
self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key),
jsonutils.dumps(value))
# Store user metadata
store_meta('vm-data/user-metadata', instance['metadata'])
def change_instance_metadata(self, instance, diff):
"""Apply changes to instance metadata to xenstore."""
vm_ref = self._get_vm_opaque_ref(instance)
for key, change in diff.items():
key = self._sanitize_xenstore_key(key)
location = 'vm-data/user-metadata/%s' % key
if change[0] == '-':
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
elif change[0] == '+':
self._add_to_param_xenstore(vm_ref, location,
jsonutils.dumps(change[1]))
try:
self._write_to_xenstore(instance, location, change[1],
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
return None
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
if not vdi_refs:
return
for vdi_ref in vdi_refs:
try:
vm_utils.destroy_vdi(self._session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
1. We have neither a ramdisk nor a kernel, in which case we are a
RAW image and can omit this step
2. We have one or the other, in which case, we should flag as an
error
3. We have both, in which case we safely remove both the kernel
and the ramdisk.
"""
instance_uuid = instance['uuid']
if not instance['kernel_id'] and not instance['ramdisk_id']:
# 1. No kernel or ramdisk
LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
"deletion"), instance=instance)
return
if not (instance['kernel_id'] and instance['ramdisk_id']):
# 2. We only have kernel xor ramdisk
raise exception.InstanceUnacceptable(instance_id=instance_uuid,
reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
vm_ref)
if kernel or ramdisk:
vm_utils.destroy_kernel_ramdisk(self._session, kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
"""Destroy a rescue instance."""
# Shutdown Rescue VM
vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
state = vm_utils.compile_info(vm_rec)['state']
if state != power_state.SHUTDOWN:
self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
# Destroy Rescue VDIs
vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
LOG.info(_("Destroying VM"), instance=instance)
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
# vm_ref is checked correctly where necessary.
vm_ref = vm_utils.lookup(self._session, instance['name'])
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
# NOTE(sirp): `block_device_info` is not used, information about which
# volumes should be detached is determined by the
# VBD.other_config['osvol'] attribute
return self._destroy(instance, vm_ref, network_info=network_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
2. Destroying associated VDIs.
3. Destroying kernel and ramdisk files (if necessary).
4. Destroying that actual VM record.
"""
if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."),
instance=instance)
return
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
if destroy_disks:
self._volumeops.detach_all(vm_ref)
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(
instance, network_info=network_info)
def pause(self, instance):
"""Pause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.pause', vm_ref)
def unpause(self, instance):
"""Unpause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.unpause', vm_ref)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._acquire_bootlock(vm_ref)
self._session.call_xenapi('VM.suspend', vm_ref)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._session.call_xenapi('VM.resume', vm_ref, False, True)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
- shutdown the instance VM.
- set 'bootlock' to prevent the instance from starting in rescue.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
rescue_name_label = '%s-rescue' % instance['name']
rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
def unrescue(self, instance):
"""Unrescue the specified instance.
- unplug the instance VM's disk from the rescue VM.
- teardown the rescue VM.
- release the bootlock to allow the instance VM to start.
"""
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(
instance_id=instance['uuid'])
original_vm_ref = self._get_vm_opaque_ref(instance)
self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
"""Restore the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._start(instance, vm_ref)
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._start(instance, vm_ref)
def _cancel_stale_tasks(self, timeout, task):
"""Cancel the given tasks that are older than the given timeout."""
task_refs = self._session.call_xenapi("task.get_by_name_label", task)
for task_ref in task_refs:
task_rec = self._session.call_xenapi("task.get_record", task_ref)
task_created = timeutils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
reboot state for >= the given timeout
"""
# NOTE(jk0): All existing clean_reboot tasks must be cancelled before
# we can kick off the hard_reboot tasks.
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
def _get_vif_device_map(self, vm_rec):
vif_map = {}
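        # Maps each VIF device number (a string, e.g. '0') to its MAC address.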
for vif in [self._session.call_xenapi("VIF.get_record", vrec)
for vrec in vm_rec['VIFs']]:
vif_map[vif['device']] = vif['MAC']
return vif_map
def get_all_bw_counters(self):
"""Return running bandwidth counter for each interface on each
running VM"""
counters = vm_utils.fetch_bandwidth(self._session)
bw = {}
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
vif_map = self._get_vif_device_map(vm_rec)
name = vm_rec['name_label']
if 'nova_uuid' not in vm_rec['other_config']:
continue
dom = vm_rec.get('domid')
if dom is None or dom not in counters:
continue
vifs_bw = bw.setdefault(name, {})
for vif_num, vif_data in counters[dom].iteritems():
mac = vif_map[vif_num]
vif_data['mac_address'] = mac
vifs_bw[mac] = vif_data
return bw
def get_console_output(self, instance):
"""Return snapshot of console."""
# TODO(armando-migliaccio): implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
# The compute manager expects InstanceNotFound for this case.
raise exception.InstanceNotFound(instance_id=instance['uuid'])
session_id = self._session.get_session_id()
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
        # NOTE: XS5.6sp2+ use http over port 80 for xenapi communication
return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
"""convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
return None
return ip['address']
def fixed_ip_dict(ip, subnet):
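            # IPv4 addresses get a dotted-quad netmask; IPv6 addresses carry
            # the prefix length instead.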
if ip['version'] == 4:
netmask = str(subnet.as_netaddr().netmask)
else:
netmask = subnet.as_netaddr()._prefixlen
return {'ip': ip['address'],
'enabled': '1',
'netmask': netmask,
'gateway': get_ip(subnet['gateway'])}
def convert_route(route):
return {'route': str(netaddr.IPNetwork(route['cidr']).network),
'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
'gateway': get_ip(route['gateway'])}
network = vif['network']
v4_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 4]
v6_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 6]
# NOTE(tr3buchet): routes and DNS come from all subnets
routes = [convert_route(route) for subnet in network['subnets']
for route in subnet['routes']]
dns = [get_ip(ip) for subnet in network['subnets']
for ip in subnet['dns']]
info_dict = {'label': network['label'],
'mac': vif['address']}
if v4_subnets:
# NOTE(tr3buchet): gateway and broadcast from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway'] = get_ip(v4_subnets[0]['gateway'])
info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast)
info_dict['ips'] = [fixed_ip_dict(ip, subnet)
for subnet in v4_subnets
for ip in subnet['ips']]
if v6_subnets:
# NOTE(tr3buchet): gateway from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway'])
info_dict['ip6s'] = [fixed_ip_dict(ip, subnet)
for subnet in v6_subnets
for ip in subnet['ips']]
if routes:
info_dict['routes'] = routes
if dns:
info_dict['dns'] = list(set(dns))
return info_dict
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what vm_utils.lookup(session, instance['name']) will find (ex: rescue)
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
for vif in network_info:
xs_data = self._vif_xenstore_data(vif)
location = ('vm-data/networking/%s' %
vif['address'].replace(':', ''))
self._add_to_param_xenstore(vm_ref,
location,
jsonutils.dumps(xs_data))
try:
self._write_to_xenstore(instance, location, xs_data,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _create_vifs(self, vm_ref, instance, network_info):
"""Creates vifs for an instance."""
LOG.debug(_("Creating vifs"), instance=instance)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.call_xenapi("VM.get_record", vm_ref)
for device, vif in enumerate(network_info):
vif_rec = self.vif_driver.plug(instance, vif,
vm_ref=vm_ref, device=device)
network_ref = vif_rec['network']
LOG.debug(_('Creating VIF for network %(network_ref)s'),
locals(), instance=instance)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
locals(), instance=instance)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def reset_network(self, instance):
"""Calls resetnetwork method in agent."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.resetnetwork()
else:
raise NotImplementedError()
def inject_hostname(self, instance, vm_ref, hostname):
"""Inject the hostname of the instance into the xenstore."""
if instance['os_type'] == "windows":
# NOTE(jk0): Windows hostnames can only be <= 15 chars.
hostname = hostname[:15]
LOG.debug(_("Injecting hostname to xenstore"), instance=instance)
self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname)
def _write_to_xenstore(self, instance, path, value, vm_ref=None):
"""
Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
return self._make_plugin_call('xenstore.py', 'write_record', instance,
vm_ref=vm_ref, path=path,
value=jsonutils.dumps(value))
def _delete_from_xenstore(self, instance, path, vm_ref=None):
"""
Deletes the value from the xenstore record for the given VM at
the specified location. A XenAPIPlugin.PluginError will be
raised if any error is encountered in the delete process.
"""
return self._make_plugin_call('xenstore.py', 'delete_record', instance,
vm_ref=vm_ref, path=path)
def _make_plugin_call(self, plugin, method, instance, vm_ref=None,
**addl_args):
"""
Abstracts out the process of calling a method of a xenapi plugin.
        XenAPI failures raised by the plugin are caught and returned as a
        dict containing a 'returncode' and a 'message'.
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
args = {'dom_id': vm_rec['domid']}
args.update(addl_args)
try:
return self._session.call_plugin(plugin, method, args)
except self._session.XenAPI.Failure, e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. args=%(args)r'),
locals(), instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'error', 'message': err_msg}
return None
def _add_to_param_xenstore(self, vm_ref, key, val):
"""
Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
it is overwritten
"""
self._remove_from_param_xenstore(vm_ref, key)
self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val)
def _remove_from_param_xenstore(self, vm_ref, key):
"""
Takes a single key and removes it from the xenstore parameter
record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def unfilter_instance(self, instance_ref, network_info):
"""Removes filters for each VIF of the specified instance."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
current_aggregate = self._virtapi.aggregate_get_by_host(
context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
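        # In a pool-enabled aggregate the metadetails map each hostname to
        # its XenAPI host uuid.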
try:
return current_aggregate.metadetails[hostname]
except KeyError:
reason = _('Destination host:%(hostname)s must be in the same '
'aggregate as the source server')
raise exception.MigrationError(reason=reason % locals())
def _ensure_host_in_aggregate(self, context, hostname):
self._get_host_uuid_from_aggregate(context, hostname)
def _get_host_opaque_ref(self, context, hostname):
host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
return self._session.call_xenapi("host.get_by_uuid", host_uuid)
def _migrate_receive(self, ctxt):
destref = self._session.get_xenapi_host()
        # Get the network to use for the migration.
# This is the one associated with the pif marked management. From cli:
# uuid=`xe pif-list --minimal management=true`
# xe pif-param-get param-name=network-uuid uuid=$uuid
expr = 'field "management" = "true"'
pifs = self._session.call_xenapi('PIF.get_all_records_where',
expr)
if len(pifs) != 1:
raise exception.MigrationError('No suitable network for migrate')
nwref = pifs[pifs.keys()[0]]['network']
try:
options = {}
migrate_data = self._session.call_xenapi("host.migrate_receive",
destref,
nwref,
options)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Receive failed'))
return migrate_data
def check_can_live_migrate_destination(self, ctxt, instance_ref,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
if block_migration:
migrate_send_data = self._migrate_receive(ctxt)
destination_sr_ref = vm_utils.safe_find_sr(self._session)
dest_check_data = {
"block_migration": block_migration,
"migrate_data": {"migrate_send_data": migrate_send_data,
"destination_sr_ref": destination_sr_ref}}
return dest_check_data
else:
src = instance_ref['host']
self._ensure_host_in_aggregate(ctxt, src)
# TODO(johngarbutt) we currently assume
# instance is on a SR shared with other destination
# block migration work will be able to resolve this
return None
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it's possible to execute live migration on the source side.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
destination, includes block_migration flag
"""
if dest_check_data and 'migrate_data' in dest_check_data:
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = dest_check_data['migrate_data']
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
return dest_check_data
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('VM.assert_can_migrate'
                                                 ' failed'))
def _generate_vdi_map(self, destination_sr_ref, vm_ref):
"""generate a vdi_map for _call_live_migrate_command."""
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
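    # Note added for clarity (not in the original driver): the generated map has
    # the shape {vdi_opaque_ref: destination_sr_ref, ...}, i.e. every VDI the
    # instance holds on the local SR is pointed at the destination SR.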
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
vif_map = {}
options = {}
self._session.call_xenapi(command_name, vm_ref,
migrate_send_data, True,
vdi_map, vif_map, options)
def live_migrate(self, context, instance, destination_hostname,
post_method, recover_method, block_migration,
migrate_data=None):
try:
vm_ref = self._get_vm_opaque_ref(instance)
if block_migration:
if not migrate_data:
raise exception.InvalidParameterValue('Block Migration '
'requires migrate data from destination')
try:
self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Send failed'))
else:
host_ref = self._get_host_opaque_ref(context,
destination_hostname)
self._session.call_xenapi("VM.pool_migrate", vm_ref,
host_ref, {})
post_method(context, instance, destination_hostname,
block_migration)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
usage = {}
def _is_active(vm_rec):
power_state = vm_rec['power_state'].lower()
return power_state in ['running', 'paused']
def _get_uuid(vm_rec):
other_config = vm_rec['other_config']
return other_config.get('nova_uuid', None)
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage
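    # Illustrative note (added for clarity): the returned structure looks like
    #   {'<instance-uuid>': {'memory_mb': 2048, 'uuid': '<instance-uuid>'}, ...}
    # with one entry per running or paused VM that carries a nova_uuid tag.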
| apache-2.0 | -5,872,085,318,642,051,000 | 41.73639 | 79 | 0.55996 | false | 4.21852 | false | false | false |
sullivat/Markov-Twitter-Bot | src/mybot.test.py | 1 | 1257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime as dt
import logging
import time
import tweepy
from tweet_builder import *
from credentials import *
# Housekeeping: do not edit
logging.basicConfig(filename='tweet_test.log', level=logging.DEBUG)
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
INTERVALS = [1, 1, 1, 5, 10]
# What the bot will tweet
def gen_tweet():
"""Generate a tweet from markovify."""
return str(create_tweet(authors[pick_author()]))
def is_tweet_safe(tweet):
"""using Mark Twain text inevitably leads to tweets with offensive langueage"""
vulgarities = ['nigger', 'fuck']
for vulg in vulgarities:
if vulg in tweet.lower():
return False
else:
return True
def main_no_tweet():
while True:
t = gen_tweet()
if is_tweet_safe(t):
# api.update_status(t) # DON'T TWEET
logging.info("On {0} -- Tweeted: {1}".format(dt.datetime.today(), t))
time.sleep(random.choice(INTERVALS))
print("Tweeting: {}".format(t))
print('...\nAll done!')
if __name__ == '__main__':
#main()
main_no_tweet()
| bsd-2-clause | -7,918,807,360,116,483,000 | 22.277778 | 83 | 0.61973 | false | 3.214834 | false | false | false |
alphagov/notifications-api | migrations/versions/0151_refactor_letter_rates.py | 1 | 3140 | """
Revision ID: 0151_refactor_letter_rates
Revises: 0150_another_letter_org
Create Date: 2017-12-05 10:24:41.232128
"""
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0151_refactor_letter_rates'
down_revision = '0150_another_letter_org'
def upgrade():
op.drop_table('letter_rate_details')
op.drop_table('letter_rates')
op.create_table('letter_rates',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=False),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('sheet_count', sa.Integer(), nullable=False),
sa.Column('rate', sa.Numeric(), nullable=False),
sa.Column('crown', sa.Boolean(), nullable=False),
sa.Column('post_class', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
start_date = datetime(2016, 3, 31, 23, 00, 00)
op.execute("insert into letter_rates values('{}', '{}', null, 1, 0.30, True, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 2, 0.33, True, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 3, 0.36, True, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 1, 0.33, False, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 2, 0.39, False, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 3, 0.45, False, 'second')".format(
str(uuid.uuid4()), start_date)
)
def downgrade():
op.drop_table('letter_rates')
op.create_table('letter_rates',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('valid_from', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', name='letter_rates_pkey'),
postgresql_ignore_search_path=False
)
op.create_table('letter_rate_details',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('letter_rate_id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('page_total', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('rate', sa.NUMERIC(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['letter_rate_id'], ['letter_rates.id'],
name='letter_rate_details_letter_rate_id_fkey'),
sa.PrimaryKeyConstraint('id', name='letter_rate_details_pkey')
)
| mit | 8,390,699,011,551,367,000 | 43.225352 | 105 | 0.576433 | false | 3.862239 | false | false | false |
jpscaletti/rev-assets | rev_assets/__init__.py | 1 | 1570 | """
===========================
RevAssets
===========================
Makes possible for python web apps to work with hashed static assets
generated by other tools like Gulp or Webpack.
It does so by reading the manifest generated by the revision tool.
"""
import json
import io
__version__ = '1.0.3'
class AssetNotFound(Exception):
pass
class RevAssets(object):
"""
Map the source -> hashed assets
:param base_url: From where the hashed assets are served.
:param reload: Reload the manifest each time an asset is requested.
:param manifest: Path and filename of the manifest file.
:param quiet: If False, a missing asset will raise an exception
"""
def __init__(self, base_url='/static', reload=False,
manifest='manifest.json', quiet=True):
self.base_url = base_url.rstrip('/')
self.reload = reload
self.manifest = manifest
self.assets = {}
self.quiet = quiet
def _load_manifest(self):
with io.open(self.manifest, 'rt', encoding='utf-8') as mf:
return json.loads(mf.read())
def asset_url(self, asset):
if not self.assets or self.reload:
self.assets = self._load_manifest()
asset = asset.strip('/')
path = self.assets.get(asset)
if not path:
if self.quiet:
return ''
msg = 'Asset file {!r} not found'.format(asset)
raise AssetNotFound(msg)
return '{}/{}'.format(
self.base_url,
path.lstrip('/'),
)
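# Illustrative usage sketch (not part of the package; the file names below are
# hypothetical and only meant to show the manifest lookup):
#
#   rev = RevAssets(base_url='/static', manifest='manifest.json')
#   rev.asset_url('js/app.js')  # -> '/static/js/app.a1b2c3.js' if the manifest maps it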
| bsd-3-clause | -7,948,017,020,570,609,000 | 25.166667 | 71 | 0.576433 | false | 4.14248 | false | false | false |
Python1320/icmpviewer | main.py | 1 | 1217 | #!/usr/bin/env python2
QUEUE_NUM = 5
#hush verbosity
import logging
l=logging.getLogger("scapy.runtime")
l.setLevel(49)
import os,sys,time
from sys import stdout as out
import nfqueue,socket
from scapy.all import *
import GeoIP
gi = GeoIP.open("GeoLiteCity.dat",GeoIP.GEOIP_STANDARD)
lastip=""
def DoGeoIP(pkt):
global lastip
ip = pkt[IP].src
if lastip==ip:
out.write('.')
out.flush()
return
lastip=ip
gir = gi.record_by_addr(ip)
if gir != None:
out.write("\n%s %s %s %s "%(
time.strftime("%Y-%m-%d %H:%M:%S"),
ip,
gir['country_name'] or "?",
gir['city'] or "?"))
out.flush()
def process_packet(dummy, payload):
payload.set_verdict(nfqueue.NF_ACCEPT)
data = payload.get_data()
pkt = IP(data)
proto = pkt.proto
	if proto == 0x01:
		if pkt[ICMP].type == 8:
			DoGeoIP(pkt)
#automatic iptables rules?
def hook():
pass
def unhook():
pass
def main():
q = nfqueue.queue()
q.open()
q.bind(socket.AF_INET)
q.set_callback(process_packet)
q.create_queue(QUEUE_NUM)
try:
hook()
q.try_run()
except KeyboardInterrupt:
unhook()
print("Exit...")
q.unbind(socket.AF_INET)
q.close()
sys.exit(0)
print("Listening on queue number "+str(QUEUE_NUM))
main()
| unlicense | 3,581,285,116,070,015,000 | 15.226667 | 55 | 0.653246 | false | 2.546025 | false | false | false |
mhabib1981/pySecn00b | zap_xml_parse.py | 1 | 2159 | from xml.dom.minidom import parse
import xml.dom.minidom
import sys
import csv
#uni_file=open(sys.argv[1],'r')
#non_uni_file=uni_file.decode("utf8")
dom_tree=parse(sys.argv[1])
collect=dom_tree.documentElement
output_data=[[],[],[],[],[],[],[],[]]
out_filename=((sys.argv[1].split("/")[-1]).split(".")[0])+".csv"
out_file=open(out_filename,'w')
write_csv=csv.writer(out_file, dialect=csv.excel)
for item in collect.getElementsByTagName("alertitem"):
try:
risk_desc=item.getElementsByTagName('riskdesc')[0]
output_data[0].append(risk_desc.childNodes[0].data)
except IndexError:
output_data[0].append("NONE")
try:
alert_name=item.getElementsByTagName('alert')[0]
output_data[1].append(alert_name.childNodes[0].data)
except IndexError:
output_data[1].append("NONE")
try:
alert_desc=item.getElementsByTagName('desc')[0]
output_data[2].append((alert_desc.childNodes[0].data).encode("utf-8"))
except IndexError:
output_data[2].append("NONE")
try:
alert_solution=item.getElementsByTagName('solution')[0]
output_data[3].append((alert_solution.childNodes[0].data).encode("utf-8"))
except IndexError:
output_data[3].append("NONE")
try:
alert_ref=item.getElementsByTagName('reference')[0]
output_data[4].append((alert_ref.childNodes[0].data).encode("utf-8"))
except IndexError:
output_data[4].append("NONE")
try:
uri=item.getElementsByTagName('uri')[0]
output_data[5].append(uri.childNodes[0].data)
except IndexError:
output_data[5].append("NONE")
try:
evid=item.getElementsByTagName('evidence')[0]
output_data[6].append(evid.childNodes[0].data)
except IndexError:
output_data[6].append("NONE")
try:
attack=item.getElementsByTagName('attack')[0]
output_data[7].append(attack.childNodes[0].data)
except IndexError:
output_data[7].append("NONE")
try:
for i in range(0,len(output_data[0])-1):
row=[]
for x in range(0,len(output_data)):
row.append(str(output_data[x][i]).replace(',',';c'))
print row
except UnicodeEncodeError:
raise
#print output_data
# for x in xrange(0,len(output_data)-1):
# print output_data[x][i]
#write_csv.writerows(output_data)
| cc0-1.0 | -1,634,140,350,638,111,200 | 22.467391 | 76 | 0.695692 | false | 2.905787 | false | false | false |
google/fedjax | fedjax/models/stackoverflow.py | 1 | 5453 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stack Overflow recurrent models."""
from typing import Optional
from fedjax.core import metrics
from fedjax.core import models
import haiku as hk
import jax.numpy as jnp
def create_lstm_model(vocab_size: int = 10000,
embed_size: int = 96,
lstm_hidden_size: int = 670,
lstm_num_layers: int = 1,
share_input_output_embeddings: bool = False,
expected_length: Optional[float] = None) -> models.Model:
"""Creates LSTM language model.
Word-level language model for Stack Overflow.
Defaults to the model used in:
Adaptive Federated Optimization
Sashank Reddi, Zachary Charles, Manzil Zaheer, Zachary Garrett, Keith Rush,
Jakub Konečný, Sanjiv Kumar, H. Brendan McMahan.
https://arxiv.org/abs/2003.00295
Args:
vocab_size: The number of possible output words. This does not include
special tokens like PAD, BOS, EOS, or OOV.
embed_size: Embedding size for each word.
lstm_hidden_size: Hidden size for LSTM cells.
lstm_num_layers: Number of LSTM layers.
share_input_output_embeddings: Whether to share the input embeddings with
the output logits.
expected_length: Expected average sentence length used to scale the training
loss down by `1. / expected_length`. This constant term is used so that
the total loss over all the words in a sentence can be scaled down to per
word cross entropy values by a constant factor instead of dividing by
number of words which can vary across batches. Defaults to no scaling.
Returns:
Model.
"""
# TODO(jaero): Replace these with direct references from dataset.
pad = 0
bos = 1
eos = 2
oov = vocab_size + 3
full_vocab_size = vocab_size + 4
# We do not guess EOS, and if we guess OOV, it's treated as a mistake.
logits_mask = [0. for _ in range(full_vocab_size)]
for i in (pad, bos, eos, oov):
logits_mask[i] = jnp.NINF
logits_mask = tuple(logits_mask)
def forward_pass(batch):
x = batch['x']
# [time_steps, batch_size, ...].
x = jnp.transpose(x)
# [time_steps, batch_size, embed_dim].
embedding_layer = hk.Embed(full_vocab_size, embed_size)
embeddings = embedding_layer(x)
lstm_layers = []
for _ in range(lstm_num_layers):
lstm_layers.extend([
hk.LSTM(hidden_size=lstm_hidden_size),
jnp.tanh,
# Projection changes dimension from lstm_hidden_size to embed_size.
hk.Linear(embed_size)
])
rnn_core = hk.DeepRNN(lstm_layers)
initial_state = rnn_core.initial_state(batch_size=embeddings.shape[1])
# [time_steps, batch_size, hidden_size].
output, _ = hk.static_unroll(rnn_core, embeddings, initial_state)
if share_input_output_embeddings:
output = jnp.dot(output, jnp.transpose(embedding_layer.embeddings))
output = hk.Bias(bias_dims=[-1])(output)
else:
output = hk.Linear(full_vocab_size)(output)
# [batch_size, time_steps, full_vocab_size].
output = jnp.transpose(output, axes=(1, 0, 2))
return output
def train_loss(batch, preds):
"""Returns total loss per sentence optionally scaled down to token level."""
targets = batch['y']
per_token_loss = metrics.unreduced_cross_entropy_loss(targets, preds)
# Don't count padded values in loss.
per_token_loss *= targets != pad
sentence_loss = jnp.sum(per_token_loss, axis=-1)
if expected_length is not None:
return sentence_loss * (1. / expected_length)
return sentence_loss
transformed_forward_pass = hk.transform(forward_pass)
return models.create_model_from_haiku(
transformed_forward_pass=transformed_forward_pass,
sample_batch={
'x': jnp.zeros((1, 1), dtype=jnp.int32),
'y': jnp.zeros((1, 1), dtype=jnp.int32),
},
train_loss=train_loss,
eval_metrics={
'accuracy_in_vocab':
metrics.SequenceTokenAccuracy(
masked_target_values=(pad, eos), logits_mask=logits_mask),
'accuracy_no_eos':
metrics.SequenceTokenAccuracy(masked_target_values=(pad, eos)),
'num_tokens':
metrics.SequenceTokenCount(masked_target_values=(pad,)),
'sequence_length':
metrics.SequenceLength(masked_target_values=(pad,)),
'sequence_loss':
metrics.SequenceCrossEntropyLoss(masked_target_values=(pad,)),
'token_loss':
metrics.SequenceTokenCrossEntropyLoss(
masked_target_values=(pad,)),
'token_oov_rate':
metrics.SequenceTokenOOVRate(
oov_target_values=(oov,), masked_target_values=(pad,)),
'truncation_rate':
metrics.SequenceTruncationRate(
eos_target_value=eos, masked_target_values=(pad,)),
})
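# Illustrative usage sketch (not part of the original module): building the model
# with its documented defaults; the returned object is a fedjax models.Model.
#
#   model = create_lstm_model()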
| apache-2.0 | -2,779,023,789,321,949,700 | 37.935714 | 80 | 0.652174 | false | 3.648594 | false | false | false |
lglenat/whmnet | RaspberryPi/readGateway.py | 1 | 12588 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
readGateway.py script
=====================
This script is used in the whmnet project to receive data from the
wireless network gateway and send it to a custom server on the web.
This script is run on a Raspberry Pi, connected to the Gateway
through the UART serial port on the Pi GPIO header.
"""
# built-in modules
import serial
import binascii
import struct
import datetime
import logging
import logging.handlers
# third-party modules
import crcmod
import requests
# Import configuration variables
from config import *
# Constants
FILE_TYPE_GW_LOG = 0
FILE_TYPE_SENSOR_LOG = 1
FILE_TYPE_LEGACY_DATA = 2
FILE_TYPE_DATA = 3
GW_TYPE_REMOTE_DATA = 0
GW_TYPE_LOCAL_DATA = 1
GW_TYPE_LOG = 2
SENS_TYPE_DATA = 0
SENS_TYPE_LOG = 1
SENS_TYPE_DATA_LEGACY = 2
logger = logging.getLogger()
def main():
# Configure logger
wfh = logging.handlers.WatchedFileHandler(cfgLoggerFile) # log file
formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s') # log header
wfh.setFormatter(formatter)
logger.addHandler(wfh)
logger.setLevel(cfgLogLevel) # set level according to your needs
# Configure CRC with polynome, reversing etc.
crc32_func = crcmod.mkCrcFun(0x104C11DB7, initCrc=0x0, rev=True, xorOut=0xFFFFFFFF)
# Open serial port to communicate with gateway
try:
logger.info('Opening serial port.')
port = serial.Serial(cfgSerialName, cfgSerialSpeed, timeout=None)
except serial.SerialException:
logger.critical('Serial port unavailable')
raise
else:
logger.info('Serial port successfully opened.')
# main loop
while True:
# search for sync byte 0xAA
rcv = port.read(1)
if rcv == b'\xAA':
logger.debug('Sync word received.')
# Get timestamp
timedata = datetime.datetime.now()
# Proceed with message
# First byte is length of UART frame
# use ord() because data is written in binary format on UART by STM32 (not char)
length = ord(port.read(1))
logger.debug('Size of rcvd frame: ' + str(length))
# We can have length = 0 if rx uart buffer is full (in case python script
# is started after sensor gateway)
if length > 0:
# Then read the entire frame
msg = port.read(length)
logger.debug('Rx frame: ' + binascii.hexlify(msg))
# Unpack the CRC from the 4 last bytes of frame
try:
rxcrc = struct.unpack('<I', msg[length-4:length])[0]
except struct.error:
logger.exception('CRC struct error.')
else:
logger.debug('Rx CRC: ' + str(rxcrc) + ' - ' + hex(rxcrc))
# Compute CRC on frame data (except sync and length bytes)
compcrc = crc32_func(msg[0:length-4])
logger.debug('Calculated CRC: ' + str(compcrc) + ' - ' + hex(int(compcrc)))
# Compare rcvd CRC and calculated CRC
if rxcrc != int(compcrc):
# A problem occured during UART transmission
logger.info('CRC ERROR.')
else:
# Get message type from Gateway
gwMsgType = ord(msg[0]);
# Remote data is data coming from wireless sensors
if gwMsgType == GW_TYPE_REMOTE_DATA:
# get sensor id and msg type
sensMsgType = ord(msg[2]) >> 4
sensorId = ord(msg[2]) & 0xf
# get RSSI (can be negative)
rssi = ord(msg[length-6])
if rssi > 127:
rssi = (256 - rssi) * (-1)
# Print sensor ID
logger.info('Sensor ID: ' + str(sensorId) + ' - RSSI: ' + str(rssi))
# log/error message from sensor
if sensMsgType == SENS_TYPE_LOG:
# print and process log message
log_msg = binascii.hexlify(msg[3:6])
logger.info('Log message: ' + log_msg)
# Write msg to file
writeSensorLog(sensorId, timedata, log_msg, rssi)
# Post msg on server
postMeasFromFile()
# measurement message from V1 sensor (not used anymore)
elif sensMsgType == SENS_TYPE_DATA_LEGACY:
# Extract and print temperature #
temperature = computeTemp(msg[3:5])
logger.debug('Temperature: ' + str(temperature))
# Write measurement to file
writeLegacyData(sensorId, timedata, temperature, rssi)
# Post measurement on server
postMeasFromFile()
# measurement message from V2 sensor
elif sensMsgType == SENS_TYPE_DATA:
#Extract data from message
data = computeData(msg[3:8])
logger.info('Temp: ' + '{:.2f}'.format(data['temp']))
logger.info('Hum: ' + '{:.2f}'.format(data['hum']))
logger.info('Pres: ' + str(data['pres']))
# Write data to file
writeData(sensorId, timedata, data['temp'], data['hum'], data['pres'], rssi)
# Post on server
postMeasFromFile()
else:
logger.warning('UNKNOWN SENSOR MSG TYPE.')
# log message from gateway itself
elif gwMsgType == GW_TYPE_LOG:
# Print log message
logger.info('Gateway log: ' + str(ord(msg[1])))
# Write msg to file
writeGatewayLog(timedata, ord(msg[1]))
# Post msg on server
postMeasFromFile()
else:
logger.warning('UNKNOWN GATEWAY MSG TYPE.')
else:
logger.error('Gateway msg is of length 0.')
# The 4 functions below save posts to the CSV buffer file before they are sent to the server
def writeLegacyData(id, timedata, temp, rssi):
with open(cfgBufferFile, 'a') as f:
f.write(str(id) + ',' + str(FILE_TYPE_LEGACY_DATA) + ',' +
timedata.strftime("%Y-%m-%d %H:%M:%S") + ',' +
str(temp) + ',' + str(rssi))
f.write('\n')
def writeData(id, timedata, temp, hum, pres, rssi):
with open(cfgBufferFile, 'a') as f:
f.write(str(id) + ',' + str(FILE_TYPE_DATA) + ',' + timedata.strftime("%Y-%m-%d %H:%M:%S") + ',' +
'{:.2f}'.format(temp) + ',' + '{:.2f}'.format(hum) + ',' + str(pres) + ',' + str(rssi))
f.write('\n')
def writeSensorLog(id, timedata, log, rssi):
with open(cfgBufferFile, 'a') as f:
f.write(str(id) + ',' + str(FILE_TYPE_SENSOR_LOG) + ',' + timedata.strftime("%Y-%m-%d %H:%M:%S") +
',' + str(log) + ',' + str(rssi))
f.write('\n')
def writeGatewayLog(timedata, log):
with open(cfgBufferFile, 'a') as f:
f.write('255' + ',' + str(FILE_TYPE_GW_LOG) + ',' + timedata.strftime("%Y-%m-%d %H:%M:%S") + ',' + str(log))
f.write('\n')
# Function to compute temperature from V1 sensor message (not for V2 sensor)
def computeTemp(tempString):
# unpack data - big endian 16 bit
try:
temp = struct.unpack('>h', tempString)[0]
except struct.error:
logger.exception('Temperature struct error.')
return -99
else:
# Convert code to actual temperature
temp = temp * 0.0625 # 1 LSB = 0.0625 °C
return temp
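# Worked example (added for clarity): a raw big-endian reading of 0x00C8 (200)
# converts to 200 * 0.0625 = 12.5 degrees Celsius.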
# Function to extract temperature, RH and pressure data from sensor V2 message
# See sensor V2 STM32L0 firmware source to retrieve message structure
def computeData(dataStr):
# Initialize dictionnary
data = {}
#fixme: return errors and check for error in calling function
# Little endian 24-bit padded to 32
try:
sht = struct.unpack('<I', dataStr[0:3] + '\x00')
except struct.error:
logger.exception('SHT21 data decoding struct error.')
return -99
else:
data['temp'] = -46.85 + 175.72 * ((sht[0] & 0x7FF) << 5) / pow(2,16)
data['hum'] = -6.0 + 125.0 * ((sht[0] >> 11) << 5) / pow(2,16)
# Little endian 16-bit
try:
ms56 = struct.unpack('<H', dataStr[3:5])
except struct.error:
logger.exception('MS5637 data decoding struct error.')
return -99
else:
data['pres'] = ms56[0] + 85000
return data
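# Illustrative note (added for clarity): on success computeData returns a dict such
# as {'temp': <deg C>, 'hum': <%RH>, 'pres': <raw MS5637 value + 85000>}; on a
# decoding error it returns -99, mirroring computeTemp.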
# Function that reads the CSV buffer file line by line and post the data to the
# webserver if it is reachable on the internet
def postMeasFromFile():
nbLinesPosted = 0
# open backup file in read mode
with open(cfgBufferFile, 'r') as f:
# Save all measurements in lines variable
lines = f.readlines()
# Go back to start of file and read it line by line
f.seek(0, 0)
for line in f:
# Remove newline character
line = line.rstrip('\n')
# Split the line to get the items in a list
s = line.split(',', -1)
if len(s) != 0:
# Try to post measurement on server
type = int(s[1])
if type == FILE_TYPE_GW_LOG:
status = postOnServer(s[0], s[1], s[2], s[3], '', '', '', '')
elif type == FILE_TYPE_SENSOR_LOG:
status = postOnServer(s[0], s[1], s[2], s[3], '', '', '', s[4])
elif type == FILE_TYPE_LEGACY_DATA:
status = postOnServer(s[0], s[1], s[2], '', s[3], '', '', s[4])
elif type == FILE_TYPE_DATA:
status = postOnServer(s[0], s[1], s[2], '', s[3], s[4], s[5], s[6])
else:
logger.error('Unknow type in data file.')
status = 200
# If posting is successful, increment variable else break
if status != 200:
break
else:
nbLinesPosted = nbLinesPosted + 1
else:
# simply ignore line
logger.error('Invalid line in file. Skipping.')
nbLinesPosted = nbLinesPosted + 1
# Open the file not appending write mode
with open(cfgBufferFile, 'w') as f:
# Write all lines that were not posted on server
f.writelines(lines[nbLinesPosted:])
# Function to post data on websever. Uses the requests package.
def postOnServer(id_s, dataType_s, datetime_s, log_s, temp_s, hum_s, pres_s, rssi_s):
retval = 0;
payload = {'id': id_s, 'type': dataType_s, 'time': datetime_s,
'temp': temp_s, 'hum': hum_s, 'pres': pres_s,
'log': log_s, 'rssi': rssi_s, 'chk': cfgPostPwd}
logger.debug(payload)
try:
r = requests.post(cfgPostUrl, data=payload, timeout=5)
except requests.exceptions.ConnectionError:
logger.exception('Connection error')
except requests.exceptions.HTTPError:
logger.exception('HTTP invalid response error.')
except requests.exceptions.Timeout:
logger.exception('Connection timeout error.')
except requests.exceptions.TooManyRedirects:
logger.exception('Too many redirects.')
else:
retval = r.status_code
logger.debug(r.text)
return retval
if __name__ == "__main__":
main()
__author__ = "Lucas Glénat"
__copyright__ = "Copyright 2017, whmnet project"
__credits__ = ["Lucas Glénat"]
__license__ = "GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Lucas Glénat"
__email__ = "[email protected]"
__status__ = "Production"
#### END OF FILE ####
| gpl-3.0 | 5,445,676,795,377,608,000 | 36.011765 | 116 | 0.516052 | false | 4.037215 | false | false | false |
ISRyuu/ISNNTF | test.py | 1 | 3957 | import numpy as np
def iou(box_1, box_2):
box_1_ulx = box_1[0] - box_1[2] * 0.5
box_1_uly = box_1[1] - box_1[3] * 0.5
box_1_lrx = box_1[0] + box_1[2] * 0.5
box_1_lry = box_1[1] + box_1[3] * 0.5
box_2_ulx = box_2[0] - box_2[2] * 0.5
box_2_uly = box_2[1] - box_2[3] * 0.5
box_2_lrx = box_2[0] + box_2[2] * 0.5
box_2_lry = box_2[1] + box_2[3] * 0.5
overlap_ulx = max(box_1_ulx, box_2_ulx)
overlap_uly = max(box_1_uly, box_2_uly)
overlap_lrx = min(box_1_lrx, box_2_lrx)
overlap_lry = min(box_1_lry, box_2_lry)
overlap = max(0, (overlap_lrx - overlap_ulx)) * max(0, (overlap_lry - overlap_uly))
union = max(1e-10, (box_1[2] * box_1[3] + box_2[2] * box_2[3] - overlap))
return min(max(0, overlap / union), 1)
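# Worked example (added for clarity; boxes are (cx, cy, w, h)): two identical unit
# boxes give iou(...) == 1.0, while iou([0.5, 0.5, 1, 1], [1.0, 0.5, 1, 1]) has an
# overlap of 0.5 over a union of 1.5, i.e. roughly 0.33.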
def non_max_suppression(output, cell_size, class_num, boxes_per_cell,
threshold=0.1, iou_threshold=0.5):
'''output [cell_size, cell_size, boxes_per_cell, values]'''
offset_y = np.reshape(
np.asarray([np.arange(cell_size)]*cell_size*boxes_per_cell).T,
(cell_size, cell_size, boxes_per_cell))
offset_x = np.transpose(offset_y, [1, 0, 2])
output = np.asarray(output)
classes = np.reshape(output[..., :class_num],
[cell_size, cell_size, class_num])
confidences = np.reshape(output[..., class_num:class_num+boxes_per_cell],
[cell_size, cell_size, boxes_per_cell])
boxes = np.reshape(output[..., class_num+boxes_per_cell:],
[cell_size, cell_size, boxes_per_cell, -1])
boxes[..., 0] = (boxes[..., 0] + offset_x) / cell_size
boxes[..., 1] = (boxes[..., 1] + offset_y) / cell_size
boxes[..., 2:] = np.square(boxes[..., 2:])
class_confidences = []
for i in range(boxes_per_cell):
class_confidences += [np.expand_dims(confidences[..., i], axis=-1) * classes]
class_confidences = np.stack(class_confidences, axis=-2)
class_filter = class_confidences >= threshold
class_filtered_indices = np.nonzero(class_filter)
boxes_filtered = boxes[class_filtered_indices[0:3]]
class_filtered = np.argmax(class_confidences, axis=-1)[class_filtered_indices[0:3]]
probabilites_filtered = class_confidences[class_filter]
sorted_probs_indices = np.flip(np.argsort(probabilites_filtered), axis=0)
probabilites_filtered = probabilites_filtered[sorted_probs_indices]
boxes_filtered = boxes_filtered[sorted_probs_indices]
class_filtered = class_filtered[sorted_probs_indices]
for i in range(len(sorted_probs_indices)):
if probabilites_filtered[i] == 0:
continue
for j in range(i+1, len(sorted_probs_indices)):
if iou(boxes_filtered[i], boxes_filtered[j]) >= iou_threshold:
probabilites_filtered[j] = 0
result_indices = probabilites_filtered > 0
confidence_result = probabilites_filtered[result_indices]
classes_result = class_filtered[result_indices]
boxes_result = boxes_filtered[result_indices]
return np.concatenate([np.expand_dims(confidence_result, axis=-1),
np.expand_dims(classes_result, axis=-1),
boxes_result],
axis=-1)
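# Note added for clarity: each row of the returned array is
# [confidence, class_index, center_x, center_y, width, height] for one surviving
# box, with x/y expressed as fractions of the image after the cell-offset decoding.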
if __name__ == '__main__':
test_data = np.reshape(np.load("/Users/Kevin/Desktop/out.npy")[2], [7, 7, 30])
print(non_max_suppression(test_data, 7, 20, 2))
# confidences = np.random.randn(3,3,2)
# classes = np.random.randn(3,3,20)
# boxes_per_cell = 2
# probs = np.zeros([3,3,2,20])
# for i in range(boxes_per_cell):
# for j in range(20):
# probs[:, :, i, j] = np.multiply(
# classes[:, :, j], confidences[:, :, i])
# probabilites = []
# for i in range(boxes_per_cell):
# probabilites += [np.expand_dims(confidences[..., i], axis=-1) * classes]
# print(probs == np.stack(probabilites, axis=-2))
| bsd-3-clause | 5,585,448,979,678,535,000 | 37.048077 | 87 | 0.581248 | false | 2.913844 | false | false | false |
canaryhealth/nlu_trainer | nlu_trainer/util.py | 1 | 1186 | # -*- coding: utf-8 -*-
import re
def phrase_index(sentence, phrase):
'''
Returns the start and end index of phrase (first instance) if it exists in
sentence.
ex: >>> phrase_index('the quick brown fox jumps over the lazy dog',
'brown fox jumps')
(10, 24)
'''
phrase = str(phrase) # in case phrase is a number
m = re.match(r'(.*?)\b'+re.escape(phrase)+r'\b', sentence)
if m:
# group 0 and 1 returns the match with and without the phrase respectively
l = len(m.group(1))
return (l, l+len(phrase)-1)
return None
def phrase_pos(sentence, phrase):
'''
Returns the start and end position of phrase (first instance) if it exists in
sentence.
  ex: >>> phrase_pos('the quick brown fox jumps over the lazy dog',
                     'brown fox jumps')
(2, 5)
'''
phrase = str(phrase) # in case phrase is a number
s_tok = sentence.split()
p_tok = phrase.split()
p_len = len(p_tok)
# get all indices where s_tok[i] matches p_tok[0]
indices = [ i for i, x in enumerate(s_tok) if x == p_tok[0] ]
for i in indices:
if s_tok[i : i+p_len] == p_tok:
return i, i+p_len
return None
| mit | -3,836,816,383,660,354,000 | 27.926829 | 79 | 0.598651 | false | 3.258242 | false | false | false |
dprog-philippe-docourt/django-qr-code | setup.py | 1 | 1589 | import re
from setuptools import setup
# Get version without importing
with open('qr_code/__init__.py', 'rb') as f:
VERSION = str(re.search('__version__ = \'(.+?)\'', f.read().decode('utf-8')).group(1))
setup(
name='django-qr-code',
version=VERSION,
packages=['qr_code', 'qr_code.qrcode', 'qr_code.templatetags'],
url='https://github.com/dprog-philippe-docourt/django-qr-code',
license='BSD 3-clause',
author='Philippe Docourt',
author_email='[email protected]',
maintainer='Philippe Docourt',
description='An application that provides tools for displaying QR codes on your Django site.',
long_description="""This application provides tools for displaying QR codes on your `Django <https://www.djangoproject.com/>`_ site.
This application depends on the `Segno QR Code generator <https://pypi.org/project/segno/>`_.
This app makes no usage of the Django models and therefore do not use any database.
Only Python >= 3.6 is supported.""",
install_requires=['segno', 'django>=2.2'],
python_requires='>=3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3 :: Only',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Natural Language :: English'
],
keywords='qr code django',
)
| bsd-3-clause | 7,363,241,608,391,071,000 | 37.756098 | 136 | 0.642542 | false | 3.730047 | false | false | false |
laginimaineb/android_fde_bruteforce | structures.py | 1 | 2262 | import struct
from StringIO import StringIO
#The crypt_mnt_ftr structure - see /system/vold/cryptfs.h
CRYPT_MNT_FTR = [('magic' , 'I'),
('major_version' , 'H'),
('minor_version' , 'H'),
('ftr_size' , 'I'),
('flags' , 'I'),
('keysize' , 'I'),
('crypt_size' , 'I'),
('fs_size' , 'Q'),
('failed_decrypt_count' , 'I'),
('crypto_type_name' , '64s'),
('spare2' , 'I'),
('master_key' , '48s'),
('salt' , '16s'),
('persist_data_offset_0' , 'Q'),
('persist_data_offset_1' , 'Q'),
('persist_data_size' , 'I'),
('kdf_type' , 'B'),
('N_factor' , 'B'),
('r_factor' , 'B'),
('p_factor' , 'B'),
('encrypted_upto' , 'Q'),
('hash_first_block' , '32s'),
('keymaster_blob' , '2048s'),
('keymaster_blob_size' , 'I'),
('scrypted_intermediate_key', '32s')]
#The qcom_km_key_blob structure - see /hardware/qcom/keymaster/keymaster_qcom.h
QCOM_KEY_BLOB = [('magic_num' , 'I'),
('version_num' , 'I'),
('modulus' , '512s'),
('modulus_size' , 'I'),
('public_exponent' , '512s'),
('public_exponent_size' , 'I'),
('iv' , '16s'),
('encrypted_private_exponent' , '512s'),
('encrypted_private_exponent_size' , 'I'),
('hmac' , '32s')]
def read_object(data, definition):
'''
Unpacks a structure using the given data and definition.
'''
reader = StringIO(data)
obj = {}
object_size = 0
for (name, stype) in definition:
object_size += struct.calcsize(stype)
obj[name] = struct.unpack(stype, reader.read(struct.calcsize(stype)))[0]
obj['object_size'] = object_size
obj['raw_data'] = data
return obj
def read_crypt_mnt_ftr(data):
return read_object(data, CRYPT_MNT_FTR)
def read_qcom_key_blob(data):
return read_object(data, QCOM_KEY_BLOB)
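# Illustrative usage sketch (not part of the original module; the path below is
# hypothetical):
#
#   with open('crypto_footer.bin', 'rb') as footer_file:
#       footer = read_crypt_mnt_ftr(footer_file.read())
#   # footer['kdf_type'], footer['salt'], etc. are then available, along with the
#   # extra 'object_size' and 'raw_data' keys that read_object adds.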
| gpl-2.0 | 1,956,154,337,582,137,300 | 36.081967 | 79 | 0.438992 | false | 3.316716 | false | false | false |
windweaver828/kspeech | commandtools.py | 1 | 1026 | #!/usr/bin/env python
def isCommand(command, args):
index = 0
for arg in args:
if isinstance(arg, list):
for ar in arg:
if isinstance(ar, list):
for a in ar:
if isinstance(a, list):
index-=1
isCommand(command, a)
elif not a in command:
break
else:
index+=1
elif ar in command:
index+=1
break
if index >= len(args):
return True
def callCommand(func, args):
if args: return func(*args)
else: return func()
def matchCommand(command, commands):
for commdef in commands.keys():
if isCommand(command, commdef):
return commands[commdef]
else: return False
def matchAndCallCommand(command, commands):
ret = matchCommand(command, commands)
if ret: callCommand(*ret)
| gpl-2.0 | 3,961,388,683,577,054,700 | 24.65 | 49 | 0.477583 | false | 4.816901 | false | false | false |
snakecon/AI_Lab | spider/book/book/pipelines.py | 1 | 4704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib
import book.database as db
from scrapy import Request
from scrapy.utils.misc import arg_to_iter
from twisted.internet.defer import DeferredList
from scrapy.pipelines.images import ImagesPipeline
from book.items import Subject, Meta, Comment
class BookPipeline(object):
def get_subject(self, item):
sql = 'SELECT * FROM subjects WHERE douban_id=%s' % item['douban_id']
return db.conn.get(sql)
def save_subject(self, item):
keys = item.keys()
values = item.values()
fields = ','.join(keys)
temp = ','.join(['%s'] * len(keys))
sql = 'INSERT INTO subjects (%s) VALUES (%s)' % (fields, temp)
db.conn.insert(sql, *values)
def get_meta(self, item):
sql = 'SELECT * FROM books WHERE douban_id=%s' % item['douban_id']
return db.conn.get(sql)
def save_meta(self, item):
keys = item.keys()
values = item.values()
fields = ','.join(keys)
temp = ','.join(['%s'] * len(keys))
sql = 'INSERT INTO books (%s) VALUES (%s)' % (fields, temp)
db.conn.insert(sql, *(i.strip() for i in values))
def update_meta(self, item):
douban_id = item.pop('douban_id')
keys = item.keys()
values = item.values()
values.append(douban_id)
fields = ['%s=' % i + '%s' for i in keys]
sql = 'UPDATE books SET %s WHERE douban_id=%s\
' % (','.join(fields), '%s')
db.conn.update(sql, *values)
def get_comment(self, item):
sql = 'SELECT * FROM comments WHERE douban_comment_id=%s\
' % item['douban_comment_id']
return db.conn.get(sql)
def save_comment(self, item):
keys = item.keys()
values = item.values()
fields = ','.join(keys)
temp = ','.join(['%s'] * len(keys))
db.conn.execute('SET NAMES utf8mb4')
sql = 'INSERT INTO comments (%s) VALUES (%s)' % (fields, temp)
db.conn.insert(sql, *(i.strip() for i in values))
def process_item(self, item, spider):
if isinstance(item, Subject):
'''
subject
'''
exsit = self.get_subject(item)
if not exsit:
self.save_subject(item)
elif isinstance(item, Meta):
'''
book meta
'''
exsit = self.get_meta(item)
if not exsit:
try:
self.save_meta(item)
except Exception, e:
print item
print e
else:
self.update_meta(item)
elif isinstance(item, Comment):
'''
book comment
'''
exsit = self.get_comment(item)
if not exsit:
try:
self.save_comment(item)
except Exception, e:
print item
print e
return item
class CoverPipeline(ImagesPipeline):
def process_item(self, item, spider):
if spider.name != 'meta':
return item
info = self.spiderinfo
requests = arg_to_iter(self.get_media_requests(item, info))
dlist = [self._process_request(r, info) for r in requests]
dfd = DeferredList(dlist, consumeErrors=1)
return dfd.addCallback(self.item_completed, item, info)
def file_path(self, request, response=None, info=None):
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are \
deprecated, please use file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
elif not hasattr(self.image_key, '_base'):
_warn()
return self.image_key(url)
image_guid = hashlib.sha1(url).hexdigest()
return '%s%s/%s%s/%s.jpg\
' % (image_guid[9], image_guid[19], image_guid[29], image_guid[39], image_guid)
def get_media_requests(self, item, info):
if item['cover']:
return Request(item['cover'])
def item_completed(self, results, item, info):
image_paths = [x['path'] for ok, x in results if ok]
if image_paths:
item['cover'] = image_paths[0]
else:
item['cover'] = ''
return item
| apache-2.0 | 2,729,605,431,872,991,000 | 32.361702 | 88 | 0.540816 | false | 3.85258 | false | false | false |
stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/modules/dnsutil.py | 1 | 11113 | # -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
import socket
# Import python libs
import logging
import time
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.fopen(hostsfile, 'r') as fp_:
hosts = fp_.read()
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
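# Illustrative note (added for clarity): for a hosts file containing the line
# "127.0.0.1 localhost myhost", parse_hosts returns
# {'127.0.0.1': ['localhost', 'myhost']}.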
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.fopen(hostsfile, 'a') as fp_:
fp_.write(append_line)
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.fopen(hostsfile, 'r') as fp_:
hosts = fp_.read()
host_list = entries.split(',')
with salt.utils.fopen(hostsfile, 'w') as out_file:
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write('{0}\n'.format(line))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(' '.join(comps))
out_file.write('\n')
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.fopen(zonefile, 'r') as fp_:
zone = fp_.read()
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.'):
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
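# Worked examples (added for clarity): _to_seconds('3H') == 10800,
# _to_seconds('2D') == 172800, and anything unreadable or longer than one week
# (e.g. '2W' or '9999999') is clamped to 604800 seconds.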
def _has_dig():
'''
The dig-specific functions have been moved into their own module, but
because they are also DNS utilities, a compatibility layer exists. This
function helps add that layer.
'''
return salt.utils.which('dig') is not None
def check_ip(ip_addr):
'''
Check that string ip_addr is a valid IP
CLI Example:
.. code-block:: bash
salt ns1 dig.check_ip 127.0.0.1
'''
if _has_dig():
return __salt__['dig.check_ip'](ip_addr)
return 'This function requires dig, which is not currently available'
def A(host, nameserver=None):
'''
Return the A record(s) for `host`.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.A www.google.com
'''
if _has_dig():
return __salt__['dig.A'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for `host`.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dig.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dig.SPF google.com
'''
if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver)
return 'This function requires dig, which is not currently available'
def MX(domain, resolve=False, nameserver=None):
'''
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dig.MX google.com
'''
if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def serial(zone='', update=False):
'''
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
If ``update`` is False, the function will retrieve an existing serial or
return the current date if no serial is stored. Nothing will be stored
If ``update`` is True, the function will set the serial to the current date
if none exist or if the existing serial is for a previous date. If a serial
for greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
'''
grains = {}
key = 'dnsserial'
if zone:
key += '_{0}'.format(zone)
stored = __salt__['grains.get'](key=key)
present = time.strftime('%Y%m%d01')
if not update:
return stored or present
if stored and stored >= present:
current = str(int(stored) + 1)
else:
current = present
__salt__['grains.setval'](key=key, val=current)
return current
| apache-2.0 | -4,666,205,067,642,195,000 | 27.494872 | 116 | 0.579052 | false | 3.917166 | false | false | false |
shiburizu/py2discord | py2discord.py | 1 | 8389 | import discord
import sqlite3 as sql
import logging
import cleverbot
import random
logging.basicConfig(level=logging.INFO)
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
from apiclient.discovery import build
import apiclient.errors
# Please refer to the README to find where you should paste your bot's credentials for services.
blacklistwords = ['image','gif','help','add','talk','permissions','blacklist','whitelist']
maxwhiterank = 3
maxblackrank = 3
service = build("customsearch", "v1",
developerKey="CREATE FROM CONSOLE.DEVELOPERS.GOOGLE.COM")
class client(discord.Client):
def isBlacklisted(self,msg,p):
c.execute("SELECT level FROM blacklist where id = ?", (msg.author.id,))
blacklist = c.fetchone()
if blacklist:
val = int(blacklist[0][0])
if val >= int(p):
self.send_message(msg.channel,'%s is blacklist level %s, therefore this command is locked.' % (msg.author.name, blacklist[0][0]))
return True
else:
return False
else:
return False
def isWhitelisted(self,msg,p):
c.execute("SELECT level FROM whitelist where id = ?", (msg.author.id,))
whitelist = c.fetchone()
if whitelist:
val = int(whitelist[0][0])
if val >= int(p):
return True
else:
self.send_message(msg.channel,'%s does not have sufficient permissions to use that command.' % msg.author.name)
return False
else:
self.send_message(msg.channel,'%s does not have sufficient permissions to use that command.' % msg.author.name)
return False
def on_message(self, message):
p = self.isBlacklisted(message,'3')
if p == False:
if message.content.startswith('$help'):
commands = c.execute('SELECT name FROM cmds')
self.send_message(message.channel,
"""py2discord is a Discord chat bot written in Python
by https://github.com/shiburizu/""" % ', '.join([str(i[0])for i in commands]))
elif message.content.startswith('$blacklist '):
try:
p = self.isWhitelisted(message,'1') #check whitelist 1
if p == True:
insert = (message.content[13:].replace('>','')).split(' ', 1)
try:
if insert[1].isdigit():
print insert
if int(insert[1]) > maxblackrank:
self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
else:
c.execute('INSERT OR REPLACE INTO blacklist(id, level) VALUES(?,?)',
(insert[0],insert[1]))
db.commit()
self.send_message(message.channel,
'Successfully blacklisted ID %s at level %s.' % (insert[0],insert[1]))
else:
self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
except IndexError:
self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
except sql.Error as e:
if db:
db.rollback()
print "Error %s:" % e.args[0]
self.send_message(message.channel,
"Something went wrong. It has been logged.")
elif message.content.startswith('$whitelist '):
try:
p = self.isWhitelisted(message,'2') #check whitelist 2
if p == True:
insert = (message.content[13:].replace('>','')).split(' ', 1)
try:
if insert[1].isdigit():
print insert
if int(insert[1]) > maxwhiterank:
self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
else:
c.execute('INSERT OR REPLACE INTO whitelist(id, level) VALUES(?,?)',
(insert[0],insert[1]))
db.commit()
self.send_message(message.channel,
'Successfully whitelisted ID %s at level %s.' % (insert[0],insert[1]))
else:
self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
except IndexError:
self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
except sql.Error as e:
if db:
db.rollback()
print "Error %s:" % e.args[0]
self.send_message(message.channel,
"Something went wrong. It has been logged.")
elif message.content.startswith('$image '):
try:
p = self.isBlacklisted(message,'1') #check blacklist 1
if p == False:
query = message.content[7:]
if query != '':
res = service.cse().list(
q=query,
cx='INSERT CX KEY FROM CSE.GOOGLE.COM',
searchType='image',
num=10,
safe='off'
).execute()
if not 'items' in res:
self.send_message(message.channel, "No image found.")
else:
results = []
for item in res['items']:
results.append(item['link'])
self.send_message(message.channel, random.choice(results))
else:
self.send_message(message.channel,'Please input search terms.')
except apiclient.errors.HttpError as e:
self.send_message(message.channel,
"There was a problem with your request. Here is some information:```%s```" % e)
elif message.content.startswith('$gif '):
try:
p = self.isBlacklisted(message,'1') #check blacklist 1
if p == False:
query = message.content[7:]
if query != '' or None:
res = service.cse().list(
q=query,
cx='INSERT CX KEY FROM CSE.GOOGLE.COM',
searchType='image',
fileType='gif',
num=10,
safe='off'
).execute()
if not 'items' in res:
self.send_message(message.channel, "No image found.")
else:
results = []
for item in res['items']:
results.append(item['link'])
self.send_message(message.channel, random.choice(results))
else:
self.send_message(message.channel,'Please input search terms.')
except apiclient.errors.HttpError as e:
self.send_message(message.channel,
"There was a problem with your request. Here is some information:```%s```" % e)
elif message.content.startswith('$add '):
try:
p = self.isBlacklisted(message,'2')
if p == False:
insert = (message.content[5:].encode('utf-8')).split(' ', 1)
                        if insert[0] not in blacklistwords:
print insert
c.execute('INSERT OR ABORT INTO cmds(name,cmd) VALUES(?,?)',
(insert[0],insert[1]))
db.commit()
self.send_message(message.channel,
"Command added.")
else:
self.send_message(message.channel,
"This is a blacklisted word, and cannot be added.")
except sql.IntegrityError:
self.send_message(message.channel, "Already exists. Aborted.")
except sql.Error as e:
if db:
db.rollback()
print "Error %s:" % e.args[0]
self.send_message(message.channel,
"Something went wrong. It has been logged.")
elif message.content.startswith('$talk '):
reply = talk.ask(message.content[6:])
print "Was asked:", message.content[6:], "Replied with:", reply
self.send_message(message.channel, reply)
elif message.content.startswith('$permissions'):
c.execute('SELECT level FROM whitelist WHERE id = ?', (message.author.id,))
white = c.fetchone()
if not white:
white = 0
else:
white = white[0][0]
c.execute('SELECT level FROM blacklist WHERE id = ?', (message.author.id,))
black = c.fetchone()
if not black:
black = 0
else:
black = black[0][0]
self.send_message(message.channel,
'%s, your Discord ID is %s. Your whitelist level is %s and blacklist level is %s.' % (
message.author.name,message.author.id,white,black))
elif message.content.startswith('$'):
try:
c.execute("SELECT cmd FROM cmds WHERE name = ?",
(message.content[1:],))
fetch = c.fetchone()
self.send_message(message.channel, fetch[0])
except TypeError:
pass
talk = cleverbot.Cleverbot()
bot = client()
db = sql.connect('commands.db')
db.text_factory = str
c = db.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS cmds(name VARCHAR(10) UNIQUE,
cmd VARCHAR(64));''')
c.execute('''CREATE TABLE IF NOT EXISTS blacklist(id VARCHAR(10) UNIQUE,
level VARCHAR(64));''')
c.execute('''CREATE TABLE IF NOT EXISTS whitelist(id VARCHAR(10) UNIQUE,
level VARCHAR(64));''')
db.commit()
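# The '$add', '$whitelist' and '$permissions' handlers above read and write these three
# tables. Illustrative example: '$add greet Hello!' stores ('greet', 'Hello!') in cmds,
# so a later '$greet' message is answered with 'Hello!' by the generic '$' handler.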
bot.login('EMAIL','PASSWORD')
bot.run()
| isc | -2,526,754,386,635,023,000 | 35.004292 | 133 | 0.635713 | false | 3.351578 | false | false | false |
LMSlay/wiper | modules/clamav.py | 1 | 2875 | # This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import getopt
try:
import pyclamd
HAVE_CLAMD = True
except ImportError:
HAVE_CLAMD = False
from viper.common.out import *
from viper.common.abstracts import Module
from viper.core.session import __sessions__
class ClamAV(Module):
cmd = 'clamav'
description = 'Scan file from local ClamAV daemon'
authors = ['neriberto']
def run(self):
def usage():
self.log('', "usage: clamav [-h] [-s]")
def help():
usage()
self.log('', "")
self.log('', "Options:")
self.log('', "\t--help (-h)\tShow this help message")
self.log('', "\t--socket(-s)\tSpecify an unix socket (default: Clamd Unix Socket)")
self.log('', "")
if not HAVE_CLAMD:
self.log('error', "Missing dependency, install requests (`pip install pyclamd`)")
return
try:
opts, argv = getopt.getopt(self.args, 'hs:', ['help', 'socket='])
except getopt.GetoptError as e:
self.log('', e)
usage()
return
daemon = None
result = None
socket = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-s', '--socket'):
self.log('info', "Using socket {0} to connect to ClamAV daemon".format(value))
socket = value
try:
daemon = pyclamd.ClamdUnixSocket(socket)
except Exception as e:
self.log('error', "Daemon connection failure, {0}".format(e))
return
if not __sessions__.is_set():
self.log('error', "No session opened")
return
try:
if not daemon:
daemon = pyclamd.ClamdUnixSocket()
socket = 'Clamav'
except Exception as e:
self.log('error', "Daemon connection failure, {0}".format(e))
return
try:
if daemon.ping():
results = daemon.scan_file(__sessions__.current.file.path)
else:
self.log('error', "Unable to connect to the daemon")
except Exception as e:
self.log('error', "Unable to scan with antivirus daemon, {0}".format(e))
return
found = None
name = 'not found'
if results:
for item in results:
found = results[item][0]
name = results[item][1]
if found == 'ERROR':
self.log('error', "Check permissions of the binary folder, {0}".format(name))
else:
self.log('info', "Daemon {0} returns: {1}".format(socket, name))
| bsd-3-clause | -1,005,792,964,719,011,700 | 29.913978 | 95 | 0.510957 | false | 4.246677 | false | false | false |
vmendez/DIRAC | Resources/Storage/StorageElement.py | 1 | 35478 | """ This is the StorageElement class.
"""
from types import ListType
__RCSID__ = "$Id$"
# # custom duty
import re
import time
import datetime
import copy
import errno
# # from DIRAC
from DIRAC import gLogger, gConfig, siteName
from DIRAC.Core.Utilities import DErrno, DError
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR, returnSingleResult
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
class StorageElementCache( object ):
def __init__( self ):
self.seCache = DictCache()
def __call__( self, name, protocols = None, vo = None, hideExceptions = False ):
self.seCache.purgeExpired( expiredInSeconds = 60 )
argTuple = ( name, protocols, vo )
seObj = self.seCache.get( argTuple )
if not seObj:
seObj = StorageElementItem( name, protocols, vo, hideExceptions = hideExceptions )
# Add the StorageElement to the cache for 1/2 hour
self.seCache.add( argTuple, 1800, seObj )
return seObj
class StorageElementItem( object ):
"""
.. class:: StorageElement
common interface to the grid storage element
  self.name is the resolved name of the StorageElement, e.g. CERN-tape
  self.options is a dictionary containing the general options defined in the CS, e.g. self.options['Backend'] = 'Castor2'
self.storages is a list of the stub objects created by StorageFactory for the protocols found in the CS.
self.localPlugins is a list of the local protocols that were created by StorageFactory
self.remotePlugins is a list of the remote protocols that were created by StorageFactory
self.protocolOptions is a list of dictionaries containing the options found in the CS. (should be removed)
dynamic method :
retransferOnlineFile( lfn )
exists( lfn )
isFile( lfn )
getFile( lfn, localPath = False )
putFile( lfnLocal, sourceSize = 0 ) : {lfn:local}
replicateFile( lfn, sourceSize = 0 )
getFileMetadata( lfn )
getFileSize( lfn )
removeFile( lfn )
prestageFile( lfn, lifetime = 86400 )
prestageFileStatus( lfn )
pinFile( lfn, lifetime = 60 * 60 * 24 )
releaseFile( lfn )
isDirectory( lfn )
getDirectoryMetadata( lfn )
getDirectorySize( lfn )
listDirectory( lfn )
removeDirectory( lfn, recursive = False )
createDirectory( lfn )
putDirectory( lfn )
getDirectory( lfn, localPath = False )
"""
__deprecatedArguments = ["singleFile", "singleDirectory"] # Arguments that are now useless
# Some methods have a different name in the StorageElement and the plugins...
# We could avoid this static list in the __getattr__ by checking the storage plugin and so on
# but fine... let's not be too smart, otherwise it becomes unreadable :-)
__equivalentMethodNames = {"exists" : "exists",
"isFile" : "isFile",
"getFile" : "getFile",
"putFile" : "putFile",
"replicateFile" : "putFile",
"getFileMetadata" : "getFileMetadata",
"getFileSize" : "getFileSize",
"removeFile" : "removeFile",
"prestageFile" : "prestageFile",
"prestageFileStatus" : "prestageFileStatus",
"pinFile" : "pinFile",
"releaseFile" : "releaseFile",
"isDirectory" : "isDirectory",
"getDirectoryMetadata" : "getDirectoryMetadata",
"getDirectorySize" : "getDirectorySize",
"listDirectory" : "listDirectory",
"removeDirectory" : "removeDirectory",
"createDirectory" : "createDirectory",
"putDirectory" : "putDirectory",
"getDirectory" : "getDirectory",
}
# We can set default argument in the __executeFunction which impacts all plugins
__defaultsArguments = {"putFile" : {"sourceSize" : 0 },
"getFile": { "localPath": False },
"prestageFile" : { "lifetime" : 86400 },
"pinFile" : { "lifetime" : 60 * 60 * 24 },
"removeDirectory" : { "recursive" : False },
"getDirectory" : { "localPath" : False },
}
def __init__( self, name, plugins = None, vo = None, hideExceptions = False ):
""" c'tor
:param str name: SE name
:param list plugins: requested storage plugins
:param vo
"""
self.methodName = None
if vo:
self.vo = vo
else:
result = getVOfromProxyGroup()
if not result['OK']:
return
self.vo = result['Value']
self.opHelper = Operations( vo = self.vo )
proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
useProxy = ( gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "UnknownProtocol" )
in proxiedProtocols )
if not useProxy:
useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
if not useProxy:
useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )
self.valid = True
if plugins == None:
res = StorageFactory( useProxy = useProxy, vo = self.vo ).getStorages( name, pluginList = [], hideExceptions = hideExceptions )
else:
res = StorageFactory( useProxy = useProxy, vo = self.vo ).getStorages( name, pluginList = plugins, hideExceptions = hideExceptions )
if not res['OK']:
self.valid = False
self.name = name
self.errorReason = res['Message']
else:
factoryDict = res['Value']
self.name = factoryDict['StorageName']
self.options = factoryDict['StorageOptions']
self.localPlugins = factoryDict['LocalPlugins']
self.remotePlugins = factoryDict['RemotePlugins']
self.storages = factoryDict['StorageObjects']
self.protocolOptions = factoryDict['ProtocolOptions']
self.turlProtocols = factoryDict['TurlProtocols']
for storage in self.storages:
storage.setStorageElement( self )
self.log = gLogger.getSubLogger( "SE[%s]" % self.name )
self.useCatalogURL = gConfig.getValue( '/Resources/StorageElements/%s/UseCatalogURL' % self.name, False )
# 'getTransportURL',
self.readMethods = [ 'getFile',
'prestageFile',
'prestageFileStatus',
'getDirectory']
self.writeMethods = [ 'retransferOnlineFile',
'putFile',
'replicateFile',
'pinFile',
'releaseFile',
'createDirectory',
'putDirectory' ]
self.removeMethods = [ 'removeFile', 'removeDirectory' ]
self.checkMethods = [ 'exists',
'getDirectoryMetadata',
'getDirectorySize',
'getFileSize',
'getFileMetadata',
'listDirectory',
'isDirectory',
'isFile',
]
self.okMethods = [ 'getLocalProtocols',
'getProtocols',
'getRemoteProtocols',
'getStorageElementName',
'getStorageParameters',
'getTransportURL',
'isLocalSE' ]
self.__fileCatalog = None
def dump( self ):
""" Dump to the logger a summary of the StorageElement items. """
log = self.log.getSubLogger( 'dump', True )
log.verbose( "Preparing dump for StorageElement %s." % self.name )
if not self.valid:
log.debug( "Failed to create StorageElement plugins.", self.errorReason )
return
i = 1
outStr = "\n\n============ Options ============\n"
for key in sorted( self.options ):
outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), self.options[key] )
for storage in self.storages:
outStr = "%s============Protocol %s ============\n" % ( outStr, i )
storageParameters = storage.getParameters()
for key in sorted( storageParameters ):
outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), storageParameters[key] )
i = i + 1
log.verbose( outStr )
#################################################################################################
#
# These are the basic get functions for storage configuration
#
def getStorageElementName( self ):
""" SE name getter """
self.log.getSubLogger( 'getStorageElementName' ).verbose( "The Storage Element name is %s." % self.name )
return S_OK( self.name )
def getChecksumType( self ):
""" get local /Resources/StorageElements/SEName/ChecksumType option if defined, otherwise
global /Resources/StorageElements/ChecksumType
"""
self.log.getSubLogger( 'getChecksumType' ).verbose( "get checksum type for %s." % self.name )
return S_OK( str( gConfig.getValue( "/Resources/StorageElements/ChecksumType", "ADLER32" ) ).upper()
if "ChecksumType" not in self.options else str( self.options["ChecksumType"] ).upper() )
def getStatus( self ):
"""
Return Status of the SE, a dictionary with:
- Read: True (is allowed), False (it is not allowed)
- Write: True (is allowed), False (it is not allowed)
- Remove: True (is allowed), False (it is not allowed)
- Check: True (is allowed), False (it is not allowed).
NB: Check always allowed IF Read is allowed (regardless of what set in the Check option of the configuration)
- DiskSE: True if TXDY with Y > 0 (defaults to True)
- TapeSE: True if TXDY with X > 0 (defaults to False)
- TotalCapacityTB: float (-1 if not defined)
- DiskCacheTB: float (-1 if not defined)
"""
self.log.getSubLogger( 'getStatus' ).verbose( "determining status of %s." % self.name )
retDict = {}
if not self.valid:
retDict['Read'] = False
retDict['Write'] = False
retDict['Remove'] = False
retDict['Check'] = False
retDict['DiskSE'] = False
retDict['TapeSE'] = False
retDict['TotalCapacityTB'] = -1
retDict['DiskCacheTB'] = -1
return S_OK( retDict )
# If nothing is defined in the CS Access is allowed
# If something is defined, then it must be set to Active
retDict['Read'] = not ( 'ReadAccess' in self.options and self.options['ReadAccess'] not in ( 'Active', 'Degraded' ) )
retDict['Write'] = not ( 'WriteAccess' in self.options and self.options['WriteAccess'] not in ( 'Active', 'Degraded' ) )
retDict['Remove'] = not ( 'RemoveAccess' in self.options and self.options['RemoveAccess'] not in ( 'Active', 'Degraded' ) )
if retDict['Read']:
retDict['Check'] = True
else:
retDict['Check'] = not ( 'CheckAccess' in self.options and self.options['CheckAccess'] not in ( 'Active', 'Degraded' ) )
diskSE = True
tapeSE = False
if 'SEType' in self.options:
# Type should follow the convention TXDY
seType = self.options['SEType']
diskSE = re.search( 'D[1-9]', seType ) != None
tapeSE = re.search( 'T[1-9]', seType ) != None
retDict['DiskSE'] = diskSE
retDict['TapeSE'] = tapeSE
try:
retDict['TotalCapacityTB'] = float( self.options['TotalCapacityTB'] )
except Exception:
retDict['TotalCapacityTB'] = -1
try:
retDict['DiskCacheTB'] = float( self.options['DiskCacheTB'] )
except Exception:
retDict['DiskCacheTB'] = -1
return S_OK( retDict )
def isValid( self, operation = '' ):
""" check CS/RSS statuses for :operation:
:param str operation: operation name
"""
log = self.log.getSubLogger( 'isValid', True )
log.verbose( "Determining if the StorageElement %s is valid for VO %s" % ( self.name, self.vo ) )
if not self.valid:
log.debug( "Failed to create StorageElement plugins.", self.errorReason )
return S_ERROR( "SE.isValid: Failed to create StorageElement plugins: %s" % self.errorReason )
# Check if the Storage Element is eligible for the user's VO
if 'VO' in self.options and not self.vo in self.options['VO']:
log.debug( "StorageElement is not allowed for VO", self.vo )
return DError( errno.EACCES, "StorageElement.isValid: StorageElement is not allowed for VO" )
log.verbose( "Determining if the StorageElement %s is valid for %s" % ( self.name, operation ) )
if ( not operation ) or ( operation in self.okMethods ):
return S_OK()
# Determine whether the StorageElement is valid for checking, reading, writing
res = self.getStatus()
if not res[ 'OK' ]:
log.debug( "Could not call getStatus", res['Message'] )
return S_ERROR( "SE.isValid could not call the getStatus method" )
checking = res[ 'Value' ][ 'Check' ]
reading = res[ 'Value' ][ 'Read' ]
writing = res[ 'Value' ][ 'Write' ]
removing = res[ 'Value' ][ 'Remove' ]
# Determine whether the requested operation can be fulfilled
if ( not operation ) and ( not reading ) and ( not writing ) and ( not checking ):
log.debug( "Read, write and check access not permitted." )
return DError( errno.EACCES, "SE.isValid: Read, write and check access not permitted." )
# The supplied operation can be 'Read','Write' or any of the possible StorageElement methods.
if ( operation in self.readMethods ) or ( operation.lower() in ( 'read', 'readaccess' ) ):
operation = 'ReadAccess'
elif operation in self.writeMethods or ( operation.lower() in ( 'write', 'writeaccess' ) ):
operation = 'WriteAccess'
elif operation in self.removeMethods or ( operation.lower() in ( 'remove', 'removeaccess' ) ):
operation = 'RemoveAccess'
elif operation in self.checkMethods or ( operation.lower() in ( 'check', 'checkaccess' ) ):
operation = 'CheckAccess'
else:
log.debug( "The supplied operation is not known.", operation )
return DError( DErrno.ENOMETH , "SE.isValid: The supplied operation is not known." )
log.debug( "check the operation: %s " % operation )
# Check if the operation is valid
if operation == 'CheckAccess':
if not reading:
if not checking:
log.debug( "Check access not currently permitted." )
return DError( errno.EACCES, "SE.isValid: Check access not currently permitted." )
if operation == 'ReadAccess':
if not reading:
log.debug( "Read access not currently permitted." )
return DError( errno.EACCES, "SE.isValid: Read access not currently permitted." )
if operation == 'WriteAccess':
if not writing:
log.debug( "Write access not currently permitted." )
return DError( errno.EACCES, "SE.isValid: Write access not currently permitted." )
if operation == 'RemoveAccess':
if not removing:
log.debug( "Remove access not currently permitted." )
return DError( errno.EACCES, "SE.isValid: Remove access not currently permitted." )
return S_OK()
def getPlugins( self ):
""" Get the list of all the plugins defined for this Storage Element
"""
self.log.getSubLogger( 'getPlugins' ).verbose( "Obtaining all plugins of %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
allPlugins = self.localPlugins + self.remotePlugins
return S_OK( allPlugins )
def getRemotePlugins( self ):
""" Get the list of all the remote access protocols defined for this Storage Element
"""
self.log.getSubLogger( 'getRemotePlugins' ).verbose( "Obtaining remote protocols for %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
return S_OK( self.remotePlugins )
def getLocalPlugins( self ):
""" Get the list of all the local access protocols defined for this Storage Element
"""
self.log.getSubLogger( 'getLocalPlugins' ).verbose( "Obtaining local protocols for %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
return S_OK( self.localPlugins )
def getStorageParameters( self, plugin ):
""" Get plugin specific options
:param plugin : plugin we are interested in
"""
log = self.log.getSubLogger( 'getStorageParameters' )
log.verbose( "Obtaining storage parameters for %s plugin %s." % ( self.name,
plugin ) )
res = self.getPlugins()
if not res['OK']:
return res
availablePlugins = res['Value']
if not plugin in availablePlugins:
errStr = "Requested plugin not available for SE."
log.debug( errStr, '%s for %s' % ( plugin, self.name ) )
return S_ERROR( errStr )
for storage in self.storages:
storageParameters = storage.getParameters()
if storageParameters['PluginName'] == plugin:
return S_OK( storageParameters )
errStr = "Requested plugin supported but no object found."
log.debug( errStr, "%s for %s" % ( plugin, self.name ) )
return S_ERROR( errStr )
def negociateProtocolWithOtherSE( self, sourceSE, protocols = None ):
""" Negotiate what protocol could be used for a third party transfer
between the sourceSE and ourselves. If protocols is given,
the chosen protocol has to be among those
:param sourceSE : storageElement instance of the sourceSE
:param protocols: protocol restriction list
        :return a list of protocols that fit the needs, or None
"""
# We should actually separate source and destination protocols
# For example, an SRM can get as a source an xroot or gsiftp url...
# but with the current implementation, we get only srm
destProtocols = set( [destStorage.protocolParameters['Protocol'] for destStorage in self.storages] )
sourceProtocols = set( [sourceStorage.protocolParameters['Protocol'] for sourceStorage in sourceSE.storages] )
commonProtocols = destProtocols & sourceProtocols
if protocols:
protocols = set( list( protocols ) ) if protocols else set()
commonProtocols = commonProtocols & protocols
return S_OK( list( commonProtocols ) )
#################################################################################################
#
# These are the basic get functions for lfn manipulation
#
def __getURLPath( self, url ):
""" Get the part of the URL path below the basic storage path.
This path must coincide with the LFN of the file in order to be compliant with the DIRAC conventions.
"""
log = self.log.getSubLogger( '__getURLPath' )
log.verbose( "Getting path from url in %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
res = pfnparse( url )
if not res['OK']:
return res
fullURLPath = '%s/%s' % ( res['Value']['Path'], res['Value']['FileName'] )
# Check all available storages and check whether the url is for that protocol
urlPath = ''
for storage in self.storages:
res = storage.isNativeURL( url )
if res['OK']:
if res['Value']:
parameters = storage.getParameters()
saPath = parameters['Path']
if not saPath:
# If the sa path doesn't exist then the url path is the entire string
urlPath = fullURLPath
else:
if re.search( saPath, fullURLPath ):
# Remove the sa path from the fullURLPath
urlPath = fullURLPath.replace( saPath, '' )
if urlPath:
return S_OK( urlPath )
# This should never happen. DANGER!!
errStr = "Failed to get the url path for any of the protocols!!"
log.debug( errStr )
return S_ERROR( errStr )
def getLFNFromURL( self, urls ):
""" Get the LFN from the PFNS .
:param lfn : input lfn or lfns (list/dict)
"""
result = checkArgumentFormat( urls )
if result['OK']:
urlDict = result['Value']
else:
errStr = "Supplied urls must be string, list of strings or a dictionary."
self.log.getSubLogger( 'getLFNFromURL' ).debug( errStr )
return DError( errno.EINVAL, errStr )
retDict = { "Successful" : {}, "Failed" : {} }
for url in urlDict:
res = self.__getURLPath( url )
if res["OK"]:
retDict["Successful"][url] = res["Value"]
else:
retDict["Failed"][url] = res["Message"]
return S_OK( retDict )
###########################################################################################
#
# This is the generic wrapper for file operations
#
def getURL( self, lfn, protocol = False, replicaDict = None ):
""" execute 'getTransportURL' operation.
:param str lfn: string, list or dictionary of lfns
:param protocol: if no protocol is specified, we will request self.turlProtocols
:param replicaDict: optional results from the File Catalog replica query
"""
self.log.getSubLogger( 'getURL' ).verbose( "Getting accessUrl %s for lfn in %s." % ( "(%s)" % protocol if protocol else "", self.name ) )
if not protocol:
protocols = self.turlProtocols
elif type( protocol ) is ListType:
protocols = protocol
elif type( protocol ) == type( '' ):
protocols = [protocol]
self.methodName = "getTransportURL"
result = self.__executeMethod( lfn, protocols = protocols )
return result
def __isLocalSE( self ):
""" Test if the Storage Element is local in the current context
"""
self.log.getSubLogger( 'LocalSE' ).verbose( "Determining whether %s is a local SE." % self.name )
import DIRAC
localSEs = getSEsForSite( DIRAC.siteName() )['Value']
if self.name in localSEs:
return S_OK( True )
else:
return S_OK( False )
def __getFileCatalog( self ):
if not self.__fileCatalog:
self.__fileCatalog = FileCatalog( vo = self.vo )
return self.__fileCatalog
def __generateURLDict( self, lfns, storage, replicaDict = {} ):
""" Generates a dictionary (url : lfn ), where the url are constructed
from the lfn using the constructURLFromLFN method of the storage plugins.
:param: lfns : dictionary {lfn:whatever}
:returns dictionary {constructed url : lfn}
"""
log = self.log.getSubLogger( "__generateURLDict" )
log.verbose( "generating url dict for %s lfn in %s." % ( len( lfns ), self.name ) )
urlDict = {} # url : lfn
failed = {} # lfn : string with errors
for lfn in lfns:
if self.useCatalogURL:
# Is this self.name alias proof?
url = replicaDict.get( lfn, {} ).get( self.name, '' )
if url:
urlDict[url] = lfn
continue
else:
fc = self.__getFileCatalog()
          result = fc.getReplicas( lfn )
          if not result['OK']:
            failed[lfn] = result['Message']
            continue
          url = result['Value']['Successful'].get( lfn, {} ).get( self.name, '' )
if not url:
failed[lfn] = 'Failed to get catalog replica'
else:
# Update the URL according to the current SE description
result = returnSingleResult( storage.updateURL( url ) )
if not result['OK']:
failed[lfn] = result['Message']
else:
urlDict[result['Value']] = lfn
else:
result = storage.constructURLFromLFN( lfn, withWSUrl = True )
if not result['OK']:
errStr = result['Message']
log.debug( errStr, 'for %s' % ( lfn ) )
failed[lfn] = "%s %s" % ( failed[lfn], errStr ) if lfn in failed else errStr
else:
urlDict[result['Value']] = lfn
res = S_OK( {'Successful': urlDict, 'Failed' : failed} )
# res['Failed'] = failed
return res
def __executeMethod( self, lfn, *args, **kwargs ):
""" Forward the call to each storage in turn until one works.
The method to be executed is stored in self.methodName
        :param lfn : string, list or dictionary
        :param *args : variable number of non-keyword arguments. SHOULD BE EMPTY
:param **kwargs : keyword arguments
:returns S_OK( { 'Failed': {lfn : reason} , 'Successful': {lfn : value} } )
The Failed dict contains the lfn only if the operation failed on all the storages
The Successful dict contains the value returned by the successful storages.
"""
removedArgs = {}
log = self.log.getSubLogger( '__executeMethod' )
log.verbose( "preparing the execution of %s" % ( self.methodName ) )
    # args should normally be empty to avoid problems...
if len( args ):
log.verbose( "args should be empty!%s" % args )
# because there is normally only one kw argument, I can move it from args to kwargs
methDefaultArgs = StorageElementItem.__defaultsArguments.get( self.methodName, {} ).keys()
if len( methDefaultArgs ):
kwargs[methDefaultArgs[0] ] = args[0]
args = args[1:]
log.verbose( "put it in kwargs, but dirty and might be dangerous!args %s kwargs %s" % ( args, kwargs ) )
# We check the deprecated arguments
for depArg in StorageElementItem.__deprecatedArguments:
if depArg in kwargs:
log.verbose( "%s is not an allowed argument anymore. Please change your code!" % depArg )
removedArgs[depArg] = kwargs[depArg]
del kwargs[depArg]
# Set default argument if any
methDefaultArgs = StorageElementItem.__defaultsArguments.get( self.methodName, {} )
for argName in methDefaultArgs:
if argName not in kwargs:
log.debug( "default argument %s for %s not present.\
Setting value %s." % ( argName, self.methodName, methDefaultArgs[argName] ) )
kwargs[argName] = methDefaultArgs[argName]
res = checkArgumentFormat( lfn )
if not res['OK']:
errStr = "Supplied lfns must be string, list of strings or a dictionary."
log.debug( errStr )
return res
lfnDict = res['Value']
log.verbose( "Attempting to perform '%s' operation with %s lfns." % ( self.methodName, len( lfnDict ) ) )
res = self.isValid( operation = self.methodName )
if not res['OK']:
return res
else:
if not self.valid:
return S_ERROR( self.errorReason )
successful = {}
failed = {}
localSE = self.__isLocalSE()['Value']
# Try all of the storages one by one
for storage in self.storages:
# Determine whether to use this storage object
storageParameters = storage.getParameters()
if not storageParameters:
log.debug( "Failed to get storage parameters.", "%s %s" % ( self.name, res['Message'] ) )
continue
pluginName = storageParameters['PluginName']
if not lfnDict:
log.debug( "No lfns to be attempted for %s protocol." % pluginName )
continue
if not ( pluginName in self.remotePlugins ) and not localSE and not storage.pluginName == "Proxy":
# If the SE is not local then we can't use local protocols
log.debug( "Local protocol not appropriate for remote use: %s." % pluginName )
continue
log.verbose( "Generating %s protocol URLs for %s." % ( len( lfnDict ), pluginName ) )
replicaDict = kwargs.pop( 'replicaDict', {} )
if storage.pluginName != "Proxy":
res = self.__generateURLDict( lfnDict, storage, replicaDict = replicaDict )
urlDict = res['Value']['Successful'] # url : lfn
failed.update( res['Value']['Failed'] )
else:
urlDict = dict( [ ( lfn, lfn ) for lfn in lfnDict ] )
if not len( urlDict ):
log.verbose( "__executeMethod No urls generated for protocol %s." % pluginName )
else:
log.verbose( "Attempting to perform '%s' for %s physical files" % ( self.methodName, len( urlDict ) ) )
fcn = None
if hasattr( storage, self.methodName ) and callable( getattr( storage, self.methodName ) ):
fcn = getattr( storage, self.methodName )
if not fcn:
return DError( DErrno.ENOMETH, "SE.__executeMethod: unable to invoke %s, it isn't a member function of storage" )
urlsToUse = {} # url : the value of the lfn dictionary for the lfn of this url
for url in urlDict:
urlsToUse[url] = lfnDict[urlDict[url]]
startDate = datetime.datetime.utcnow()
startTime = time.time()
res = fcn( urlsToUse, *args, **kwargs )
elapsedTime = time.time() - startTime
self.addAccountingOperation( urlsToUse, startDate, elapsedTime, storageParameters, res )
if not res['OK']:
errStr = "Completely failed to perform %s." % self.methodName
log.debug( errStr, 'with plugin %s: %s' % ( pluginName, res['Message'] ) )
for lfn in urlDict.values():
if lfn not in failed:
failed[lfn] = ''
failed[lfn] = "%s %s" % ( failed[lfn], res['Message'] ) if failed[lfn] else res['Message']
else:
for url, lfn in urlDict.items():
if url not in res['Value']['Successful']:
if lfn not in failed:
failed[lfn] = ''
if url in res['Value']['Failed']:
self.log.debug( res['Value']['Failed'][url] )
failed[lfn] = "%s %s" % ( failed[lfn], res['Value']['Failed'][url] ) if failed[lfn] else res['Value']['Failed'][url]
else:
errStr = 'No error returned from plug-in'
failed[lfn] = "%s %s" % ( failed[lfn], errStr ) if failed[lfn] else errStr
else:
successful[lfn] = res['Value']['Successful'][url]
if lfn in failed:
failed.pop( lfn )
lfnDict.pop( lfn )
gDataStoreClient.commit()
return S_OK( { 'Failed': failed, 'Successful': successful } )
def __getattr__( self, name ):
""" Forwards the equivalent Storage calls to __executeMethod"""
# We take either the equivalent name, or the name itself
self.methodName = StorageElementItem.__equivalentMethodNames.get( name, None )
if self.methodName:
return self.__executeMethod
raise AttributeError( "StorageElement does not have a method '%s'" % name )
def addAccountingOperation( self, lfns, startDate, elapsedTime, storageParameters, callRes ):
"""
Generates a DataOperation accounting if needs to be, and adds it to the DataStore client cache
:param lfns : list of lfns on which we attempted the operation
:param startDate : datetime, start of the operation
:param elapsedTime : time (seconds) the operation took
:param storageParameters : the parameters of the plugins used to perform the operation
:param callRes : the return of the method call, S_OK or S_ERROR
The operation is generated with the OperationType "se.methodName"
The TransferSize and TransferTotal for directory methods actually take into
        account the files inside the directory, and not the number of directories given
as parameter
"""
if self.methodName not in ( self.readMethods + self.writeMethods + self.removeMethods ):
return
baseAccountingDict = {}
baseAccountingDict['OperationType'] = 'se.%s' % self.methodName
baseAccountingDict['User'] = getProxyInfo().get( 'Value', {} ).get( 'username', 'unknown' )
baseAccountingDict['RegistrationTime'] = 0.0
baseAccountingDict['RegistrationOK'] = 0
baseAccountingDict['RegistrationTotal'] = 0
# if it is a get method, then source and destination of the transfer should be inverted
if self.methodName in ( 'putFile', 'getFile' ):
baseAccountingDict['Destination'] = siteName()
baseAccountingDict[ 'Source'] = self.name
else:
baseAccountingDict['Destination'] = self.name
baseAccountingDict['Source'] = siteName()
baseAccountingDict['TransferTotal'] = 0
baseAccountingDict['TransferOK'] = 0
baseAccountingDict['TransferSize'] = 0
baseAccountingDict['TransferTime'] = 0.0
baseAccountingDict['FinalStatus'] = 'Successful'
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( baseAccountingDict )
oDataOperation.setStartTime( startDate )
oDataOperation.setEndTime( startDate + datetime.timedelta( seconds = elapsedTime ) )
oDataOperation.setValueByKey( 'TransferTime', elapsedTime )
oDataOperation.setValueByKey( 'Protocol', storageParameters.get( 'Protocol', 'unknown' ) )
if not callRes['OK']:
# Everything failed
oDataOperation.setValueByKey( 'TransferTotal', len( lfns ) )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
else:
succ = callRes.get( 'Value', {} ).get( 'Successful', {} )
failed = callRes.get( 'Value', {} ).get( 'Failed', {} )
totalSize = 0
# We don't take len(lfns) in order to make two
# separate entries in case of few failures
totalSucc = len( succ )
if self.methodName in ( 'putFile', 'getFile' ):
# putFile and getFile return for each entry
# in the successful dir the size of the corresponding file
totalSize = sum( succ.values() )
elif self.methodName in ( 'putDirectory', 'getDirectory' ):
# putDirectory and getDirectory return for each dir name
        # a dictionary with the keys 'Files' and 'Size'
totalSize = sum( val.get( 'Size', 0 ) for val in succ.values() if isinstance( val, dict ) )
totalSucc = sum( val.get( 'Files', 0 ) for val in succ.values() if isinstance( val, dict ) )
oDataOperation.setValueByKey( 'TransferOK', len( succ ) )
oDataOperation.setValueByKey( 'TransferSize', totalSize )
oDataOperation.setValueByKey( 'TransferTotal', totalSucc )
oDataOperation.setValueByKey( 'TransferOK', totalSucc )
if callRes['Value']['Failed']:
oDataOperationFailed = copy.deepcopy( oDataOperation )
oDataOperationFailed.setValueByKey( 'TransferTotal', len( failed ) )
oDataOperationFailed.setValueByKey( 'TransferOK', 0 )
oDataOperationFailed.setValueByKey( 'TransferSize', 0 )
oDataOperationFailed.setValueByKey( 'FinalStatus', 'Failed' )
accRes = gDataStoreClient.addRegister( oDataOperationFailed )
if not accRes['OK']:
self.log.error( "Could not send failed accounting report", accRes['Message'] )
accRes = gDataStoreClient.addRegister( oDataOperation )
if not accRes['OK']:
self.log.error( "Could not send accounting report", accRes['Message'] )
StorageElement = StorageElementCache()
| gpl-3.0 | 3,227,499,265,825,615,000 | 40.34965 | 141 | 0.621681 | false | 4.02017 | false | false | false |
lukasmartinelli/py14 | py14/scope.py | 1 | 2384 | import ast
from contextlib import contextmanager
def add_scope_context(node):
"""Provide to scope context to all nodes"""
return ScopeTransformer().visit(node)
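# Example usage (an illustrative sketch, not executed here):
#
#   tree = ast.parse("def f():\n    x = 1\n    return x")
#   tree = add_scope_context(tree)
#   # every node now carries a `scopes` ScopeList; e.g. for the Return node the
#   # innermost scope is the FunctionDef of `f`.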
class ScopeMixin(object):
"""
Adds a scope property with the current scope (function, module)
a node is part of.
"""
scopes = []
@contextmanager
def enter_scope(self, node):
if self._is_scopable_node(node):
self.scopes.append(node)
yield
self.scopes.pop()
else:
yield
@property
def scope(self):
try:
return self.scopes[-1]
except IndexError:
return None
def _is_scopable_node(self, node):
scopes = [ast.Module, ast.FunctionDef, ast.For, ast.If, ast.With]
return len([s for s in scopes if isinstance(node, s)]) > 0
class ScopeList(list):
"""
Wraps around list of scopes and provides find method for finding
the definition of a variable
"""
def find(self, lookup):
"""Find definition of variable lookup."""
def is_match(var):
return ((isinstance(var, ast.alias) and var.name == lookup) or
(isinstance(var, ast.Name) and var.id == lookup))
def find_definition(scope, var_attr="vars"):
for var in getattr(scope, var_attr):
if is_match(var):
return var
for scope in self:
defn = find_definition(scope)
if not defn and hasattr(scope, "body_vars"):
defn = find_definition(scope, "body_vars")
if not defn and hasattr(scope, "orelse_vars"):
defn = find_definition(scope, "orelse_vars")
if defn:
return defn
def find_import(self, lookup):
for scope in reversed(self):
if hasattr(scope, "imports"):
for imp in scope.imports:
if imp.name == lookup:
return imp
class ScopeTransformer(ast.NodeTransformer, ScopeMixin):
"""
Adds a scope attribute to each node.
The scope contains the current scope (function, module, for loop)
a node is part of.
"""
def visit(self, node):
with self.enter_scope(node):
node.scopes = ScopeList(self.scopes)
return super(ScopeTransformer, self).visit(node)
| mit | 1,515,808,432,834,027,300 | 28.432099 | 74 | 0.570889 | false | 4.219469 | false | false | false |
faroit/loudness | python/tests/test_FrameGenerator.py | 1 | 1471 | import numpy as np
import loudness as ln
fs = 32000
N = 10000
x = np.arange(0, N)
# Input SignalBank
bufSize = 32
nEars = 2
nChannels = 1
inputBank = ln.SignalBank()
inputBank.initialize(nEars, nChannels, bufSize, int(fs))
# Frame generator
frameSize = 2048
hopSize = 32
startAtWindowCentre = True
frameGen = ln.FrameGenerator(frameSize, hopSize, startAtWindowCentre)
frameGen.initialize(inputBank)
outputBank = frameGen.getOutput()
nBlocks = int(x.size / bufSize)
if startAtWindowCentre:
nProcessedBlocks = int(nBlocks - 0.5 * frameSize / hopSize + 1)
else:
nProcessedBlocks = int(nBlocks - frameSize / hopSize + 1)
frames = np.zeros((nEars, nProcessedBlocks, frameSize))
frameIndex = 0
for block in range(nBlocks):
# Update input bank
idx = block * bufSize
inputBank.setSignal(0, 0, x[idx:idx + bufSize])
inputBank.setSignal(1, 0, x[idx:idx + bufSize])
# process it
frameGen.process(inputBank)
# get output
if(outputBank.getTrig()):
frames[:, frameIndex, :] = outputBank.getSignals().reshape((2, frameSize))
frameIndex += 1
# Check frames are correct
if startAtWindowCentre:
x = np.hstack((np.zeros(np.ceil((frameSize - 1) / 2.0)), x))
for ear in range(nEars):
for i, frame in enumerate(frames[ear]):
start = i * hopSize
if all(frame == x[start:start + frameSize]):
print("Frame number %d correct" % i)
else:
print("Frame number %d incorrect" % i)
| gpl-3.0 | -7,065,117,697,718,872,000 | 25.745455 | 82 | 0.674371 | false | 3.149893 | false | false | false |
robinkraft/cloudless | src/cloudless/train/predict.py | 1 | 7335 | import os
import caffe
import numpy as np
import plyvel
import skimage
from caffe_pb2 import Datum
import constants
def predict(image_path):
"""
Takes a single image, and makes a prediction whether it has a cloud or not.
"""
print "Generating prediction for %s..." % image_path
net, transformer = _initialize_caffe()
im = caffe.io.load_image(image_path)
prob = _predict_image(im, net, transformer)
print "Probability this image has a cloud: {0:.2f}%".format(prob)
def test_validation():
"""
Takes validation images and runs them through a trained model to see how
well they do. Generates statistics like precision and recall, F1, and a confusion matrix,
in order to gauge progress.
"""
print "Generating predictions for validation images..."
validation_data = _load_validation_data()
target_details = _run_through_caffe(validation_data)
statistics = _calculate_positives_negatives(target_details)
accuracy = _calculate_accuracy(statistics)
precision = _calculate_precision(statistics)
recall = _calculate_recall(statistics)
f1 = _calculate_f1(precision, recall)
# TODO: Write these out to a file as well as the screen.
results = ""
results += "\n"
results += "\nStatistics on validation dataset using threshold %f:" % constants.THRESHOLD
results += "\n\tAccuracy: {0:.2f}%".format(accuracy)
results += "\n\tPrecision: %.2f" % precision
results += "\n\tRecall: %.2f" % recall
results += "\n\tF1 Score: %.2f" % f1
results += "\n"
results += _print_confusion_matrix(statistics)
print results
with open(constants.OUTPUT_LOG_PREFIX + ".statistics.txt", "w") as f:
f.write(results)
def _load_validation_data():
"""
Loads all of our validation data from our leveldb database, producing unrolled numpy input
vectors ready to test along with their correct, expected target values.
"""
print "\tLoading validation data..."
input_vectors = []
expected_targets = []
db = plyvel.DB(constants.VALIDATION_FILE)
for key, value in db:
datum = Datum()
datum.ParseFromString(value)
data = np.fromstring(datum.data, dtype=np.uint8)
data = np.reshape(data, (3, constants.HEIGHT, constants.WIDTH))
# Move the color channel to the end to match what Caffe wants.
data = np.swapaxes(data, 0, 2) # Swap channel with width.
data = np.swapaxes(data, 0, 1) # Swap width with height, to yield final h x w x channel.
input_vectors.append(data)
expected_targets.append(datum.label)
db.close()
print "\t\tValidation data has %d images" % len(input_vectors)
return {
"input_vectors": np.asarray(input_vectors),
"expected_targets": np.asarray(expected_targets)
}
def _initialize_caffe():
"""
Initializes Caffe to prepare to run some data through the model for inference.
"""
caffe.set_mode_gpu()
net = caffe.Net(constants.DEPLOY_FILE, constants.WEIGHTS_FINETUNED, caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
# PIL.Image loads the data with the channel last.
# TODO: Think through whether these should be BGR during training and validation.
transformer.set_transpose("data", (2, 0, 1))
# Mean pixel.
transformer.set_mean("data", np.load(constants.TRAINING_MEAN_PICKLE).mean(1).mean(1))
# The reference model operates on images in [0, 255] range instead of [0, 1].
transformer.set_raw_scale("data", 255)
# The reference model has channels in BGR order instead of RGB.
transformer.set_channel_swap("data", (2, 1, 0))
net.blobs["data"].reshape(1, 3, constants.INFERENCE_HEIGHT, constants.INFERENCE_WIDTH)
return (net, transformer)
def _run_through_caffe(validation_data):
"""
Runs our validation images through Caffe.
"""
print "\tInitializing Caffe..."
net, transformer = _initialize_caffe()
print "\tComputing probabilities using Caffe..."
results = []
for idx in range(len(validation_data["input_vectors"])):
im = validation_data["input_vectors"][idx]
prob = _predict_image(im, net, transformer)
expected_target = validation_data["expected_targets"][idx]
predicted_target = 0
if prob >= constants.THRESHOLD:
predicted_target = 1
results.append({
"expected_target": expected_target,
"predicted_target": predicted_target
})
return results
def _predict_image(im, net, transformer):
"""
Given a caffe.io.load_image, returns the probability that it contains a cloud.
"""
net.blobs["data"].data[...] = transformer.preprocess("data", im)
out = net.forward()
probs = out["prob"][0]
prob_cloud = probs[1] * 100.0
return prob_cloud
def _calculate_positives_negatives(target_details):
"""
Takes expected and actual target values, generating true and false positives and negatives,
including the actual correct # of positive and negative values.
"""
true_positive = 0
true_negative = 0
false_negative = 0
false_positive = 0
actual_positive = 0
actual_negative = 0
for idx in range(len(target_details)):
predicted_target = target_details[idx]["predicted_target"]
expected_target = target_details[idx]["expected_target"]
if expected_target == 1:
actual_positive = actual_positive + 1
else:
actual_negative = actual_negative + 1
if predicted_target == 1 and expected_target == 1:
true_positive = true_positive + 1
elif predicted_target == 0 and expected_target == 0:
true_negative = true_negative + 1
elif predicted_target == 1 and expected_target == 0:
false_positive = false_positive + 1
elif predicted_target == 0 and expected_target == 1:
false_negative = false_negative + 1
return {
"true_positive": float(true_positive),
"false_positive": float(false_positive),
"actual_positive": float(actual_positive),
"true_negative": float(true_negative),
"false_negative": float(false_negative),
"actual_negative": float(actual_negative),
}
def _calculate_accuracy(s):
top = (s["true_positive"] + s["true_negative"])
bottom = (s["actual_positive"] + s["actual_negative"])
return (top / bottom) * 100.0
def _calculate_precision(s):
return s["true_positive"] / (s["true_positive"] + s["false_positive"])
def _calculate_recall(s):
return s["true_positive"] / (s["true_positive"] + s["false_negative"])
def _calculate_f1(precision, recall):
return 2.0 * ((precision * recall) / (precision + recall))
def _print_confusion_matrix(s):
results = ""
results += "\nConfusion matrix:"
results += "\n\t\t\t\tPositive\t\tNegative"
results += "\nPositive (%d)\t\t\tTrue Positive (%d)\tFalse Positive (%d)" % \
(s["actual_positive"], s["true_positive"], s["false_positive"])
results += "\nNegative (%d)\t\t\tFalse Negative (%d)\tTrue Negative (%d)" % \
(s["actual_negative"], s["false_negative"], s["true_negative"])
return results
| apache-2.0 | -1,540,303,570,774,137,900 | 33.763033 | 96 | 0.647307 | false | 3.79462 | false | false | false |
praveenkumar/ansible | lib/ansible/plugins/strategies/linear.py | 1 | 14325 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
'''
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
'''
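        # For illustration (hypothetical hosts, not literal output): with three hosts
        # where only host2 still has a task in the current block, this returns
        #   [(host1, noop_task), (host2, <real task>), (host3, noop_task)]
        # so every host stays aligned on the same position in the play.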
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
display.debug("building list of next tasks for hosts")
for host in hosts:
host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
display.debug("done building task lists")
num_setups = 0
num_tasks = 0
num_rescue = 0
num_always = 0
lowest_cur_block = len(iterator._blocks)
display.debug("counting tasks in each state of execution")
for (k, v) in iteritems(host_tasks):
if v is None:
continue
(s, t) = v
if t is None:
continue
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block
if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
elif s.run_state == PlayIterator.ITERATING_TASKS:
num_tasks += 1
elif s.run_state == PlayIterator.ITERATING_RESCUE:
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
display.debug("done counting tasks in each state of execution")
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
This helper returns the task for all hosts in the requested
state, otherwise they get a noop dummy task. This also advances
the state of the host, since the given states are determined
while using peek=True.
'''
# we return the values in the order they were originally
# specified in the given hosts array
rvals = []
display.debug("starting to advance hosts")
for host in hosts:
host_state_task = host_tasks[host.name]
if host_state_task is None:
continue
(s, t) = host_state_task
if t is None:
continue
if s.run_state == cur_state and s.cur_block == cur_block:
new_t = iterator.get_next_task_for_host(host)
rvals.append((host, t))
else:
rvals.append((host, noop_task))
display.debug("done advancing hosts to next task")
return rvals
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
display.debug("advancing hosts in ITERATING_SETUP")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
# if any hosts are in ITERATING_TASKS, return the next normal
# task for these hosts, while all other hosts get a noop
if num_tasks:
display.debug("advancing hosts in ITERATING_TASKS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
# if any hosts are in ITERATING_RESCUE, return the next rescue
# task for these hosts, while all other hosts get a noop
if num_rescue:
display.debug("advancing hosts in ITERATING_RESCUE")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
# if any hosts are in ITERATING_ALWAYS, return the next always
# task for these hosts, while all other hosts get a noop
if num_always:
display.debug("advancing hosts in ITERATING_ALWAYS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
# at this point, everything must be ITERATING_COMPLETE, so we
# return None for all hosts in the list
display.debug("all hosts are done, so returning None's for all hosts")
return [(host, None) for host in hosts]
def run(self, iterator, play_context):
'''
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
'''
        # iterate over each task, while there is one left to run
result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
try:
self._display.debug("getting the remaining hosts for this loop")
hosts_left = self._inventory.get_hosts(iterator._play.hosts)
self._display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_results = []
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
# skip control
skip_rest = False
choose_step = True
for (host, task) in host_tasks:
if not task:
continue
run_once = False
work_to_do = True
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
action = action_loader.get(task.action, class_only=True)
if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
self._display.debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':
self._execute_meta(task, play_context, iterator)
else:
# handle step if needed, skip meta actions as they are used internally
if self._step and choose_step:
if self._take_step(task):
choose_step = False
else:
skip_rest = True
break
self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
self._display.debug("done getting variables")
if not callback_sent:
display.debug("sending task start callback, copying the task so we can template it temporarily")
saved_name = task.name
display.debug("done copying, going to template now")
try:
task.name = unicode(templar.template(task.name, fail_on_undefined=False))
display.debug("done templating")
except:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason")
pass
display.debug("here goes the callback...")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
task.name = saved_name
callback_sent = True
display.debug("sending task start callback")
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
results = self._process_pending_results(iterator)
host_results.extend(results)
# if we're bypassing the host loop, break out now
if run_once:
break
# go to next host/task group
if skip_rest:
continue
self._display.debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
if not work_to_do and len(iterator.get_failed_hosts()) > 0:
self._display.debug("out of hosts to run on")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
except AnsibleError as e:
return False
if len(included_files) > 0:
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._display.warning(str(e))
continue
for new_block in new_blocks:
noop_block = Block(parent_block=task._block)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
for host in hosts_left:
if host in included_file._hosts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
final_block = new_block.filter_tagged_tasks(play_context, task_vars)
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
self._display.debug("results queue empty")
except (IOError, EOFError) as e:
self._display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return False
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
| gpl-3.0 | 7,524,632,294,346,595,000 | 45.209677 | 179 | 0.548063 | false | 4.776592 | false | false | false |
The-Compiler/qutebrowser | tests/helpers/messagemock.py | 1 | 2576 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""pytest helper to monkeypatch the message module."""
import logging
import attr
import pytest
from qutebrowser.utils import usertypes, message
@attr.s
class Message:
"""Information about a shown message."""
level = attr.ib()
text = attr.ib()
class MessageMock:
"""Helper object for message_mock.
Attributes:
Message: A object representing a message.
messages: A list of Message objects.
"""
def __init__(self):
self.messages = []
def _record_message(self, level, text):
log_levels = {
usertypes.MessageLevel.error: logging.ERROR,
usertypes.MessageLevel.info: logging.INFO,
usertypes.MessageLevel.warning: logging.WARNING,
}
log_level = log_levels[level]
logging.getLogger('messagemock').log(log_level, text)
self.messages.append(Message(level, text))
def getmsg(self, level=None):
"""Get the only message in self.messages.
Raises ValueError if there are multiple or no messages.
Args:
level: The message level to check against, or None.
"""
assert len(self.messages) == 1
msg = self.messages[0]
if level is not None:
assert msg.level == level
return msg
def patch(self):
"""Start recording messages."""
message.global_bridge.show_message.connect(self._record_message)
message.global_bridge._connected = True
def unpatch(self):
"""Stop recording messages."""
message.global_bridge.show_message.disconnect(self._record_message)
@pytest.fixture
def message_mock():
"""Fixture to get a MessageMock."""
mmock = MessageMock()
mmock.patch()
yield mmock
mmock.unpatch()
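# Typical use in a test (a sketch; the message text is illustrative):
#
#   def test_something(message_mock):
#       message.error("oops")
#       msg = message_mock.getmsg(usertypes.MessageLevel.error)
#       assert msg.text == "oops"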
| gpl-3.0 | 6,713,423,093,732,650,000 | 27 | 75 | 0.667314 | false | 4.075949 | false | false | false |
mholgatem/GPIOnext | config/menus.py | 1 | 4183 | import time
from config.constants import *
from config import SQL
from cursesmenu import *
from cursesmenu.items import *
import curses
'''
---------------------------------------------------------
This script handles menu navigation
RETURNS: dictionary containing device name,
            number of buttons, number of axes
---------------------------------------------------------
'''
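# Illustrative shape of the returned dictionary (the device name below is a made-up
# example; real button entries come straight from constants.BUTTON_LIST / KEY_LIST):
#   {'name': 'Joypad', 'axisCount': 1, 'buttons': [<entries from BUTTON_LIST>]}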
GOTO_MAIN = -999
def close():
if CursesMenu.stdscr != None:
CursesMenu().exit()
def clearPreviousMenu():
# clear any previous menus
if CursesMenu.stdscr != None:
CursesMenu.stdscr.erase()
def showMainMenu():
global currentDevice
clearPreviousMenu()
currentDevice = {'name': None,
'axisCount': 0,
'buttons': 0}
options = DEVICE_LIST + ['Clear Device']
choice = SelectionMenu.get_selection(
strings = options,
title = 'GPIOnext Config',
subtitle = 'Which virtual device do you want to CONFIGURE?'
)
try:
currentDevice['name'] = options [ choice ]
except IndexError: # user selected 'Exit'
return None
if currentDevice['name'] == 'Clear Device':
return clearDevice()
elif currentDevice['name']== 'Keyboard':
title = 'Select the keys that you want to assign'
return selectFromList( KEY_LIST, title )
elif currentDevice['name'] == 'Commands':
return currentDevice
else:
return getJoyAxisCount()
def clearDevice():
clearPreviousMenu()
options = DEVICE_LIST + ['← Return to Main Menu']
choice = SelectionMenu.get_selection(
strings = options,
title = 'CLEAR DEVICE',
subtitle = 'Remove configs for which device?',
exit_option = False
)
currentDevice['name'] = options[choice]
if currentDevice['name'] == '← Return to Main Menu':
return GOTO_MAIN
else:
clearPreviousMenu()
print( 'Deleting config files for {0}...'.format( currentDevice['name'] ))
SQL.deleteDevice( currentDevice['name'] )
time.sleep(1)
return clearDevice()
def getJoyAxisCount( ):
global currentDevice
clearPreviousMenu()
axisList = ['0','1','2','3','4','← Return to Main Menu']
dpadCount = SelectionMenu.get_selection(
strings = axisList,
title = 'Configuring {0}'.format( currentDevice['name'] ),
subtitle = 'How many Dpads/Joysticks does this controller have?',
exit_option = False
)
currentDevice['axisCount'] = dpadCount
# if Return to Main Menu
if dpadCount == 5:
return GOTO_MAIN
else:
title = 'Select the buttons that you want to assign'
return selectFromList( BUTTON_LIST, title)
def editCommandButton():
global currentDevice
cmdList = SQL.getDeviceRaw( 'Commands' )
entries = [ '• Edit Command: {0}'.format( x['name'] ) for x in cmdList ]
entries.insert( 0, '• Add New Command' )
entries.append( '← Return to Main Menu' )
edit = 2
while edit == 2:
clearPreviousMenu()
choice = SelectionMenu.get_selection(
strings = entries,
title = 'Configuring {0}'.format( currentDevice['name'] ),
subtitle = 'Select a command to edit',
exit_option = False
)
if choice == 0:
return ( 'EDIT', {'command':'', 'pins': None, 'id': None, 'device': None, 'name': '', 'type':'COMMAND' } )
elif choice == len( entries ) - 1:
return GOTO_MAIN
clearPreviousMenu()
edit = SelectionMenu.get_selection(
strings = ['Edit', 'Delete', '← Go Back' ],
title = 'Configuring {0}'.format( cmdList[ choice - 1 ]['name'] ),
subtitle = 'Edit or Delete this command?',
exit_option = False
)
edit = 'EDIT' if edit == 0 else 'DELETE'
return ( edit, cmdList[ choice - 1 ] )
def selectFromList( currentList, title ):
global currentDevice
buttonNames = [ b[0] for b in currentList ]
buttonNames.append( '← Return to Main Menu' )
# returns list of buttons to configure
choice = MultiSelect.get_selection(
strings = buttonNames,
title = title,
exit_option = False
)
# return to main menu
if choice == [-1]:
return GOTO_MAIN
chosenButtons = [b for b in currentList if b[0] in choice]
currentDevice['buttons'] = chosenButtons
return currentDevice
| mit | -1,539,677,668,419,638,800 | 27.346939 | 109 | 0.63067 | false | 3.423993 | true | false | false |
mahim97/zulip | zerver/management/commands/set_default_streams.py | 8 | 1912 |
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any, Dict, Text
from zerver.lib.actions import set_default_streams
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Set default streams for a realm
Users created under this realm will start out with these streams. This
command is not additive: if you re-run it on a realm with a different
set of default streams, those will be the new complete set of default
streams.
For example:
./manage.py set_default_streams --realm=foo --streams=foo,bar,baz
./manage.py set_default_streams --realm=foo --streams="foo,bar,baz with space"
./manage.py set_default_streams --realm=foo --streams=
"""
# Fix support for multi-line usage
def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
parser = super().create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('-s', '--streams',
dest='streams',
type=str,
help='A comma-separated list of stream names.')
self.add_realm_args(parser, True)
def handle(self, **options: str) -> None:
realm = self.get_realm(options)
if options["streams"] is None:
print("Please provide a default set of streams (which can be empty,\
with `--streams=`).", file=sys.stderr)
exit(1)
assert realm is not None # Should be ensured by parser
stream_dict = {
stream.strip(): {"description": stream.strip(), "invite_only": False}
for stream in options["streams"].split(",")
} # type: Dict[Text, Dict[Text, Any]]
set_default_streams(realm, stream_dict)
| apache-2.0 | -3,494,797,348,858,399,000 | 36.490196 | 81 | 0.648536 | false | 4.094218 | false | false | false |
endlessm/chromium-browser | third_party/shaderc/src/glslc/test/option_dash_M.py | 3 | 33038 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
import sys
from environment import File, Directory
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader
from glslc_test_framework import GlslCTest
MINIMAL_SHADER = '#version 140\nvoid main() {}'
EMPTY_SHADER_IN_CURDIR = Directory('.', [File('shader.vert', MINIMAL_SHADER)])
EMPTY_SHADER_IN_SUBDIR = Directory('subdir',
[File('shader.vert', MINIMAL_SHADER)])
def process_test_specified_dependency_info_rules(test_specified_rules):
"""A helper function to process the expected dependency info rules
specified in tests before checking the actual dependency rule output.
    This is required because the filename and path of temporary files created
    through FileShader are unknown at the time the expected dependency info
    rules are declared.
    Note this function processes the given rule list in-place.
"""
for rule in test_specified_rules:
# If the 'target' value is not a hard-coded file name but a
# FileShader, we need its full path, append extension to it and
# strip the directory component from it to get the complete target
# name.
if isinstance(rule['target'], FileShader):
rule['target'] = rule['target'].filename
if 'target_extension' in rule:
if rule['target_extension'] is not None:
rule['target'] = rule['target'] + rule['target_extension']
rule.pop('target_extension')
rule['target'] = os.path.basename(rule['target'])
# The dependency set may have FileShader too, we need to replace
# them with their absolute paths.
dependent_file_name_set = set()
for dependent_file in rule['dependency']:
if isinstance(dependent_file, FileShader):
dependent_file_name_set.add(dependent_file.filename)
else:
dependent_file_name_set.add(dependent_file)
rule['dependency'] = dependent_file_name_set
def parse_text_rules(text_lines):
""" A helper function to read text lines and construct and returns a list of
dependency rules which can be used for comparison.
The list is built with the text order. Each rule is described in the
following way:
{'target': <target name>, 'dependency': <set of dependent filenames>}
"""
rules = []
for line in text_lines:
if line.strip() == "":
continue
rule = {'target': line.split(': ')[0].strip(),
'dependency': set(line.split(': ')[-1].strip().split(' '))}
rules.append(rule)
return rules
class DependencyInfoStdoutMatch(GlslCTest):
"""Mixin class for tests that can expect dependency info in Stdout.
To mix in this class, the subclass needs to provide
dependency_rules_expected as a list of dictionaries, each dictionary
describes one expected make rule for a target file. A expected rule should
be specified in the following way:
rule = {'target': <target name>,
'target_extension': <.spv, .spvasm or None>,
'dependency': <dependent file names>}
The 'target_extension' field is optional, its value will be appended to
'target' to get complete target name.
    'dependency_rules_expected' itself is a list of such rules, and the order
    of the rules matters.
"""
def check_stdout_dependency_info(self, status):
if not status.stdout:
return False, 'Expect dependency rules on stdout'
if sys.version_info[0] == 2:
rules = parse_text_rules(status.stdout.decode('utf-8').split('\n'))
elif sys.version_info[0] == 3:
rules = parse_text_rules(str(status.stdout,
encoding='utf-8',
errors='ignore').split('\n'))
process_test_specified_dependency_info_rules(
self.dependency_rules_expected)
if self.dependency_rules_expected != rules:
return False, ('Incorrect dependency info:\n{ac_rules}\n'
'Expected:\n{ex_rules}\n'
'Stdout output:\n{ac_stdout}\n'.format(
ac_rules=rules,
ex_rules=self.dependency_rules_expected,
ac_stdout=status.stdout))
return True, ''
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathNoInclude(DependencyInfoStdoutMatch):
"""Tests -M with single input file which doesn't contain #include and is
represented in relative path.
e.g. glslc -M shader.vert
=> shader.vert.spv: shader.vert
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-M', 'shader.vert']
dependency_rules_expected = [{'target': "shader.vert.spv",
'dependency': {"shader.vert"}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputAbsolutePathNoInclude(DependencyInfoStdoutMatch):
"""Tests -M with single input file which doesn't contain #include and is
represented in absolute path.
e.g. glslc -M /usr/local/shader.vert
=> shader.vert.spv: /usr/local/shader.vert
"""
shader = FileShader(MINIMAL_SHADER, '.vert')
glslc_args = ['-M', shader]
dependency_rules_expected = [{'target': shader,
'target_extension': '.spv',
'dependency': {shader}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithInclude(
DependencyInfoStdoutMatch):
"""Tests -M with single input file which does contain #include and is
represented in relative path.
e.g. glslc -M a.vert
=> a.vert.spv: a.vert b.vert
"""
environment = Directory('.', [
File('a.vert', '#version 140\n#include "b.vert"\nvoid main(){}\n'),
File('b.vert', 'void foo(){}\n'),
])
glslc_args = ['-M', 'a.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert', 'b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithIncludeSubdir(
DependencyInfoStdoutMatch):
"""Tests -M with single input file which does #include another file in a
subdirectory of current directory and is represented in relative path.
e.g. glslc -M a.vert
=> a.vert.spv: a.vert include/b.vert
"""
environment = Directory('.', [
File('a.vert', ('#version 140\n#include "include/b.vert"'
'\nvoid main(){}\n')),
Directory('include', [File('b.vert', 'void foo(){}\n')]),
])
glslc_args = ['-M', 'a.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert', 'include/b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithDashI(DependencyInfoStdoutMatch):
"""Tests -M with single input file works with -I option. The #include
directive does not specify 'include/' for the file to be include.
e.g. glslc -M a.vert -I include
=> a.vert.spv: a.vert include/b.vert
"""
environment = Directory('.', [
File('a.vert', ('#version 140\n#include "b.vert"'
'\nvoid main(){}\n')),
Directory('include', [File('b.vert', 'void foo(){}\n')]),
])
glslc_args = ['-M', 'a.vert', '-I', 'include']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert', 'include/b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithNestedInclude(
DependencyInfoStdoutMatch):
"""Tests -M with single input file under nested #include case. The input file
is represented in relative path.
e.g. glslc -M a.vert
=> a.vert.spv: a.vert b.vert c.vert
"""
environment = Directory('.', [
File('a.vert', '#version 140\n#include "b.vert"\nvoid main(){}\n'),
File('b.vert', 'void foo(){}\n#include "c.vert"\n'),
File('c.vert', 'void bar(){}\n'),
])
glslc_args = ['-M', 'a.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency':
{'a.vert', 'b.vert', 'c.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMultipleInputRelativePathNoInclude(
DependencyInfoStdoutMatch):
"""Tests -M with multiple input file which don't contain #include and are
represented in relative paths.
e.g. glslc -M a.vert b.vert
=> a.vert.spv: a.vert
b.vert.spv: b.vert
"""
environment = Directory('.', [
File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER),
])
glslc_args = ['-M', 'a.vert', 'b.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert'}},
{'target': 'b.vert.spv',
'dependency': {'b.vert'}}, ]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMultipleInputAbsolutePathNoInclude(
DependencyInfoStdoutMatch):
"""Tests -M with single input file which doesn't contain #include and is
represented in absolute path.
e.g. glslc -M /usr/local/a.vert /usr/local/b.vert
=> a.vert.spv: /usr/local/a.vert
b.vert.spv: /usr/local/b.vert
"""
shader_a = FileShader(MINIMAL_SHADER, '.vert')
shader_b = FileShader(MINIMAL_SHADER, '.vert')
glslc_args = ['-M', shader_a, shader_b]
dependency_rules_expected = [{'target': shader_a,
'target_extension': '.spv',
'dependency': {shader_a}},
{'target': shader_b,
'target_extension': '.spv',
'dependency': {shader_b}}, ]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDashCapMT(DependencyInfoStdoutMatch):
"""Tests -MT works with -M. User can specify the target object name in the
generated dependency info.
e.g. glslc -M shader.vert -MT target
=> target: shader.vert
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-M', 'shader.vert', '-MT', 'target']
dependency_rules_expected = [{'target': 'target',
'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMInputAbsolutePathWithInclude(DependencyInfoStdoutMatch):
"""Tests -M have included files represented in absolute paths when the input
file is represented in absolute path.
E.g. Assume a.vert has '#include "b.vert"'
glslc -M /usr/local/a.vert
=> a.vert.spv: /usr/local/a.vert /usr/local/b.vert
"""
environment = Directory('.', [File('b.vert', 'void foo(){}\n')])
shader_main = FileShader(
'#version 140\n#include "b.vert"\nvoid main(){}\n', '.vert')
glslc_args = ['-M', shader_main]
dependency_rules_expected = [{
'target': shader_main,
'target_extension': '.spv',
'dependency': {shader_main}
# The dependency here is not complete. we can not get the absolute path
# of b.vert here. It will be added in check_stdout_dependency_info()
}]
def check_stdout_dependency_info(self, status):
# Add the absolute path of b.vert to the dependency set
self.dependency_rules_expected[0]['dependency'].add(os.path.dirname(
self.shader_main.filename) + '/b.vert')
return DependencyInfoStdoutMatch.check_stdout_dependency_info(self,
status)
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputAbsolutePathWithIncludeSubdir(
DependencyInfoStdoutMatch):
"""Tests -M with single input file which does #include another file in a
subdirectory of current directory and is represented in absolute path.
e.g. glslc -M /usr/local/a.vert
=> a.vert.spv: /usr/local/a.vert /usr/local/include/b.vert
"""
environment = Directory('.', [
Directory('include', [File('b.vert', 'void foo(){}\n')]),
])
shader_main = FileShader('#version 140\n#include "include/b.vert"\n',
'.vert')
glslc_args = ['-M', shader_main]
dependency_rules_expected = [{
'target': shader_main,
'target_extension': '.spv',
'dependency': {shader_main}
# The dependency here is not complete. we can not get the absolute
# path of include/b.vert here. It will be added in
# check_stdout_dependency_info()
}]
def check_stdout_dependency_info(self, status):
# Add the absolute path of include/b.vert to the dependency set
self.dependency_rules_expected[0]['dependency'].add(os.path.dirname(
self.shader_main.filename) + '/include/b.vert')
return DependencyInfoStdoutMatch.check_stdout_dependency_info(self,
status)
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMOverridesOtherModes(DependencyInfoStdoutMatch):
"""Tests -M overrides other compiler mode options, includeing -E, -c and -S.
"""
environment = Directory('.', [
File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER),
])
glslc_args = ['-M', '-E', '-c', '-S', 'a.vert', 'b.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert'}},
{'target': 'b.vert.spv',
'dependency': {'b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMEquivalentToCapM(DependencyInfoStdoutMatch):
"""Tests that -MM behaves as -M.
e.g. glslc -MM shader.vert
=> shader.vert.spv: shader.vert
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MM', 'shader.vert']
dependency_rules_expected = [{'target': 'shader.vert.spv',
'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMImpliesDashCapE(DependencyInfoStdoutMatch,
expect.NoOutputOnStderr):
"""Tests that -M implies -E, a .glsl file without an explict stage should
not generate an error.
e.g. glslc -M shader.glsl
=> shader.spv: shader.glsl
<no error message should be generated>
"""
environment = Directory('.', [File('shader.glsl', MINIMAL_SHADER)])
glslc_args = ['-M', 'shader.glsl']
dependency_rules_expected = [{'target': 'shader.spv',
'dependency': {'shader.glsl'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMImpliesDashW(DependencyInfoStdoutMatch,
expect.NoOutputOnStderr):
"""Tests that -M implies -w, a deprecated attribute should not generate
    a warning message.
e.g. glslc -M shader.vert
=> shader.vert.spv: shader.vert
<no warning message should be generated>
"""
environment = Directory('.', [File(
'shader.vert', """#version 400
layout(location=0) attribute float x;
void main() {}""")])
glslc_args = ['-M', 'shader.vert']
dependency_rules_expected = [{'target': 'shader.vert.spv',
'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMImpliesDashCapE(DependencyInfoStdoutMatch,
expect.NoOutputOnStderr):
"""Tests that -M implies -E, a .glsl file without an explict stage should
not generate an error.
e.g. glslc -MM shader.glsl
=> shader.spv: shader.glsl
<no error message should be generated>
"""
environment = Directory('.', [File('shader.glsl', MINIMAL_SHADER)])
glslc_args = ['-MM', 'shader.glsl']
dependency_rules_expected = [{'target': 'shader.spv',
'dependency': {'shader.glsl'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMImpliesDashW(DependencyInfoStdoutMatch,
expect.NoOutputOnStderr):
"""Tests that -MM implies -w, a deprecated attribute should not generate
    a warning message.
e.g. glslc -MM shader.vert
=> shader.vert.spv: shader.vert
<no warning message should be generated>
"""
environment = Directory('.', [File(
'shader.vert', """
#version 400
layout(location = 0) attribute float x;
void main() {}""")])
glslc_args = ['-MM', 'shader.vert']
dependency_rules_expected = [{'target': 'shader.vert.spv',
'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMD(expect.ValidFileContents, expect.ValidNamedObjectFile):
"""Tests that -MD generates dependency info file and compilation output.
e.g. glslc -MD shader.vert
=> <a.spv: valid SPIR-V object file>
=> <shader.vert.spv.d: dependency info>
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MD', 'shader.vert']
expected_object_filenames = ('a.spv', )
target_filename = 'shader.vert.spv.d'
expected_file_contents = ['shader.vert.spv: shader.vert\n']
class DependencyInfoFileMatch(GlslCTest):
"""Mixin class for tests that can expect dependency info files.
To mix in this class, subclasses need to provide dependency_info_filenames
and dependency_info_files_expected_contents which are two lists.
list dependency_info_filenames contains the dependency info file names and
list dependency_info_files_expected_contents contains the expected matching
dependency rules.
The item order of the two lists should match, which means:
dependency_info_files_expected_contents[i] should describe the
dependency rules saved in dependency_info_filenames[i]
The content of each dependency info file is described in same 'list of dict'
structure explained in class DependencyInfoStdoutMatch's doc string.
"""
def check_dependency_info_files(self, status):
dep_info_files = \
[os.path.join(status.directory,
f) for f in self.dependency_info_filenames]
for i, df in enumerate(dep_info_files):
if not os.path.isfile(df):
return False, 'Cannot find file: ' + df
try:
with open(df, 'r') as dff:
content = dff.read()
rules = parse_text_rules(content.split('\n'))
process_test_specified_dependency_info_rules(
self.dependency_info_files_expected_contents[i])
if self.dependency_info_files_expected_contents[
i] != rules:
return False, (
'Incorrect dependency info:\n{ac_rules}\n'
'Expected:\n{ex_rules}\n'
                            'Incorrect file output:\n{ac_content}\n'
                            'Incorrect dependency info file:\n{ac_file}\n'.format(
                                ac_rules=rules,
                                ex_rules=self.dependency_info_files_expected_contents[i],
                                ac_content=content,
ac_file=df))
except IOError:
return False, ('Could not open dependency info file ' + df +
' for reading')
return True, ''
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMWorksWithDashO(DependencyInfoFileMatch):
"""Tests -M works with -o option. When user specifies an output file name
with -o, the dependency info should be dumped to the user specified output
file.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-M', 'shader.vert', '-o', 'dep_info']
dependency_info_filenames = ('dep_info', )
dependency_info_files_expected_contents = []
dependency_info_files_expected_contents.append(
[{'target': 'shader.vert.spv',
'dependency': {'shader.vert'}}])
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFile(expect.ValidNamedObjectFile,
DependencyInfoFileMatch):
"""Tests that -MD generates dependency info file for multiple files.
e.g. glslc -MD a.vert b.vert -c
=> <a.vert.spv: valid SPIR-V object file>
=> <a.vert.spv.d: dependency info: "a.vert.spv: a.vert">
=> <b.vert.spv: valid SPIR-V object file>
=> <b.vert.spv.d: dependency info: "b.vert.spv: b.vert">
"""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-MD', 'a.vert', 'b.vert', '-c']
expected_object_filenames = ('a.vert.spv', 'b.vert.spv', )
dependency_info_filenames = ['a.vert.spv.d', 'b.vert.spv.d']
dependency_info_files_expected_contents = []
dependency_info_files_expected_contents.append([{'target': 'a.vert.spv',
'dependency': {'a.vert'}}
])
dependency_info_files_expected_contents.append([{'target': 'b.vert.spv',
'dependency': {'b.vert'}}
])
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFilePreprocessingOnlyMode(expect.StdoutMatch,
DependencyInfoFileMatch):
"""Tests that -MD generates dependency info file for multiple files in
preprocessing only mode.
e.g. glslc -MD a.vert b.vert -E
=> stdout: preprocess result of a.vert and b.vert
=> <a.vert.spv.d: dependency info: "a.vert.spv: a.vert">
=> <b.vert.spv.d: dependency info: "b.vert.spv: b.vert">
"""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-MD', 'a.vert', 'b.vert', '-E']
dependency_info_filenames = ['a.vert.spv.d', 'b.vert.spv.d']
dependency_info_files_expected_contents = []
dependency_info_files_expected_contents.append([{'target': 'a.vert.spv',
'dependency': {'a.vert'}}
])
dependency_info_files_expected_contents.append([{'target': 'b.vert.spv',
'dependency': {'b.vert'}}
])
expected_stdout = ("#version 140\nvoid main(){ }\n"
"#version 140\nvoid main(){ }\n")
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFileDisassemblyMode(expect.ValidNamedAssemblyFile,
DependencyInfoFileMatch):
"""Tests that -MD generates dependency info file for multiple files in
disassembly mode.
e.g. glslc -MD a.vert b.vert -S
=> <a.vert.spvasm: valid SPIR-V assembly file>
=> <a.vert.spvasm.d: dependency info: "a.vert.spvasm: a.vert">
=> <b.vert.spvasm: valid SPIR-V assembly file>
=> <b.vert.spvasm.d: dependency info: "b.vert.spvasm: b.vert">
"""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-MD', 'a.vert', 'b.vert', '-S']
expected_assembly_filenames = ('a.vert.spvasm', 'b.vert.spvasm', )
dependency_info_filenames = ['a.vert.spvasm.d', 'b.vert.spvasm.d']
dependency_info_files_expected_contents = []
dependency_info_files_expected_contents.append([{'target': 'a.vert.spvasm',
'dependency': {'a.vert'}}
])
dependency_info_files_expected_contents.append([{'target': 'b.vert.spvasm',
'dependency': {'b.vert'}}
])
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMT(expect.ValidFileContents, expect.ValidNamedObjectFile):
"""Tests that -MT generates dependency info file with specified target label.
e.g. glslc -MD shader.vert -MT target_label
=> <a.spv: valid SPIR-V object file>
=> <shader.vert.spv.d: dependency info: "target_label: shader.vert">
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MD', 'shader.vert', '-MT', 'target_label']
expected_object_filenames = ('a.spv', )
target_filename = 'shader.vert.spv.d'
expected_file_contents = ['target_label: shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMF(expect.ValidFileContents, expect.ValidNamedObjectFile):
"""Tests that -MF dumps dependency info into specified file.
e.g. glslc -MD shader.vert -MF dep_file
=> <a.spv: valid SPIR-V object file>
=> <dep_file: dependency info: "shader.vert.spv: shader.vert">
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MD', 'shader.vert', '-MF', 'dep_file']
expected_object_filenames = ('a.spv', )
target_filename = 'dep_file'
expected_file_contents = ['shader.vert.spv: shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDSpecifyOutputFileName(expect.ValidFileContents,
expect.ValidNamedObjectFile):
"""Tests that -MD has the default dependency info file name and target
label correct when -o <output_file_name> appears in the command line.
The default dependency info file name and target label should be deduced
from the linking-disabled compilation output.
e.g. glslc -MD subdir/shader.vert -c -o output
=> <./output: valid SPIR-V object file>
=> <./output.d: dependency info: "output: shader.vert">
"""
environment = EMPTY_SHADER_IN_SUBDIR
glslc_args = ['-MD', 'subdir/shader.vert', '-c', '-o', 'output']
expected_object_filenames = ('output', )
target_filename = 'output.d'
expected_file_contents = ['output: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDWithDashMFDashMTDashO(expect.ValidFileContents,
expect.ValidNamedObjectFile):
"""Tests that -MD, -MF, -MT and -o gernates dependency info file and
compilation output file correctly
e.g. glslc -MD subdir/shader.vert -c -o subdir/out -MF dep_info -MT label
=> <subdir/out: valid SPIR-V object file>
=> <dep_info: dependency info: "label: shader.vert">
"""
environment = EMPTY_SHADER_IN_SUBDIR
glslc_args = ['-MD', 'subdir/shader.vert', '-c', '-o', 'subdir/out', '-MF',
'dep_info', '-MT', 'label']
expected_object_filenames = ('subdir/out', )
target_filename = 'dep_info'
expected_file_contents = ['label: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDWithDashMFDashMTDashODisassemblyMode(
expect.ValidFileContents, expect.ValidNamedAssemblyFile):
"""Tests that -MD, -MF, -MT and -o gernates dependency info file and
compilation output file correctly in disassembly mode
e.g. glslc -MD subdir/shader.vert -s -o subdir/out -MF dep_info -MT label
=> <subdir/out: valid SPIR-V object file>
=> <dep_info: dependency info: "label: shader.vert">
"""
environment = EMPTY_SHADER_IN_SUBDIR
glslc_args = ['-MD', 'subdir/shader.vert', '-S', '-o', 'subdir/out', '-MF',
'dep_info', '-MT', 'label']
expected_assembly_filenames = ('subdir/out', )
target_filename = 'dep_info'
expected_file_contents = ['label: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorSetBothDashCapMAndDashCapMD(expect.StderrMatch):
"""Tests that when both -M (or -MM) and -MD are specified, glslc should exit
    with an error message complaining about the conflict and produce neither
    dependency info output nor compilation output. This test has -MD before
    the -M flag.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MD', '-M', 'shader.vert']
expected_stderr = ['glslc: error: both -M (or -MM) and -MD are specified. '
'Only one should be used at one time.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorSetBothDashCapMDAndDashCapM(expect.StderrMatch):
"""Tests that when both -M (or -MM) and -MD are specified, glslc should exit
    with an error message complaining about the conflict and produce neither
    dependency info output nor compilation output. This test has -M before
    the -MD flag.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-M', '-MD', 'shader.vert']
expected_stderr = ['glslc: error: both -M (or -MM) and -MD are specified. '
'Only one should be used at one time.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMFWithMultipleInputFiles(expect.StderrMatch):
"""Tests that when -MF option is specified, only one input file should be
provided."""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-MD', 'a.vert', 'b.vert', '-c', '-MF', 'dep_info']
expected_stderr = ['glslc: error: '
'to specify dependency info file name or dependency '
'info target, only one input file is allowed.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMTWithMultipleInputFiles(expect.StderrMatch):
"""Tests that when -MT option is specified, only one input file should be
provided."""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-M', 'a.vert', 'b.vert', '-c', '-MT', 'target']
expected_stderr = ['glslc: error: '
'to specify dependency info file name or dependency '
'info target, only one input file is allowed.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMFMissingDashMAndDashMD(expect.StderrMatch):
"""Tests that when only -MF is specified while -M and -MD are not specified,
    glslc should emit an error complaining that the user must specify either
-M (-MM) or -MD to generate dependency info.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MF', 'dep_info', 'shader.vert', '-c']
expected_stderr = ['glslc: error: '
'to generate dependencies you must specify either -M '
'(-MM) or -MD\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMTMissingDashMAndMDWith(expect.StderrMatch):
"""Tests that when only -MF and -MT is specified while -M and -MD are not
specified, glslc should emit an error complaining that the user must
    specify either -M (-MM) or -MD to generate dependency info.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MF', 'dep_info', '-MT', 'target', 'shader.vert', '-c']
expected_stderr = ['glslc: error: '
'to generate dependencies you must specify either -M '
'(-MM) or -MD\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorMissingDependencyInfoFileName(expect.StderrMatch):
"""Tests that dependency file name is missing when -MF is specified."""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['target', 'shader.vert', '-c', '-MF']
expected_stderr = ['glslc: error: '
'missing dependency info filename after \'-MF\'\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorMissingDependencyTargetName(expect.StderrMatch):
"""Tests that dependency target name is missing when -MT is specified."""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['target', 'shader.vert', '-c', '-MT']
expected_stderr = ['glslc: error: '
'missing dependency info target after \'-MT\'\n']
| bsd-3-clause | -7,187,553,513,730,992,000 | 42.816976 | 82 | 0.602004 | false | 3.842968 | true | false | false |
superclass/superwas | nagios.py | 1 | 12906 | #!/usr/bin/python
# This file is part of Superwas.
#
# Superwas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Superwas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Superwas. If not, see <http://www.gnu.org/licenses/>.
# Classes to create Nagios statistics from WAS PMI data
#
# Author: Andre van Dijk (SuperClass IT)
# Date: $Date: 2013-01-18 16:38:05 +0100 (vr, 18 jan 2013) $
# $Id: nagios.py 428 2013-01-18 15:38:05Z andre $
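# NOTE: `logger` is referenced below but never defined or imported in this file;
# presumably it is provided by the superwas runtime that loads this module.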
class NagiosStatus:
def __init__(self, code, message, perfdata):
self.code=code
self.message=message
self.perfdata=perfdata
def getCode(self):
return self.code
def getMessage(self):
return self.message
def getPerformanceData(self):
return self.perfdata
class NagiosStat:
   # Nagios return values
OK=0 # indicates a service is working properly.
WARNING=1 # indicates a service is in warning state.
CRITICAL=2 # indicates a service is in critical state.
UNKNOWN=3 # indicates a service is in unknown state.
STATUS=["OK","WARNING","CRITICAL","UNKOWN"]
def __init__(self):
self.criticalThreshold=0
self.warningThreshold=0
self.statusinput=[]
def setStatus(self, stats):
pass
def setCriticalThreshold(self, critical):
self.criticalThreshold=int(critical)
def setWarningThreshold(self, warning):
self.warningThreshold=int(warning)
class HeapStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.current=-1
self.count=-1
def setCurrentHeapSize(self, current):
self.current=int(current)
def setUsedMemory(self, count):
self.count=int(count)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('HeapSize')
if pu is not None:
self.setCurrentHeapSize(pu.getCurrent())
pu=stat.getStatistic('UsedMemory')
if pu is not None:
self.setUsedMemory(pu.getCount())
def getStatus(self):
percentage=-1
status=self.UNKNOWN
message="HeapStatus unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("Heap stats off, returning OK")
return NagiosStatus(self.OK, "Heap thresholds unset", "")
if self.count!=-1 and self.current!=-1:
if self.count!=0:
percentage=(float(self.count)/self.current)*100
else:
percentage=0
if percentage>=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL heapSize %d/%d" % (percentage,self.criticalThreshold)
elif percentage>=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING heapSize %d/%d" % (percentage,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK heapSize %d/%d" % (percentage,self.warningThreshold)
logger.debug("Heap stats: %s %s" % (status,message))
return NagiosStatus(status, message,"Heap=%d%%;%d;%d;;;" % (percentage,self.warningThreshold,self.criticalThreshold))
class CPUStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.percentage=-1
def setCPUPercentage(self, percentage):
self.percentage=int(percentage)
def getStatus(self):
status=NagiosStat.UNKNOWN
message="CPU Usage unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("CPU stats off, returning OK")
return NagiosStatus(self.OK, "CPU thresholds unset", "")
if self.percentage!=-1:
if self.percentage >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL CPU Usage %d/%d" % (self.percentage,self.criticalThreshold)
elif self.percentage >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING CPU Usage %d/%d" % (self.percentage,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK CPU Usage %d/%d" % (self.percentage,self.warningThreshold)
return NagiosStatus(status, message, "CPU=%d%%;%d;%d;;;" % (self.percentage,self.warningThreshold,self.criticalThreshold))
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('ProcessCpuUsage')
if pu is not None:
self.setCPUPercentage(pu.getCount())
class DataSourceUsageStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.percentUsed=-1
def setPercentUsed(self, percentUsed):
self.percentUsed=float(percentUsed)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('PercentUsed')
if pu is not None:
self.setPercentUsed(pu.getCurrent())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="DataSource connection pool usage unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("DataSource usage stats off, returning OK")
return NagiosStatus(self.OK, "DataSource usage thresholds unset", "")
if self.percentUsed!=-1:
if self.percentUsed >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL DataSource pool usage %d/%d" % (self.percentUsed,self.criticalThreshold)
elif self.percentUsed >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING DataSource pool usage %d/%d" % (self.percentUsed,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK DataSource usage %d/%d" % (self.percentUsed,self.warningThreshold)
return NagiosStatus(status, message, "DataSourceUsage=%d%%;%d;%d;;;" % (self.percentUsed,self.warningThreshold,self.criticalThreshold))
class DataSourceWaitStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.waitTime=-1
def setWaitTime(self, waitTime):
self.waitTime=float(waitTime)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('WaitTime')
if pu is not None:
self.setWaitTime(pu.getMean())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="DataSource connection pool wait time unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("DataSource wait stats off, returning OK")
return NagiosStatus(self.OK, "DataSource wait time thresholds unset", "")
if self.waitTime!=-1:
if self.waitTime >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL DataSource wait time %d/%d" % (self.waitTime,self.criticalThreshold)
elif self.waitTime >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING DataSource wait time %d/%d" % (self.waitTime,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK DataSource wait time %d/%d" % (self.waitTime,self.warningThreshold)
return NagiosStatus(status, message, "DataSourceWait=%dms;%d;%d;;;" % (self.waitTime,self.warningThreshold,self.criticalThreshold))
class DataSourceUsetimeStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.useTime=-1
def setUseTime(self, useTime):
self.useTime=float(useTime)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('UseTime')
if pu is not None:
self.setUseTime(pu.getMean())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="DataSource connection pool use time unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("DataSource use time stats off, returning OK")
return NagiosStatus(self.OK, "DataSource use time thresholds unset", "")
if self.useTime!=-1:
if self.useTime >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL DataSource use time %d/%d" % (self.useTime,self.criticalThreshold)
elif self.useTime >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING DataSource use time %d/%d" % (self.useTime,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK DataSource use time %d/%d" % (self.useTime,self.warningThreshold)
return NagiosStatus(status, message, "DataSourceUsetime=%dms;%d;%d;;;" % (self.useTime,self.warningThreshold,self.criticalThreshold))
class WebContainerConcurrentHungThreadCount(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.hungThreads=-1
self.maxPoolSize=-1
def setHungThreads(self, hungThreads):
self.hungThreads=int(hungThreads)
def setMaxPoolSize(self, maxpoolsize):
self.maxPoolSize=int(maxpoolsize)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('ConcurrentHungThreadCount')
if pu is not None:
self.setHungThreads(pu.getCurrent())
pu=stat.getStatistic('PoolSize')
if pu is not None:
self.setMaxPoolSize(pu.getUpperBound())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="Webcontainer hung threads unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("Webcontainer hung threads stats off, returning OK")
return NagiosStatus(self.OK, "WebContainer hung threads thresholds unset", "")
if self.hungThreads!=-1 and self.maxPoolSize!=-1:
if self.maxPoolSize!=0:
percentage=(float(self.hungThreads)/self.maxPoolSize)*100
else:
percentage=0
if percentage >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL Webcontainer hung threads %d/%d" % (percentage,self.criticalThreshold)
elif percentage >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING Webcontainer hung threads %d/%d" % (percentage,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK Webcontainer hung threads %d/%d" % (percentage,self.warningThreshold)
return NagiosStatus(status, message, "WebContainerConcurrentHungThreadCount=%d%%;%d;%d;;;" % (self.hungThreads,self.warningThreshold,self.criticalThreshold))
class WebContainerActiveStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.active=-1
self.maxPoolSize=-1
def setActive(self, active):
self.active=int(active)
def setMaxPoolSize(self, maxpoolsize):
self.maxPoolSize=int(maxpoolsize)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('ActiveCount')
if pu is not None:
self.setActive(pu.getCurrent())
pu=stat.getStatistic('PoolSize')
if pu is not None:
self.setMaxPoolSize(pu.getUpperBound())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="Webcontainer usage unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("Webcontainer stats off, returning OK")
return NagiosStatus(self.OK, "WebContainer thresholds unset", "")
if self.active!=-1 and self.maxPoolSize!=-1:
if self.maxPoolSize!=0:
percentage=(float(self.active)/self.maxPoolSize)*100
else:
percentage=0
if percentage >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL Webcontainer usage %d/%d" % (percentage,self.criticalThreshold)
elif percentage >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING Webcontainer usage %d/%d" % (percentage,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK Webcontainer usage %d/%d" % (percentage,self.warningThreshold)
return NagiosStatus(status, message, "WebContainerActiveStat=%d%%;%d;%d;;;" % (self.active,self.warningThreshold,self.criticalThreshold))
class LiveSessionStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.live=-1
def setLive(self, live):
self.live=int(live)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('LiveCount')
if pu is not None:
self.setLive(pu.getCurrent())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="Live sessions unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("Live sessions stats off, returning OK")
return NagiosStatus(self.OK, "Live sesions thresholds unset", "")
if self.live!=-1:
if self.live>=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL Live sessions %d/%d" % (self.live,self.criticalThreshold)
elif self.live >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING Live sessions %d/%d" % (self.live,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK Live sessions %d/%d" % (self.live,self.warningThreshold)
return NagiosStatus(status, message, "LiveSession=%d;%d;%d;;;" % (self.live,self.warningThreshold,self.criticalThreshold))
| gpl-2.0 | -9,097,531,912,660,383,000 | 36.086207 | 159 | 0.707423 | false | 3.413383 | false | false | false |
wichert/rest_toolkit | tests/ext/test_colander.py | 1 | 1039 | import pytest
from pyramid.httpexceptions import HTTPBadRequest
from rest_toolkit.abc import EditableResource
from rest_toolkit.ext.colander import ColanderSchemaValidationMixin
import colander
class AccountSchema(colander.Schema):
email = colander.SchemaNode(colander.String())
password = colander.SchemaNode(colander.String())
class DummyResource(ColanderSchemaValidationMixin, EditableResource):
schema = AccountSchema
def to_dict(self):
return {}
def update_from_dict(self, data, partial):
pass
def test_valid_request():
resource = DummyResource()
resource.validate({'email': '[email protected]', 'password': 'Jane'}, partial=False)
def test_validation_error():
resource = DummyResource()
with pytest.raises(HTTPBadRequest):
resource.validate({'email': '[email protected]'}, partial=False)
def test_partial_data():
resource = DummyResource()
resource.to_dict = lambda: {'password': 'Jane'}
resource.validate({'email': '[email protected]'}, partial=True)
| bsd-2-clause | 5,855,686,844,315,270,000 | 27.081081 | 87 | 0.725698 | false | 3.935606 | false | false | false |
yaelelmatad/EtsyApiTest | findSimilarShopsAllShopsByPopularity.py | 1 | 6336 | from __future__ import division
import json
import sys
import math
import random
#hard coded number of similar stores to spit out since HW said 5, could always add to command line
nSimilarStores = 5
maxBonus = 0.0005
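#maximum distance reduction a shop can earn for popularity (scaled by its share of the largest num_favorers seen)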
class vectors:
def __init__(self, featureVectorName, multiplier, shopVectors):
self.multiplier= multiplier
#this will then hold 1/count
self.shopVectors= shopVectors
#spare feature vectors that only include features which appear in this particular shop
#eventually get normalized so that |shopVector| = 1
self.featureVectorProperty = featureVectorName
def getMultiplier(self):
'''return the multiplier after training, make sure to train and normalize before calling this function'''
return self.multiplier
def getShopVectors(self):
'''return the shopvectors. make sure to train and normalize before calling this routine'''
return self.shopVectors
def calculateDistance(self, shop1, shop2):
'''given two shop names, calculate the distance for this typeOfVector only'''
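      #weighted Euclidean distance: squared differences over the sparse union of keys, each scaled by multiplier[key]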
#check that both of the vectors are in this class, if not use the default empty dictionary
vec1 = {}
vec2 = {}
if shop1 in self.shopVectors:
vec1 = self.shopVectors[shop1]
if shop2 in self.shopVectors:
vec2 = self.shopVectors[shop2]
#the vectors are sparse, so not all keys appear in all vectors. Figure out which keys are in just one, and which are in both
allKeys = vec1.keys() + vec2.keys()
sharedKeys = []
justInFirst = []
justInSecond = []
for key in set(allKeys):
if key in vec1.keys() and key in vec2.keys():
sharedKeys.append(key)
elif key in vec1.keys():
justInFirst.append(key)
else:
justInSecond.append(key)
dist2 = 0 #actually the squared distance
#since we used all our store data to train our multiplier, we know that the multiplier contains all keys
for key in justInFirst:
dist2 += math.pow(vec1[key],2)*(self.multiplier[key])
#dist2 += math.pow(vec1[key],2)
for key in justInSecond:
dist2 += math.pow(vec2[key],2)*(self.multiplier[key])
#dist2 += math.pow(vec2[key],2)
for key in sharedKeys:
dist2 += math.pow(vec2[key]-vec1[key],2)*(self.multiplier[key])
#dist2 += math.pow(vec2[key]-vec1[key],2)
return math.sqrt(dist2)
def main(jsonInputForMultiplier, jsonInputFileForVectors, jsonShopInfo, outputFileName):
#read the json input
multFile = open(jsonInputForMultiplier,'r')
multipliers =json.load(multFile)
multFile.close()
shopVecFile = open(jsonInputFileForVectors,'r')
shopVectors = json.load(shopVecFile)
shopVecFile.close()
jsonShopFile = open(jsonShopInfo,'r')
shopDetails = json.load(jsonShopFile)
jsonShopFile.close()
#here is where I calculate what "bonus" to give the store if it is very popular
maxPopularity = 1
for shop in shopDetails:
currPop = shopDetails[shop][0]["num_favorers"]
if currPop > maxPopularity:
maxPopularity = currPop
#max seems to be ~170 for my data
#find out how many different things we trained against
typesOfVectors = [key for key in multipliers]
#initialize the vectorClasses with the trained data
vectorClasses = {}
for typeVec in typesOfVectors:
vectorClasses[typeVec] = vectors(typeVec, multipliers[typeVec],shopVectors[typeVec])
#find all the shop names (not necessarily unique)
shopNamesNotSet = []
#so we can get all shops, not all shops appear in all feature sets
for typeVec in typesOfVectors:
shopNamesNotSet += [shop for shop in shopVectors[typeVec]]
#now remove duplicates
shopNames = set(shopNamesNotSet)
outputFile = open(outputFileName, 'wb')
for originalShop in shopNames:
distances = []
accum = 0
for shop in shopNames:
dist = 0
#go through all the shops and calculate the distance
if shop == originalShop:
#don't waste your time calculating self distance
continue
for typeVec in typesOfVectors:
#there are len(typesOfVectors) different "length" vectors to calculate
dist+=vectorClasses[typeVec].calculateDistance(originalShop,shop)
#if shop != originalShop:
accum += dist
#subtract a bit of distance if a store is really popular.
dist+= (-1)*maxBonus*float(shopDetails[shop][0]["num_favorers"])/float(maxPopularity)
distances.append((shop,dist))
#print "average ", float(accum)/float(len(distances))
#certainly not necessary to keep all the distances and then sort. could just keep the list of "nSimilarStores" currently with lowest distane values, but the sort is quick on only 5000 members
sortedDist = sorted(distances, key=lambda t: t[1])
#sort on second element of tuple
stringToPrint = originalShop+ ": " + sortedDist[0][0]
for i in range(1,nSimilarStores):
stringToPrint += ", " + sortedDist[i][0]
stringToPrint += "\n"
outputFile.write(stringToPrint)
outputFile.close()
def usage():
sys.stderr.write("""
    given a multiplier.json and a shopvectors.json, goes through ALL the stores and finds the five most similar stores. This version also gives more popular stores a bonus. Avg distance ~0.3. A store can reduce its distance to the current store by up to maxBonus (0.0005 in this script) if it has the most favorers of the list. If there are no favorers, there is no distance reduction.
    \n Fourth argument should be the output file you want to write to, like "similarShops.dat". For example you might use: \n
    python findSimilarShopsAllShopsByPopularity.py multiplier.json vectors.json storeData.json similarShopsByPopularity.dat
\n""")
if __name__ == "__main__":
    #check the usage is correct: the user must supply exactly 4 arguments
if len(sys.argv) != 5:
usage()
sys.exit(1)
main(sys.argv[1],sys.argv[2], sys.argv[3], sys.argv[4])
| gpl-3.0 | -9,196,436,886,653,949,000 | 40.142857 | 360 | 0.652778 | false | 4.010127 | false | false | false |
ZeitOnline/zeit.content.article | src/zeit/content/article/edit/browser/tests/test_rawxml.py | 1 | 1493 | import zeit.content.article.edit.browser.testing
class Form(zeit.content.article.edit.browser.testing.BrowserTestCase):
block_type = 'raw'
def test_inline_form_saves_values(self):
self.get_article(with_empty_block=True)
b = self.browser
b.open('editable-body/blockname/@@edit-rawxml?show_form=1')
b.getControl('XML source').value = """\
<raw xmlns:ns0="http://namespaces.zeit.de/CMS/cp" ns0:__name__="blockname">
<foo> </foo>
</raw>
"""
b.getControl('Apply').click()
b.open('@@edit-rawxml?show_form=1')
self.assertEllipsis("""\
<raw...xmlns:ns0="http://namespaces.zeit.de/CMS/cp"...ns0:__name__="blockname"...>
<foo> </foo>
</raw>
""", b.getControl('XML source').value)
def test_xml_is_validated_root_must_be_raw_element(self):
self.get_article(with_empty_block=True)
b = self.browser
b.open('editable-body/blockname/@@edit-rawxml?show_form=1')
b.getControl('XML source').value = '<foo />'
b.getControl('Apply').click()
self.assertIn(
'<span class="error">The root element must be <raw>.</span>',
b.contents)
class FormLoader(zeit.content.article.edit.browser.testing.EditorTestCase):
def test_rawxml_form_is_loaded(self):
s = self.selenium
self.add_article()
self.create_block('raw')
s.assertElementPresent('css=.block.type-raw .inline-form '
'.field.fieldname-xml')
| bsd-3-clause | -7,119,949,674,202,443,000 | 33.72093 | 82 | 0.616879 | false | 3.295806 | true | false | false |
yeleman/snisi | snisi_maint/management/commands/update-cluster-from-std-csv.py | 1 | 3120 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import os
from django.core.management.base import BaseCommand
from optparse import make_option
from py3compat import PY2
from snisi_core.models.Entities import Entity
from snisi_core.models.Projects import Cluster, Participation
if PY2:
import unicodecsv as csv
else:
import csv
logger = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-f',
help='CSV file',
action='store',
dest='filename'),
)
def handle(self, *args, **options):
if not os.path.exists(options.get('filename') or ""):
logger.error("CSV file `{}` does not exist."
.format(options.get('filename')))
return
headers = ['action', 'slug', 'cluster', 'include_hc']
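        # expected CSV layout (the first row is skipped as a header), e.g.:
        #   add,<entity-slug>,<cluster-slug>,1
        # where action is one of: add, disable, enable, remove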
input_csv_file = open(options.get('filename'), 'r')
csv_reader = csv.DictReader(input_csv_file, fieldnames=headers)
for entry in csv_reader:
if csv_reader.line_num == 1:
continue
entity = Entity.get_or_none(entry.get('slug'))
if entity is None:
logger.warning("Entity `{}` does not exist."
                               .format(entry.get('slug')))
continue
cluster = Cluster.get_or_none(entry.get('cluster'))
if cluster is None:
logger.error("Cluster `{}` does not exist."
                             .format(entry.get('cluster')))
continue
include_hc = bool(entry.get('include_hc'))
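            # bool() on the raw CSV string: any non-empty value (even "0" or "False") enables include_hc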
entities = [entity]
if include_hc:
entities += entity.get_health_centers()
if entry.get('action') == 'add':
for e in entities:
p, created = Participation.objects.get_or_create(
cluster=cluster,
entity=e,
is_active=True)
logger.info(p)
if entry.get('action') == 'disable':
for p in Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]):
p.is_active = False
p.save()
logger.info(p)
if entry.get('action') == 'enable':
for p in Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]):
p.is_active = True
p.save()
logger.info(p)
if entry.get('action') == 'remove':
Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]).delete()
logger.info("All Done")
| mit | -835,938,490,400,088,200 | 31.842105 | 73 | 0.501923 | false | 4.431818 | false | false | false |
mbartling/TAMU_senior_design | Python/get_xbee_servo.py | 1 | 4195 | #! /usr/bin/env python
import serial
import sys
import os
import MySQLdb
from subprocess import call
from datetime import date
FORCE_WRITE = 0
HORIZONTAL = 0
VERTICAL = 90
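# servo angles (degrees); samples are later split into 'horiz'/'vert' output files by servoPos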
today = date.today()
try:
address_array = []
# open data base
db = MySQLdb.connect(host="localhost", user="", passwd="team05", db="xbee_teensy")
cur = db.cursor()
cur.execute("select version()")
data = cur.fetchone()
print "Database version: ", data
cur.execute("truncate table raw_data")
# open serial port
xbee = serial.Serial()
xbee.baudrate = 57600
if len(sys.argv) > 1:
xbee.port = sys.argv[1]
else:
xbee.port = '/dev/ttyACM0'
if xbee.isOpen():
xbee.close()
xbee.open()
print xbee
xbee.write("?")
if xbee.isOpen:
for line in xbee:
line = line.strip()
packet = line.split()
print line;
if len(packet) > 1 and packet[0] == '7E':
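                    # packet layout assumed by the parsing below (as implemented, not checked against the XBee API spec):
                    # elements 8-10 and 14 -> source address, 15 -> RSSI, 16 -> servo position,
                    # 17-20 -> latitude, 21-24 -> longitude (two's complement)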
if len(packet) < 26 or int(packet[11], 16) != 0x64:
print "Packet len is: " + "{0}".format(len(packet))
continue;
# calling system command for timestamp
p = os.popen('date "+%F %T"')
timestamp = p.readline()
p.close()
timestamp = timestamp.rstrip('\n')
timestamp = timestamp.rstrip('\0')
print "Time is: " + timestamp
# parse address
addressH = packet[8:11]
addressH.append(packet[14])
# convert to dec, then string
addressString = ''
for item in addressH:
x = int(item, 16)
addressString += str(x) + '.'
addressString = addressString[:-1]
print "Initial Address: " + addressString
# parse rssi
rssi = int(packet[15], 16)
print "RSSI = ", rssi
# parse survo position
servoPos = int(packet[16], 16)
print "servoPos =", servoPos
# parse gps
latArray = packet[17:21]
latHex = ''.join(latArray)
print latHex
                if latHex == '00000000':  # four joined hex bytes -> 8 characters
lat = 0
else:
lat = int(latHex, 16)
lonArray = packet [21:25]
lonHex = ''.join(lonArray)
print lonHex
                if lonHex == '00000000':  # four joined hex bytes -> 8 characters
lon = 0;
else:
lon = int(lonHex, 16)
lon = lon ^ 0xFFFFFFFF
lon += 1
lon *= -1
print lat, lon
if FORCE_WRITE:
cmd = "insert into raw_data values(\"%s\",\"%s\", %d, %d, %d, %d)" %(timestamp, addressString, servoPos, rssi, lat, lon)
print cmd
cur.execute(cmd)
db.commit()
print "new row added to mysql"
if not addressString in address_array:
print "Adding address string: " + addressString
address_array.append(addressString)
else:
if lon > -970000000 and lon < -960000000 and lat > 306000000 and lat < 307000000:
cmd = "insert into raw_data values(\"%s\",\"%s\", %d, %d, %d, %d)" %(timestamp, addressString, rssi, servoPos, lat, lon)
print cmd
cur.execute(cmd)
db.commit()
print "new row added to mysql"
if not addressString in address_array:
print "Adding address string: " + addressString
address_array.append(addressString)
print "Closing Xbee Port"
finally:
print "output data to file"
# os.popen('rm -f /home/walter/Code/rawData/*.txt')
# os.popen('rm -f /tmp/raw101.txt')
for address in address_array:
# write horizontal
address_split = address.split('.');
filename = '/tmp/raw' + address_split[3] + 'horiz.txt'
os.popen('rm ' + filename)
print filename
cmd = "select row, col, rssi from raw_data where address = \'%s\' and servoPos = %d into outfile \'%s\' fields terminated by ','" %(address, HORIZONTAL, filename)
print cmd
cur.execute(cmd)
cmd = 'cp ' + filename + ' /home/walter/Code/rawData/raw' + address_split[3] + today.strftime("-%y-%m-%d") + 'horiz.out'
print cmd
os.popen(cmd)
filename = '/tmp/raw' + address_split[3] + 'vert.txt'
os.popen('rm ' + filename)
print filename
cmd = "select row, col, rssi from raw_data where address = \'%s\' and servoPos = %d into outfile \'%s\' fields terminated by ','" %(address, VERTICAL, filename)
print cmd
cur.execute(cmd)
cmd = 'cp ' + filename + ' /home/walter/Code/rawData/raw' + address_split[3] + today.strftime("-%y-%m-%d") + 'vert.out'
print cmd
os.popen(cmd)
print "closing xbee port and database"
db.close()
xbee.close()
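# Frame layout assumed by the parser above (indices into the space-separated
# hex dump of an XBee API frame; this summary is illustrative and derived only
# from the slicing and range checks in the loop):
#   packet[0]                 -> 0x7E start delimiter
#   packet[8:11] + packet[14] -> bytes used to build the dotted address string
#   packet[15]                -> RSSI, packet[16] -> servo position (degrees)
#   packet[17:21]             -> latitude  (32-bit value; compared against degrees scaled by 1e7)
#   packet[21:25]             -> longitude (32-bit two's complement, converted to negative above)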
| mit | -2,135,066,010,321,820,200 | 26.598684 | 164 | 0.615256 | false | 2.947997 | false | false | false |
Syncleus/apex | src/apex/kiss/kiss_serial.py | 1 | 3486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""KISS Core Classes."""
# These imports are for python3 compatibility inside python2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import serial
import six
from apex.kiss import constants as kiss_constants
from .kiss import Kiss
__author__ = 'Jeffrey Phillips Freeman (WI2ARD)'
__maintainer__ = 'Jeffrey Phillips Freeman (WI2ARD)'
__email__ = '[email protected]'
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright 2016, Syncleus, Inc. and contributors'
__credits__ = []
class KissSerial(Kiss):
"""KISS Serial Object Class."""
logger = logging.getLogger(__name__)
logger.setLevel(kiss_constants.LOG_LEVEL)
console_handler = logging.StreamHandler()
console_handler.setLevel(kiss_constants.LOG_LEVEL)
formatter = logging.Formatter(kiss_constants.LOG_FORMAT)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.propagate = False
def __init__(self, strip_df_start=True,
com_port=None,
baud=38400,
parity=serial.PARITY_NONE,
stop_bits=serial.STOPBITS_ONE,
byte_size=serial.EIGHTBITS):
super(KissSerial, self).__init__(strip_df_start)
self.com_port = com_port
self.baud = baud
self.parity = parity
self.stop_bits = stop_bits
self.byte_size = byte_size
self.serial = None
self.logger.info('Using interface_mode=Serial')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.serial.close()
def __del__(self):
if self.serial and self.serial.isOpen():
self.serial.close()
def _read_interface(self):
read_data = self.serial.read(kiss_constants.READ_BYTES)
waiting_data = self.serial.inWaiting()
if waiting_data:
read_data += self.serial.read(waiting_data)
return [ord(c) if six.PY2 else c for c in read_data]
def _write_interface(self, data):
self.serial.write(data)
def connect(self, mode_init=None, **kwargs):
"""
Initializes the KISS device and commits configuration.
See http://en.wikipedia.org/wiki/KISS_(TNC)#Command_codes
for configuration names.
:param **kwargs: name/value pairs to use as initial config values.
"""
self.logger.debug('kwargs=%s', kwargs)
self.serial = serial.Serial(port=self.com_port, baudrate=self.baud, parity=self.parity,
stopbits=self.stop_bits, bytesize=self.byte_size)
self.serial.timeout = kiss_constants.SERIAL_TIMEOUT
if mode_init is not None:
self.serial.write(mode_init)
self.exit_kiss = True
else:
self.exit_kiss = False
        # Previous versions defaulted to Xastir-friendly configs. Unfortunately
# those don't work with Bluetooth TNCs, so we're reverting to None.
if kwargs:
for name, value in kwargs.items():
super(KissSerial, self)._write_setting(name, value)
def close(self):
super(KissSerial, self).close()
if not self.serial:
raise RuntimeError('Attempting to close before the class has been started.')
elif self.serial.isOpen():
self.serial.close()
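# Usage sketch (illustrative; the serial port path is an assumption):
#
#     with KissSerial(com_port='/dev/ttyUSB0', baud=38400) as tnc:
#         tnc.connect()
#         frame_bytes = tnc._read_interface()  # list of ints, see above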
| apache-2.0 | -7,112,238,142,381,846,000 | 30.690909 | 95 | 0.626219 | false | 3.732334 | false | false | false |
khertan/gedit_flake8 | gedit_flake8/__init__.py | 1 | 14861 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""gedit-flake8 : A plugin for gedit
to display error and warning from flake8."""
__author__ = "Benoît HERVIER"
__copyright__ = "Copyright 2012 " + __author__
__license__ = "GPLv3"
__version__ = "0.7.0"
__maintainer__ = "Benoît HERVIER"
__email__ = "[email protected]"
__status__ = "Beta"
try:
from gi.repository import GObject, Gtk, Gedit, Pango
except ImportError as err:
print('GEdit-Flake8 need to be launched by GEdit 3')
print(err)
import re
from subprocess import Popen, PIPE, call
import threading
GObject.threads_init()
def _remove_tags(document, errors_tag):
"""Remove not anymore used tags"""
if errors_tag:
start, end = document.get_bounds()
document.remove_tag(errors_tag, start, end)
def apply_style(style, tag):
"""Apply a style to a tag from the default theme style
This lightly modified code come from the synctext.py gedit plugin"""
def apply_style_prop(tag, style, prop):
if style.get_property(prop + "-set"):
tag.set_property(prop, style.get_property(prop))
else:
tag.set_property(prop, None)
def apply_style_prop_bool(tag, style, prop, whentrue, whenfalse):
if style.get_property(prop + "-set"):
prop_value = whentrue if style.get_property(prop) else whenfalse
tag.set_property(prop, prop_value)
apply_style_prop(tag, style, "foreground")
apply_style_prop(tag, style, "background")
try:
apply_style_prop_bool(tag,
style,
"weight",
Pango.Weight.BOLD,
Pango.Weight.NORMAL)
except TypeError as err:
# Different version of gtk 3 have different properties ... :(
print(err)
apply_style_prop_bool(tag,
style,
"italic",
Pango.Style.ITALIC,
Pango.Style.NORMAL)
apply_style_prop_bool(tag,
style,
"underline",
Pango.Underline.SINGLE,
Pango.Underline.NONE)
apply_style_prop(tag, style, "strikethrough")
class _IdleObject(GObject.Object):
"""
Override gobject.GObject to always emit signals in the main thread
by emmitting on an idle handler
"""
def __init__(self):
GObject.Object.__init__(self)
def emit(self, *args):
GObject.idle_add(GObject.Object.emit, self, *args)
class Message(object):
def __init__(self, document, lineno, column, message):
self._doc = document
self._lineno = lineno
self._column = column
self._message = message
self._start_iter = None
self._end_iter = None
self._stock_id = self._get_stock_id(message)
def _get_stock_id(self, message):
if message.startswith('E'):
return Gtk.STOCK_DIALOG_ERROR
elif message.startswith('W'):
return Gtk.STOCK_DIALOG_WARNING
elif message.startswith('C'):
return Gtk.STOCK_DIALOG_INFO
else:
return Gtk.STOCK_DIALOG_INFO
def setWordBounds(self, start, end):
self._start_iter = start
self._end_iter = end
    doc = property(lambda self: self._doc)
lineno = property(lambda self: self._lineno)
    column = property(lambda self: self._column)
message = property(lambda self: self._message)
start = property(lambda self: self._start_iter)
end = property(lambda self: self._end_iter)
stock_id = property(lambda self: self._stock_id)
class ResultsModel(Gtk.ListStore):
def __init__(self):
super(ResultsModel, self).__init__(int, int, str)
def add(self, msg):
self.append([msg.lineno, msg.column, msg.message])
class ResultsView(Gtk.TreeView):
def __init__(self, panel):
super(ResultsView, self).__init__()
self._panel = panel
linha = Gtk.TreeViewColumn("Line")
linha_cell = Gtk.CellRendererText()
linha.pack_start(linha_cell, True)
linha.add_attribute(linha_cell, 'text', 0)
linha.set_sort_column_id(0)
self.append_column(linha)
msgtype = Gtk.TreeViewColumn("Column")
msgtype_cell = Gtk.CellRendererText()
msgtype.pack_start(msgtype_cell, True)
msgtype.add_attribute(msgtype_cell, 'text', 1)
msgtype.set_sort_column_id(1)
self.append_column(msgtype)
msg = Gtk.TreeViewColumn("Message")
msg_cell = Gtk.CellRendererText()
msg.pack_start(msg_cell, True)
msg.add_attribute(msg_cell, 'text', 2)
msg.set_sort_column_id(2)
self.append_column(msg)
self.connect("row-activated", self._row_activated_cb)
def _row_activated_cb(self, view, row, column):
model = view.get_model()
iter = model.get_iter(row)
window = self._panel.get_window()
document = window.get_active_document()
line = model.get_value(iter, 0) - 1
document.goto_line(line)
view = window.get_active_view()
text_iter = document.get_iter_at_line(line)
view.scroll_to_iter(text_iter, 0.25, False, 0.5, 0.5)
view.grab_focus()
class ResultsPanel(Gtk.ScrolledWindow):
def __init__(self, window):
super(ResultsPanel, self).__init__()
self.window = window
self.view = ResultsView(self)
self.add(self.view)
self.view.show()
def set_model(self, model):
self.view.set_model(model)
def get_window(self):
return self.window
class Worker(threading.Thread, _IdleObject):
__gsignals__ = {
"completed": (
GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, []), }
def __init__(self, document, errors_tag):
self.document = document
threading.Thread.__init__(self)
_IdleObject.__init__(self)
if errors_tag is None:
self._add_tags(document)
else:
self._errors_tag = errors_tag
self._results = []
self._errors = []
self.cancelled = False
def _add_tags(self, document):
"""Register new tags in the sourcebuffer"""
style = document.get_style_scheme().get_style('def:error')
self._errors_tag = \
document.create_tag("flake8-error",
underline=Pango.Underline.ERROR)
apply_style(style, self._errors_tag)
def _highlight_errors(self, errors):
"""Colorize error in the sourcebuffer"""
document = self.document
for err in errors:
start = document.get_iter_at_line(err.lineno - 1)
end = document.get_iter_at_line(err.lineno - 1)
end.forward_to_line_end()
# apply tag to entire line
document.apply_tag(self._errors_tag, start, end)
def _flake8_bin(self):
"""Returns a flake8 valid executable
flake8 is the default executable, but in Debian systems,
for example, package pyflakes provides a pyflakes binary
instead of flake8
"""
# list of flake binaries
flake8_binaries = ('flake8', 'pyflakes')
def cmd_exists(cmd):
return call("type " + cmd,
shell=True,
stdout=PIPE, stderr=PIPE) == 0
for flake8 in flake8_binaries:
if cmd_exists(flake8):
return flake8
# default
return "flake8"
def run(self):
errors = []
location = self.document.get_location()
_remove_tags(self.document, self._errors_tag)
if location is None:
print('Location not found ...')
return
path = location.get_path()
if path is None:
import codecs
try:
encoding = self.document.get_encoding().get_charset()
except Exception as err:
encoding = 'utf-8'
path = '/tmp/gedit_flake8.py'
start, end = self.document.get_bounds()
with codecs.open(path, 'w', encoding=encoding) as fh:
fh.write(str(
self.document.get_text(start, end,
include_hidden_chars=True),
encoding))
stdout, stderr = Popen([self._flake8_bin(), path],
stdout=PIPE, stderr=PIPE).communicate()
output = stdout if stdout else stderr
line_format = re.compile(
'(?P<path>[^:]+):(?P<line>\d+):'
+ '(?P<character>\d+:)?\s(?P<message>.*$)')
self._results = ResultsModel()
if not output:
if not self.cancelled:
self.emit("completed")
return
for line in output.splitlines():
m = line_format.match(line.decode('utf-8'))
if not m:
continue
groups = m.groupdict()
if groups['character']:
err = Message(self.document,
int(groups['line']),
int(groups['character'].strip(':')),
groups['message'],)
else:
err = Message(self.document,
int(groups['line']),
0,
groups['message'],)
errors.append(err)
self._results.add(err)
_remove_tags(self.document, self._errors_tag)
self._errors = errors
self._highlight_errors(self._errors)
if not self.cancelled:
self.emit("completed")
class Flake8Plugin(GObject.Object, Gedit.WindowActivatable):
__gtype_name__ = "Flake8"
window = GObject.property(type=Gedit.Window)
documents = []
_errors_tag = {}
_results = {}
_errors = {}
_worker = None
def __init__(self):
GObject.Object.__init__(self)
def do_activate(self):
# self._insert_panel()
self._panel = ResultsPanel(self.window)
self._panel.show()
bottom = self.window.get_bottom_panel()
bottom.add_titled(self._panel, "ResultsPanel", "Flake8 Results")
self.window.connect("tab-added", self.on_tab_added)
self.window.connect("tab-removed", self.on_tab_removed)
self.window.connect("active-tab-changed", self.on_active_tab_changed)
def do_deactivate(self):
# self._remove_panel()
pass
def on_notify_style_scheme(self, document, param_object):
style = document.get_style_scheme().get_style('def:error')
apply_style(style, self._errors_tag[document])
def _insert_panel(self):
"""Insert bottom GEdit panel"""
self._panel = ResultsPanel(self.window)
image = Gtk.Image()
image.set_from_icon_name('gnome-mime-text-x-python',
Gtk.IconSize.MENU)
bottom_panel = self.window.get_bottom_panel()
bottom_panel.add_item(self._panel,
'ResultsPanel',
'Flake8 Results',
image)
def display_error_msg(self, document):
"""Display a statusbar message if the current line have errors"""
if document is None:
return True
try:
if document.get_language().get_name() != 'Python':
return True
except AttributeError as err:
return True
curline = document.get_iter_at_mark(
document.get_insert()).get_line() + 1
for err in self._errors[document]:
if err.lineno == curline:
statusbar = self.window.get_statusbar()
statusbar_ctxtid = statusbar.get_context_id('Flake8')
statusbar.push(statusbar_ctxtid, 'Line : %s : %s'
% (err.lineno, err.message))
return True
return False
def _remove_panel(self):
"""Remove the inserted panel from GEdit"""
bottom_panel = self.window.get_bottom_panel()
bottom_panel.remove_item(self._panel)
def on_active_tab_changed(self, window, tab):
self._panel.set_model(self._results[tab.get_document()])
def on_tab_added(self, window, tab):
"""Initialize the required vars"""
document = tab.get_document()
self._results[document] = ResultsModel()
self._errors[document] = []
self._errors_tag[document] = None
document.connect('loaded', self.analyse)
document.connect('saved', self.analyse)
document.connect('cursor-moved', self.display_error_msg)
def on_tab_removed(self, window, tab):
"""Cleaning results not needed anymore"""
document = tab.get_document()
if document in self._results:
self._results[document] = None
del self._results[document]
self._errors[document] = None
del self._errors[document]
_remove_tags(document, self._errors_tag[document])
def completedCb(self, *userData):
errors = self._worker._errors
document = self._worker.document
self._errors[document] = errors
self._results[document] = self._worker._results
self._errors_tag[document] = self._worker._errors_tag
if len(errors) > 0:
if not self.display_error_msg(document):
statusbar = self.window.get_statusbar()
statusbar_ctxtid = statusbar.get_context_id('Flake8')
statusbar.push(statusbar_ctxtid,
'Line : %s : %s'
% (errors[0].lineno, errors[0].message))
else:
statusbar = self.window.get_statusbar()
statusbar_ctxtid = statusbar.get_context_id('Flake8')
statusbar.push(statusbar_ctxtid,
"No errors found")
try:
self._panel.set_model(self._results[document])
except:
pass
self._worker = None
def analyse(self, doc):
"""Launch a process and populate vars"""
document = self.window.get_active_document()
if document is None:
return True
try:
if document.get_language().get_name() != 'Python':
return True
except AttributeError:
return True
if self._worker is not None:
self._worker.cancelled = True
self._worker = Worker(document, self._errors_tag[document])
self._worker.connect("completed", self.completedCb)
self._worker.start()
| gpl-3.0 | 2,962,702,410,115,638,000 | 29.95625 | 77 | 0.553671 | false | 4.019205 | false | false | false |
schocco/mds-web | mds_website/settings.py | 1 | 8687 | # -*- coding: utf-8 -*-
# Django settings for mds_website project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_DIR = os.path.dirname(__file__)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'mdsdb', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'postgres',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
#dont force trailing backslash
#APPEND_SLASH = False
#TASTYPIE_ALLOW_MISSING_SLASH = APPEND_SLASH
TASTYPIE_DEFAULT_FORMATS = ['json']
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, "media"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, "../sitestatic"))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.abspath(os.path.join(PROJECT_DIR, "../mds-web-client/dist")),
)
STATICFILES_STORAGE = 'webpack.storage.WebpackHashStorage'
WEBPACK_ASSETS_FILE = os.path.abspath(os.path.join(PROJECT_DIR, "../mds-web-client/webpack-assets.json"))
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'use your own secret key.'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# google api console: https://console.developers.google.com/project/api-access-tests/apiui/credential?authuser=0
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'social.backends.vk.VKOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
#SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['https://www.googleapis.com/auth/profile', 'https://www.googleapis.com/auth/email']
LOGIN_REDIRECT_URL = '/'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'apps.mds_auth.middleware.SocialAuthExceptionHandlerMiddleware'
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'apps.mds_auth.auth_pipeline.save_profile', # get profile data from oauth resource
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'apps.mds_auth.auth_pipeline.device_redirect', # change ?next parameter to provide access token for mobile apps
)
ROOT_URLCONF = 'mds_website.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mds_website.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
############# CELERY SETTINGS
## Using the database to store task state and results.
CELERY_RESULT_BACKEND = 'amqp'
BROKER_HOST = "localhost"
#BROKER_URL = 'amqp://guest:guest@localhost:5672/celeryvhost'
CELERY_TIMEZONE = TIME_ZONE
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.gis',
'social.apps.django_app.default',
'tastypie',
'apps.muni_scales',
'apps.trails',
'apps.mds_auth',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'custom': {
'handlers': ['console', 'mail_admins'],
'level': 'DEBUG',
}
}
}
# import local settings file if one exists
# apparantly using system environments is the better solution
try:
from settings_local import *
except Exception as e:
print("Could not find a local settings file.")
| mit | -8,026,578,457,532,090,000 | 33.200787 | 127 | 0.687234 | false | 3.603069 | false | false | false |
micjabbour/AndroidGuard-WebApp | AndroidGuard/models.py | 1 | 3073 | from . import db
from .config import AppConfig
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import desc
from itsdangerous import Serializer, BadSignature
class Location(db.Model):
id = db.Column(db.Integer, primary_key=True)
latitude = db.Column(db.DECIMAL(9,6), nullable=False)
longitude = db.Column(db.DECIMAL(9,6), nullable=False)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
device_id = db.Column(db.Integer, db.ForeignKey('device.id'), nullable=False)
def serialize(self):
return {'latitude': str(self.latitude),
'longitude': str(self.longitude),
'timestamp': self.timestamp.isoformat()+'Z' # HACK
}
class Device(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
fcm_token = db.Column(db.Text)
locations = db.relationship('Location', backref='device', lazy='select')
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    __table_args__ = (db.UniqueConstraint('name', 'user_id'),)
@property
def last_location(self):
return Location.query.filter_by(device_id=self.id).order_by(desc('location.id')).first()
def get_device_dict(self):
device_dict = {'id': self.id, 'name': self.name}
if self.last_location:
device_dict['last_location'] = self.last_location.serialize()
return device_dict
def generate_auth_token(self):
s = Serializer(AppConfig.SECRET_KEY)
return s.dumps(self.id)
@staticmethod
def verify_auth_token(token):
s = Serializer(AppConfig.SECRET_KEY)
try:
id = s.loads(token)
except BadSignature:
return None
device = Device.query.get(id)
return device
@staticmethod
def get_by_devicename(user, name):
device_list = user.devices
for device in device_list:
if device.name == name:
return device
return None
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.Text, unique=True)
password_hash = db.Column(db.Text)
devices = db.relationship('Device', backref='user', lazy='dynamic')
@property
def password(self):
raise AttributeError('password: write-only field')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
@staticmethod
def get_by_username(username):
return User.query.filter_by(username=username).first()
@staticmethod
def verify_credentials(username, password):
user = User.get_by_username(username)
if user is not None and user.check_password(password):
return user
return None
def __repr__(self):
return "<User '{}'>".format(self.username)
| unlicense | 256,992,587,173,349,120 | 31.691489 | 96 | 0.649528 | false | 3.860553 | false | false | false |
noironetworks/neutron | neutron/db/rbac_db_mixin.py | 1 | 6467 | # Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions as c_exc
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from oslo_db import exception as db_exc
from neutron.db import common_db_mixin
from neutron.extensions import rbac as ext_rbac
from neutron.objects import base as base_obj
from neutron.objects import rbac as rbac_obj
class RbacPluginMixin(common_db_mixin.CommonDbMixin):
"""Plugin mixin that implements the RBAC DB operations."""
object_type_cache = {}
supported_extension_aliases = ['rbac-policies']
@db_api.retry_if_session_inactive()
def create_rbac_policy(self, context, rbac_policy):
e = rbac_policy['rbac_policy']
try:
registry.notify(resources.RBAC_POLICY, events.BEFORE_CREATE, self,
context=context, object_type=e['object_type'],
policy=e)
except c_exc.CallbackFailure as e:
raise n_exc.InvalidInput(error_message=e)
rbac_class = (
rbac_obj.RBACBaseObject.get_type_class_map()[e['object_type']])
try:
rbac_args = {'project_id': e['project_id'],
'object_id': e['object_id'],
'action': e['action'],
'target_tenant': e['target_tenant']}
_rbac_obj = rbac_class(context, **rbac_args)
_rbac_obj.create()
except db_exc.DBDuplicateEntry:
raise ext_rbac.DuplicateRbacPolicy()
return self._make_rbac_policy_dict(_rbac_obj)
@staticmethod
def _make_rbac_policy_dict(entry, fields=None):
res = {f: entry[f] for f in ('id', 'project_id', 'target_tenant',
'action', 'object_id')}
res['object_type'] = entry.db_model.object_type
return db_utils.resource_fields(res, fields)
@db_api.retry_if_session_inactive()
def update_rbac_policy(self, context, id, rbac_policy):
pol = rbac_policy['rbac_policy']
entry = self._get_rbac_policy(context, id)
object_type = entry.db_model.object_type
try:
registry.notify(resources.RBAC_POLICY, events.BEFORE_UPDATE, self,
context=context, policy=entry,
object_type=object_type, policy_update=pol)
except c_exc.CallbackFailure as ex:
raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id,
details=ex)
entry.update_fields(pol)
entry.update()
return self._make_rbac_policy_dict(entry)
@db_api.retry_if_session_inactive()
def delete_rbac_policy(self, context, id):
entry = self._get_rbac_policy(context, id)
object_type = entry.db_model.object_type
try:
registry.notify(resources.RBAC_POLICY, events.BEFORE_DELETE, self,
context=context, object_type=object_type,
policy=entry)
except c_exc.CallbackFailure as ex:
raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id,
details=ex)
# make a dict copy because deleting the entry will nullify its
# object_id link to network
entry_dict = entry.to_dict()
entry.delete()
registry.notify(resources.RBAC_POLICY, events.AFTER_DELETE, self,
context=context, object_type=object_type,
policy=entry_dict)
self.object_type_cache.pop(id, None)
def _get_rbac_policy(self, context, id):
object_type = self._get_object_type(context, id)
rbac_class = rbac_obj.RBACBaseObject.get_type_class_map()[object_type]
_rbac_obj = rbac_class.get_object(context, id=id)
if not _rbac_obj:
raise ext_rbac.RbacPolicyNotFound(id=id, object_type=object_type)
return _rbac_obj
@db_api.retry_if_session_inactive()
def get_rbac_policy(self, context, id, fields=None):
return self._make_rbac_policy_dict(
self._get_rbac_policy(context, id), fields=fields)
@db_api.retry_if_session_inactive()
def get_rbac_policies(self, context, filters=None, fields=None,
sorts=None, limit=None, page_reverse=False):
pager = base_obj.Pager(sorts, limit, page_reverse)
filters = filters or {}
object_types = filters.pop('object_type', None)
rbac_classes_to_query = [
o for t, o in rbac_obj.RBACBaseObject.get_type_class_map().items()
if not object_types or t in object_types]
rbac_objs = []
for rbac_class in rbac_classes_to_query:
rbac_objs += rbac_class.get_objects(context, _pager=pager,
**filters)
return [self._make_rbac_policy_dict(_rbac_obj, fields)
for _rbac_obj in rbac_objs]
def _get_object_type(self, context, entry_id):
"""Scans all RBAC tables for an ID to figure out the type.
This will be an expensive operation as the number of RBAC tables grows.
The result is cached since object types cannot be updated for a policy.
"""
if entry_id in self.object_type_cache:
return self.object_type_cache[entry_id]
for otype, rbac_class in \
rbac_obj.RBACBaseObject.get_type_class_map().items():
if rbac_class.count(context, id=entry_id):
self.object_type_cache[entry_id] = otype
return otype
raise ext_rbac.RbacPolicyNotFound(id=entry_id, object_type='unknown')
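# Illustrative payload accepted by create_rbac_policy (field names come from
# the code above; the example values are placeholders):
#
#     {'rbac_policy': {'object_type': 'network',
#                      'object_id': '<resource uuid>',
#                      'action': 'access_as_shared',
#                      'project_id': '<project id>',
#                      'target_tenant': '<project id or "*">'}}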
| apache-2.0 | 5,662,070,589,634,627,000 | 43.909722 | 79 | 0.613422 | false | 3.742477 | false | false | false |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/docstring.py | 1 | 2522 | from __future__ import print_function
import string
import inspect
class FormatDict(dict):
"""Adapted from http://stackoverflow.com/questions/11283961/partial-string-formatting"""
def __missing__(self, key):
return "{" + key + "}"
class DocReplacer(object):
"""Decorator object for replacing patterns in docstrings using string.format."""
def __init__(self, auto_dedent=True, allow_partial_formatting=False, **doc_dict):
'''
Parameters
-------------
        auto_dedent : bool
            Flag for automatically dedenting the docstring (with inspect.cleandoc) before replacement.
        allow_partial_formatting : bool
            Enables partial formatting (i.e., not all keys have to be available in the dictionary).
        doc_dict : kwargs
            Each keyword is a pattern: every {key} occurrence in the docstring is replaced by the corresponding value.
Example
-------------
TODO: Update this documentation
        @DocReplacer(p1='p1 : int\n\tFirst parameter')
def foo(p1):
"""
Some functions.
Params:
{p1}
"""
will result in foo's docstring being:
"""
Some functions.
Params:
p1 : int
First parameter
"""
'''
self.doc_dict = doc_dict
self.auto_dedent = auto_dedent
self.allow_partial_formatting = allow_partial_formatting
def __call__(self, func):
if func.__doc__:
doc = func.__doc__
if self.auto_dedent:
doc = inspect.cleandoc(doc)
func.__doc__ = self._format(doc)
return func
def replace(self):
"""Reformat values inside the self.doc_dict using self.doc_dict
TODO: Make support for partial_formatting
"""
doc_dict = self.doc_dict.copy()
for k, v in doc_dict.items():
if '{' and '}' in v:
self.doc_dict[k] = v.format(**doc_dict)
def update(self, *args, **kwargs):
"Assume self.params is a dict and update it with supplied args"
self.doc_dict.update(*args, **kwargs)
def _format(self, doc):
""" Formats the docstring using self.doc_dict """
if self.allow_partial_formatting:
mapping = FormatDict(self.doc_dict)
else:
mapping = self.doc_dict
formatter = string.Formatter()
return formatter.vformat(doc, (), mapping)
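# Usage sketch (illustrative):
#
#     common = DocReplacer(data='data : DataFrame\n    Measurement table.')
#
#     @common
#     def plot(data):
#         """Plot the sample.
#
#         Parameters
#         ----------
#         {data}
#         """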
| mit | 8,740,734,155,669,062,000 | 30.135802 | 102 | 0.560666 | false | 4.355786 | false | false | false |
JustFixNYC/who-owns-what | wow/tests/conftest.py | 1 | 1024 | import pytest
import psycopg2
import dbtool
@pytest.fixture(scope='session')
def django_db_setup(django_db_setup, django_db_blocker):
from django.conf import settings
wow = settings.DATABASES['wow']
with django_db_blocker.unblock():
db = dbtool.DbContext(
host=wow['HOST'],
database=wow['NAME'],
user=wow['USER'],
password=wow['PASSWORD'],
port=wow['PORT'] or 5432,
)
# If we're run with --reuse-db, the database might already
# be scaffolded for us, in which case we don't need to
# do anything.
is_already_built = False
conn = db.connection()
with conn:
with conn.cursor() as cursor:
try:
cursor.execute('select * from wow_bldgs limit 1;')
is_already_built = True
except psycopg2.errors.UndefinedTable:
pass
if not is_already_built:
dbtool.loadtestdata(db)
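# Note (descriptive, not part of the original fixture): tests that touch the
# 'wow' database just request django_db as usual; this session fixture makes
# sure the WOW tables exist, loading the test data only when the probe query
# against wow_bldgs fails (e.g. on a fresh database or without --reuse-db).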
| gpl-3.0 | -1,612,363,369,483,214,000 | 29.117647 | 70 | 0.553711 | false | 4.047431 | false | false | false |