code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2-1.05M)
---|---|---|---|---|---|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class QoSPolicy(neutron.NeutronResource):
"""A resource for Neutron QoS Policy.
This QoS policy can be associated with neutron resources,
such as port and network, to provide QoS capabilities.
The default policy usage of this resource is limited to
administrators only.
"""
required_service_extension = 'qos'
support_status = support.SupportStatus(version='6.0.0')
PROPERTIES = (
NAME, DESCRIPTION, SHARED, TENANT_ID,
) = (
'name', 'description', 'shared', 'tenant_id',
)
ATTRIBUTES = (
RULES_ATTR,
) = (
'rules',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('The name for the QoS policy.'),
required=True,
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('The description for the QoS policy.'),
update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this QoS policy should be shared to other tenants.'),
default=False,
update_allowed=True
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The owner tenant ID of this QoS policy.')
),
}
attributes_schema = {
RULES_ATTR: attributes.Schema(
_("A list of all rules for the QoS policy."),
type=attributes.Schema.LIST
)
}
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
policy = self.client().create_qos_policy({'policy': props})['policy']
self.resource_id_set(policy['id'])
def handle_delete(self):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
self.client().delete_qos_policy(self.resource_id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.prepare_update_properties(prop_diff)
self.client().update_qos_policy(
self.resource_id,
{'policy': prop_diff})
def _show_resource(self):
return self.client().show_qos_policy(
self.resource_id)['policy']
class QoSRule(neutron.NeutronResource):
"""A resource for Neutron QoS base rule."""
required_service_extension = 'qos'
support_status = support.SupportStatus(version='6.0.0')
PROPERTIES = (
POLICY, TENANT_ID,
) = (
'policy', 'tenant_id',
)
properties_schema = {
POLICY: properties.Schema(
properties.Schema.STRING,
_('ID or name of the QoS policy.'),
required=True,
constraints=[constraints.CustomConstraint('neutron.qos_policy')]
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The owner tenant ID of this rule.')
),
}
def __init__(self, name, json_snippet, stack):
super(QoSRule, self).__init__(name, json_snippet, stack)
self._policy_id = None
@property
def policy_id(self):
if not self._policy_id:
self._policy_id = self.client_plugin().get_qos_policy_id(
self.properties[self.POLICY])
return self._policy_id
class QoSBandwidthLimitRule(QoSRule):
"""A resource for Neutron QoS bandwidth limit rule.
    This rule can be associated with a QoS policy, and the policy can then
    be applied to neutron ports and networks to provide bandwidth-limit
    QoS capabilities.
    The default policy usage of this resource is limited to
    administrators only.
"""
PROPERTIES = (
MAX_BANDWIDTH, MAX_BURST_BANDWIDTH,
) = (
'max_kbps', 'max_burst_kbps',
)
properties_schema = {
MAX_BANDWIDTH: properties.Schema(
properties.Schema.INTEGER,
_('Max bandwidth in kbps.'),
required=True,
update_allowed=True,
constraints=[
constraints.Range(min=0)
]
),
MAX_BURST_BANDWIDTH: properties.Schema(
properties.Schema.INTEGER,
_('Max burst bandwidth in kbps.'),
update_allowed=True,
constraints=[
constraints.Range(min=0)
],
default=0
)
}
properties_schema.update(QoSRule.properties_schema)
def handle_create(self):
props = self.prepare_properties(self.properties,
self.physical_resource_name())
props.pop(self.POLICY)
rule = self.client().create_bandwidth_limit_rule(
self.policy_id,
{'bandwidth_limit_rule': props})['bandwidth_limit_rule']
self.resource_id_set(rule['id'])
def handle_delete(self):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
self.client().delete_bandwidth_limit_rule(
self.resource_id, self.policy_id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.client().update_bandwidth_limit_rule(
self.resource_id,
self.policy_id,
{'bandwidth_limit_rule': prop_diff})
def _show_resource(self):
return self.client().show_bandwidth_limit_rule(
self.resource_id, self.policy_id)['bandwidth_limit_rule']
def resource_mapping():
return {
'OS::Neutron::QoSPolicy': QoSPolicy,
'OS::Neutron::QoSBandwidthLimitRule': QoSBandwidthLimitRule
}
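# Illustrative HOT template snippet (not part of the original module) showing
# how these resource types could be wired together; property names follow the
# schemas above, the values are arbitrary examples:
#
#   resources:
#     qos_policy:
#       type: OS::Neutron::QoSPolicy
#       properties:
#         name: gold
#         description: 10 Mbit bandwidth limit
#         shared: true
#     bandwidth_limit:
#       type: OS::Neutron::QoSBandwidthLimitRule
#       properties:
#         policy: {get_resource: qos_policy}
#         max_kbps: 10000
#         max_burst_kbps: 1000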
| dims/heat | heat/engine/resources/openstack/neutron/qos.py | Python | apache-2.0 | 6,587 |
#!/usr/bin/python3.7
import ssl
import remi.gui as gui
from remi import start, App
class Camera(App):
def __init__(self, *args, **kwargs):
super(Camera, self).__init__(*args)
def video_widgets(self):
width = '300'
height = '300'
self.video = gui.Widget(_type='video')
self.video.style['overflow'] = 'hidden'
self.video.attributes['autoplay'] = 'true'
self.video.attributes['width'] = width
self.video.attributes['height'] = height
def video_start(self, widget, callback_function):
self.execute_javascript("""
var params={};
var frame = 0;
document.video_stop = false;
const video = document.querySelector('video');
video.setAttribute("playsinline", true);
const canvas = document.createElement('canvas');
navigator.mediaDevices.getUserMedia({video: { facingMode: { ideal: "environment" } }, audio: false}).
then((stream) => {video.srcObject = stream});
const render = () => {
if (document.video_stop) { return; }
if (frame==30) {
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
params['image']=canvas.toDataURL()
remi.sendCallbackParam('%(id)s','%(callback_function)s',params)
frame = 0;
}
frame+=1;
requestAnimationFrame(render);
}
requestAnimationFrame(render);
"""%{'id':str(id(self)), 'callback_function': str(callback_function)})
def video_stop(self, widget):
self.execute_javascript("""
document.video_stop = true;
const video = document.querySelector('video');
video.srcObject.getTracks()[0].stop();
""")
def process_image(self, **kwargs):
image = kwargs['image']
print('I am here')
### Do whatever you want with the image here
return
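    # A minimal sketch (assuming the 'image' parameter is the base64 data URL
    # produced by canvas.toDataURL() in video_start above) of how process_image
    # could persist a received frame:
    #
    #   import base64
    #   header, encoded = image.split(',', 1)   # drop the "data:image/...;base64," prefix
    #   with open('frame.png', 'wb') as f:
    #       f.write(base64.b64decode(encoded))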
def main(self):
self.video_widgets()
screen = [self.video]
start_button = gui.Button('Start Video')
start_button.onclick.do(self.video_start, 'process_image')
screen.append(start_button)
stop_button = gui.Button('Stop Video')
stop_button.onclick.do(self.video_stop)
screen.append(stop_button)
return gui.VBox(children=screen)
if __name__ == "__main__":
start(Camera,
address='0.0.0.0',
port=2020,
multiple_instance=True,
enable_file_cache=True,
start_browser=False,
debug=False)
# certfile='./ssl_keys/fullchain.pem',
# keyfile='./ssl_keys/privkey.pem',
# ssl_version=ssl.PROTOCOL_TLSv1_2,
| dddomodossola/remi | examples/examples_from_contributors/camera.py | Python | apache-2.0 | 2,815 |
# Ivysalt's sentry module. It keeps track of people who join and leave a chat.
# LICENSE: This single module is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
# @category Tools
# @copyright Copyright (c) 2018 dpc
# @version 1.1
# @author dpc
import asyncio
import json
import os
from discord.ext import commands
from cogs.utils import checks
from cogs.utils.dataIO import fileIO
ban_message = "``Omae wa mou shindeiru.``"
joinleave_path = 'data/sentry/joinleave.json'
bans_path = 'data/sentry/bans.json'
def is_int(s):
"""Checks whether the input is an integer."""
try:
int(s)
if float(s) % 1 == 0:
return True
else:
return False
except ValueError:
return False
def check_folders():
folders = ["data/sentry"]
for folder in folders:
if not os.path.exists(folder):
print("Creating " + folder + " folder...")
os.makedirs(folder)
def check_files():
default = {}
if not os.path.isfile(joinleave_path):
print("Creating joinleave.json")
fileIO(joinleave_path, "save", default)
if not os.path.isfile(bans_path):
print("Creating bans.json")
fileIO(bans_path, "save", default)
# validating data
check_folders()
check_files()
with open(joinleave_path) as joinleave_file:
joinleave_data = json.load(joinleave_file)
with open(bans_path) as sentry_file:
sentry_bans = json.load(sentry_file)
def save(path, data):
with open(path, "w") as file:
json.dump(data, file, indent=4)
class Sentry:
"""Adds various sentry commands.
This module was written specifically for a few servers."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def preban(self, ctx, user_id: str):
"""Users added with this command will be banned on sight.
Only admins may use this command."""
# adding user id to the ban list
if is_int(user_id):
if (ctx.message.server.id in sentry_bans):
if (user_id in sentry_bans[ctx.message.server.id]):
yield from self.bot.say("That user is already pre-banned from this server.")
else:
sentry_bans[ctx.message.server.id].append(user_id)
save(bans_path, sentry_bans)
yield from self.bot.say("User has been pre-banned from this server.")
else:
sentry_bans[ctx.message.server.id] = [user_id]
save(bans_path, sentry_bans)
yield from self.bot.say("User has been pre-banned from this server.")
else:
yield from self.bot.say("Improper command usage.")
        # checking if user's already in the server, and banning them if they are
        if ctx.message.server.id in sentry_bans:
            for member in ctx.message.server.members:
                if (member.id in sentry_bans[member.server.id]):
                    #yield from self.bot.send_message(member, ban_message)
                    yield from (asyncio.sleep(2))
                    yield from self.bot.ban(member, 7)
                    print("Banning user {0}#{2} with id {3} from {1}...".format(member.name, member.server.name, member.discriminator, member.id))
@commands.command(pass_context=True, no_pm=True, description=
"Note: users that have been already banned will not be unbanned.")
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def unpreban(self, ctx, user_id: str):
"""Users removed with this command will not be banned on sight.
Only admins may use this command."""
if (ctx.message.server.id in sentry_bans):
if (user_id in sentry_bans[ctx.message.server.id]):
sentry_bans[ctx.message.server.id].remove(user_id)
save(bans_path, sentry_bans)
yield from self.bot.say("User removed from pre-ban list on this server.")
else:
yield from self.bot.say("User is not pre-banned on this server.")
else:
yield from self.bot.say("User is not pre-banned on this server.")
    @commands.command(pass_context=True, no_pm=True)
    @checks.admin_or_permissions(ban_members=True)
    @asyncio.coroutine
    def listpreban(self, ctx):
        """Lists the user IDs currently pre-banned on this server.
        Only admins may use this command."""
if (ctx.message.server.id in sentry_bans):
if len(sentry_bans[ctx.message.server.id]) > 0:
user_id_list = "```\n=== Prebans in server {} ===\n".format(ctx.message.server.name)
for user_id in sentry_bans[ctx.message.server.id]:
user_id_list += user_id
user_id_list += "\n"
user_id_list += "```"
yield from self.bot.send_message(ctx.message.author, user_id_list)
else:
yield from self.bot.say("No pre-bans on this server.")
else:
yield from self.bot.say("No pre-bans on this server.")
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def unban(self, ctx, *, uid: str = None):
"""Removes a ban from the server.
Only admins may use this command."""
user = yield from self.bot.get_user_info(uid)
yield from self.bot.unban(ctx.message.server, user)
yield from self.bot.say('User {} unbanned.'.format(user.name))
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def setannounce(self, ctx, channel: str = "current"):
"""Sets the channel to announce server's arrivals and parts.\n\nOnly admins may use this command."""
# parses the input as a channel id
if (len(ctx.message.channel_mentions) == 1):
channel_id = ctx.message.channel_mentions[0].id
elif is_int(channel):
channel_id = channel
elif channel == "current":
channel_id = ctx.message.channel
else:
yield from self.bot.say("Sorry, I don't know what channel that is.")
return
#checks if channel is in server
channel_object = ctx.message.server.get_channel(channel_id)
if channel_object is None:
yield from self.bot.say("Sorry, I can't tell what channel that is.")
return
# assigns the announce channel
if (ctx.message.server.id in joinleave_data):
joinleave_data[ctx.message.server.id]["announce_channel"] = channel_id
save(joinleave_path, joinleave_data)
yield from self.bot.say("Saved announce channel {}.".format(channel_object.mention))
else:
joinleave_data[ctx.message.server.id] = {"announce_channel": channel_id, "autoassign_role": "", "join_announce": False, "leave_announce": True}
save(joinleave_path, joinleave_data)
yield from self.bot.say("Saved announce channel {}.".format(channel_object.mention))
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def delannounce(self, ctx):
"""Removes the bot announcements in this server.\n\nOnly admins may use this command."""
# assigns the announce channel
        if (ctx.message.server.id in joinleave_data):
            joinleave_data[ctx.message.server.id]["announce_channel"] = ""
            save(joinleave_path, joinleave_data)
            yield from self.bot.say("Removed announce channel for this server.")
        else:
            joinleave_data[ctx.message.server.id] = {"announce_channel": "", "autoassign_role": "", "join_announce": False, "leave_announce": True}
            save(joinleave_path, joinleave_data)
            yield from self.bot.say("There was no announce channel for this server.")
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def announcejoin(self, ctx, join: bool = False):
"""Sets the bot to announce server's new arrivals.\n\nOnly admins may use this command."""
# assigns the announce channel
if (ctx.message.server.id in joinleave_data):
joinleave_data[ctx.message.server.id]["join_announce"] = join
save(joinleave_path, joinleave_data)
yield from self.bot.say("Setting for join announcement set to ``{}``.".format(join))
else:
yield from self.bot.say("Server data not found. Set an announcement channel with ``?setannounce`` first.")
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def announceleave(self, ctx, leave: bool = True):
"""Sets the bot to announce server's new arrivals.\n\nOnly admins may use this command."""
# assigns the announce channel
if (ctx.message.server.id in joinleave_data):
joinleave_data[ctx.message.server.id]["leave_announce"] = leave
save(joinleave_path, joinleave_data)
yield from self.bot.say("Setting for leave announcement set to ``{}``.".format(leave))
else:
yield from self.bot.say("Server data not found. Set an announcement channel with ``?setannounce`` first.")
@asyncio.coroutine
def on_member_join(self, member):
if (member.server.id in sentry_bans):
if (member.id in sentry_bans[member.server.id]):
#yield from self.bot.send_message(member, ban_message)
yield from (asyncio.sleep(2))
yield from self.bot.ban(member, 7)
print("Banning user {0}#{2} with ID {3} from {1}...".format(member.name, member.server.name, member.discriminator, member.id))
if (member.server.id in joinleave_data):
yield from self.bot.send_message(member.server.get_channel(joinleave_data[member.server.id]["announce_channel"]), "Intruder **{0}#{2}** with ID ``{3}`` sighted! Banning from {1}.".format(member.name, member.server.name, member.discriminator, member.id))
if (member.server.id in joinleave_data) and (joinleave_data[member.server.id]["join_announce"] == True):
yield from self.bot.send_message(member.server.get_channel(joinleave_data[member.server.id]["announce_channel"]),"**{0}#{1}**, with user ID {2}, just joined **{3}**!".format(member.name, member.discriminator, member.id, member.server.name))
@asyncio.coroutine
def on_member_remove(self, member):
if (member.server.id in joinleave_data) and (joinleave_data[member.server.id]["leave_announce"] != False):
yield from self.bot.send_message(member.server.get_channel(joinleave_data[member.server.id]["announce_channel"]),"**{0}#{1}**, with user ID {2}, just left **{3}**!".format(member.name, member.discriminator, member.id, member.server.name))
@asyncio.coroutine
def on_ready(self):
for server in self.bot.servers:
if (server.id in sentry_bans):
for member in server.members:
if (member.id in sentry_bans[server.id]):
#yield from self.bot.send_message(member, ban_message)
yield from (asyncio.sleep(2))
yield from self.bot.ban(member, 7)
print("Banning user {0}#{2} with ID {3} from {1}...".format(member.name, server.name, member.discriminator, member.id))
def setup(bot):
check_folders()
check_files()
bot.add_cog(Sentry(bot))
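# Illustrative shape of the persisted JSON files (keys taken from the code
# above; the IDs shown are placeholders):
#
#   data/sentry/joinleave.json:
#     {"<server_id>": {"announce_channel": "<channel_id>", "autoassign_role": "",
#                      "join_announce": false, "leave_announce": true}}
#
#   data/sentry/bans.json:
#     {"<server_id>": ["<user_id>", "<user_id>", ...]}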
| retrodpc/Bulbaspot-Cogs | sentry/sentry.py | Python | apache-2.0 | 11,849 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class Untitled(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost/"
self.verificationErrors = []
self.accept_next_alert = True
def test_untitled(self):
driver = self.driver
driver.get(self.base_url + "/php4dvd/")
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("admin")
driver.find_element_by_name("password").clear()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("submit").click()
driver.find_element_by_css_selector("img[alt=\"Add movie\"]").click()
driver.find_element_by_name("name").clear()
driver.find_element_by_name("name").send_keys() #send_keys("aaaaaaaaaaaaaaaaa")
driver.find_element_by_name("year").clear()
driver.find_element_by_name("year").send_keys("1977")
driver.find_element_by_css_selector("img[alt=\"Save\"]").click()
driver.find_element_by_id("submit").click()
driver.find_element_by_css_selector("img[alt=\"Own\"]").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| gena701/Seleniun_php4dvd_lesson2_Rovinsky | php4dvd/php4dvd_negative.py | Python | apache-2.0 | 2,371 |
# -*- test-case-name: txdav.who.test.test_augment -*-
##
# Copyright (c) 2013-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Augmenting Directory Service
"""
__all__ = [
"AugmentedDirectoryService",
]
import time
from zope.interface import implementer
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.directory.augment import AugmentRecord
from twext.python.log import Logger
from twext.who.directory import DirectoryRecord
from twext.who.directory import DirectoryService as BaseDirectoryService
from twext.who.idirectory import (
IDirectoryService, RecordType, FieldName as BaseFieldName, NotAllowedError
)
from twext.who.util import ConstantsContainer
from txdav.common.idirectoryservice import IStoreDirectoryService
from txdav.who.directory import (
CalendarDirectoryRecordMixin, CalendarDirectoryServiceMixin,
)
from txdav.who.idirectory import (
AutoScheduleMode, FieldName, RecordType as CalRecordType
)
log = Logger()
def timed(f):
"""
A decorator which keeps track of the wrapped function's call count and
total duration
"""
def recordTiming(result, key, startTime):
"""
Figures out how much time to add to the total time spent within the
method identified by key and stores that in the timings dict.
@param result: the result of the wrapped method
@param key: the method name
@type key: C{str}
@param startTime: the start time of the call in seconds
@type startTime: C{float}
"""
AugmentedDirectoryService._addTiming(key, time.time() - startTime)
return result
def timingWrapper(self, *args, **kwds):
"""
Records the start time of the call and the method's name
"""
startTime = time.time()
d = f(self, *args, **kwds)
d.addBoth(recordTiming, f.func_name, startTime)
return d
return timingWrapper
@implementer(IDirectoryService, IStoreDirectoryService)
class AugmentedDirectoryService(
BaseDirectoryService, CalendarDirectoryServiceMixin
):
"""
Augmented directory service.
This is a directory service that wraps an L{IDirectoryService} and augments
directory records with additional or modified fields.
"""
fieldName = ConstantsContainer((
BaseFieldName,
FieldName,
))
_timings = {}
def __init__(self, directory, store, augmentDB):
BaseDirectoryService.__init__(self, directory.realmName)
self._directory = directory
self._store = store
self._augmentDB = augmentDB
# An LDAP DS has extra info to expose via the dashboard
# This is assigned in buildDirectory()
self._ldapDS = None
@classmethod
def _addTiming(cls, key, duration):
if key not in cls._timings:
cls._timings[key] = (0, 0.0)
count, timeSpent = cls._timings[key]
count += 1
timeSpent += duration
cls._timings[key] = (count, timeSpent)
def flush(self):
return self._directory.flush()
def stats(self):
results = {}
results.update(self._timings)
# An LDAP DS has extra info to expose via the dashboard
if self._ldapDS is not None:
results.update(self._ldapDS.poolStats)
return succeed(results)
@property
def recordType(self):
# Defer to the directory service we're augmenting
return self._directory.recordType
def recordTypes(self):
# Defer to the directory service we're augmenting
return self._directory.recordTypes()
@inlineCallbacks
def recordsFromExpression(
self, expression, recordTypes=None,
limitResults=None, timeoutSeconds=None
):
records = yield self._directory.recordsFromExpression(
expression, recordTypes=recordTypes,
limitResults=limitResults, timeoutSeconds=timeoutSeconds
)
augmented = []
for record in records:
record = yield self._augment(record)
augmented.append(record)
returnValue(augmented)
@inlineCallbacks
def recordsWithFieldValue(
self, fieldName, value, limitResults=None, timeoutSeconds=None
):
records = yield self._directory.recordsWithFieldValue(
fieldName, value,
limitResults=limitResults, timeoutSeconds=timeoutSeconds
)
augmented = []
for record in records:
record = yield self._augment(record)
augmented.append(record)
returnValue(augmented)
@timed
@inlineCallbacks
def recordWithUID(self, uid, timeoutSeconds=None):
# MOVE2WHO, REMOVE THIS:
if not isinstance(uid, unicode):
# log.warn("Need to change uid to unicode")
uid = uid.decode("utf-8")
record = yield self._directory.recordWithUID(
uid, timeoutSeconds=timeoutSeconds
)
record = yield self._augment(record)
returnValue(record)
@timed
@inlineCallbacks
def recordWithGUID(self, guid, timeoutSeconds=None):
record = yield self._directory.recordWithGUID(
guid, timeoutSeconds=timeoutSeconds
)
record = yield self._augment(record)
returnValue(record)
@timed
@inlineCallbacks
def recordsWithRecordType(
self, recordType, limitResults=None, timeoutSeconds=None
):
records = yield self._directory.recordsWithRecordType(
recordType, limitResults=limitResults, timeoutSeconds=timeoutSeconds
)
augmented = []
for record in records:
record = yield self._augment(record)
augmented.append(record)
returnValue(augmented)
@timed
@inlineCallbacks
def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):
# MOVE2WHO, REMOVE THIS:
if not isinstance(shortName, unicode):
# log.warn("Need to change shortName to unicode")
shortName = shortName.decode("utf-8")
record = yield self._directory.recordWithShortName(
recordType, shortName, timeoutSeconds=timeoutSeconds
)
record = yield self._augment(record)
returnValue(record)
@timed
@inlineCallbacks
def recordsWithEmailAddress(
self, emailAddress, limitResults=None, timeoutSeconds=None
):
# MOVE2WHO, REMOVE THIS:
if not isinstance(emailAddress, unicode):
# log.warn("Need to change emailAddress to unicode")
emailAddress = emailAddress.decode("utf-8")
records = yield self._directory.recordsWithEmailAddress(
emailAddress,
limitResults=limitResults, timeoutSeconds=timeoutSeconds
)
augmented = []
for record in records:
record = yield self._augment(record)
augmented.append(record)
returnValue(augmented)
@timed
def recordWithCalendarUserAddress(self, *args, **kwds):
return CalendarDirectoryServiceMixin.recordWithCalendarUserAddress(
self, *args, **kwds
)
@timed
def recordsMatchingTokens(self, *args, **kwds):
return CalendarDirectoryServiceMixin.recordsMatchingTokens(
self, *args, **kwds
)
@timed
def recordsMatchingFields(self, *args, **kwds):
return CalendarDirectoryServiceMixin.recordsMatchingFields(
self, *args, **kwds
)
@timed
@inlineCallbacks
def updateRecords(self, records, create=False):
"""
Pull out the augmented fields from each record, apply those to the
augments database, then update the base records.
"""
baseRecords = []
augmentRecords = []
for record in records:
# Split out the base fields from the augment fields
baseFields, augmentFields = self._splitFields(record)
# Ignore groups for now
if augmentFields and record.recordType != RecordType.group:
# Create an AugmentRecord
autoScheduleMode = {
AutoScheduleMode.none: "none",
AutoScheduleMode.accept: "accept-always",
AutoScheduleMode.decline: "decline-always",
AutoScheduleMode.acceptIfFree: "accept-if-free",
AutoScheduleMode.declineIfBusy: "decline-if-busy",
AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic",
}.get(augmentFields.get(FieldName.autoScheduleMode, None), None)
kwargs = {
"uid": record.uid,
"autoScheduleMode": autoScheduleMode,
}
if FieldName.hasCalendars in augmentFields:
kwargs["enabledForCalendaring"] = augmentFields[FieldName.hasCalendars]
if FieldName.hasContacts in augmentFields:
kwargs["enabledForAddressBooks"] = augmentFields[FieldName.hasContacts]
if FieldName.loginAllowed in augmentFields:
kwargs["enabledForLogin"] = augmentFields[FieldName.loginAllowed]
if FieldName.autoAcceptGroup in augmentFields:
kwargs["autoAcceptGroup"] = augmentFields[FieldName.autoAcceptGroup]
if FieldName.serviceNodeUID in augmentFields:
kwargs["serverID"] = augmentFields[FieldName.serviceNodeUID]
augmentRecord = AugmentRecord(**kwargs)
augmentRecords.append(augmentRecord)
# Create new base records:
baseRecords.append(DirectoryRecord(self._directory, record._baseRecord.fields if hasattr(record, "_baseRecord") else baseFields))
# Apply the augment records
if augmentRecords:
yield self._augmentDB.addAugmentRecords(augmentRecords)
# Apply the base records
if baseRecords:
try:
yield self._directory.updateRecords(baseRecords, create=create)
except NotAllowedError:
pass
def _splitFields(self, record):
"""
Returns a tuple of two dictionaries; the first contains all the non
augment fields, and the second contains all the augment fields.
"""
if record is None:
return None
augmentFields = {}
baseFields = record.fields.copy()
for field in (
FieldName.loginAllowed,
FieldName.hasCalendars, FieldName.hasContacts,
FieldName.autoScheduleMode, FieldName.autoAcceptGroup,
FieldName.serviceNodeUID
):
if field in baseFields:
augmentFields[field] = baseFields[field]
del baseFields[field]
return (baseFields, augmentFields)
@inlineCallbacks
def removeRecords(self, uids):
yield self._augmentDB.removeAugmentRecords(uids)
yield self._directory.removeRecords(uids)
def _assignToField(self, fields, name, value):
"""
Assign a value to a field only if not already present in fields.
"""
field = self.fieldName.lookupByName(name)
if field not in fields:
fields[field] = value
@inlineCallbacks
def _augment(self, record):
if record is None:
returnValue(None)
augmentRecord = yield self._augmentDB.getAugmentRecord(
record.uid,
self.recordTypeToOldName(record.recordType)
)
if augmentRecord is None:
# Augments does not know about this record type, so return
# the original record
returnValue(record)
fields = record.fields.copy()
if augmentRecord:
if record.recordType == RecordType.group:
self._assignToField(fields, "hasCalendars", False)
self._assignToField(fields, "hasContacts", False)
else:
self._assignToField(
fields, "hasCalendars",
augmentRecord.enabledForCalendaring
)
self._assignToField(
fields, "hasContacts",
augmentRecord.enabledForAddressBooks
)
# In the case of XML augments, a missing auto-schedule-mode
# element has the same meaning an element with a value of "default"
# in which case augmentRecord.autoScheduleMode = "default". On
# the record we're augmenting, "default" mode means autoScheduleMode
# gets set to None (distinct from AutoScheduleMode.none!),
# which gets swapped for config.Scheduling.Options.AutoSchedule.DefaultMode
# in checkAttendeeAutoReply().
# ...Except for locations/resources which will default to automatic
autoScheduleMode = {
"none": AutoScheduleMode.none,
"accept-always": AutoScheduleMode.accept,
"decline-always": AutoScheduleMode.decline,
"accept-if-free": AutoScheduleMode.acceptIfFree,
"decline-if-busy": AutoScheduleMode.declineIfBusy,
"automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
}.get(augmentRecord.autoScheduleMode, None)
# Resources/Locations default to automatic
if record.recordType in (
CalRecordType.location,
CalRecordType.resource
):
if autoScheduleMode is None:
autoScheduleMode = AutoScheduleMode.acceptIfFreeDeclineIfBusy
self._assignToField(
fields, "autoScheduleMode",
autoScheduleMode
)
if augmentRecord.autoAcceptGroup is not None:
self._assignToField(
fields, "autoAcceptGroup",
augmentRecord.autoAcceptGroup.decode("utf-8")
)
self._assignToField(
fields, "loginAllowed",
augmentRecord.enabledForLogin
)
self._assignToField(
fields, "serviceNodeUID",
augmentRecord.serverID.decode("utf-8")
)
else:
self._assignToField(fields, "hasCalendars", False)
self._assignToField(fields, "hasContacts", False)
self._assignToField(fields, "loginAllowed", False)
# print("Augmented fields", fields)
# Clone to a new record with the augmented fields
augmentedRecord = AugmentedDirectoryRecord(self, record, fields)
returnValue(augmentedRecord)
@inlineCallbacks
def setAutoScheduleMode(self, record, autoScheduleMode):
augmentRecord = yield self._augmentDB.getAugmentRecord(
record.uid,
self.recordTypeToOldName(record.recordType)
)
if augmentRecord is not None:
autoScheduleMode = {
AutoScheduleMode.none: "none",
AutoScheduleMode.accept: "accept-always",
AutoScheduleMode.decline: "decline-always",
AutoScheduleMode.acceptIfFree: "accept-if-free",
AutoScheduleMode.declineIfBusy: "decline-if-busy",
AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic",
}.get(autoScheduleMode)
augmentRecord.autoScheduleMode = autoScheduleMode
yield self._augmentDB.addAugmentRecords([augmentRecord])
class AugmentedDirectoryRecord(DirectoryRecord, CalendarDirectoryRecordMixin):
"""
Augmented directory record.
"""
def __init__(self, service, baseRecord, augmentedFields):
DirectoryRecord.__init__(self, service, augmentedFields)
CalendarDirectoryRecordMixin.__init__(self)
self._baseRecord = baseRecord
@timed
@inlineCallbacks
def members(self):
augmented = []
records = yield self._baseRecord.members()
for record in records:
augmented.append((yield self.service._augment(record)))
returnValue(augmented)
def addMembers(self, memberRecords):
return self._baseRecord.addMembers(memberRecords)
def removeMembers(self, memberRecords):
return self._baseRecord.removeMembers(memberRecords)
def setMembers(self, memberRecords):
return self._baseRecord.setMembers(memberRecords)
@timed
@inlineCallbacks
def groups(self):
augmented = []
def _groupUIDsFor(txn):
return txn.groupUIDsFor(self.uid)
groupUIDs = yield self.service._store.inTransaction(
"AugmentedDirectoryRecord.groups",
_groupUIDsFor
)
for groupUID in groupUIDs:
groupRecord = yield self.service.recordWithUID(
groupUID
)
if groupRecord:
augmented.append((yield self.service._augment(groupRecord)))
returnValue(augmented)
@timed
def verifyPlaintextPassword(self, password):
return self._baseRecord.verifyPlaintextPassword(password)
@timed
def verifyHTTPDigest(self, *args):
return self._baseRecord.verifyHTTPDigest(*args)
@timed
def accessForRecord(self, record):
return self._baseRecord.accessForRecord(record)
| macosforge/ccs-calendarserver | txdav/who/augment.py | Python | apache-2.0 | 18,083 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from common import find_target_items
if len(sys.argv) != 3:
print("Find the value keyword in all pairs")
print(("Usage: ", sys.argv[0], "[input] [keyword]"))
exit(1)
find_target_items(sys.argv[1], sys.argv[2])
| BYVoid/OpenCC | data/scripts/find_target.py | Python | apache-2.0 | 282 |
#!/usr/bin/env python
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "') and u='c'"
| jacoboariza/BotIntendHub | yahooWeatherForecast.py | Python | apache-2.0 | 335 |
from __future__ import unicode_literals
import datetime
import django
from django.utils.timezone import now
from django.test import TransactionTestCase
from mock import call, patch
from job.seed.metadata import SeedMetadata
from source.configuration.source_data_file import SourceDataFileParseSaver
from storage.models import ScaleFile, Workspace
from util.parse import parse_datetime
class TestSourceDataFileParseSaverSaveParseResults(TransactionTestCase):
def setUp(self):
django.setup()
self.workspace = Workspace.objects.create(name='Test workspace')
self.file_name_1 = 'my_file.txt'
self.media_type_1 = 'text/plain'
self.source_file_1 = ScaleFile.objects.create(file_name=self.file_name_1, file_type='SOURCE',
media_type=self.media_type_1, file_size=10, data_type_tags=['Dummy'],
file_path='the_path', workspace=self.workspace)
self.file_name_2 = 'my_file.json'
self.media_type_2 = 'application/json'
self.source_file_2 = ScaleFile.objects.create(file_name=self.file_name_2, file_type='SOURCE',
media_type=self.media_type_2, file_size=10, data_type_tags=['Dummy'],
file_path='the_path', workspace=self.workspace)
self.extra_source_file_id = 99999
@patch('source.configuration.source_data_file.SourceFile.objects.save_parse_results')
def test_successful(self, mock_save):
"""Tests calling SourceDataFileParseSaver.save_parse_results() successfully"""
geo_json = {'type': 'Feature'}
started = now()
ended = started + datetime.timedelta(days=1)
# quick hack to give these a valid timezone. Easier than creating a TZ object since we don't really care about the time for this test.
started = parse_datetime(started.isoformat() + "Z")
ended = parse_datetime(ended.isoformat() + "Z")
file_ids = [self.source_file_1.id, self.source_file_2.id, self.extra_source_file_id]
parse_results = {self.file_name_1: (geo_json, started, None, [], None),
self.file_name_2: (None, None, ended, [], None),
'FILE_WITH_NO_SOURCE_FILE_MODEL': (None, None, None, None, None)}
SourceDataFileParseSaver().save_parse_results(parse_results, file_ids)
calls = [call(self.source_file_1.id, geo_json, started, None, [], None),
call(self.source_file_2.id, None, None, ended, [], None)]
self.assertEqual(mock_save.call_count, 2)
mock_save.assert_has_calls(calls, any_order=True)
@patch('source.configuration.source_data_file.SourceFile.objects.save_parse_results')
def test_successful_v6(self, mock_save):
"""Tests calling SourceDataFileParseSaver.save_parse_results_v6() successfully"""
started = '2018-06-01T00:00:00Z'
ended = '2018-06-01T01:00:00Z'
types = ['one', 'two', 'three']
new_workspace_path = 'awful/path'
data = {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': [0, 1]
},
'properties':
{
'dataStarted': started,
'dataEnded': ended,
'dataTypes': types,
'newWorkspacePath': new_workspace_path
}
}
metadata = {self.source_file_1.id: SeedMetadata.metadata_from_json(data, do_validate=False)}
calls = [call(self.source_file_1.id, data, parse_datetime(started), parse_datetime(ended), types, new_workspace_path)]
SourceDataFileParseSaver().save_parse_results_v6(metadata)
self.assertEqual(mock_save.call_count, 1)
mock_save.assert_has_calls(calls, any_order=True)
| ngageoint/scale | scale/source/test/configuration/test_source_data_file.py | Python | apache-2.0 | 4,106 |
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
@decorators.related_bug('1420008')
@decorators.idempotent_id('646a6eaa-135f-4493-a0af-12583021224e')
def test_create_image_owner_param(self):
# NOTE: Create image with owner different from tenant owner by
# using "owner" parameter requires an admin privileges.
random_id = data_utils.rand_uuid_hex()
image = self.admin_client.create_image(
container_format='bare', disk_format='raw', owner=random_id)
self.addCleanup(self.admin_client.delete_image, image['id'])
image_info = self.admin_client.show_image(image['id'])
self.assertEqual(random_id, image_info['owner'])
@decorators.related_bug('1420008')
@decorators.idempotent_id('525ba546-10ef-4aad-bba1-1858095ce553')
def test_update_image_owner_param(self):
random_id_1 = data_utils.rand_uuid_hex()
image = self.admin_client.create_image(
container_format='bare', disk_format='raw', owner=random_id_1)
self.addCleanup(self.admin_client.delete_image, image['id'])
created_image_info = self.admin_client.show_image(image['id'])
random_id_2 = data_utils.rand_uuid_hex()
self.admin_client.update_image(
image['id'], [dict(replace="/owner", value=random_id_2)])
updated_image_info = self.admin_client.show_image(image['id'])
self.assertEqual(random_id_2, updated_image_info['owner'])
self.assertNotEqual(created_image_info['owner'],
updated_image_info['owner'])
| masayukig/tempest | tempest/api/image/v2/admin/test_images.py | Python | apache-2.0 | 2,341 |
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope
from tensormate.graph import TfGgraphBuilder
class ImageGraphBuilder(TfGgraphBuilder):
def __init__(self, scope=None, device=None, plain=False, data_format="NHWC",
data_format_ops=(layers.conv2d,
layers.convolution2d,
layers.convolution2d_transpose,
layers.convolution2d_in_plane,
layers.convolution2d_transpose,
layers.conv2d_in_plane,
layers.conv2d_transpose,
layers.separable_conv2d,
layers.separable_convolution2d,
layers.avg_pool2d,
layers.max_pool2d,
layers.batch_norm)):
super(ImageGraphBuilder, self).__init__(scope=scope, device=device, plain=plain)
self.data_format = data_format
self.data_format_ops = data_format_ops if data_format_ops is not None else []
def _call_body(self, *args, **kwargs):
# is_training = kwargs.get("is_training", True)
# reuse = self.ref_count > 0
with tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE):
with arg_scope(self.data_format_ops, data_format=self.data_format):
if self._device is None:
output = self._build(*args, **kwargs)
else:
with tf.device(self._device):
output = self._build(*args, **kwargs)
return output
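# Rough usage sketch (assumptions: TfGgraphBuilder subclasses supply _build()
# and instances are invoked via the base class's call machinery; the layer
# stack shown is arbitrary):
#
#   class MyCnnBuilder(ImageGraphBuilder):
#       def _build(self, images):
#           net = layers.conv2d(images, 32, 3)
#           net = layers.max_pool2d(net, 2)
#           return layers.flatten(net)
#
#   builder = MyCnnBuilder(scope="my_cnn", data_format="NHWC")
#   features = builder(images)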
| songgc/tensormate | tensormate/graph/image_graph.py | Python | apache-2.0 | 1,742 |
__author__ = 'sianwahl'
from string import Template
class NedGenerator:
def __init__(self, number_of_channels):
self.number_of_channels = number_of_channels
def generate(self):
return self._generate_tuplefeeder_ned(), self._generate_m2etis_ned()
def _generate_tuplefeeder_ned(self):
template = """
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program. If not, see http://www.gnu.org/licenses/.
//
package m2etis.applications.TupleFeeder;
import oversim.common.BaseApp;
import oversim.common.ITier;
simple TupleFeeder extends BaseApp
{
parameters:
@class(TupleFeeder);
int largestKey; // largest key we can pick
int numSubs;
int numPubs;
int numPubSubs;
int numRend;
int channelCount;
double stopAvg;
int waitForSubscribe @unit(s);
int waitForPublish @unit(s);
$channel_specific_parameters
}
module TupleFeederModules like ITier
{
parameters:
@display("i=block/segm");
gates:
input from_lowerTier; // gate from the lower tier
input from_upperTier; // gate from the upper tier
output to_lowerTier; // gate to the lower tier
output to_upperTier; // gate to the upper tier
input trace_in; // gate for trace file commands
input udpIn;
output udpOut;
input tcpIn;
output tcpOut;
submodules:
tupleFeeder: TupleFeeder;
connections allowunconnected:
from_lowerTier --> tupleFeeder.from_lowerTier;
to_lowerTier <-- tupleFeeder.to_lowerTier;
trace_in --> tupleFeeder.trace_in;
udpIn --> tupleFeeder.udpIn;
udpOut <-- tupleFeeder.udpOut;
}
"""
channel_specific_parameters = ""
for i in range(0, self.number_of_channels):
channel_specific_parameters += "int numToSend_" + str(i) + ";\n\t"
channel_specific_parameters += "int burstAmount_" + str(i) + ";\n\t"
channel_specific_parameters += "int burstFrequency_" + str(i) + " @unit(s);\n\t"
channel_specific_parameters += "int burstDuration_" + str(i) + " @unit(s);\n\t"
channel_specific_parameters += "int chanceToUnsubscribe_" + str(i) + ";\n\t"
channel_specific_parameters += "int timeToUnsubscribe_" + str(i) + " @unit(s);\n\t"
channel_specific_parameters += "int timeToSubscribe_" + str(i) + " @unit(s);\n\t"
channel_specific_parameters += "int dropChance_" + str(i) + ";\n\t"
channel_specific_parameters += "bool compensateDrop_" + str(i) + ";\n\t"
channel_specific_parameters += "double fluctuation_" + str(i) + ";\n\t"
template_prepared = Template(template)
result = template_prepared.substitute(
channel_specific_parameters=channel_specific_parameters
)
return result
def _generate_m2etis_ned(self):
template = """
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program. If not, see http://www.gnu.org/licenses/.
//
package m2etis.middleware;
import oversim.common.BaseApp;
import oversim.common.ITier;
//
// TODO auto-generated type
//
simple M2etisAdapter extends BaseApp
{
parameters:
@class(M2etisAdapter);
$disable_overlays
int packetSize @unit(B);
int queueSize @unit(B);
int channelCount;
int downstream @unit(bps);
int upstream @unit(bps);
int headerSize @unit(B);
int startRoot;
int endRoot;
int rendezvousNode;
double stopAvg;
double simulationResolution @unit(s);
bool queueDisabled;
}
module M2etisPubSub like ITier
{
gates:
input udpIn; // gate from the UDP layer
output udpOut; // gate to the UDP layer
input from_lowerTier; // gate from the lower tier
input from_upperTier; // gate from the upper tier
output to_lowerTier; // gate to the lower tier
output to_upperTier; // gate to the upper tier
input trace_in; // gate for trace file commands
input tcpIn; // gate from the TCP layer
output tcpOut; // gate to the TCP layer
submodules:
m2etis: M2etisAdapter;
connections allowunconnected:
from_lowerTier --> m2etis.from_lowerTier;
to_lowerTier <-- m2etis.to_lowerTier;
from_upperTier --> m2etis.from_upperTier;
to_upperTier <-- m2etis.to_upperTier;
udpIn --> m2etis.udpIn;
udpOut <-- m2etis.udpOut;
}
"""
disable_overlays = ""
for i in range(0, self.number_of_channels):
disable_overlays += "bool disableOverlay_" + str(i) + ";\n\t"
template_prepared = Template(template)
result = template_prepared.substitute(
disable_overlays=disable_overlays
)
return result
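# Example usage (illustrative):
#
#   gen = NedGenerator(number_of_channels=4)
#   tuplefeeder_ned, m2etis_ned = gen.generate()
#   # tuplefeeder_ned and m2etis_ned hold the generated OMNeT++ NED sources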
| ClockworkOrigins/m2etis | configurator/configurator/NedGenerator.py | Python | apache-2.0 | 6,119 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resources for mapping between user emails and host usernames/owners."""
from upvote.gae import settings
def UsernameToEmail(username):
if '@' in username:
return username
return '@'.join((username, settings.USER_EMAIL_DOMAIN))
def EmailToUsername(email):
return email.partition('@')[0]
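# Example round trip (assuming settings.USER_EMAIL_DOMAIN == 'example.com'):
#
#   UsernameToEmail('jdoe')              # -> 'jdoe@example.com'
#   UsernameToEmail('jdoe@other.com')    # -> 'jdoe@other.com' (already an email)
#   EmailToUsername('jdoe@example.com')  # -> 'jdoe'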
| google/upvote_py2 | upvote/gae/utils/user_utils.py | Python | apache-2.0 | 901 |
"""Class to hold all camera accessories."""
import asyncio
from datetime import timedelta
import logging
from haffmpeg.core import HAFFmpeg
from pyhap.camera import (
VIDEO_CODEC_PARAM_LEVEL_TYPES,
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES,
Camera as PyhapCamera,
)
from pyhap.const import CATEGORY_CAMERA
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.util import get_local_ip
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_MOTION_DETECTED,
CHAR_MUTE,
CHAR_PROGRAMMABLE_SWITCH_EVENT,
CONF_AUDIO_CODEC,
CONF_AUDIO_MAP,
CONF_AUDIO_PACKET_SIZE,
CONF_LINKED_DOORBELL_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONF_MAX_FPS,
CONF_MAX_HEIGHT,
CONF_MAX_WIDTH,
CONF_STREAM_ADDRESS,
CONF_STREAM_COUNT,
CONF_STREAM_SOURCE,
CONF_SUPPORT_AUDIO,
CONF_VIDEO_CODEC,
CONF_VIDEO_MAP,
CONF_VIDEO_PACKET_SIZE,
DEFAULT_AUDIO_CODEC,
DEFAULT_AUDIO_MAP,
DEFAULT_AUDIO_PACKET_SIZE,
DEFAULT_MAX_FPS,
DEFAULT_MAX_HEIGHT,
DEFAULT_MAX_WIDTH,
DEFAULT_STREAM_COUNT,
DEFAULT_SUPPORT_AUDIO,
DEFAULT_VIDEO_CODEC,
DEFAULT_VIDEO_MAP,
DEFAULT_VIDEO_PACKET_SIZE,
SERV_DOORBELL,
SERV_MOTION_SENSOR,
SERV_SPEAKER,
SERV_STATELESS_PROGRAMMABLE_SWITCH,
)
from .img_util import scale_jpeg_camera_image
from .util import pid_is_alive
_LOGGER = logging.getLogger(__name__)
DOORBELL_SINGLE_PRESS = 0
DOORBELL_DOUBLE_PRESS = 1
DOORBELL_LONG_PRESS = 2
VIDEO_OUTPUT = (
"-map {v_map} -an "
"-c:v {v_codec} "
"{v_profile}"
"-tune zerolatency -pix_fmt yuv420p "
"-r {fps} "
"-b:v {v_max_bitrate}k -bufsize {v_bufsize}k -maxrate {v_max_bitrate}k "
"-payload_type 99 "
"-ssrc {v_ssrc} -f rtp "
"-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} "
"srtp://{address}:{v_port}?rtcpport={v_port}&"
"localrtcpport={v_port}&pkt_size={v_pkt_size}"
)
AUDIO_OUTPUT = (
"-map {a_map} -vn "
"-c:a {a_encoder} "
"{a_application}"
"-ac 1 -ar {a_sample_rate}k "
"-b:a {a_max_bitrate}k -bufsize {a_bufsize}k "
"-payload_type 110 "
"-ssrc {a_ssrc} -f rtp "
"-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {a_srtp_key} "
"srtp://{address}:{a_port}?rtcpport={a_port}&"
"localrtcpport={a_port}&pkt_size={a_pkt_size}"
)
SLOW_RESOLUTIONS = [
(320, 180, 15),
(320, 240, 15),
]
RESOLUTIONS = [
(320, 180),
(320, 240),
(480, 270),
(480, 360),
(640, 360),
(640, 480),
(1024, 576),
(1024, 768),
(1280, 720),
(1280, 960),
(1920, 1080),
]
VIDEO_PROFILE_NAMES = ["baseline", "main", "high"]
FFMPEG_WATCH_INTERVAL = timedelta(seconds=5)
FFMPEG_WATCHER = "ffmpeg_watcher"
FFMPEG_PID = "ffmpeg_pid"
SESSION_ID = "session_id"
CONFIG_DEFAULTS = {
CONF_SUPPORT_AUDIO: DEFAULT_SUPPORT_AUDIO,
CONF_MAX_WIDTH: DEFAULT_MAX_WIDTH,
CONF_MAX_HEIGHT: DEFAULT_MAX_HEIGHT,
CONF_MAX_FPS: DEFAULT_MAX_FPS,
CONF_AUDIO_CODEC: DEFAULT_AUDIO_CODEC,
CONF_AUDIO_MAP: DEFAULT_AUDIO_MAP,
CONF_VIDEO_MAP: DEFAULT_VIDEO_MAP,
CONF_VIDEO_CODEC: DEFAULT_VIDEO_CODEC,
CONF_AUDIO_PACKET_SIZE: DEFAULT_AUDIO_PACKET_SIZE,
CONF_VIDEO_PACKET_SIZE: DEFAULT_VIDEO_PACKET_SIZE,
CONF_STREAM_COUNT: DEFAULT_STREAM_COUNT,
}
@TYPES.register("Camera")
class Camera(HomeAccessory, PyhapCamera):
"""Generate a Camera accessory."""
def __init__(self, hass, driver, name, entity_id, aid, config):
"""Initialize a Camera accessory object."""
self._ffmpeg = hass.data[DATA_FFMPEG]
for config_key in CONFIG_DEFAULTS:
if config_key not in config:
config[config_key] = CONFIG_DEFAULTS[config_key]
max_fps = config[CONF_MAX_FPS]
max_width = config[CONF_MAX_WIDTH]
max_height = config[CONF_MAX_HEIGHT]
resolutions = [
(w, h, fps)
for w, h, fps in SLOW_RESOLUTIONS
if w <= max_width and h <= max_height and fps < max_fps
] + [
(w, h, max_fps)
for w, h in RESOLUTIONS
if w <= max_width and h <= max_height
]
video_options = {
"codec": {
"profiles": [
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["BASELINE"],
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["MAIN"],
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["HIGH"],
],
"levels": [
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_1"],
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_2"],
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE4_0"],
],
},
"resolutions": resolutions,
}
audio_options = {
"codecs": [
{"type": "OPUS", "samplerate": 24},
{"type": "OPUS", "samplerate": 16},
]
}
stream_address = config.get(CONF_STREAM_ADDRESS, get_local_ip())
options = {
"video": video_options,
"audio": audio_options,
"address": stream_address,
"srtp": True,
"stream_count": config[CONF_STREAM_COUNT],
}
super().__init__(
hass,
driver,
name,
entity_id,
aid,
config,
category=CATEGORY_CAMERA,
options=options,
)
self._char_motion_detected = None
self.linked_motion_sensor = self.config.get(CONF_LINKED_MOTION_SENSOR)
if self.linked_motion_sensor:
state = self.hass.states.get(self.linked_motion_sensor)
if state:
serv_motion = self.add_preload_service(SERV_MOTION_SENSOR)
self._char_motion_detected = serv_motion.configure_char(
CHAR_MOTION_DETECTED, value=False
)
self._async_update_motion_state(state)
self._char_doorbell_detected = None
self._char_doorbell_detected_switch = None
self.linked_doorbell_sensor = self.config.get(CONF_LINKED_DOORBELL_SENSOR)
if self.linked_doorbell_sensor:
state = self.hass.states.get(self.linked_doorbell_sensor)
if state:
serv_doorbell = self.add_preload_service(SERV_DOORBELL)
self.set_primary_service(serv_doorbell)
self._char_doorbell_detected = serv_doorbell.configure_char(
CHAR_PROGRAMMABLE_SWITCH_EVENT, value=0,
)
serv_stateless_switch = self.add_preload_service(
SERV_STATELESS_PROGRAMMABLE_SWITCH
)
self._char_doorbell_detected_switch = serv_stateless_switch.configure_char(
CHAR_PROGRAMMABLE_SWITCH_EVENT,
value=0,
valid_values={"SinglePress": DOORBELL_SINGLE_PRESS},
)
serv_speaker = self.add_preload_service(SERV_SPEAKER)
serv_speaker.configure_char(CHAR_MUTE, value=0)
self._async_update_doorbell_state(state)
async def run_handler(self):
"""Handle accessory driver started event.
Run inside the Home Assistant event loop.
"""
if self._char_motion_detected:
async_track_state_change_event(
self.hass,
[self.linked_motion_sensor],
self._async_update_motion_state_event,
)
if self._char_doorbell_detected:
async_track_state_change_event(
self.hass,
[self.linked_doorbell_sensor],
self._async_update_doorbell_state_event,
)
await super().run_handler()
@callback
def _async_update_motion_state_event(self, event):
"""Handle state change event listener callback."""
self._async_update_motion_state(event.data.get("new_state"))
@callback
def _async_update_motion_state(self, new_state):
"""Handle link motion sensor state change to update HomeKit value."""
if not new_state:
return
detected = new_state.state == STATE_ON
if self._char_motion_detected.value == detected:
return
self._char_motion_detected.set_value(detected)
_LOGGER.debug(
"%s: Set linked motion %s sensor to %d",
self.entity_id,
self.linked_motion_sensor,
detected,
)
@callback
def _async_update_doorbell_state_event(self, event):
"""Handle state change event listener callback."""
self._async_update_doorbell_state(event.data.get("new_state"))
@callback
def _async_update_doorbell_state(self, new_state):
"""Handle link doorbell sensor state change to update HomeKit value."""
if not new_state:
return
if new_state.state == STATE_ON:
self._char_doorbell_detected.set_value(DOORBELL_SINGLE_PRESS)
self._char_doorbell_detected_switch.set_value(DOORBELL_SINGLE_PRESS)
_LOGGER.debug(
"%s: Set linked doorbell %s sensor to %d",
self.entity_id,
self.linked_doorbell_sensor,
DOORBELL_SINGLE_PRESS,
)
@callback
def async_update_state(self, new_state):
"""Handle state change to update HomeKit value."""
pass # pylint: disable=unnecessary-pass
async def _async_get_stream_source(self):
"""Find the camera stream source url."""
stream_source = self.config.get(CONF_STREAM_SOURCE)
if stream_source:
return stream_source
try:
stream_source = await self.hass.components.camera.async_get_stream_source(
self.entity_id
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Failed to get stream source - this could be a transient error or your camera might not be compatible with HomeKit yet"
)
if stream_source:
self.config[CONF_STREAM_SOURCE] = stream_source
return stream_source
async def start_stream(self, session_info, stream_config):
"""Start a new stream with the given configuration."""
_LOGGER.debug(
"[%s] Starting stream with the following parameters: %s",
session_info["id"],
stream_config,
)
input_source = await self._async_get_stream_source()
if not input_source:
_LOGGER.error("Camera has no stream source")
return False
if "-i " not in input_source:
input_source = "-i " + input_source
video_profile = ""
if self.config[CONF_VIDEO_CODEC] != "copy":
video_profile = (
"-profile:v "
+ VIDEO_PROFILE_NAMES[
int.from_bytes(stream_config["v_profile_id"], byteorder="big")
]
+ " "
)
audio_application = ""
if self.config[CONF_AUDIO_CODEC] == "libopus":
audio_application = "-application lowdelay "
output_vars = stream_config.copy()
output_vars.update(
{
"v_profile": video_profile,
"v_bufsize": stream_config["v_max_bitrate"] * 4,
"v_map": self.config[CONF_VIDEO_MAP],
"v_pkt_size": self.config[CONF_VIDEO_PACKET_SIZE],
"v_codec": self.config[CONF_VIDEO_CODEC],
"a_bufsize": stream_config["a_max_bitrate"] * 4,
"a_map": self.config[CONF_AUDIO_MAP],
"a_pkt_size": self.config[CONF_AUDIO_PACKET_SIZE],
"a_encoder": self.config[CONF_AUDIO_CODEC],
"a_application": audio_application,
}
)
output = VIDEO_OUTPUT.format(**output_vars)
if self.config[CONF_SUPPORT_AUDIO]:
output = output + " " + AUDIO_OUTPUT.format(**output_vars)
_LOGGER.debug("FFmpeg output settings: %s", output)
stream = HAFFmpeg(self._ffmpeg.binary, loop=self.driver.loop)
opened = await stream.open(
cmd=[], input_source=input_source, output=output, stdout_pipe=False
)
if not opened:
_LOGGER.error("Failed to open ffmpeg stream")
return False
_LOGGER.info(
"[%s] Started stream process - PID %d",
session_info["id"],
stream.process.pid,
)
session_info["stream"] = stream
session_info[FFMPEG_PID] = stream.process.pid
async def watch_session(_):
await self._async_ffmpeg_watch(session_info["id"])
session_info[FFMPEG_WATCHER] = async_track_time_interval(
self.hass, watch_session, FFMPEG_WATCH_INTERVAL,
)
return await self._async_ffmpeg_watch(session_info["id"])
async def _async_ffmpeg_watch(self, session_id):
"""Check to make sure ffmpeg is still running and cleanup if not."""
ffmpeg_pid = self.sessions[session_id][FFMPEG_PID]
if pid_is_alive(ffmpeg_pid):
return True
_LOGGER.warning("Streaming process ended unexpectedly - PID %d", ffmpeg_pid)
self._async_stop_ffmpeg_watch(session_id)
self.set_streaming_available(self.sessions[session_id]["stream_idx"])
return False
@callback
def _async_stop_ffmpeg_watch(self, session_id):
"""Cleanup a streaming session after stopping."""
if FFMPEG_WATCHER not in self.sessions[session_id]:
return
self.sessions[session_id].pop(FFMPEG_WATCHER)()
async def stop_stream(self, session_info):
"""Stop the stream for the given ``session_id``."""
session_id = session_info["id"]
stream = session_info.get("stream")
if not stream:
_LOGGER.debug("No stream for session ID %s", session_id)
return
self._async_stop_ffmpeg_watch(session_id)
if not pid_is_alive(stream.process.pid):
_LOGGER.info("[%s] Stream already stopped", session_id)
return True
for shutdown_method in ["close", "kill"]:
_LOGGER.info("[%s] %s stream", session_id, shutdown_method)
try:
await getattr(stream, shutdown_method)()
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"[%s] Failed to %s stream", session_id, shutdown_method
)
async def reconfigure_stream(self, session_info, stream_config):
"""Reconfigure the stream so that it uses the given ``stream_config``."""
return True
def get_snapshot(self, image_size):
"""Return a jpeg of a snapshot from the camera."""
return scale_jpeg_camera_image(
asyncio.run_coroutine_threadsafe(
self.hass.components.camera.async_get_image(self.entity_id),
self.hass.loop,
).result(),
image_size["image-width"],
image_size["image-height"],
)
| titilambert/home-assistant | homeassistant/components/homekit/type_cameras.py | Python | apache-2.0 | 15,437 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# by huangjiangbo
# Deployment service
# deploy.py
from ConfigParser import ConfigParser
ConfigFile = r'config.ini' # configuration file to read
config = ConfigParser()
config.read(ConfigFile)
de_infos = config.items(r'deploy_server') # remote deployment server information
redeploy_server_info = {}
appinfo = {}
print de_infos
for (key, value) in de_infos:
redeploy_server_info[key] = value
print redeploy_server_info
| cherry-hyx/hjb-test | 脚本/deploy/t.py | Python | artistic-2.0 | 449 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Scott Hendrickson"
__license__="Simplified BSD"
import sys
import datetime
import fileinput
from io import StringIO
# Experimental: Use numba to speed up some of the basic functions
# that are run many times per record
# from numba import jit
# use fastest option available
try:
import ujson as json
except ImportError:
try:
import json
except ImportError:
import simplejson as json
gnipError = "GNIPERROR"
gnipRemove = "GNIPREMOVE"
gnipDateTime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")
INTERNAL_EMPTY_FIELD = "GNIPEMPTYFIELD"
class _Field(object):
"""
Base class for extracting the desired value at the end of a series of keys in a JSON Activity
Streams payload. Set the application-wide default value (for e.g. missing values) here,
but also use child classes to override when necessary. Subclasses also need to define the
key-path (path) to the desired location by overwriting the path attr.
"""
# set some default values; these can be overwritten in custom classes
# twitter format
default_t_fmt = "%Y-%m-%dT%H:%M:%S.000Z"
default_value = INTERNAL_EMPTY_FIELD
path = [] # dict key-path to follow for desired value
label = 'DummyKeyPathLabel' # this must match if-statement in constructor
def __init__(self, json_record):
if self.label == 'DummyKeyPathLabel':
self.label = ':'.join(self.path)
self.value = None # str representation of the field, often = str( self.value_list )
if json_record is not None:
self.value = self.walk_path(json_record)
else:
self.value = self.default_value
def __repr__(self):
return unicode(self.value)
def walk_path(self, json_record, path=None):
res = json_record
if path is None:
path = self.path
for k in path:
if res is None:
break
if k not in res or ( type(res[k]) is list and len(res[k]) == 0 ):
# parenthetical clause for values with empty lists e.g. twitter_entities
return self.default_value
res = res[k]
# handle the special case where the walk_path found null (JSON) which converts to
# a Python None. Only use "None" (str version) if it's assigned to self.default_value
res = res if res is not None else self.default_value
return res
def walk_path_slower(self, json_record, path=None):
"""Slower version fo walk path. Depricated."""
if path is None:
path = self.path
try:
execstr = "res=json_record" + '["{}"]'*len(path)
exec(execstr.format(*path))
except (KeyError, TypeError):
res = None
if res is None:
res = self.default_value
return res
def fix_length(self, iterable, limit=None):
"""
Take an iterable (typically a list) and an optional maximum length (limit).
If limit is not given, and the input iterable is not equal to self.default_value
(typically "None"), the input iterable is returned. If limit is given, the return
value is a list that is either truncated to the first limit items, or padded
with self.default_value until it is of size limit. Note: strings are iterables,
so if you pass this function a string, it will (optionally) truncate the
number of characters in the string according to limit.
"""
res = []
if limit is None:
# no limits on the length of the result, so just return the original iterable
res = iterable
else:
#if len(iterable) == 0:
if iterable == self.default_value or len(iterable) == 0:
# if walk_path() finds the final key, but the value is an empty list
# (common for e.g. the contents of twitter_entities)
# overwrite self.value with a list of self.default_value and of length limit
res = [ self.default_value ]*limit
else:
# found something useful in the iterable, either pad the list or truncate
# to end up with something of the proper length
current_length = len( iterable )
if current_length < limit:
res = iterable + [ self.default_value
for _ in range(limit - current_length) ]
else:
res = iterable[:limit]
return res
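# --- Hedged illustration (not part of the original module) ---
# A minimal sketch of a concrete _Field subclass; the key-path below
# ("actor" -> "displayName") is an arbitrary example chosen for illustration
# and is not guaranteed to exist in every payload this package supports.
class _ExampleActorNameField(_Field):
    """Example only: extracts json_record["actor"]["displayName"]."""
    path = ["actor", "displayName"]
    label = "actor:displayName"
# Usage sketch:
#   _ExampleActorNameField({"actor": {"displayName": "example user"}}).value
#   -> "example user"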
class _LimitedField(_Field):
"""
Takes JSON record (in python dict form) and optionally a maximum length (limit,
with default length=5). Uses parent class _Field() to assign the appropriate value
to self.value. When self.value is a list of dictionaries,
inheriting from _LimitedField() class allows for the extraction and combination of
an arbitrary number of fields within self.value into self.value_list.
Ex: if your class would lead to having
self.value = [ {'a': 1, 'b': 2, 'c': 3}, {'a': 4, 'b': 5, 'c': 6} ], and what you'd like
is a list that looks like [ 1, 2, 4, 5 ], inheriting from _LimitedField() allows you
to overwrite the fields list ( fields=["a", "b"] ) to obtain this result.
Finally, self.value is set to a string representation of the final self.value_list.
"""
#TODO: is there a better way that this class and the fix_length() method in _Field class
# could be combined?
#TODO: set limit=None by default and just return as many as there are, otherwise (by specifying
# limit), return a maximum of limit.
# TODO:
# - consolidate _LimitedField() & fix_length() if possible
def __init__(self, json_record, limit=1):
self.fields = None
super(
_LimitedField
, self).__init__(json_record)
# self.value is possibly a list of dicts for each activity media object
if self.fields:
# start with default list full of the default_values
self.value_list = [ self.default_value ]*( len(self.fields)*limit )
if self.value != self.default_value:
for i,x in enumerate(self.value): # iterate over the dicts in the list
if i < limit: # ... up until you reach limit
for j,y in enumerate(self.fields): # iterate over the dict keys
self.value_list[ len( self.fields )*i + j ] = x[ self.fields[j] ]
# finally, str-ify the list
self.value = str( self.value_list )
class AcsCSV(object):
"""Base class for all delimited list objects. Basic delimited list utility functions"""
def __init__(self, delim, options_keypath):
self.delim = delim
if delim == "":
print >>sys.stderr, "Warning - Output has Null delimiter"
self.rmchars = "\n\r {}".format(self.delim)
self.options_keypath = options_keypath
def string_hook(self, record_string, mode_dummy):
"""
Returns a file-like StringIO object built from the activity record in record_string.
This is ultimately passed down to the FileInput.readline() method. The mode_dummy
parameter is only included so the signature matches other hooks.
"""
return StringIO( record_string )
def file_reader(self, options_filename=None, json_string=None):
"""
Read arbitrary input file(s) or standard Python str. When passing file_reader() a
JSON string, assign it to the json_string arg. Yields a tuple of (line number, record).
"""
line_number = 0
if json_string is not None:
hook = self.string_hook
options_filename = json_string
else:
hook = fileinput.hook_compressed
for r in fileinput.FileInput(options_filename, openhook=hook):
line_number += 1
try:
recs = [json.loads(r.strip())]
except ValueError:
try:
# maybe a missing line feed?
recs = [json.loads(x) for x in r.strip().replace("}{", "}GNIP_SPLIT{")
.split("GNIP_SPLIT")]
except ValueError:
sys.stderr.write("Invalid JSON record (%d) %s, skipping\n"
%(line_number, r.strip()))
continue
for record in recs:
if len(record) == 0:
continue
# hack: let the old source modules still have a self.cnt for error msgs
self.cnt = line_number
yield line_number, record
def cleanField(self,f):
"""Clean fields of new lines and delmiter."""
res = INTERNAL_EMPTY_FIELD
try:
res = f.strip(
).replace("\n"," "
).replace("\r"," "
).replace(self.delim, " "
)
except AttributeError:
try:
# odd edge case that f is a number
# then can't call string functions
float(f)
res = str(f)
except TypeError:
pass
return res
def buildListString(self,l):
"""Generic list builder returns a string representation of list"""
# unicode output of list (without u's)
res = '['
for r in l:
# handle the various types of lists we might see
try:
if isinstance(r, list):
res += "'" + self.buildListString(r) + "',"
elif isinstance(r, str) or isinstance(r, unicode):
res += "'" + r + "',"
else:
res += "'" + str(r) + "',"
except NameError:
if isinstance(r, list):
res += "'" + self.buildListString(r) + "',"
elif isinstance(r, str):
res += "'" + r + "',"
else:
res += "'" + str(r) + "',"
if res.endswith(','):
res = res[:-1]
res += ']'
return res
def splitId(self, x, index=1):
"""Generic functions for splitting id parts"""
tmp = x.split("/")
if len(tmp) > index:
return tmp[index]
else:
return x
def asString(self, l, emptyField):
"""Returns a delimited list object as a properly delimited string."""
if l is None:
return None
for i, x in enumerate(l):
if x == INTERNAL_EMPTY_FIELD:
l[i] = emptyField
return self.delim.join(l)
def get_source_list(self, x):
"""Wrapper for the core activity parsing function."""
source_list = self.procRecordToList(x)
if self.options_keypath:
source_list.append(self.keyPath(x))
# ensure no pipes, newlines, etc
return [ self.cleanField(x) for x in source_list ]
def procRecord(self, x, emptyField="None"):
return self.asString(self.get_source_list(x), emptyField)
def asGeoJSON(self, x):
"""Get results as GeoJSON representation."""
record_list = self.procRecordToList(x)
if self.__class__.__name__ == "TwacsCSV" and self.options_geo:
if self.geoCoordsList is None:
return
lon_lat = self.geoCoordsList[::-1]
elif self.__class__.__name__ == "FsqacsCSV" and self.options_geo:
lon_lat = self.geo_coords_list
else:
return {"Error":"This publisher doesn't have geo"}
return {
"type": "Feature"
, "geometry": { "type": "Point", "coordinates": lon_lat }
, "properties": { "id": record_list[0] }
}
def keyPath(self,d):
"""Get a generic key path specified at run time. Consider using jq instead?"""
#key_list = self.options_keypath.split(":")
delim = ":"
#print >> sys.stderr, "self.__class__ " + str(self.__class__)
if self.__class__.__name__ == "NGacsCSV":
delim = ","
key_stack = self.options_keypath.split(delim)
#print >> sys.stderr, "key_stack " + str(key_stack)
x = d
while len(key_stack) > 0:
try:
k = key_stack.pop(0)
try:
idx = int(k)
except ValueError:
# keys are ascii strings
idx = str(k)
x = x[idx]
except (IndexError, TypeError, KeyError) as e:
#sys.stderr.write("Keypath error at %s\n"%k)
return "PATH_EMPTY"
return unicode(x)
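# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of feeding a single JSON record to file_reader() via
# its json_string argument; the record content below is made up.
if __name__ == "__main__":
    _example_reader = AcsCSV("|", None)
    _example_record = u'{"id": "tag:example,2015:1", "verb": "post"}'
    for _line_number, _record in _example_reader.file_reader(json_string=_example_record):
        print _line_number, _record.get("verb")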
| DrSkippy/Gnacs | acscsv/acscsv.py | Python | bsd-2-clause | 13,140 |
from django.conf import settings
from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from cms.models import *
from cms.views import show
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
url(r'^$', show,{'slug':"/%s"%settings.HOME_SLUG}, name='cms.home'), # contenido definido en home slug
url(r'^%s/$'%settings.HOME_SLUG, RedirectView.as_view(url='/', permanent=False)), # redirect
url(r'(?P<slug>.*)/$', show,name='cms.show'),
] | eliasfernandez/django-simplecms | cms/urls.py | Python | bsd-2-clause | 537 |
import platform
import pytest
import math
import numpy as np
import dartpy as dart
def test_solve_for_free_joint():
'''
Very simple test of InverseKinematics module, applied to a FreeJoint to
ensure that the target is reachable
'''
skel = dart.dynamics.Skeleton()
[joint0, body0] = skel.createFreeJointAndBodyNodePair()
ik = body0.getOrCreateIK()
assert ik.isActive()
tf = dart.math.Isometry3()
tf.set_translation([0, 0, 0.8])
tf.set_rotation(dart.math.AngleAxis(math.pi / 8.0, [0, 1, 0]).to_rotation_matrix())
ik.getTarget().setTransform(tf)
error_method = ik.getErrorMethod()
assert error_method.getMethodName() == 'TaskSpaceRegion'
[lb, ub] = error_method.getBounds()
    assert len(lb) == 6
    assert len(ub) == 6
error_method.setBounds(np.ones(6) * -1e-8, np.ones(6) * 1e-8)
[lb, ub] = error_method.getBounds()
assert lb == pytest.approx(-1e-8)
assert ub == pytest.approx(1e-8)
solver = ik.getSolver()
solver.setNumMaxIterations(100)
prob = ik.getProblem()
tf_actual = ik.getTarget().getTransform().matrix()
tf_expected = body0.getTransform().matrix()
assert not np.isclose(tf_actual, tf_expected).all()
success = solver.solve()
assert success
tf_actual = ik.getTarget().getTransform().matrix()
tf_expected = body0.getTransform().matrix()
assert np.isclose(tf_actual, tf_expected).all()
class FailingSolver(dart.optimizer.Solver):
def __init__(self, constant):
super(FailingSolver, self).__init__()
self.constant = constant
def solve(self):
problem = self.getProblem()
if problem is None:
print('[FailingSolver::solve] Attempting to solve a nullptr problem! We will return false.')
return False
dim = problem.getDimension()
wrong_solution = np.ones(dim) * self.constant
problem.setOptimalSolution(wrong_solution)
return False
def getType(self):
return 'FailingSolver'
def clone(self):
return FailingSolver(self.constant)
def test_do_not_apply_solution_on_failure():
skel = dart.dynamics.Skeleton()
[joint, body] = skel.createFreeJointAndBodyNodePair()
ik = body.getIK(True)
solver = FailingSolver(10)
ik.setSolver(solver)
dofs = skel.getNumDofs()
skel.resetPositions()
assert not ik.solveAndApply(allowIncompleteResult=False)
assert np.isclose(skel.getPositions(), np.zeros(dofs)).all()
assert not ik.solveAndApply(allowIncompleteResult=True)
assert not np.isclose(skel.getPositions(), np.zeros(dofs)).all()
if __name__ == "__main__":
pytest.main()
| dartsim/dart | python/tests/unit/dynamics/test_inverse_kinematics.py | Python | bsd-2-clause | 2,669 |
'''
Project: Farnsworth
Author: Karandeep Singh Nagra
'''
from django.contrib.auth.models import User, Group, Permission
from django.core.urlresolvers import reverse
from django.db import models
from base.models import UserProfile
class Thread(models.Model):
'''
The Thread model. Used to group messages.
'''
owner = models.ForeignKey(
UserProfile,
help_text="The user who started this thread.",
)
subject = models.CharField(
blank=False,
null=False,
max_length=254,
help_text="Subject of this thread.",
)
start_date = models.DateTimeField(
auto_now_add=True,
help_text="The date this thread was started.",
)
change_date = models.DateTimeField(
auto_now_add=True,
help_text="The last time this thread was modified.",
)
number_of_messages = models.PositiveSmallIntegerField(
default=1,
help_text="The number of messages in this thread.",
)
active = models.BooleanField(
default=True,
help_text="Whether this thread is still active.",
)
views = models.PositiveIntegerField(
default=0,
help_text="The number times this thread has been viewed.",
)
followers = models.ManyToManyField(
User,
blank=True,
null=True,
related_name="following",
help_text="Users following this thread",
)
def __unicode__(self):
return self.subject
class Meta:
ordering = ['-change_date']
def is_thread(self):
return True
def get_view_url(self):
return reverse("threads:view_thread", kwargs={"pk": self.pk})
class Message(models.Model):
'''
The Message model. Contains a body, owner, and post_date, referenced by thread.
'''
body = models.TextField(
blank=False,
null=False,
help_text="Body of this message.",
)
owner = models.ForeignKey(
UserProfile,
help_text="The user who posted this message.",
)
post_date = models.DateTimeField(
auto_now_add=True,
help_text="The date this message was posted.",
)
thread = models.ForeignKey(
Thread,
help_text="The thread to which this message belongs.",
)
edited = models.BooleanField(
default=False,
)
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return self.body
class Meta:
ordering = ['post_date']
def is_message(self):
return True
def pre_save_thread(sender, instance, **kwargs):
thread = instance
thread.number_of_messages = thread.message_set.count()
def post_save_thread(sender, instance, created, **kwargs):
thread = instance
if not created and thread.number_of_messages == 0:
thread.delete()
def post_save_message(sender, instance, created, **kwargs):
message = instance
thread = message.thread
if created:
thread.change_date = message.post_date
thread.save()
def post_delete_message(sender, instance, **kwargs):
message = instance
message.thread.save()
# Connect signals with their respective functions from above.
# When a message is created, update that message's thread's change_date to the post_date of that message.
models.signals.post_save.connect(post_save_message, sender=Message)
models.signals.post_delete.connect(post_delete_message, sender=Message)
models.signals.pre_save.connect(pre_save_thread, sender=Thread)
models.signals.post_save.connect(post_save_thread, sender=Thread)
| knagra/farnsworth | threads/models.py | Python | bsd-2-clause | 3,639 |
from __future__ import unicode_literals
class MorfessorException(Exception):
"""Base class for exceptions in this module."""
pass
class ArgumentException(Exception):
"""Exception in command line argument parsing."""
pass
class InvalidCategoryError(MorfessorException):
"""Attempt to load data using a different categorization scheme."""
def __init__(self, category):
super(InvalidCategoryError, self).__init__(
self, 'This model does not recognize the category {}'.format(
category))
class InvalidOperationError(MorfessorException):
def __init__(self, operation, function_name):
super(InvalidOperationError, self).__init__(
self, ('This model does not have a method {}, and therefore cannot'
' perform operation "{}"').format(function_name, operation))
class UnsupportedConfigurationError(MorfessorException):
def __init__(self, reason):
super(UnsupportedConfigurationError, self).__init__(
self, ('This operation is not supported in this program ' +
'configuration. Reason: {}.').format(reason))
| aalto-speech/flatcat | flatcat/exception.py | Python | bsd-2-clause | 1,153 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Feed.title'
db.alter_column('feedmanager_feed', 'title', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'Feed.title'
db.alter_column('feedmanager_feed', 'title', self.gf('django.db.models.fields.CharField')(max_length=70))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'feedmanager.feed': {
'Meta': {'object_name': 'Feed'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'feedmanager.item': {
'Meta': {'object_name': 'Item'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedmanager.Feed']"}),
'guid': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.TextField', [], {}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '70'})
}
}
complete_apps = ['feedmanager']
| jacobjbollinger/sorbet | sorbet/feedmanager/migrations/0006_chg_field_feed_title.py | Python | bsd-2-clause | 5,274 |
# General utility functions can go here.
import os
import random
import string
from django.utils import functional
from django.utils.http import urlencode
def rand_string(numOfChars):
"""
Generates a string of lowercase letters and numbers.
That makes 36^10 = 3 x 10^15 possibilities.
If we generate filenames randomly, it's harder for people to guess filenames
and type in their URLs directly to bypass permissions.
"""
return ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(numOfChars))
def generate_random_filename(directory, originalFilename, numOfChars):
"""
Generate a random filename for a file upload. The filename will
have numOfChars random characters. Also prepends the directory
argument to get a filepath which is only missing the MEDIA_ROOT
part at the beginning.
    The return value can be used as an upload_to argument for a FileField,
ImageField, ThumbnailerImageField, etc. An upload_to argument is
automatically prepended with MEDIA_ROOT to get the upload filepath.
"""
# TODO: Use the directory argument to check for filename collisions with existing files.
# To unit test this, use a Mocker or similar on the filename randomizer
# to make filename collisions far more likely.
extension = os.path.splitext(originalFilename)[1]
filenameBase = rand_string(numOfChars)
return os.path.join(directory, filenameBase + extension)
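# --- Hedged usage sketch (not part of the original module) ---
# A thin wrapper giving the (instance, filename) signature Django expects from
# an upload_to callable; the directory name and character count below are
# arbitrary illustration values, not settings used elsewhere in this project.
def example_upload_to_original(instance, filename):
    """Hypothetical upload_to callable built on generate_random_filename()."""
    return generate_random_filename('data/originals', filename, 10)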
def url_with_querystring(path, **kwargs):
"""
Takes a base URL (path) and GET query arguments (kwargs).
Returns the complete GET URL.
NOTE:
Any kwargs with special characters like '/' and ' ' will be
escaped with %2f, %20, etc.
Source:
http://stackoverflow.com/a/5341769/859858
"""
return path + '?' + urlencode(kwargs)
def is_django_str(s):
"""
Checks that the argument is either:
(a) an instance of basestring, or
(b) a Django lazy-translation string.
:param s: Object to check the type of.
:return: True if s is a Django string, False otherwise.
"""
if isinstance(s, basestring):
return True
elif isinstance(s, functional.Promise):
return True
else:
return False
| DevangS/CoralNet | utils.py | Python | bsd-2-clause | 2,240 |
import django
from django.db import router
from django.db.models import signals
try:
from django.db.models.fields.related import ReverseManyRelatedObjectsDescriptor
except ImportError:
from django.db.models.fields.related import ManyToManyDescriptor as ReverseManyRelatedObjectsDescriptor
from ..utils import cached_property
class SortableReverseManyRelatedObjectsDescriptor(ReverseManyRelatedObjectsDescriptor):
@cached_property
def related_manager_cls(self):
ManyRelatedManagerBase = super(
SortableReverseManyRelatedObjectsDescriptor, self).related_manager_cls
class ManyRelatedManager(ManyRelatedManagerBase):
def _add_items(self, source_field_name, target_field_name, *objs):
"""
By default, auto_created through objects from form instances are saved using
Manager.bulk_create(). Manager.bulk_create() is passed a list containing
instances of the through model with the target and source foreign keys defined.
In order to set the position field we need to tweak this logic (the modified
lines are marked out with comments below).
This method is added to ManyRelatedManager below in
SortableDescriptorMixin.related_manager_cls
"""
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db))
# _get_fk_val wasn't introduced until django 1.4.2
if hasattr(self, '_get_fk_val'):
fk_val = self._get_fk_val(obj, target_field_name)
else:
fk_val = obj.pk
if fk_val is None:
raise ValueError('Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name))
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
vals = vals.filter(**{
source_field_name: getattr(self, '_pk_val', getattr(self, '_fk_val', self.instance.pk)),
'%s__in' % target_field_name: new_ids,
})
new_ids = new_ids - set(vals)
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
######################################################################
# This is where we modify the default logic for _add_items().
# We use get_or_create for ALL objects. Typically it calls bulk_create
# ONLY on ids which have not yet been created.
######################################################################
# sort_field = self.field.sort_field
sort_field_attname = self.field.sort_field.attname
for obj in objs:
sort_position = getattr(obj, sort_field_attname)
new_obj, created = self.through._default_manager.using(db).get_or_create(**{
sort_field_attname: sort_position,
'%s_id' % source_field_name: getattr(self, '_pk_val', getattr(self, '_fk_val', self.instance.pk)),
'%s_id' % target_field_name: obj.pk,
})
                    if getattr(new_obj, sort_field_attname) != sort_position:
setattr(new_obj, sort_field_attname, sort_position)
new_obj.save()
######################################################################
# End custom logic
######################################################################
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def get_queryset(self):
"""
Adds ordering to ManyRelatedManager.get_queryset(). This is
necessary in order for form widgets to display authors ordered by
position.
"""
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
if django.VERSION < (1, 7):
qset = super(ManyRelatedManager, self).get_query_set()
else:
qset = super(ManyRelatedManager, self).get_queryset()
opts = self.through._meta
# If the through table has Meta.ordering defined, order the objects
# returned by the ManyRelatedManager by those fields.
if self.field.sort_field_name:
object_name = opts.object_name.lower()
order_by = ['%s__%s' % (object_name, self.field.sort_field_name)]
if self.model._meta.ordering != order_by:
return qset.order_by(*order_by)
return qset
if django.VERSION < (1, 7):
get_query_set = get_queryset
def get_prefetch_queryset(self, instances, *args):
if django.VERSION < (1, 7):
rel_qs, rel_obj_attr, instance_attr, single, cache_name = \
super(ManyRelatedManager, self).get_prefetch_query_set(instances, *args)
else:
rel_qs, rel_obj_attr, instance_attr, single, cache_name = \
super(ManyRelatedManager, self).get_prefetch_queryset(instances, *args)
opts = self.through._meta
# If the through table has Meta.ordering defined, order the objects
# returned by the ManyRelatedManager by those fields.
if self.field.sort_field_name:
object_name = opts.object_name.lower()
order_by = ['%s__%s' % (object_name, self.field.sort_field_name)]
if self.model._meta.ordering != order_by:
rel_qs = rel_qs.order_by(*order_by)
return (rel_qs, rel_obj_attr, instance_attr, single, cache_name)
if django.VERSION < (1, 7):
get_prefetch_query_set = get_prefetch_queryset
ManyRelatedManager.field = self.field
return ManyRelatedManager
| SpectralAngel/django-select2-forms | select2/models/descriptors.py | Python | bsd-2-clause | 8,496 |
from setuptools import find_packages, setup
from auspost_pac import __version__ as version
setup(
name='python-auspost-pac',
version=version,
license='BSD',
author='Sam Kingston',
author_email='[email protected]',
description='Python API for Australia Post\'s Postage Assessment Calculator (pac).',
url='https://github.com/sjkingo/python-auspost-pac',
install_requires=[
'cached_property',
'frozendict',
'requests',
],
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python',
],
)
| sjkingo/python-auspost-pac | setup.py | Python | bsd-2-clause | 900 |
import io
import sys
import mock
import argparse
from monolith.compat import unittest
from monolith.cli.base import arg
from monolith.cli.base import ExecutionManager
from monolith.cli.base import SimpleExecutionManager
from monolith.cli.base import BaseCommand
from monolith.cli.base import CommandError
from monolith.cli.base import LabelCommand
from monolith.cli.base import SingleLabelCommand
from monolith.cli.base import Parser
from monolith.cli.exceptions import AlreadyRegistered
from io import StringIO
class DummyCommand(BaseCommand):
pass
class AnotherDummyCommand(BaseCommand):
pass
class TestExecutionManager(unittest.TestCase):
def assertRegistryClassesEqual(self, actual, expected):
self.assertEqual(list(sorted(actual)), list(sorted(expected)))
for key in actual:
self.assertEqual(actual[key].__class__, expected[key],
"Command class don't match for %r (it's %r but "
"expected %r)" % (key, actual[key].__class__,
expected[key]))
def setUp(self):
self.manager = ExecutionManager(['foobar'], stderr=StringIO())
def test_init_prog_name(self):
self.assertEqual(self.manager.prog_name, 'foobar')
def test_init_stderr(self):
manager = ExecutionManager()
self.assertEqual(manager.stderr, sys.stderr)
def test_default_argv(self):
with mock.patch.object(sys, 'argv', ['vcs', 'foo', 'bar']):
manager = ExecutionManager()
self.assertEqual(manager.argv, ['foo', 'bar'])
def test_get_usage(self):
self.manager.usage = 'foobar baz'
self.assertEqual(self.manager.get_usage(), 'foobar baz')
def test_get_parser(self):
self.manager.usage = 'foo bar'
parser = self.manager.get_parser()
self.assertIsInstance(parser, argparse.ArgumentParser)
self.assertEqual(parser.prog, 'foobar') # argv[0]
self.assertEqual(parser.usage, 'foo bar')
self.assertEqual(parser.stream, self.manager.stderr)
def test_get_parser_calls_setup_parser(self):
class DummyCommand(BaseCommand):
pass
self.manager.register('foo', DummyCommand)
with mock.patch.object(DummyCommand, 'setup_parser') as setup_parser:
self.manager.get_parser()
self.assertTrue(setup_parser.called)
def test_register(self):
Command = type('Command', (BaseCommand,), {})
self.manager.register('foo', Command)
self.assertRegistryClassesEqual(self.manager.registry, {'foo': Command})
command = self.manager.registry['foo']
self.assertEqual(command.manager, self.manager)
def test_register_raise_if_command_with_same_name_registered(self):
Command = type('Command', (BaseCommand,), {})
self.manager.register('foobar', Command)
with self.assertRaises(AlreadyRegistered):
self.manager.register('foobar', Command)
def test_register_respects_force_argument(self):
Command1 = type('Command', (BaseCommand,), {})
Command2 = type('Command', (BaseCommand,), {})
self.manager.register('foobar', Command1)
self.manager.register('foobar', Command2, force=True)
self.assertRegistryClassesEqual(self.manager.registry, {
'foobar': Command2})
def test_get_commands(self):
FooCommand = type('FooCommand', (BaseCommand,), {})
BarCommand = type('BarCommand', (BaseCommand,), {})
self.manager.register('foo', FooCommand)
self.manager.register('bar', BarCommand)
self.assertEqual(list(self.manager.get_commands().keys()), ['bar', 'foo'])
self.assertRegistryClassesEqual(self.manager.get_commands(), {
'foo': FooCommand,
'bar': BarCommand,
})
def test_get_commands_to_register(self):
FooCommand = type('FooCommand', (BaseCommand,), {})
BarCommand = type('BarCommand', (BaseCommand,), {})
class Manager(ExecutionManager):
def get_commands_to_register(self):
return {
'foo': FooCommand,
'bar': BarCommand,
}
manager = Manager(['foobar'])
self.assertRegistryClassesEqual(manager.registry, {
'foo': FooCommand,
'bar': BarCommand,
})
def test_call_command(self):
class Command(BaseCommand):
name = 'init'
handle = mock.Mock()
self.manager.register('init', Command)
self.manager.call_command('init')
self.assertTrue(Command.handle.called)
def test_called_command_has_prog_name_properly_set(self):
prog_names = []
class Command(BaseCommand):
name = 'init'
def handle(self, namespace):
prog_names.append(self.prog_name)
self.manager.register('init', Command)
self.manager.call_command('init')
self.assertEqual(prog_names, ['foobar'])
def test_call_command_with_args(self):
class Command(BaseCommand):
args = [
arg('-f', '--force', action='store_true', default=False),
]
name = 'add'
handle = mock.Mock()
self.manager.register('add', Command)
self.manager.call_command('add', '-f')
self.assertTrue(Command.handle.called)
namespace = Command.handle.call_args[0][0]
self.assertTrue(namespace.force)
@mock.patch('monolith.cli.base.sys.stderr')
def test_call_command_fails(self, stderr):
class Command(BaseCommand):
args = [
arg('-f', '--force', action='store_true', default=False),
]
name = 'add'
def handle(self, namespace):
raise CommandError('foo bar baz', 92)
self.manager.register('add', Command)
with self.assertRaises(SystemExit):
self.manager.call_command('add', '-f')
stderr.write.assert_called_once_with('ERROR: foo bar baz\n')
def test_execute_calls_handle_command(self):
class Command(BaseCommand):
args = [
arg('-f', '--force', action='store_true', default=False),
]
name = 'add'
handle = mock.Mock()
self.manager.register('add', Command)
with mock.patch.object(sys, 'argv', ['prog', 'add', '-f']):
self.manager.execute()
namespace = Command.handle.call_args[0][0]
Command.handle.assert_called_once_with(namespace)
class TestSimpleExecutionManager(unittest.TestCase):
def test_get_commands_to_register(self):
# importing dummy commands to local namespace so they have full class
# paths properly set
from monolith.tests.test_cli import DummyCommand
from monolith.tests.test_cli import AnotherDummyCommand
manager = SimpleExecutionManager('git', {
'push': DummyCommand,
'pull': 'monolith.tests.test_cli.AnotherDummyCommand',
})
self.assertDictEqual(manager.get_commands_to_register(), {
'push': DummyCommand,
'pull': AnotherDummyCommand,
})
class TestBaseCommand(unittest.TestCase):
def test_get_args(self):
Command = type('Command', (BaseCommand,), {'args': ['foo', 'bar']})
command = Command()
self.assertEqual(command.get_args(), ['foo', 'bar'])
def test_handle_raises_error(self):
with self.assertRaises(NotImplementedError):
BaseCommand().handle(argparse.Namespace())
def test_post_register_hooks(self):
Command = type('Command', (BaseCommand,), {'args': ['foo', 'bar']})
class Command(BaseCommand):
def post_register(self, manager):
manager.completion = True
manager = ExecutionManager()
self.assertFalse(manager.completion)
manager.register('completion', Command)
self.assertTrue(manager.completion)
class TestLabelCommand(unittest.TestCase):
def test_handle_raise_if_handle_label_not_implemented(self):
command = LabelCommand()
with self.assertRaises(NotImplementedError):
command.handle(argparse.Namespace(labels=['foo']))
def test_handle_calls_handle_label(self):
namespace = argparse.Namespace(labels=['foo', 'bar'])
command = LabelCommand()
command.handle_label = mock.Mock()
command.handle(namespace)
self.assertEqual(command.handle_label.call_args_list, [
arg('foo', namespace),
arg('bar', namespace),
])
def test_labels_required_true(self):
Command = type('Command', (LabelCommand,), {'labels_required': True})
command = Command()
self.assertEqual(command.get_args()[0].kwargs.get('nargs'), '+')
def test_labels_required_false(self):
Command = type('Command', (LabelCommand,), {'labels_required': False})
command = Command()
self.assertEqual(command.get_args()[0].kwargs.get('nargs'), '*')
def test_handle_no_labels_called_if_no_labels_given(self):
Command = type('Command', (LabelCommand,), {'labels_required': False})
command = Command()
command.handle_no_labels = mock.Mock()
namespace = argparse.Namespace(labels=[])
command.handle(namespace)
command.handle_no_labels.assert_called_once_with(namespace)
class TestSingleLabelCommand(unittest.TestCase):
def test_get_label_arg(self):
Command = type('Command', (SingleLabelCommand,), {})
label_arg = Command().get_label_arg()
self.assertEqual(label_arg, arg('label',
default=Command.label_default_value, nargs='?'))
def test_get_args(self):
Command = type('Command', (SingleLabelCommand,), {})
command = Command()
self.assertEqual(command.get_args(), [command.get_label_arg()])
def test_handle_raise_if_handle_label_not_implemented(self):
command = SingleLabelCommand()
with self.assertRaises(NotImplementedError):
command.handle(argparse.Namespace(label='foo'))
def test_handle_calls_handle_label(self):
namespace = argparse.Namespace(label='foobar')
command = SingleLabelCommand()
command.handle_label = mock.Mock()
command.handle(namespace)
self.assertEqual(command.handle_label.call_args_list, [
arg('foobar', namespace),
])
class TestArg(unittest.TestCase):
def test_args(self):
self.assertEqual(arg(1, 2, 'foo', bar='baz').args, (1, 2, 'foo'))
def test_kargs(self):
self.assertEqual(arg(1, 2, 'foo', bar='baz').kwargs, {'bar': 'baz'})
class TestParser(unittest.TestCase):
def setUp(self):
self.stream = io.StringIO()
self.parser = Parser(stream=self.stream)
def test_print_message_default_file(self):
self.parser._print_message('foobar')
self.assertEqual(self.stream.getvalue(), 'foobar')
| lukaszb/monolith | monolith/tests/test_cli.py | Python | bsd-2-clause | 11,047 |
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import numpy as np
from .tod import TOD
from .noise import Noise
from ..op import Operator
from .. import timing as timing
class AnalyticNoise(Noise):
"""
Class representing an analytic noise model.
This generates an analytic PSD for a set of detectors, given
input values for the knee frequency, NET, exponent, sample rate,
minimum frequency, etc.
Args:
detectors (list): List of detectors.
rate (dict): Dictionary of sample rates in Hertz.
fmin (dict): Dictionary of minimum frequencies for high pass
fknee (dict): Dictionary of knee frequencies.
alpha (dict): Dictionary of alpha exponents (positive, not negative!).
NET (dict): Dictionary of detector NETs.
"""
def __init__(self, *, detectors, rate, fmin, fknee, alpha, NET):
self._rate = rate
self._fmin = fmin
self._fknee = fknee
self._alpha = alpha
self._NET = NET
for d in detectors:
if self._alpha[d] < 0.0:
raise RuntimeError(
"alpha exponents should be positive in this formalism")
freqs = {}
psds = {}
last_nyquist = None
for d in detectors:
if (self._fknee[d] > 0.0) and (self._fknee[d] < self._fmin[d]):
raise RuntimeError("If knee frequency is non-zero, it must "
"be greater than f_min")
nyquist = self._rate[d] / 2.0
if nyquist != last_nyquist:
tempfreq = []
# this starting point corresponds to a high-pass of
# 30 years, so should be low enough for any interpolation!
cur = 1.0e-9
# this value seems to provide a good density of points
# in log space.
while cur < nyquist:
tempfreq.append(cur)
cur *= 1.4
# put a final point at Nyquist
tempfreq.append(nyquist)
tempfreq = np.array(tempfreq, dtype=np.float64)
last_nyquist = nyquist
freqs[d] = tempfreq
if self._fknee[d] > 0.0:
ktemp = np.power(self._fknee[d], self._alpha[d])
mtemp = np.power(self._fmin[d], self._alpha[d])
temp = np.power(freqs[d], self._alpha[d])
psds[d] = (temp + ktemp) / (temp + mtemp)
psds[d] *= (self._NET[d] * self._NET[d])
else:
psds[d] = np.ones_like(freqs[d])
psds[d] *= (self._NET[d] * self._NET[d])
# call the parent class constructor to store the psds
super().__init__(detectors=detectors, freqs=freqs, psds=psds)
def rate(self, det):
"""(float): the sample rate in Hz.
"""
return self._rate[det]
def fmin(self, det):
"""(float): the minimum frequency in Hz, used as a high pass.
"""
return self._fmin[det]
def fknee(self, det):
"""(float): the knee frequency in Hz.
"""
return self._fknee[det]
def alpha(self, det):
"""(float): the (positive!) slope exponent.
"""
return self._alpha[det]
def NET(self, det):
"""(float): the NET.
"""
return self._NET[det]
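# --- Hedged usage sketch (not part of the original module) ---
# Building the model for one fictional detector; every number below is an
# arbitrary illustration value, and the freq()/psd() accessors are assumed to
# be provided by the Noise base class.
#
#     noise = AnalyticNoise(
#         detectors=["d0"],
#         rate={"d0": 100.0},      # sample rate in Hz
#         fmin={"d0": 1.0e-5},     # high-pass scale in Hz
#         fknee={"d0": 0.1},       # knee frequency in Hz
#         alpha={"d0": 1.5},       # positive slope exponent
#         NET={"d0": 5.0e-5},      # detector NET
#     )
#     psd_d0 = noise.psd("d0")     # PSD evaluated at the frequencies noise.freq("d0")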
| tskisner/pytoast | src/python/tod/sim_noise.py | Python | bsd-2-clause | 3,547 |
from JumpScale import j
import JumpScale.baselib.watchdog.manager
import JumpScale.baselib.redis
import JumpScale.lib.rogerthat
descr = """
critical alert
"""
organization = "jumpscale"
enable = True
REDIS_PORT = 9999
# API_KEY = j.application.config.get('rogerthat.apikey')
redis_client = j.clients.credis.getRedisClient('127.0.0.1', REDIS_PORT)
# rogerthat_client = j.clients.rogerthat.get(API_KEY)
# ANSWERS = [{'id': 'yes', 'caption': 'Take', 'action': '', 'type': 'button'},]
# def _send_message(message, contacts, answers=ANSWERS, alert_flags=6):
# result = rogerthat_client.send_message(message, contacts, answers=answers, alert_flags=alert_flags)
# if result:
# if result['error']:
# j.logger.log('Could not send rogerthat message')
# return
# else:
# message_id = result['result']
# return message_id
def escalateL1(watchdogevent):
if not j.tools.watchdog.manager.inAlert(watchdogevent):
watchdogevent.escalationstate = 'L1'
# contact1 = redis_client.hget('contacts', '1')
message = str(watchdogevent)
# message_id = _send_message(message, [contact1,])
# watchdogevent.message_id = message_id
j.tools.watchdog.manager.setAlert(watchdogevent)
print "Escalate:%s"%message
def escalateL2(watchdogevent):
if watchdogevent.escalationstate == 'L1':
watchdogevent.escalationstate = 'L2'
contacts = redis_client.hgetall('contacts')
message = str(watchdogevent)
message_id = _send_message(message, [contacts['2'], contacts['3']])
watchdogevent.message_id = message_id
j.tools.watchdog.manager.setAlert(watchdogevent)
def escalateL3(watchdogevent):
if watchdogevent.escalationstate == 'L2':
watchdogevent.escalationstate = 'L3'
contacts = redis_client.hgetall('contacts')['all'].split(',')
message = str(watchdogevent)
message_id = _send_message(message, contacts)
watchdogevent.message_id = message_id
j.tools.watchdog.manager.setAlert(watchdogevent)
| Jumpscale/jumpscale6_core | apps/watchdogmanager/alerttypes/critical.py | Python | bsd-2-clause | 2,105 |
import tornado.web
import json
from tornado_cors import CorsMixin
from common import ParameterFormat, EnumEncoder
class DefaultRequestHandler(CorsMixin, tornado.web.RequestHandler):
CORS_ORIGIN = '*'
def initialize(self):
self.default_format = self.get_argument("format", "json", True)
self.show_about = self.get_argument("show_about", True, True)
self.pg_version = self.get_argument("pg_version", 9.6, True)
self.version = "2.0 beta"
def write_about_stuff(self, format_type="alter_system"):
default_comment = "--"
if format_type == "conf":
default_comment = "#"
self.write("{} Generated by PGConfig {}\n".format(default_comment,
self.version))
self.write("{} http://pgconfig.org\n\n".format(default_comment * 2))
def write_comment(self, format_type, comment):
default_comment = "--"
if format_type == "conf":
default_comment = "#"
if comment != "NONE":
self.write("\n{} {}\n".format(default_comment, comment))
def write_config(self, output_data):
if self.show_about is True:
self.write_about_stuff("conf")
for category in output_data:
self.write("# {}\n".format(category["description"]))
for parameter in category["parameters"]:
config_value = parameter.get("config_value", "NI")
value_format = parameter.get("format", ParameterFormat.NONE)
if value_format in (ParameterFormat.String,
ParameterFormat.Time):
config_value = "'{}'".format(config_value)
parameter_comment = parameter.get("comment", "NONE")
if parameter_comment != "NONE":
self.write_comment("conf", parameter_comment)
self.write("{} = {}\n".format(parameter["name"], config_value))
self.write("\n")
def write_alter_system(self, output_data):
if float(self.pg_version) <= 9.3:
self.write("-- ALTER SYSTEM format it's only supported on version 9.4 and higher. Use 'conf' format instead.")
else:
if self.show_about is True:
self.write_about_stuff()
for category in output_data:
self.write("-- {}\n".format(category["description"]))
for parameter in category["parameters"]:
config_value = parameter.get("config_value", "NI")
parameter_comment = parameter.get("comment", "NONE")
self.write_comment("alter_system", parameter_comment)
self.write("ALTER SYSTEM SET {} TO '{}';\n".format(parameter[
"name"], config_value))
self.write("\n")
def write_plain(self, message=list()):
if len(message) == 1:
self.write(message[0])
else:
for line in message:
self.write(line + '\n')
def write_bash(self, message=list()):
bash_script = """
#!/bin/bash
"""
self.write(bash_script)
if len(message) == 1:
self.write('SQL_QUERY="{}"\n'.format(message[0]))
self.write('psql -c "${SQL_QUERY}"\n')
else:
for line in message:
self.write('SQL_QUERY="{}"\n'.format(line))
self.write('psql -c "${SQL_QUERY}"\n\n')
def write_json_api(self, message):
self.set_header('Content-Type', 'application/vnd.api+json')
_document = {}
_document["data"] = message
_meta = {}
_meta["copyright"] = "PGConfig API"
_meta["version"] = self.version
_meta["arguments"] = self.request.arguments
_document["meta"] = _meta
_document["jsonapi"] = {"version": "1.0"}
full_url = self.request.protocol + "://" + self.request.host + self.request.uri
_document["links"] = {"self": full_url}
self.write(
json.dumps(
_document,
sort_keys=True,
separators=(',', ': '),
cls=EnumEncoder))
def write_json(self, message=list()):
self.set_header('Content-Type', 'application/json')
if len(message) == 1:
self.write("{ \"output\": \"" + message[0] + "\"}")
else:
new_output = "{ \"output\": ["
first_line = True
for line in message:
if not first_line:
new_output += ","
else:
first_line = False
new_output += "\"{}\"".format(line)
new_output += "] } "
self.write(new_output)
def return_output(self, message=list()):
# default_format=self.get_argument("format", "json", True)
# converting string input into a list (for solve issue with multiline strings)
process_data = []
if not isinstance(message, list):
process_data.insert(0, message)
else:
process_data = message
if self.default_format == "json":
self.write_json_api(message)
elif self.default_format == "bash":
self.write_bash(message)
elif self.default_format == "conf":
self.write_config(message)
elif self.default_format == "alter_system":
self.write_alter_system(message)
else:
self.write_plain(message)
class GeneratorRequestHandler(DefaultRequestHandler):
pass
| sebastianwebber/pgconfig-api | common/util.py | Python | bsd-2-clause | 5,642 |
from ._stub import *
from ._fluent import *
from ._matchers import *
| manahl/mockextras | mockextras/__init__.py | Python | bsd-2-clause | 69 |
from steamstoreprice.exception import UrlNotSteam, PageNotFound, RequestGenericError
from bs4 import BeautifulSoup
import requests
class SteamStorePrice:
def normalizeurl(self, url):
"""
        clean the URL of referral tags and other tracking parameters
        :param url(string): Steam store URL
:return: string(url cleaned)
"""
if "://store.steampowered.com/app" in url:
return url
else:
raise UrlNotSteam("Please check the url, it doesn't contain store.steampowered.com/app*")
def normalizeprice(self, price):
"""
        remove the currency symbol from the price
        :param price(string): price tag found on the Steam store
:return: float(price cleaned)
"""
listreplace = ["€", "$", "£", "\t", "\r\n"]
for replacestring in listreplace:
price = price.replace(replacestring, "")
return float(price.replace(",", "."))
def getpage(self, url):
"""
Get the page and raise if status_code is not equal to 200
:param url(string): normalized(url)
:return: bs4(html)
"""
url = self.normalizeurl(url)
req = requests.get(url)
if req.status_code == 200:
return BeautifulSoup(req.text, "html.parser")
elif req.status_code == 404:
raise PageNotFound("Page not found, please check url")
else:
raise RequestGenericError("Return Code: %s, please check url" % req.status_code)
def getprice(self, url):
"""
        Find the price on the Steam store starting from URL
:param url(string): url
:return: float(price cleaned)
"""
body_content = self.getpage(self.normalizeurl(url))
try:
return self.normalizeprice(body_content.find("div", {"class": "game_purchase_price"}).contents[0])
except AttributeError:
return self.normalizeprice(body_content.find("div", {"class": "discount_final_price"}).contents[0])
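# --- Hedged usage sketch (not part of the original module) ---
# Fetching a price requires network access, so this is shown as a comment; the
# app id in the URL below is a made-up placeholder.
#
#     price = SteamStorePrice().getprice("http://store.steampowered.com/app/12345/")
#     print(price)  # float with the currency symbol stripped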
| Mirio/steamstoreprice | steamstoreprice/steamstoreprice.py | Python | bsd-2-clause | 2,007 |
import common.config
import common.messaging as messaging
import common.storage
import common.debug as debug
from common.geo import Line, LineSet
from common.concurrency import ConcurrentObject
from threading import Lock, RLock
import tree.pachinko.message_type as message_type
import tree.pachinko.config as config
import tree.common.protocol.client_configuration_pb2 as client_config_protocol
import tree.pachinko.protocol.modifications_pb2 as modifications_protocol
import tree.pachinko.protocol.queries_pb2 as query_protocol
from tree.common.content_server import ContentServer as BaseContentServer
class Transaction(ConcurrentObject):
def __init__(self, client_handler, client_id, identifier):
ConcurrentObject.__init__(self)
self.client_handler = client_handler
self.client_id = client_id
self.identifier = identifier
self.write_intervals = None
self.read_intervals = None
self.end_request = None
self.dependencies = []
self.pending = []
self.done = False
def can_execute(self):
if not self.end_request:
return False
if self.done:
return False # has already been executed
return self.end_request.abort or len(self.dependencies) == 0
def set_end_request(self, end_request):
assert not self.end_request
assert not self.done
self.end_request = end_request
def commit(self):
result = modifications_protocol.ModificationResult()
result.okay = True
result.replyTo = self.identifier
result.num_inserted = 0
result.num_modified = 0
result.num_deleted = 0
for write in self.end_request.writes:
assert write.HasField('insert')
result.MergeFrom(self.client_handler.handle_insert(write.insert))
# Forward to correct client
client = self.client_handler.content_server.client_acceptor.clients[self.client_id]
with client:
client.peer.send(message_type.client.mod_result, result.SerializeToString())
self.client_handler.content_server.transactions.remove(self)
for pending in self.pending:
with pending:
pending.dependencies.remove(self)
if pending.can_execute():
pending.commit()
debug.log("Ended transaction " + str(self.identifier) + " from client " + str(self.client_id))
self.done = True
def equals(self, client_id, transaction_id):
return (self.client_id == client_id) and (self.identifier == transaction_id)
class ClientHandler(ConcurrentObject):
def __init__(self, content_server, peer):
ConcurrentObject.__init__(self)
self.content_server = content_server
self.peer = peer
self.fileno = peer.get_socket().fileno()
self.identifier = -1
def get_socket(self):
return self.peer
def is_connected(self):
return self.peer.is_connected()
def handle_insert(self, insert):
result = modifications_protocol.ModificationResult()
result.okay = True
result.num_inserted = 0
result.num_modified = 0
result.num_deleted = 0
for obj in insert.objects:
# Only insert if it is in our partition
if (obj.position >= self.content_server.content_position * self.content_server.partition_size()
and obj.position <= (1+self.content_server.content_position) * self.content_server.partition_size()):
self.content_server.storage.put(obj.position, obj.value)
result.num_inserted += 1
debug.log("Inserted " + str(result.num_inserted) + " objects")
return result
def handle_range_remove(self, range_remove):
result = modifications_protocol.ModificationResult()
result.okay = True
result.num_inserted = 0
result.num_modified = 0
result.num_deleted = self.content_server.storage.range_remove(range_remove.start, range_remove.end)
        debug.log("Deleted " + str(result.num_deleted) + " objects")
return result
def handle_range_search(self, range_search):
result = query_protocol.QueryResponse()
for key,value in self.content_server.storage.find(range_search.start, range_search.end):
obj = result.objects.add()
obj.position = key
obj.value = value
debug.log("Found " + str(len(result.objects)) + " objects")
return result
def handle_transaction_start(self, start_transaction):
result = modifications_protocol.ModificationResult()
result.okay = True
result.replyTo = start_transaction.transaction_id
result.num_inserted = 0
result.num_modified = 0
result.num_deleted = 0
assert (self.identifier == -1) or (self.identifier == start_transaction.client_id)
# Read (in)validation
read_intervals = LineSet()
read_intervals.parse(start_transaction.read_intervals)
write_intervals = LineSet()
write_intervals.parse(start_transaction.write_intervals)
dependencies = []
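        # Conflict handling: a pending transaction whose writes overlap our reads
        # forces an abort; write/write and read/write overlaps with pending
        # transactions are recorded as dependencies so we commit only after they do.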
self.content_server.transaction_lock.acquire()
for other in self.content_server.transactions:
if other.write_intervals.overlaps(read_intervals) and not other.done:
debug.log("Detected read-write conflict between client " + str(other.client_id) + " and " + str(start_transaction.client_id))
result.okay = False
break
elif other.write_intervals.overlaps(write_intervals):
other.acquire()
if other.done:
other.release()
else:
dependencies.append(other)
elif other.read_intervals.overlaps(write_intervals):
other.acquire()
if other.done:
other.release()
else:
dependencies.append(other)
assert len(start_transaction.server_ops) == 1
# only check reads if we didn't abort yet
if result.okay:
for read in start_transaction.server_ops[0].reads:
reply = self.handle_range_search(read.range_search)
if len(read.result) != len(reply.objects):
result.okay = False
else:
for i in range(0, len(read.result)):
                        if (read.result[i].position != reply.objects[i].position) or (read.result[i].value != reply.objects[i].value):
debug.log("Found outdated read")
result.okay = False
if result.okay:
debug.log("Started transaction " + str(start_transaction.transaction_id) + " from client " + str(start_transaction.client_id))
tx = Transaction(self, start_transaction.client_id, start_transaction.transaction_id)
tx.write_intervals = write_intervals
tx.read_intervals = read_intervals
tx.dependencies = dependencies
for other in dependencies:
other.pending.append(tx)
other.release()
self.content_server.transactions.append(tx)
else:
for other in dependencies:
other.release()
debug.log("Rejected transaction")
# Forward to correct client
client = self.content_server.client_acceptor.clients[start_transaction.client_id]
with client:
client.peer.send(message_type.client.mod_result, result.SerializeToString())
self.content_server.transaction_lock.release()
def handle_transaction_end(self, end_transaction):
with self.content_server.transaction_lock:
transaction = None
for tx in self.content_server.transactions:
if tx.equals(end_transaction.client_id, end_transaction.transaction_id):
transaction = tx
break
if not transaction:
debug.log("Failed to end transaction " + str(end_transaction.transaction_id) + " from client " + str(end_transaction.client_id))
result = modifications_protocol.ModificationResult()
# abort also succeeds if we find no such transaction on this server
result.okay = True if end_transaction.abort else False
result.replyTo = end_transaction.transaction_id
result.num_inserted = 0
result.num_modified = 0
result.num_deleted = 0
# Forward to correct client
client = self.content_server.client_acceptor.clients[end_transaction.client_id]
with client:
client.peer.send(message_type.client.mod_result, result.SerializeToString())
else:
with transaction:
transaction.set_end_request(end_transaction)
if transaction.can_execute():
transaction.commit()
def forward_transaction_start(self, transaction_start):
with self.content_server.forwarding_lock:
servers = []
assert len(transaction_start.server_ops)
for i in transaction_start.server_ops:
servers.append(i.position)
left_intersection = set(servers).intersection(self.content_server.left_partitions)
right_intersection = set(servers).intersection(self.content_server.right_partitions)
assert len(servers) > 0
assert len(servers) == (len(left_intersection)+len(right_intersection))
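            # Split the request between the two subtrees: read/write intervals are
            # routed by comparing their end against this node's center, and each
            # server_ops entry follows the partition it targets.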
if len(left_intersection):
assert self.content_server.left_child
left_start = modifications_protocol.StartTransactionRequest()
left_start.has_reads = transaction_start.has_reads
left_start.client_id = transaction_start.client_id
left_start.transaction_id = transaction_start.transaction_id
for interval in transaction_start.write_intervals:
if interval.end <= self.content_server.center:
i = left_start.write_intervals.add()
i.CopyFrom(interval)
for interval in transaction_start.read_intervals:
if interval.end <= self.content_server.center:
i = left_start.read_intervals.add()
i.CopyFrom(interval)
for server_ops in transaction_start.server_ops:
if server_ops.position in self.content_server.left_partitions:
left_ops = left_start.server_ops.add()
left_ops.CopyFrom(server_ops)
self.content_server.left_child.send(message_type.client.start_transaction, left_start.SerializeToString())
if len(right_intersection):
assert self.content_server.right_child
right_start = modifications_protocol.StartTransactionRequest()
right_start.has_reads = transaction_start.has_reads
right_start.client_id = transaction_start.client_id
right_start.transaction_id = transaction_start.transaction_id
for interval in transaction_start.write_intervals:
if interval.end >= self.content_server.center:
i = right_start.write_intervals.add()
i.CopyFrom(interval)
for interval in transaction_start.read_intervals:
if interval.end >= self.content_server.center:
i = right_start.read_intervals.add()
i.CopyFrom(interval)
for server_ops in transaction_start.server_ops:
if server_ops.position in self.content_server.right_partitions:
right_ops = right_start.server_ops.add()
right_ops.CopyFrom(server_ops)
self.content_server.right_child.send(message_type.client.start_transaction, right_start.SerializeToString())
def close(self):
if self.identifier > 0:
del self.content_server.client_acceptor.clients[self.identifier]
self.content_server.unregister_handler(self.fileno, self)
        # iterate over a copy: removing entries from the list being iterated would skip elements
        for tx in list(self.content_server.transactions):
            if tx.client_id == self.identifier:
                self.content_server.transactions.remove(tx)
def update(self):
# Loop until we run out of messages
while True:
try:
msgtype, data = self.peer.receive()
except:
self.close()
return
# Connection closed
if msgtype is None:
self.close()
elif msgtype is message_type.client.mod_request:
mod_request = modifications_protocol.ModificationRequest()
mod_request.ParseFromString(data)
result = None
if mod_request.HasField("insert"):
result = self.handle_insert(mod_request.insert)
elif mod_request.HasField("range_remove"):
result = self.handle_range_remove(mod_request.range_remove)
else:
raise RuntimeError("Unknown Modification")
result.replyTo = mod_request.identifier
self.peer.send(message_type.client.mod_result, result.SerializeToString())
elif msgtype is message_type.client.notify_client_id:
message = client_config_protocol.NotifyClientId()
message.ParseFromString(data)
self.identifier = message.identifier
self.content_server.client_acceptor.clients[self.identifier] = self
self.peer.send(message_type.client.notify_cid_ack, bytes())
elif msgtype is message_type.client.start_transaction:
start_transaction = modifications_protocol.StartTransactionRequest()
start_transaction.ParseFromString(data)
if self.content_server.is_leaf():
self.handle_transaction_start(start_transaction)
else:
self.forward_transaction_start(start_transaction)
elif msgtype is message_type.client.query_request:
query_request = query_protocol.QueryRequest()
query_request.ParseFromString(data)
result = None
if query_request.HasField("range_search"):
result = self.handle_range_search(query_request.range_search)
else:
raise RuntimeError("Unknown query type")
result.replyTo = query_request.identifier
self.peer.send(message_type.client.query_result, result.SerializeToString())
elif msgtype is message_type.client.end_transaction:
end_transaction = modifications_protocol.EndTransactionRequest()
end_transaction.ParseFromString(data)
self.handle_transaction_end(end_transaction)
else:
raise RuntimeError("Received unknown message type from client")
# Done?
if not self.peer.has_messages():
return
class ContentServer(BaseContentServer):
def __init__(self, coordinator, name, level, pos):
BaseContentServer.__init__(self, "pachinko", coordinator, config.COORDINATOR_PORT_INTERNAL, name, level, pos, ClientHandler)
self.transaction_lock = RLock()
self.forwarding_lock = Lock()
| kaimast/inanutshell | tree/pachinko/content_server.py | Python | bsd-2-clause | 16,352 |
from django.test import override_settings
from incuna_test_utils.testcases.api_request import (
BaseAPIExampleTestCase, BaseAPIRequestTestCase,
)
from tests.factories import UserFactory
class APIRequestTestCase(BaseAPIRequestTestCase):
user_factory = UserFactory
def test_create_request_format(self):
request = self.create_request()
assert request.META['format'] == 'json'
def test_create_request_auth(self):
request = self.create_request()
assert request.user.is_authenticated
def test_create_request_no_auth(self):
request = self.create_request(auth=False)
assert not request.user.is_authenticated
class APIExampleTestCase(BaseAPIExampleTestCase):
@override_settings(ALLOWED_HOSTS=['localhost'])
def test_create_request(self):
request = self.create_request(auth=False)
assert request.get_host() == self.SERVER_NAME
| incuna/incuna-test-utils | tests/testcases/test_api_request.py | Python | bsd-2-clause | 918 |
from django.forms import ModelForm
from bug_reporting.models import Feedback
from CoralNet.forms import FormHelper
class FeedbackForm(ModelForm):
class Meta:
model = Feedback
fields = ('type', 'comment') # Other fields are auto-set
#error_css_class = ...
#required_css_class = ...
def clean(self):
"""
1. Strip spaces from character fields.
2. Call the parent's clean() to finish up with the default behavior.
"""
data = FormHelper.stripSpacesFromFields(
self.cleaned_data, self.fields)
self.cleaned_data = data
return super(FeedbackForm, self).clean() | DevangS/CoralNet | bug_reporting/forms.py | Python | bsd-2-clause | 661 |
import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils.parse import parse_json
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?livestream\.com/"
))
class Livestream(Plugin):
_config_re = re.compile(r"window.config = ({.+})")
_stream_config_schema = validate.Schema(validate.any({
"event": {
"stream_info": validate.any({
"is_live": bool,
"secure_m3u8_url": validate.url(scheme="https"),
}, None),
}
}, {}), validate.get("event", {}), validate.get("stream_info", {}))
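    # The event page embeds a window.config JSON object; the schema keeps only
    # event.stream_info, i.e. the is_live flag and the secure HLS playlist URL.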
def _get_streams(self):
res = self.session.http.get(self.url)
m = self._config_re.search(res.text)
if not m:
log.debug("Unable to find _config_re")
return
stream_info = parse_json(m.group(1), "config JSON",
schema=self._stream_config_schema)
log.trace("stream_info: {0!r}".format(stream_info))
if not (stream_info and stream_info["is_live"]):
log.debug("Stream might be Off Air")
return
m3u8_url = stream_info.get("secure_m3u8_url")
if m3u8_url:
yield from HLSStream.parse_variant_playlist(self.session, m3u8_url).items()
__plugin__ = Livestream
| melmorabity/streamlink | src/streamlink/plugins/livestream.py | Python | bsd-2-clause | 1,433 |
import time
import threading
import PyTango
import numpy
import h5py
THREAD_DELAY_SEC = 0.1
class HDFwriterThread(threading.Thread):
#-----------------------------------------------------------------------------------
# __init__
#-----------------------------------------------------------------------------------
def __init__(self, parent_obj, filename_in, trg_start, trg_stop):
threading.Thread.__init__(self)
self._alive = True
self.myState = PyTango.DevState.OFF
self.filename = filename_in
self.parent = parent_obj
self.trg_start = trg_start
self.trg_stop = trg_stop
self.data_queue = []
self.datasource_finished = {}
self._hdf_file = None
self.timeout_sec = 20
self.MetadataSources = {}
if "_errors" in dir(h5py):
h5py._errors.silence_errors()
#-----------------------------------------------------------------------------------
# set_Metadata_Sources
#-----------------------------------------------------------------------------------
def set_Metadata_Sources(self, MetadataSources):
self.MetadataSources = MetadataSources
#-----------------------------------------------------------------------------------
# notify_new_data
#-----------------------------------------------------------------------------------
def notify_new_data(self, daq_thread, trg):
self.data_queue.append([daq_thread, trg])
#-----------------------------------------------------------------------------------
# store_metadata
#-----------------------------------------------------------------------------------
def store_metadata(self):
for metakey in self.MetadataSources.keys():
if not self.MetadataSources[metakey]['enabled']:
continue
try:
attprx = PyTango.AttributeProxy(self.MetadataSources[metakey]['tango_attr'])
attrinfo = attprx.get_config()
attprx.get_device_proxy().set_timeout_millis(500)
data_in = attprx.read().value
del attprx
except Exception, ex:
self.MetadataSources[metakey]['status'] = 'ALARM'
print "store_metadata, attribute proxy",metakey,ex
continue
#
retries = 0
while retries < 3:
if metakey in self._hdf_file:
break
try:
# Create HDF dataset
dset = self._hdf_file.create_dataset(metakey, data=data_in)
#dset = self._hdf_file[metakey]
dset.attrs["unit"] = attrinfo.unit
break
except Exception, ex:
print "store_metadata",metakey,self.trg_start,ex
retries += 1
#-----------------------------------------------------------------------------------
# store_sync_player_metadata
#-----------------------------------------------------------------------------------
def store_sync_player_metadata(self,daq_thread):
player_metadata = daq_thread.player_metadata
dset = self._hdf_file[daq_thread.player_nickname]
for key in player_metadata.keys():
try:
attprx = PyTango.AttributeProxy(player_metadata[key].tango_attr)
attprx.get_device_proxy().set_timeout_millis(500)
data_in = attprx.read().value
del attprx
#
dset.attrs[key] = data_in
except Exception, ex:
print "store_sync_player_metadata",key,ex
#
# Unit is default
try:
attprx = PyTango.AttributeProxy(daq_thread.player_attrname)
attrinfo = attprx.get_config()
del attprx
#
dset.attrs["unit"] = attrinfo.unit
except Exception, ex:
print "store_sync_player_metadata, deafult unit",daq_thread.player_attrname,ex
#-----------------------------------------------------------------------------------
# store_data
#-----------------------------------------------------------------------------------
def store_data(self, daq_queue_item):
daq_thread = daq_queue_item[0]
trg = daq_queue_item[1]
data_in = daq_thread._data_buffer[trg]
        if data_in is None:
return
if daq_thread.player_nickname not in self.datasource_finished.keys():
self.datasource_finished[daq_thread.player_nickname] = False
#
# Create HDF dataset
tokens = daq_thread.player_nickname.split("/")
groupname=""
dsetname = daq_thread.player_nickname
dataset_len = 1+self.trg_stop-self.trg_start
retries = 0
while (retries < 3):
try:
if dsetname in self._hdf_file:
break
if len(numpy.shape(data_in)) == 0: #scalar
self._hdf_file.create_dataset(dsetname,shape=(dataset_len,),dtype=numpy.dtype(type(data_in)))
elif len(numpy.shape(data_in)) == 1: #spectrum
self._hdf_file.create_dataset(dsetname, shape=(dataset_len,data_in.shape[0]), dtype=data_in.dtype)
elif len(numpy.shape(data_in)) == 2: #image
self._hdf_file.create_dataset(dsetname, shape=(dataset_len,data_in.shape[0],data_in.shape[1]), dtype=data_in.dtype)
break
except Exception, ex:
print "Create Dataset",dsetname,data_in,len(numpy.shape(data_in)),dataset_len,"\n",ex
retries += 1
#
self.store_sync_player_metadata(daq_thread)
#
retries = 0
while (retries < 3):
#update the dataset
try:
dset = self._hdf_file.get(daq_thread.player_nickname, None)
dset[slice(trg - self.trg_start,trg - self.trg_start+1)] = data_in
break
except Exception, ex:
retries += 1
print "Update Dataset",ex
#
if trg == self.trg_stop:
self.datasource_finished.pop(daq_thread.player_nickname)
#-----------------------------------------------------------------------------------
# close_file
#-----------------------------------------------------------------------------------
def close_file(self):
try:
#
data_in=numpy.arange(self.trg_start,self.trg_stop+1)
self._hdf_file.create_dataset("triggers", data = data_in)
#
self.store_metadata()
#
self._hdf_file.flush()
self._hdf_file.close()
self.parent.report_message("Closed file "+self.filename)
except Exception, ex:
print "Closing File",ex
self.parent.notify_hdf_file_finished(self)
#-----------------------------------------------------------------------------------
# run
#-----------------------------------------------------------------------------------
def run(self):
try:
self._hdf_file = h5py.File(self.filename,'w')
self.parent.report_message("Opened file "+self.filename)
except Exception, ex:
print ex
self.parent.report_message("Unable to Open file "+self.filename)
return
last_store_time = time.time()
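        # Main loop: drain queued (daq_thread, trigger) items into the HDF5 file;
        # the file is closed once every data source has delivered its final trigger,
        # or after timeout_sec without new data (pausing resets the timer).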
while self._alive:
while len(self.data_queue):
#
self.store_data(self.data_queue[0])
del self.data_queue[0]
last_store_time = time.time()
#
if len(self.datasource_finished) == 0:
self.close_file()
if self.parent._paused:
last_store_time = time.time()
elif (time.time() - last_store_time) > self.timeout_sec:
print "TIMEOUT",self.filename
self.close_file()
last_store_time = time.time()
time.sleep(THREAD_DELAY_SEC)
| ess-dmsc/do-ess-data-simulator | DonkiDirector/HDFWriterThread.py | Python | bsd-2-clause | 6,785 |
from rknfilter.targets import BaseTarget
from rknfilter.db import Resource, Decision, CommitEvery
from rknfilter.core import DumpFilesParser
class StoreTarget(BaseTarget):
def __init__(self, *args, **kwargs):
super(StoreTarget, self).__init__(*args, **kwargs)
self._dump_files_parser = DumpFilesParser()
def process(self):
commit = CommitEvery(self._session)
for content, decision, domains, urls, ips, _ in self._dump_files_parser.get_data():
# TODO: move to models?
resource = Resource.get_or_create(self._session, rkn_id=content['rkn_id'])
if resource.id is None:
resource.include_date = content['include_date']
resource.entry_type = content['entry_type']
resource.urgency_type = content['urgency_type']
resource.block_type = content['block_type']
resource.decision = Decision(
date=decision['decision_date'],
org=decision['decision_org'],
num=decision['decision_num']
)
resource.sync_m2m_proxy('domains_list', domains)
resource.sync_m2m_proxy('urls_list', urls)
resource.sync_m2m_proxy('ips_list', ips)
commit()
commit(force=True)
| DmitryFillo/rknfilter | rknfilter/targets/store.py | Python | bsd-2-clause | 1,324 |
from quex.engine.generator.languages.address import Address
from quex.blackboard import E_EngineTypes, E_AcceptanceIDs, E_StateIndices, \
E_TransitionN, E_PostContextIDs, E_PreContextIDs, \
setup as Setup
def do(txt, TheState, TheAnalyzer, DefineLabelF=True, MentionStateIndexF=True):
LanguageDB = Setup.language_db
if DefineLabelF:
txt.append(Address("$drop-out", TheState.index))
if MentionStateIndexF:
txt.append(" __quex_debug_drop_out(%i);\n" % TheState.index)
if TheAnalyzer.engine_type == E_EngineTypes.BACKWARD_PRE_CONTEXT:
txt.append(" %s\n" % LanguageDB.GOTO(E_StateIndices.END_OF_PRE_CONTEXT_CHECK))
return
elif TheAnalyzer.engine_type == E_EngineTypes.BACKWARD_INPUT_POSITION:
if TheState.drop_out.reachable_f:
# Backward input position detectors are always isolated state machines.
# => TheAnalyzer.state_machine_id = id of the backward input position detector.
txt.append(' __quex_debug("backward input position %i detected\\n");\n' % \
TheAnalyzer.state_machine_id)
txt.append(" %s\n\n" % LanguageDB.INPUT_P_INCREMENT())
txt.append(" goto %s;\n" \
% LanguageDB.LABEL_NAME_BACKWARD_INPUT_POSITION_RETURN(TheAnalyzer.state_machine_id))
return
info = TheState.drop_out.trivialize()
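    # trivialize() yields one (acceptance-checker, terminal-router) pair per case
    # when every drop-out reduces to a fixed positioning plus a direct goto to its
    # terminal; it yields None when the general two-step scheme below is needed.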
# (1) Trivial Solution
if info is not None:
for i, easy in enumerate(info):
positioning_str = ""
if easy[1].positioning != 0:
if easy[1].positioning == E_TransitionN.VOID: register = easy[1].position_register
else: register = E_PostContextIDs.NONE
positioning_str = "%s\n" % LanguageDB.POSITIONING(easy[1].positioning, register)
goto_terminal_str = "%s" % LanguageDB.GOTO_TERMINAL(easy[1].acceptance_id)
txt.append(LanguageDB.IF_PRE_CONTEXT(i == 0, easy[0].pre_context_id,
"%s%s" % (positioning_str, goto_terminal_str)))
return
# (2) Separate: Pre-Context Check and Routing to Terminal
# (2.1) Pre-Context Check
for i, element in enumerate(TheState.drop_out.get_acceptance_checker()):
if element.pre_context_id == E_PreContextIDs.NONE \
and element.acceptance_id == E_AcceptanceIDs.VOID:
break
txt.append(
LanguageDB.IF_PRE_CONTEXT(i == 0, element.pre_context_id,
LanguageDB.ASSIGN("last_acceptance",
LanguageDB.ACCEPTANCE(element.acceptance_id)))
)
if element.pre_context_id == E_PreContextIDs.NONE:
break # No check after the unconditional acceptance
# (2.2) Routing to Terminal
# (2.2.1) If the positioning is the same for all entries (except the FAILURE)
# then, again, the routing may be simplified:
#router = TheState.drop_out.router
#prototype = (router[0].positioning, router[0].position_register)
#simple_f = True
#for element in islice(router, 1, None):
# if element.acceptance_id == E_AcceptanceIDs.FAILURE: continue
# if prototype != (element.positioning, element.position_register):
# simple_f = False
# break
#if simple_f:
# txt.append(" %s\n %s\n" %
# (LanguageDB.POSITIONING(element.positioning, element.position_register),
# LanguageDB.GOTO_TERMINAL(E_AcceptanceIDs.VOID)))
#else:
case_list = []
for element in TheState.drop_out.get_terminal_router():
if element.positioning == E_TransitionN.VOID: register = element.position_register
else: register = None
case_list.append((LanguageDB.ACCEPTANCE(element.acceptance_id),
"%s %s" % \
(LanguageDB.POSITIONING(element.positioning, register),
LanguageDB.GOTO_TERMINAL(element.acceptance_id))))
txt.extend(LanguageDB.SELECTION("last_acceptance", case_list))
| coderjames/pascal | quex-0.63.1/quex/engine/generator/state/drop_out.py | Python | bsd-2-clause | 4,373 |
# based on https://github.com/pypa/sampleproject/blob/master/setup.py
# see http://packaging.python.org/en/latest/tutorial.html#creating-your-own-project
from setuptools import setup, find_packages
from setuptools.command.install import install as stdinstall
import codecs
import os
import re
import sys
def find_version(*file_paths):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def get_file_contents(filename):
with codecs.open(filename, encoding='utf-8') as f:
contents = f.read()
return contents
package_name = "typecheck-decorator"
class install_with_test(stdinstall):
def run(self):
stdinstall.run(self) # normal install
##pip/setuptools makes this unbuffering unhelpful:
#sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1) # make line-buffered
#sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1) # make line-buffered
#import typecheck.test_typecheck_decorator # execute post-install test (during beta only)
setup(
# setup customization:
cmdclass={'install': install_with_test},
# basic information:
name=package_name,
version=find_version('typecheck', '__init__.py'),
description="flexible explicit run-time type checking of function arguments (Python3-only)",
long_description=get_file_contents("README.rst"),
# The project URL:
url='http://github.com/prechelt/' + package_name,
# Author details:
author='Dmitry Dvoinikov, Lutz Prechelt',
author_email='[email protected]',
# Classification:
license='BSD License',
classifiers=[
'License :: OSI Approved :: BSD License',
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Documentation',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='type-checking',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ['typing;python_version<"3.5"'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# 'typecheck': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
###data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
### entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
) | prechelt/typecheck-decorator | setup.py | Python | bsd-2-clause | 3,969 |
import re, csv
header_end = re.compile("\s+START\s+END\s+")
## p. 1172 is missing a space between the final date and the description. Can't understand why the layout option does this; a space is clearly visible. I dunno.
five_data_re = re.compile("\s*([\w\d]+)\s+(\d\d\/\d\d\/\d\d\d\d)\s+(.*?)\s+(\d\d\/\d\d\/\d\d\d\d)\s+(\d\d\/\d\d\/\d\d\d\d)\s*(.+?)\s+([\d\.\-\,]+)\s*\Z")
five_data_missing_date = re.compile("\s*([\w\d]+)\s+(\d\d\/\d\d\/\d\d\d\d)\s+(.*?)\s{10,}(.*?)\s+([\d\.\-\,]+)\s*\Z")
three_data_re = re.compile("\s+(\w[\w\,\s\.\-\']+?)\s{10,}(\w.*?)\s{4,}([\d\.\-\,]+)\s*")
top_matter_end_re = re.compile("\s+DOCUMENT\s+NO\.\s+DATE\s+PAYEE")
funding_year_re = re.compile("\s*Funding\s+Year\s+(\d+)")
blank_line_re = re.compile("\s+\Z")
page_number_re = re.compile("\s+\w\-\d+")
page_number_alt_re = re.compile("\s+\w\-\d\-\d+")
continuation_with_amount_re = re.compile("\s*(.+?)\s{10,}([\d\.\-\,]+)\s+\Z")
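# Row layouts captured by the expressions above (groups in order, following the
# DOCUMENT NO. / DATE / PAYEE / START / END header):
#   five_data_re: document no., date, payee, service start, service end, description, amount
#   five_data_missing_date: the same kind of row with the start/end date columns absent
#   three_data_re: payee, description, amount (no document number or dates)
#   continuation_with_amount_re: wrapped description text that still carries its own amount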
travel_re = re.compile("\s+TRAVEL\s+AND\s+TRANSPORTATION\s+OF\s+PERSONS\s+")
it_re = re.compile("\s+INTERDEPARTMENTAL\s+TRANSPORTATION\s+")
ocs_re = re.compile("\s+OTHER\s+CONTRACTUAL\s+SERVICES\s+")
acq_re = re.compile("\s+ACQUISITION\s+OF\s+ASSETS\s+")
prsnl_re = re.compile("\s+PERSONNEL\s+BENEFITS\s+")
netpayroll_re = re.compile("\s+NET\s+PAYROLL\s+EXPENSES\s+")
persnl_comp_re = re.compile("\s+PERSONNEL COMP. FULL-TIME PERMANENT\s+")
other_personal_comp = re.compile("\s+OTHER PERSONNEL COMPENSATION\s+")
remployed_annuitants_re = re.compile("\s+RE-EMPLOYED ANNUITANTS\s+")
former_employee_benefits_re = re.compile("\s+BENEFITS FOR NON SENATE/FORMER PERSONNEL\s+")
page_number_re = re.compile("\s+B\s*\-\s*\d+\s*")
def is_subtotal(line):
if travel_re.match(line):
return True
if it_re.match(line):
return True
if ocs_re.match(line):
return True
if acq_re.match(line):
return True
if prsnl_re.match(line):
return True
if netpayroll_re.match(line):
return True
if persnl_comp_re.match(line):
return True
if other_personal_comp.match(line):
return True
if remployed_annuitants_re.match(line):
return True
if former_employee_benefits_re.match(line):
return True
return False
def compute_break_position(top_matter):
    # Disabled: process_top_matter() currently hard-codes the column break at 48,
    # so this returns None immediately and the scan below is never reached.
    return None
    for whole_line in top_matter:
        if top_matter_end_re.match(whole_line):
            break
        if blank_line_re.match(whole_line):
            continue
    return None
def process_top_matter(page_num, top_matter):
#top_matter_top_left_column_delimiter = compute_break_position(top_matter)
top_matter_top_left_column_delimiter = 48
#return None
expense_description = ''
for whole_line in top_matter:
if top_matter_end_re.match(whole_line):
break
line = whole_line[:top_matter_top_left_column_delimiter]
if blank_line_re.match(line):
continue
result = funding_year_re.match(line)
line_stripped = line.strip()
if line_stripped:
expense_description += ' ' + line_stripped + ' '
expense_description = re.sub( '\s+', ' ', expense_description ).strip()
return expense_description
# some carryover lines have amounts in them, and some don't -- that is, they are just extensions of the text field. See, e.g. p. 1672.
def test_carryover_line(line_offset, line):
# are the first n characters of the line empty ?
line_start = line[:line_offset]
if blank_line_re.match(line_start):
line_end = line[line_offset:]
if not blank_line_re.match(line_end):
#print "***possible continuation: %s" % (line_end)
return True
return False
def process_data_lines(page_num, data_lines):
missing_data = []
return_data = []
return_data_index = 0
# these are lines that describe prior lines--typically the travel associated with a per diem or a transportation line. They aren't processed in this step, but instead just recorded in the one_part_continuation_register, and processed after that.
one_part_continuation_register = []
last_line_data_index = None
for data_line in data_lines:
#print "handling %s %s" % (last_line_data_index, data_line)
if blank_line_re.match(data_line):
# don't reset last line data index--sometimes the page number appears in the middle of a page.
continue
if page_number_re.match(data_line):
# These are the page numbers
continue
if is_subtotal(data_line):
last_line_data_index = None
#assert False
continue
found_data = five_data_re.match(data_line)
if found_data:
#print found_data.groups()
if found_data:
return_data.append(['five data line', False, page_num] + list(found_data.groups()))
return_data_index += 1
#print "index of text description is: " + str(found_data.start(6))
last_line_data_index = str(found_data.start(6))
#print "Five data---last line data index: %s %s" % (last_line_data_index, found_data.groups())
# we need this to figure out if the next line is a continuation or a sub-header type thing.
else:
#pass
found_data2 = three_data_re.match(data_line)
found_data_missing_date = five_data_missing_date.match(data_line)
if found_data2:
results = list(found_data2.groups())
result_formatted = ['three data line', False, page_num, '', '', results[0], '', '', results[1], results[2]]
return_data.append(result_formatted)
return_data_index += 1
last_line_data_index = None
elif (found_data_missing_date):
print "**found missing date line"
results = list(found_data_missing_date.groups())
result_formatted = ['missing date line', False, page_num, results[0], results[1], results[2], '', '', results[3], results[4]]
return_data.append(result_formatted)
return_data_index += 1
last_line_data_index = None
else:
is_page_num = page_number_re.match(data_line)
is_page_num_alt = page_number_alt_re.match(data_line)
if is_page_num or is_page_num_alt:
continue
if last_line_data_index:
#print "running carryover test with n=%s" % (last_line_data_index)
carryover_found = test_carryover_line(int(last_line_data_index), data_line)
if carryover_found:
continuation_data = continuation_with_amount_re.match(data_line)
if continuation_data:
#print "two part continuation found: '" + continuation_data.group(1) + "'-'" + continuation_data.group(2) + "'"
# it's a two part continuation--probably per diem/travel. So add same data as for the first line.
previous_result = return_data[return_data_index-1]
result_formatted = ['continuation_data', True, previous_result[2], previous_result[3], previous_result[4], previous_result[5], previous_result[6], previous_result[7], continuation_data.group(1), continuation_data.group(2)]
return_data.append(result_formatted)
return_data_index += 1
else:
description = data_line.strip()
#print "one part continuation found: '" + description +"'"
register_data = {'array_index':return_data_index, 'data':description}
one_part_continuation_register.append(register_data)
## annoyingly, these descriptions themselves can span over multiple lines.
## e.g. p. 1557:
# WASHINGTON DC TO CHARLESTON, COLUMBIA, CHARLESTON, COLUMBIA, LEXINGTON,
# CLINTON, SPARTANBURG, GREENVILLE, COLUMBIA, AIKEN, COLUMBIA, CHARLESTON AND RETURN
# RETURN
## append it to previous rows.
else:
print "missing <" + data_line + ">"
missing_data.append({'data':data_line, 'offset':return_data_index,'page_num':page_num })
#if one_part_continuation_register:
#print "one_part_continuation_register: %s" % (one_part_continuation_register)
return {'data':return_data, 'register':one_part_continuation_register, 'missing_data':missing_data}
def find_header_index(line_array):
matches = 0
header_index = None
for index, line in enumerate(line_array):
r = header_end.search(line)
if r:
#print "match: %s: %s" % (index, line)
matches += 1
header_index = index
# break if we don't find exactly one occurrence of this per page.
assert matches == 1
return header_index
start_page = 17
end_page = 2073
#start_page = 1938
#end_page = 1938
page_file_unfilled = "pages/layout_%s.txt"
header_index_hash = {}
csvfile = open("senate_data.csv", 'wb')
datawriter = csv.writer(csvfile)
current_description = None
description = None
missing_data_file = open("missing_data.txt", 'w')
for page in range(start_page, end_page+1):
# random blank page
if page == 1884 or page == 2068:
continue
print "Processing page %s" % page
filename = page_file_unfilled % (page)
fh = open(filename, 'r')
page_array = []
for line in fh:
page_array.append(line)
header_index = find_header_index(page_array)
# keep stats on where we find the index.
try:
header_index_hash[header_index] += 1
except KeyError:
header_index_hash[header_index] = 1
# This is based on research...
if header_index > 6:
top_matter = page_array[:header_index+1]
description = process_top_matter(page, top_matter)
current_description = description
data_lines = page_array[header_index+1:]
data_found = process_data_lines(page, data_lines)
# get the data lines, and the run-on lines.
data_lines = data_found['data']
one_line_continuation_register = data_found['register']
# run through the continuation lines and append them to the right places.
for cl in one_line_continuation_register:
all_related_lines_found = False
current_line_position = cl['array_index']-1
while all_related_lines_found == False:
data_lines[current_line_position][8] = data_lines[current_line_position][8] + " + " + cl['data']
if data_lines[current_line_position][0] != 'continuation_data':
all_related_lines_found = True
else:
# it's a continuation line, so append this to the previous line too.
current_line_position -= 1
for data in data_lines:
datawriter.writerow([current_description] + data)
if data_found['missing_data']:
missing_data_file.write(str(data_found['missing_data']) + "\n")
for k,v in sorted(header_index_hash.items()):
print k,v
"""
header index frequency:
3 1240
18 117
19 33
20 34
26 9
27 16
28 349
29 12
"""
| jsfenfen/senate_disbursements | 114_sdoc4/read_pages.py | Python | bsd-2-clause | 11,865 |
from django.views.generic import ListView
from models import Project
# Create your views here.
class ListProjectView(ListView):
model = Project
template_name = 'cmsplugin_vfoss_project/project_list.html'
| thuydang/djagazin | wsgi/djagazin/cmsplugin_vfoss_project/views.py | Python | bsd-2-clause | 207 |
""" Common utility
"""
import logging
import time
def measure(func, *args, **kwargs):
def start(*args, **kwargs):
begin = time.time()
result = func(*args, **kwargs)
end = time.time()
        # Pick a representative scalar argument for the log message.
        arg = args
        while not isinstance(arg, (str, int, float)):
            if isinstance(arg, (list, tuple)) and arg:
                arg = arg[0]
            else:
                # dicts, empty sequences and other types are not descended into
                arg = ''
        arg = str(arg)
        arg_trun = arg
        if len(arg) > 70:
            arg_trun = arg[:67]
logging.info('{} took {:6.3f} sec {}'.format(func.__name__ ,
end - begin, arg_trun))
logging.debug('with {} and {}'.format(args, kwargs))
return result
return start
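# Minimal usage sketch (illustrative only; parse_file is a hypothetical callee):
#
#   logging.basicConfig(level=logging.INFO)
#   timed_parse = measure(parse_file)
#   timed_parse("/tmp/example.c")   # logs e.g. "parse_file took  0.012 sec /tmp/example.c"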
| inlinechan/stags | stags/util.py | Python | bsd-2-clause | 856 |
from __future__ import (absolute_import, print_function, division)
class NotImplementedException(Exception):
pass
| zbuc/imaghost | ghost_exceptions/__init__.py | Python | bsd-2-clause | 120 |
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from model_utils import Choices
from model_utils.models import TimeStampedModel
from crate.web.packages.models import Package, Release, ReleaseFile
class Event(TimeStampedModel):
ACTIONS = Choices(
("package_create", _("Package Created")),
("package_delete", _("Package Deleted")),
("release_create", _("Release Created")),
("release_delete", _("Release Deleted")),
("file_add", _("File Added")),
("file_remove", _("File Removed")),
)
package = models.SlugField(max_length=150)
version = models.CharField(max_length=512, blank=True)
action = models.CharField(max_length=25, choices=ACTIONS)
data = JSONField(null=True, blank=True)
@receiver(post_save, sender=Package)
def history_package_create(instance, created, **kwargs):
if created:
Event.objects.create(
package=instance.name,
action=Event.ACTIONS.package_create
)
@receiver(post_delete, sender=Package)
def history_package_delete(instance, **kwargs):
Event.objects.create(
package=instance.name,
action=Event.ACTIONS.package_delete
)
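# Visibility changes are part of the history: hiding a release is logged as
# release_delete, un-hiding it as release_create, and hiding a release file as
# file_remove.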
@receiver(post_save, sender=Release)
def history_release_update(instance, created, **kwargs):
if created:
Event.objects.create(
package=instance.package.name,
version=instance.version,
action=Event.ACTIONS.release_create
)
if instance.has_changed("hidden"):
if instance.hidden:
Event.objects.create(
package=instance.package.name,
version=instance.version,
action=Event.ACTIONS.release_delete
)
else:
Event.objects.create(
package=instance.package.name,
version=instance.version,
action=Event.ACTIONS.release_create
)
@receiver(post_save, sender=ReleaseFile)
def history_releasefile_update(instance, created, **kwargs):
e = None
if instance.has_changed("hidden"):
if instance.hidden:
e = Event.objects.create(
package=instance.release.package.name,
version=instance.release.version,
action=Event.ACTIONS.file_remove
)
if e is not None:
try:
e.data = {
"filename": instance.filename,
"digest": instance.digest,
"uri": instance.get_absolute_url(),
}
except ValueError:
pass
else:
e.save()
| crateio/crate.web | crate/web/history/models.py | Python | bsd-2-clause | 2,810 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TtTrip.shape'
db.add_column(u'timetable_tttrip', 'shape',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['timetable.TtShape'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TtTrip.shape'
db.delete_column(u'timetable_tttrip', 'shape_id')
models = {
u'timetable.ttshape': {
'Meta': {'object_name': 'TtShape'},
'gtfs_shape_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.TextField', [], {})
},
u'timetable.ttstop': {
'Meta': {'object_name': 'TtStop'},
'gtfs_stop_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stop_lat': ('django.db.models.fields.FloatField', [], {}),
'stop_lon': ('django.db.models.fields.FloatField', [], {}),
'stop_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'stop_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'timetable.ttstoptime': {
'Meta': {'object_name': 'TtStopTime'},
'exp_arrival': ('django.db.models.fields.DateTimeField', [], {}),
'exp_departure': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtStop']"}),
'stop_sequence': ('django.db.models.fields.IntegerField', [], {}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtTrip']"})
},
u'timetable.tttrip': {
'Meta': {'object_name': 'TtTrip'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gtfs_trip_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtShape']", 'null': 'True'})
}
}
complete_apps = ['timetable'] | hasadna/OpenTrain | webserver/opentrain/timetable/migrations/0013_auto__add_field_tttrip_shape.py | Python | bsd-3-clause | 2,893 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-16 00:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0005_queue_name'),
]
operations = [
migrations.AlterField(
model_name='media',
name='media_service',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.MediaService'),
),
]
| falcaopetri/enqueuer-api | api/migrations/0006_auto_20161015_2113.py | Python | bsd-3-clause | 543 |
# Short help
def display_summary():
print("{:<13}{}".format( 'rm', "Removes a previously copied SCM Repository" ))
# DOCOPT command line definition
USAGE="""
Removes a previously 'copied' repository
===============================================================================
usage: evie [common-opts] rm [options] <dst> <repo> <origin> <id>
evie [common-opts] rm [options] get-success-msg
evie [common-opts] rm [options] get-error-msg
Arguments:
<dst> PARENT directory for where the package was copied. The
directory is specified as a relative path to the root
of primary repository.
<repo> Name of the repository to remove
<origin> Path/URL to the repository
<id> Label/Tag/Hash/Version of code to be remove
get-success-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command is successful
get-error-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command fails
Options:
-p PKGNAME Specifies the Package name if different from the <repo>
name
-b BRANCH Specifies the source branch in <repo>. The use/need
of this option in dependent on the <repo> SCM type.
Options:
-h, --help Display help for this command
Notes:
o The command MUST be run in the root of the primary respostiory.
o This command only applied to repositories previously mounted using
the 'copy' command.
""" | johnttaylor/Outcast | bin/scm/rm.py | Python | bsd-3-clause | 1,756 |
# -*- coding: utf-8 -*-
"""
DU task for ABP Table: doing jointly row BIESO and horizontal cuts
block2line edges do not cross another block.
The cut are based on baselines of text blocks.
- the labels of horizontal cuts are SIO (instead of SO in previous version)
Copyright Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
import math
from lxml import etree
import collections
import numpy as np
from sklearn.pipeline import Pipeline, FeatureUnion
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from tasks import _checkFindColDir, _exit
from tasks.DU_CRF_Task import DU_CRF_Task
from xml_formats.PageXml import MultiPageXml
import graph.GraphModel
from crf.Edge import Edge, SamePageEdge
from crf.Graph_MultiPageXml import Graph_MultiPageXml
from crf.NodeType_PageXml import NodeType_PageXml_type_woText
#from crf.FeatureDefinition_PageXml_std_noText import FeatureDefinition_PageXml_StandardOnes_noText
from crf.FeatureDefinition import FeatureDefinition
from crf.Transformer import Transformer, TransformerListByType
from crf.Transformer import EmptySafe_QuantileTransformer as QuantileTransformer
from crf.Transformer_PageXml import NodeTransformerXYWH_v2, NodeTransformerNeighbors, Node1HotFeatures
from crf.Transformer_PageXml import Edge1HotFeatures, EdgeBooleanFeatures_v2, EdgeNumericalSelector
from crf.PageNumberSimpleSequenciality import PageNumberSimpleSequenciality
from tasks.DU_ABPTableCutAnnotator import BaselineCutAnnotator
class GraphCut(Graph_MultiPageXml):
"""
We specialize the class of graph because the computation of edges is quite specific
"""
#Cut stuff
#iModulo = 1 # map the coordinate to this modulo
fMinPageCoverage = 0.5 # minimal coverage to consider a GT table separator
iLineVisibility = 5 * 11 # a cut line sees other cut line up to N pixels downward
iBlockVisibility = 3*7*13 # a block sees neighbouring cut lines at N pixels
_lClassicNodeType = None
@classmethod
def setClassicNodeTypeList(cls, lNodeType):
"""
determine which type of node goes thru the classical way for determining
the edges (vertical or horizontal overlap, with occlusion, etc.)
"""
cls._lClassicNodeType = lNodeType
def parseDocFile(self, sFilename, iVerbose=0):
"""
Load that document as a CRF Graph.
Also set the self.doc variable!
Return a CRF Graph object
"""
self.doc = etree.parse(sFilename)
self.lNode, self.lEdge = list(), list()
self.lNodeBlock = [] # text node
self.lNodeCutLine = [] # cut line node
root = self.doc.getroot()
doer = BaselineCutAnnotator()
doer.setLabelScheme_SIO() #use SIO instead of SO labels!
#doer.setModulo(self.iModulo) # this is optional
#load the groundtruth table separators, if any, per page (1 in tABP)
ltlYlX = doer.get_separator_YX_from_DOM(root, self.fMinPageCoverage)
for (lHi, lVi) in ltlYlX:
traceln(" - found %d horizontal, %d vertical GT separators" % (len(lHi), len(lVi)))
#create DOM node reflecting the cuts
#first clean (just in case!)
n = doer.remove_cuts_from_dom(root)
if n > 0:
traceln(" - removed %d pre-existing cut lines" % n)
# if GT, then we have labelled cut lines in DOM
_ltlYCutXCut = doer.add_cut_to_DOM(root, ltlYlX=ltlYlX)
lClassicType = [nt for nt in self.getNodeTypeList() if nt in self._lClassicNodeType]
lSpecialType = [nt for nt in self.getNodeTypeList() if nt not in self._lClassicNodeType]
for (pnum, page, domNdPage) in self._iter_Page_DocNode(self.doc):
#now that we have the page, let's create the node for each type!
lClassicPageNode = [nd for nodeType in lClassicType for nd in nodeType._iter_GraphNode(self.doc, domNdPage, page) ]
lSpecialPageNode = [nd for nodeType in lSpecialType for nd in nodeType._iter_GraphNode(self.doc, domNdPage, page) ]
self.lNode.extend(lClassicPageNode) # e.g. the TextLine objects
self.lNodeBlock.extend(lClassicPageNode)
self.lNode.extend(lSpecialPageNode) # e.g. the cut lines!
self.lNodeCutLine.extend(lSpecialPageNode)
#no previous page to consider (for cross-page links...) => None
lClassicPageEdge = Edge.computeEdges(None, lClassicPageNode)
self.lEdge.extend(lClassicPageEdge)
# Now, compute edges between special and classic objects...
lSpecialPageEdge = self.computeSpecialEdges(lClassicPageNode,
lSpecialPageNode,
doer.bCutIsBeforeText)
self.lEdge.extend(lSpecialPageEdge)
#if iVerbose>=2: traceln("\tPage %5d %6d nodes %7d edges"%(pnum, len(lPageNode), len(lPageEdge)))
if iVerbose>=2:
traceln("\tPage %5d"%(pnum))
traceln("\t block: %6d nodes %7d edges (to block)" %(pnum, len(lClassicPageNode), len(lClassicPageEdge)))
traceln("\t line: %6d nodes %7d edges (from block)"%(pnum, len(lSpecialPageNode), len(lSpecialPageEdge)))
if iVerbose: traceln("\t\t (%d nodes, %d edges)"%(len(self.lNode), len(self.lEdge)) )
return self
@classmethod
    def computeSpecialEdges(cls, lClassicPageNode, lSpecialPageNode, bCutIsBeforeText):
"""
return a list of edges
"""
raise Exception("Specialize this method")
class Edge_BL(Edge):
"""Edge block-to-Line"""
pass
class Edge_LL(Edge):
"""Edge line-to-Line"""
pass
class GraphCut_H(GraphCut):
"""
Only horizontal cut lines
"""
def __init__(self):
self.showClassParam()
@classmethod
def showClassParam(cls):
try:
cls.bParamShownOnce
assert cls.bParamShownOnce == True
except:
#traceln(" - iModulo : " , cls.iModulo)
traceln(" - block_see_line : " , cls.iBlockVisibility)
traceln(" - line_see_line : " , cls.iLineVisibility)
traceln(" - fMinPageCoverage : " , cls.fMinPageCoverage)
cls.bParamShownOnce = True
def getNodeListByType(self, iTyp):
if iTyp == 0:
return self.lNodeBlock
else:
return self.lNodeCutLine
def getEdgeListByType(self, typA, typB):
if typA == 0:
if typB == 0:
return (e for e in self.lEdge if isinstance(e, SamePageEdge))
else:
return (e for e in self.lEdge if isinstance(e, Edge_BL))
else:
if typB == 0:
return []
else:
return (e for e in self.lEdge if isinstance(e, Edge_LL))
@classmethod
def computeSpecialEdges(self, lClassicPageNode, lSpecialPageNode,
bCutIsBeforeText):
"""
Compute:
- edges between each block and the cut line above/across/below the block
- edges between cut lines
return a list of edges
"""
#augment the block with the coordinate of its baseline central point
for blk in lClassicPageNode:
try:
x,y = BaselineCutAnnotator.getDomBaselineXY(blk.node)
blk.x_bslne = x
blk.y_bslne = y
except IndexError:
traceln("** WARNING: no Baseline in ", blk.domid)
traceln("** Using x2 and y2 instead... :-/")
blk.x_bslne = blk.x2
blk.y_bslne = blk.y2
for cutBlk in lSpecialPageNode:
assert cutBlk.y1 == cutBlk.y2
cutBlk.y1 = int(round(cutBlk.y1)) #DeltaFun make float
cutBlk.y2 = cutBlk.y1
#block to cut line edges
lEdge = []
for blk in lClassicPageNode:
for cutBlk in lSpecialPageNode:
if blk.y_bslne == cutBlk.y1:
edge = Edge_BL(blk, cutBlk)
edge.len = 0
edge._type = 0 # Cut line is crossing the block
lEdge.append(edge)
elif abs(blk.y_bslne - cutBlk.y1) <= self.iBlockVisibility:
edge = Edge_BL(blk, cutBlk)
# experiments show that abs helps
# edge.len = (blk.y_bslne - cutBlk.y1) / self.iBlockVisibility
edge.len = abs(blk.y_bslne - cutBlk.y1) / self.iBlockVisibility
edge._type = -1 if blk.y_bslne > cutBlk.y1 else +1
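                    # _type records the relative position: -1 when the cut line lies
                    # above the block baseline, +1 when it lies below (y grows
                    # downward); crossing cuts get _type 0 in the branch above.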
lEdge.append(edge)
#sort those edge from top to bottom
lEdge.sort(key=lambda o: o.B.y1) # o.B.y1 == o.B.y2 by construction
#now filter those edges
n0 = len(lEdge)
if False:
print("--- before filtering: %d edges" % len(lEdge))
lSortedEdge = sorted(lEdge, key=lambda x: x.A.domid)
for edge in lSortedEdge:
print("Block domid=%s y1=%s y2=%s yg=%s"%(edge.A.domid, edge.A.y1, edge.A.y2, edge.A.y_bslne)
+ " %s line %s "%(["↑", "-", "↓"][1+edge._type],
edge.B.y1)
+ "domid=%s y1=%s " %(edge.B.domid, edge.B.y1)
+str(id(edge))
)
lEdge = self._filterBadEdge(lEdge, lSpecialPageNode, bCutIsBeforeText)
traceln(" - filtering: removed %d edges due to obstruction." % (n0-len(lEdge)))
if False:
print("--- After filtering: %d edges" % len(lEdge))
lSortedEdge = sorted(lEdge, key=lambda x: x.A.domid)
print(len(lSortedEdge))
for edge in lSortedEdge:
print("Block domid=%s y1=%s y2=%s yg=%s"%(edge.A.domid, edge.A.y1, edge.A.y2, edge.A.y_bslne)
+ " %s line %s "%(["↑", "-", "↓"][1+edge._type],
edge.B.y1)
+ "domid=%s y1=%s " %(edge.B.domid, edge.B.y1)
+str(id(edge))
)
if self.iLineVisibility > 0:
# Cut line to Cut line edges
lSpecialPageNode.sort(key=lambda o: o.y1)
for i, A in enumerate(lSpecialPageNode):
for B in lSpecialPageNode[i+1:]:
if B.y1 - A.y1 <= self.iLineVisibility:
edge = Edge_LL(A, B)
edge.len = (B.y1 - A.y1) / self.iLineVisibility
assert edge.len >= 0
lEdge.append(edge)
else:
break
return lEdge
@classmethod
def _filterBadEdge(cls, lEdge, lCutLine, bCutIsBeforeText, fRatio=0.25):
"""
We get
- a list of block2Line edges
- a sorted list of cut line
But some block should not be connected to a line due to obstruction by
another blocks.
We filter out those edges...
return a sub-list of lEdge
"""
lKeepEdge = []
def _xoverlapSrcSrc(edge, lEdge):
"""
does the source node of edge overlap with the source node of any
edge of the list?
"""
A = edge.A
for _edge in lEdge:
if A.significantXOverlap(_edge.A, fRatio): return True
return False
def _yoverlapSrcSrc(edge, lEdge):
"""
does the source node of edge overlap with the source node of any
edge of the list?
"""
A = edge.A
for _edge in lEdge:
if A.significantYOverlap(_edge.A, fRatio): return True
return False
#there are two ways for dealing with lines crossed by a block
# - either it prevents another block to link to the line (assuming an x-overlap)
# - or not (historical way)
# THIS IS THE "MODERN" way!!
#check carefully the inequality below...
if bCutIsBeforeText == True:
keep1 = 0
keep2 = 1
else:
keep1 = -1
keep2 = 0
#take each line in turn
for ndLine in lCutLine:
#--- process downward edges
#TODO: index!
lDownwardAndXingEdge = [edge for edge in lEdge \
if edge._type > keep1 and edge.B == ndLine]
if lDownwardAndXingEdge:
#sort edge by source block from closest to line block to farthest
lDownwardAndXingEdge.sort(key=lambda o: ndLine.y1 - o.A.y_bslne)
lKeepDownwardEdge = [lDownwardAndXingEdge.pop(0)]
#now keep all edges whose source does not overlap vertically with
# the source of an edge that is kept
for edge in lDownwardAndXingEdge:
if not _xoverlapSrcSrc(edge, lKeepDownwardEdge):
lKeepDownwardEdge.append(edge)
lKeepEdge.extend(lKeepDownwardEdge)
#NOTHING to do for crossing edges: they should be in the list!
# #--- keep all crossing edges
# #TODO: index!
# lCrossingEdge = [edge for edge in lEdge \
# if edge._type == 0 and edge.B == ndLine]
#
# lKeepEdge.extend(lCrossingEdge)
#--- process upward edges
#TODO: index!
lUpwarAndXingdEdge = [edge for edge in lEdge \
if edge._type < keep2 and edge.B == ndLine]
if lUpwarAndXingdEdge:
#sort edge by source block from closest to line -block to farthest
lUpwarAndXingdEdge.sort(key=lambda o: o.A.y_bslne - ndLine.y2)
lKeepUpwardEdge = [lUpwarAndXingdEdge.pop(0)]
#now keep all edges whose source does not overlap vertically with
# the source of an edge that is kept
for edge in lUpwarAndXingdEdge:
if not _xoverlapSrcSrc(edge, lKeepUpwardEdge):
lKeepUpwardEdge.append(edge)
# now we keep only the edges, excluding the crossing ones
# (already included!!)
lKeepEdge.extend(edge for edge in lKeepUpwardEdge)
            #--- crossing edges need no separate step here: they are already in the kept list
return lKeepEdge
#------------------------------------------------------------------------------------------------------
class CutLine_NodeTransformer_v2(Transformer):
"""
features of a Cut line:
- horizontal or vertical.
"""
def transform(self, lNode):
#We allocate TWO more columns to store in it the tfidf and idf computed at document level.
#a = np.zeros( ( len(lNode), 10 ) , dtype=np.float64) # 4 possible orientations: 0, 1, 2, 3
a = np.zeros( ( len(lNode), 6 ) , dtype=np.float64) # 4 possible orientations: 0, 1, 2, 3
for i, blk in enumerate(lNode):
page = blk.page
if abs(blk.x2 - blk.x1) > abs(blk.y1 - blk.y2):
#horizontal
v = 2*blk.y1/float(page.h) - 1 # to range -1, +1
a[i,0:3] = (1.0, v, v*v)
else:
#vertical
v = 2*blk.x1/float(page.w) - 1 # to range -1, +1
a[i,3:6] = (1.0, v, v*v)
return a
class Block2CutLine_EdgeTransformer(Transformer):
"""
features of a block to Cut line edge:
- below, crossing, above
"""
def transform(self, lEdge):
        a = np.zeros( ( len(lEdge), 3 + 3 + 3) , dtype=np.float64) # 3 relative positions (below, crossing, above) x (1-hot, length, length^2)
for i, edge in enumerate(lEdge):
z = 1 + edge._type # _type is -1 or 0 or 1
a[i, z] = 1.0
a[i, 3 + z] = edge.len # normalised on [0, 1] edge length
a[i, 6 + z] = edge.len * edge.len
return a
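    # Example, derived from the code above: an edge with _type == -1 and a normalised
    # length of 0.5 yields [1, 0, 0,  0.5, 0, 0,  0.25, 0, 0], i.e. a 1-hot of the relative
    # position plus the length and squared length stored in the matching slot.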
class CutLine2CutLine_EdgeTransformer(Transformer): # ***** USELESS *****
"""
    features of a Cut line to Cut line edge:
    - constant, length, length^2
"""
def transform(self, lEdge):
a = np.zeros( ( len(lEdge), 3 ) , dtype=np.float64)
for i, edge in enumerate(lEdge):
a[i,:] = (1, edge.len, edge.len * edge.len)
return a
class My_FeatureDefinition_v2(FeatureDefinition):
"""
Multitype version:
    so the node_transformer actually is a list of node_transformers, of length n_class
    and the edge_transformer actually is a list of edge_transformers, of length n_class^2
    (This class inherits directly from FeatureDefinition.)
"""
n_QUANTILES = 16
def __init__(self, **kwargs):
"""
        set _node_transformer, _edge_transformer, tfidfNodeTextVectorizer
"""
FeatureDefinition.__init__(self)
nbTypes = self._getTypeNumber(kwargs)
block_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
("xywh", Pipeline([
('selector', NodeTransformerXYWH_v2()),
#v1 ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('xywh', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
, ("neighbors", Pipeline([
('selector', NodeTransformerNeighbors()),
#v1 ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('neighbors', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
, ("1hot", Pipeline([
('1hot', Node1HotFeatures()) #does the 1-hot encoding directly
])
)
])
Cut_line_transformer = CutLine_NodeTransformer_v2()
self._node_transformer = TransformerListByType([block_transformer, Cut_line_transformer])
edge_BB_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
("1hot", Pipeline([
('1hot', Edge1HotFeatures(PageNumberSimpleSequenciality()))
])
)
, ("boolean", Pipeline([
('boolean', EdgeBooleanFeatures_v2())
])
)
, ("numerical", Pipeline([
('selector', EdgeNumericalSelector()),
#v1 ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('numerical', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
] )
edge_BL_transformer = Block2CutLine_EdgeTransformer()
edge_LL_transformer = CutLine2CutLine_EdgeTransformer()
self._edge_transformer = TransformerListByType([edge_BB_transformer,
edge_BL_transformer,
edge_BL_transformer, # useless but required
edge_LL_transformer
])
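        # NB: the 4 entries above follow the (source type, target type) order
        # (0,0), (0,1), (1,0), (1,1), which is the order used by fitTranformers below.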
        self.tfidfNodeTextVectorizer = None  # no text vectorizer is used here (the node types carry no text)
def fitTranformers(self, lGraph,lY=None):
"""
Fit the transformers using the graphs, but TYPE BY TYPE !!!
return True
"""
self._node_transformer[0].fit([nd for g in lGraph for nd in g.getNodeListByType(0)])
self._node_transformer[1].fit([nd for g in lGraph for nd in g.getNodeListByType(1)])
self._edge_transformer[0].fit([e for g in lGraph for e in g.getEdgeListByType(0, 0)])
self._edge_transformer[1].fit([e for g in lGraph for e in g.getEdgeListByType(0, 1)])
self._edge_transformer[2].fit([e for g in lGraph for e in g.getEdgeListByType(1, 0)])
self._edge_transformer[3].fit([e for g in lGraph for e in g.getEdgeListByType(1, 1)])
return True
class DU_ABPTableRCut(DU_CRF_Task):
"""
    We will do a CRF model for a DU task,
    with the labels below
"""
sXmlFilenamePattern = "*[0-9].mpxml"
iBlockVisibility = None
iLineVisibility = None
#=== CONFIGURATION ====================================================================
@classmethod
def getConfiguredGraphClass(cls):
"""
In this class method, we must return a configured graph class
"""
# Textline labels
# Begin Inside End Single Other
lLabels_BIESO = ['B', 'I', 'E', 'S', 'O']
# Cut lines:
        #  Separator Ignore Outside
lLabels_SIO_Cut = ['S', 'I', 'O']
#DEFINING THE CLASS OF GRAPH WE USE
DU_GRAPH = GraphCut_H
DU_GRAPH.iBlockVisibility = cls.iBlockVisibility
DU_GRAPH.iLineVisibility = cls.iLineVisibility
# ROW
ntR = NodeType_PageXml_type_woText("row"
, lLabels_BIESO
, None
, False
, BBoxDeltaFun=lambda v: max(v * 0.066, min(5, v/3))
)
ntR.setLabelAttribute("DU_row")
ntR.setXpathExpr( (".//pc:TextLine" #how to find the nodes
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(ntR)
# HEADER
ntCutH = NodeType_PageXml_type_woText("sepH"
, lLabels_SIO_Cut
, None
, False
, None # equiv. to: BBoxDeltaFun=lambda _: 0
)
ntCutH.setLabelAttribute("type")
ntCutH.setXpathExpr( ('.//pc:CutSeparator[@orient="0"]' #how to find the nodes
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(ntCutH)
DU_GRAPH.setClassicNodeTypeList( [ntR ])
return DU_GRAPH
def __init__(self, sModelName, sModelDir,
iBlockVisibility = None,
iLineVisibility = None,
sComment = None,
C=None, tol=None, njobs=None, max_iter=None,
inference_cache=None):
DU_ABPTableRCut.iBlockVisibility = iBlockVisibility
DU_ABPTableRCut.iLineVisibility = iLineVisibility
DU_CRF_Task.__init__(self
, sModelName, sModelDir
, dFeatureConfig = {'row_row':{}, 'row_sepH':{},
'sepH_row':{}, 'sepH_sepH':{},
'sepH':{}, 'row':{}}
, dLearnerConfig = {
'C' : .1 if C is None else C
, 'njobs' : 4 if njobs is None else njobs
, 'inference_cache' : 50 if inference_cache is None else inference_cache
#, 'tol' : .1
, 'tol' : .05 if tol is None else tol
, 'save_every' : 50 #save every 50 iterations,for warm start
, 'max_iter' : 10 if max_iter is None else max_iter
}
, sComment=sComment
#,cFeatureDefinition=FeatureDefinition_PageXml_StandardOnes_noText
,cFeatureDefinition=My_FeatureDefinition_v2
)
#TODO: finish this!
def evalClusterByRow(self, sFilename):
"""
Evaluate the quality of the partitioning by table row, by comparing the
GT table information to the partition done automatically (thanks to the
separators added to the DOM).
"""
self.doc = etree.parse(sFilename)
root = self.doc.getroot()
# doer = BaselineCutAnnotator()
#
# #load the groundtruth table separators, if any, per page (1 in tABP)
# ltlYlX = doer.get_separator_YX_from_DOM(root, self.fMinPageCoverage)
# for (lHi, lVi) in ltlYlX:
# traceln(" - found %d horizontal, %d vertical GT separators" % (len(lHi), len(lVi)))
# #create DOM node reflecting the cuts
# #first clean (just in case!)
# n = doer.remove_cuts_from_dom(root)
# if n > 0:
# traceln(" - removed %d pre-existing cut lines" % n)
#
# # if GT, then we have labelled cut lines in DOM
# _ltlYCutXCut = doer.add_cut_to_DOM(root, ltlYlX=ltlYlX)
lClassicType = [nt for nt in self.getNodeTypeList() if nt in self._lClassicNodeType]
lSpecialType = [nt for nt in self.getNodeTypeList() if nt not in self._lClassicNodeType]
#load the block nodes per page
for (pnum, page, domNdPage) in self._iter_Page_DocNode(self.doc):
#now that we have the page, let's create the node for each type!
lClassicPageNode = [nd for nodeType in lClassicType for nd in nodeType._iter_GraphNode(self.doc, domNdPage, page) ]
lSpecialType = [nt for nt in self.getNodeTypeList() if nt not in self._lClassicNodeType]
# -- GT ---------------------------------------------
            # partition by columns and rows
dGTByRow = collections.defaultdict(list)
dGTByCol = collections.defaultdict(list)
for blk in lClassicPageNode:
cell = MultiPageXml.getAncestorByName(blk, 'TableCell')[0]
row, col, rowSpan, colSpan = [int(cell.get(sProp)) for sProp \
in ["row", "col", "rowSpan", "colSpan"] ]
# TODO: deal with span
dGTByRow[row].append(blk)
                dGTByCol[col].append(blk)
            for k,l in dGTByRow.items():
                l.sort(key=lambda o: (o.x1, o.y1))
            for k,l in dGTByCol.items():
l.sort(key=lambda o: (o.y1, o.x1))
# -- Prediction ---------------------------------------------
# if options.bBaseline:
# self.bsln_mdl = self.addBaseline_LogisticRegression() #use a LR model trained by CutSearch as baseline
#=== END OF CONFIGURATION =============================================================
# def predict(self, lsColDir):
# """
# Return the list of produced files
# """
# self.sXmlFilenamePattern = "*.mpxml"
# return DU_CRF_Task.predict(self, lsColDir)
#
# def runForExternalMLMethod(self, lsColDir, storeX, applyY, bRevertEdges=False):
# """
# Return the list of produced files
# """
# self.sXmlFilenamePattern = "*.mpxml"
# return DU_CRF_Task.runForExternalMLMethod(self, lsColDir, storeX, applyY, bRevertEdges)
# ----------------------------------------------------------------------------
def main(sModelDir, sModelName, options):
doer = DU_ABPTableRCut(sModelName, sModelDir,
iBlockVisibility = options.iBlockVisibility,
iLineVisibility = options.iLineVisibility,
C = options.crf_C,
tol = options.crf_tol,
njobs = options.crf_njobs,
max_iter = options.max_iter,
inference_cache = options.crf_inference_cache)
if options.rm:
doer.rm()
return
lTrn, lTst, lRun, lFold = [_checkFindColDir(lsDir, bAbsolute=False) for lsDir in [options.lTrn, options.lTst, options.lRun, options.lFold]]
# if options.bAnnotate:
# doer.annotateDocument(lTrn)
# traceln('annotation done')
# sys.exit(0)
traceln("- classes: ", doer.getGraphClass().getLabelNameList())
    ## use .a_mpxml files
#doer.sXmlFilenamePattern = doer.sLabeledXmlFilenamePattern
if options.iFoldInitNum or options.iFoldRunNum or options.bFoldFinish:
if options.iFoldInitNum:
"""
initialization of a cross-validation
"""
splitter, ts_trn, lFilename_trn = doer._nfold_Init(lFold, options.iFoldInitNum, bStoreOnDisk=True)
elif options.iFoldRunNum:
"""
Run one fold
"""
oReport = doer._nfold_RunFoldFromDisk(options.iFoldRunNum, options.warm, options.pkl)
traceln(oReport)
elif options.bFoldFinish:
tstReport = doer._nfold_Finish()
traceln(tstReport)
else:
assert False, "Internal error"
#no more processing!!
exit(0)
#-------------------
if lFold:
loTstRpt = doer.nfold_Eval(lFold, 3, .25, None, options.pkl)
sReportPickleFilename = os.path.join(sModelDir, sModelName + "__report.txt")
traceln("Results are in %s"%sReportPickleFilename)
graph.GraphModel.GraphModel.gzip_cPickle_dump(sReportPickleFilename, loTstRpt)
elif lTrn:
doer.train_save_test(lTrn, lTst, options.warm, options.pkl)
try: traceln("Baseline best estimator: %s"%doer.bsln_mdl.best_params_) #for CutSearch
except: pass
traceln(" --- CRF Model ---")
traceln(doer.getModel().getModelInfo())
elif lTst:
doer.load()
tstReport = doer.test(lTst)
traceln(tstReport)
if options.bDetailedReport:
traceln(tstReport.getDetailledReport())
sReportPickleFilename = os.path.join(sModelDir, sModelName + "__detailled_report.txt")
graph.GraphModel.GraphModel.gzip_cPickle_dump(sReportPickleFilename, tstReport)
if lRun:
if options.storeX or options.applyY:
try: doer.load()
except: pass #we only need the transformer
lsOutputFilename = doer.runForExternalMLMethod(lRun, options.storeX, options.applyY, options.bRevertEdges)
else:
doer.load()
lsOutputFilename = doer.predict(lRun)
traceln("Done, see in:\n %s"%lsOutputFilename)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
version = "v.01"
usage, description, parser = DU_CRF_Task.getBasicTrnTstRunOptionParser(sys.argv[0], version)
# parser.add_option("--annotate", dest='bAnnotate', action="store_true",default=False, help="Annotate the textlines with BIES labels")
#FOR GCN
parser.add_option("--revertEdges", dest='bRevertEdges', action="store_true", help="Revert the direction of the edges")
parser.add_option("--detail", dest='bDetailedReport', action="store_true", default=False,help="Display detailed reporting (score per document)")
parser.add_option("--baseline", dest='bBaseline', action="store_true", default=False, help="report baseline method")
parser.add_option("--line_see_line", dest='iLineVisibility', action="store",
type=int, default=0,
help="seeline2line: how far in pixel can a line see another cut line?")
parser.add_option("--block_see_line", dest='iBlockVisibility', action="store",
type=int, default=273,
help="seeblock2line: how far in pixel can a block see a cut line?")
# ---
#parse the command line
(options, args) = parser.parse_args()
# ---
try:
sModelDir, sModelName = args
except Exception as e:
traceln("Specify a model folder and a model name!")
_exit(usage, 1, e)
main(sModelDir, sModelName, options)
| Transkribus/TranskribusDU | TranskribusDU/tasks/TablePrototypes/DU_ABPTableRCut1SIO.py | Python | bsd-3-clause | 34,506 |
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import numpy as np # noqa
import pandas as pd # noqa
import pandas_ml as pdml # noqa
import pandas_ml.util.testing as tm # noqa
import sklearn.datasets as datasets # noqa
import xgboost as xgb # noqa
class TestXGBoost(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.xgboost.XGBRegressor, xgb.XGBRegressor)
self.assertIs(df.xgboost.XGBClassifier, xgb.XGBClassifier)
def test_XGBClassifier(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
models = ['XGBClassifier']
for model in models:
mod1 = getattr(df.xgboost, model)()
mod2 = getattr(xgb, model)()
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
def test_XGBRegressor(self):
# http://scikit-learn.org/stable/auto_examples/plot_kernel_ridge_regression.html
X = 5 * np.random.rand(1000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(X.shape[0] // 5))
df = pdml.ModelFrame(data=X, target=y)
models = ['XGBRegressor']
for model in models:
mod1 = getattr(df.xgboost, model)()
mod2 = getattr(xgb, model)()
df.fit(mod1)
mod2.fit(X, y)
result = df.predict(mod1)
expected = mod2.predict(X)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
self.assertIsInstance(df.predicted, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(df.predicted.values, expected)
def test_grid_search(self):
tuned_parameters = [{'max_depth': [3, 4],
'n_estimators': [50, 100]}]
df = pdml.ModelFrame(datasets.load_digits())
cv = df.grid_search.GridSearchCV(df.xgb.XGBClassifier(), tuned_parameters, cv=5)
with tm.RNGContext(1):
df.fit(cv)
result = df.grid_search.describe(cv)
expected = pd.DataFrame({'mean': [0.89705064, 0.91764051, 0.91263216, 0.91930996],
'std': [0.03244061, 0.03259985, 0.02764891, 0.0266436],
'max_depth': [3, 3, 4, 4],
'n_estimators': [50, 100, 50, 100]},
columns=['mean', 'std', 'max_depth', 'n_estimators'])
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_frame_equal(result, expected)
def test_plotting(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
df.fit(df.svm.SVC())
# raises if df.estimator is not XGBModel
with self.assertRaises(ValueError):
df.xgb.plot_importance()
with self.assertRaises(ValueError):
df.xgb.to_graphviz()
with self.assertRaises(ValueError):
df.xgb.plot_tree()
df.fit(df.xgb.XGBClassifier())
from matplotlib.axes import Axes
from graphviz import Digraph
try:
ax = df.xgb.plot_importance()
except ImportError:
import nose
# matplotlib.use doesn't work on Travis
# PYTHON=3.4 PANDAS=0.17.1 SKLEARN=0.16.1
raise nose.SkipTest()
self.assertIsInstance(ax, Axes)
assert ax.get_title() == 'Feature importance'
assert ax.get_xlabel() == 'F score'
assert ax.get_ylabel() == 'Features'
assert len(ax.patches) == 4
g = df.xgb.to_graphviz(num_trees=0)
self.assertIsInstance(g, Digraph)
ax = df.xgb.plot_tree(num_trees=0)
self.assertIsInstance(ax, Axes)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| sinhrks/pandas-ml | pandas_ml/xgboost/test/test_base.py | Python | bsd-3-clause | 4,415 |
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import numpy as np
import pytest
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
assert_model_is_valid,
get_op_types_in_program,
apply_pass_and_basic_check,
)
@pytest.mark.parametrize("op_type, pos, val", itertools.product(['add', 'mul', 'floor_div', 'pow', 'real_div', 'sub'], ['x', 'y'], [0, 1, [0, 0, 0, 0], [1, 1, 1, 1]]))
def test_elementwise_elimination(op_type, pos, val):
if 'div' in op_type and np.prod(val) == 0:
return
if 'pow' in op_type and (val != 0 or val != 1):
return
test_op = getattr(mb, op_type)
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
if pos == "x":
r1 = test_op(x=val, y=x)
else:
r1 = test_op(x=x, y=val)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
original_program = [op_type, "relu"]
new_program = original_program
if op_type in {'add'}:
if val == 0 or val == [0, 0, 0, 0]:
new_program = ["relu"]
elif op_type in {'mul'}:
if val == 1 or val == [1, 1, 1, 1]:
new_program = ["relu"]
elif op_type in {'real_div'}:
# TODO(rdar://79925291): Remove this branch and add `real_div` to the
# following elif once fp32 casts for `real_div` are no longer required.
original_program = ["cast"] + original_program
new_program = original_program
if pos == 'y' and (val == 1 or val == [1, 1, 1, 1]):
new_program = ["cast", "relu"]
elif op_type in {'pow', 'floor_div'}:
if pos == 'y' and (val == 1 or val == [1, 1, 1, 1]):
new_program = ["relu"]
elif op_type in {'sub'}:
if pos == 'y' and (val == 0 or val == [0, 0, 0, 0]):
new_program = ["relu"]
assert get_op_types_in_program(prev_prog) == original_program
assert get_op_types_in_program(prog) == new_program
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_elementwise_broadcast():
@mb.program(input_specs=[mb.TensorSpec(shape=[4])])
def prog(x):
r1 = mb.add(x=x, y=[[0, 0, 0, 0], [0, 0, 0, 0]])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
original_program = ["add", "relu"]
assert get_op_types_in_program(prev_prog) == original_program
assert get_op_types_in_program(prog) == original_program
assert_model_is_valid(
prog,
{"x": [4]},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_reshape_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.reshape(x=x, shape=[1, 8])
r2 = mb.reshape(x=r1, shape=[1, 8])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "relu"]
assert get_op_types_in_program(prog) == ["reshape", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (1, 8)},
)
def test_oneway_split_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.split(x=x, num_splits=1, axis=-1)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["split", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_full_split_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.split(x=x, split_sizes=[4], axis=-1)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["split", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebysize_full_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[2, 4])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebysize_to_end_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[-1, -1])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebyindex_full_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_index(x=x, begin=[0, 0], end=[2, 4])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
@pytest.mark.parametrize("begin_mask, end_mask",
itertools.product(itertools.product([True, False],[True, False]),
itertools.product([True, False],[True, False])))
def test_slicebyindex_mask_elimination(begin_mask, end_mask):
@mb.program(input_specs=[mb.TensorSpec(shape=(4, 4))])
def prog(x):
begin = [1, 1]
end = [1, 1]
for i in range(2):
if not begin_mask[i]:
begin[i] = 0
if not end_mask[i]:
end[i] = 4
r1 = mb.slice_by_index(x=x, begin=begin, end=end, begin_mask=begin_mask, end_mask=end_mask)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (4, 4)},
expected_output_shapes={block.outputs[0].name: (4, 4)},
)
def test_pad_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.pad(x=x, pad=[0, 0, 0, 0])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_keep_pad():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.pad(x=x, pad=[4, 4, 2, 2])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
assert get_op_types_in_program(prog) == ["pad", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (10, 8)},
)
def test_tile_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.tile(x=x, reps=[1, 1])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["tile", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_keep_tile():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.tile(x=x, reps=[2, 2])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["tile", "relu"]
assert get_op_types_in_program(prog) == ["tile", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (4, 8)},
)
def test_upsample_nearest_neighbor_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.upsample_nearest_neighbor(x=x)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["upsample_nearest_neighbor", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_upsample_bilinear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.upsample_bilinear(x=x)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["upsample_bilinear", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_resize_bilinear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.resize_bilinear(x=x, target_size_height=2, target_size_width=4)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["resize_bilinear", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_crop_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.crop(x=x, crop_height=[0, 0], crop_width=[0, 0])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["crop", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_linear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.linear_activation(x=x, alpha=1.0, beta=0.0)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["linear_activation", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_transpose_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 4))])
def prog(x):
r1 = mb.transpose(x=x, perm=[0, 1, 2])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["transpose", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 3, 4)},
expected_output_shapes={block.outputs[0].name: (2, 3, 4)},
)
| apple/coremltools | coremltools/converters/mil/mil/passes/test_noop_elimination.py | Python | bsd-3-clause | 13,225 |
import logging
import os
# URL to clone product_details JSON files from.
# Include trailing slash.
PROD_DETAILS_URL = 'http://svn.mozilla.org/libs/product-details/json/'
# Target dir to drop JSON files into (must be writable)
PROD_DETAILS_DIR = os.path.join(os.path.dirname(__file__), 'json')
# log level.
LOG_LEVEL = logging.INFO
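# These module-level values are defaults only. A minimal sketch of how a consuming Django
# project would typically override them from its own settings module (assuming the package
# falls back to the values above when the names are absent from the Django settings):
#
#   # settings.py of the consuming project
#   PROD_DETAILS_URL = 'https://example.org/product-details/json/'
#   PROD_DETAILS_DIR = '/var/lib/product_details/json/'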
| pmclanahan/django-mozilla-product-details | product_details/settings_defaults.py | Python | bsd-3-clause | 335 |
"""
Luminous Efficiency Functions Spectral Distributions
====================================================
Defines the luminous efficiency functions computation related objects.
References
----------
- :cite:`Wikipedia2005d` : Wikipedia. (2005). Mesopic weighting function.
Retrieved June 20, 2014, from
http://en.wikipedia.org/wiki/Mesopic_vision#Mesopic_weighting_function
"""
from __future__ import annotations
from colour.colorimetry import (
SDS_LEFS_PHOTOPIC,
SDS_LEFS_SCOTOPIC,
SpectralDistribution,
SpectralShape,
)
from colour.colorimetry.datasets.lefs import DATA_MESOPIC_X
from colour.hints import (
Floating,
FloatingOrArrayLike,
FloatingOrNDArray,
Literal,
Optional,
Union,
cast,
)
from colour.utilities import closest, optional, validate_method
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"mesopic_weighting_function",
"sd_mesopic_luminous_efficiency_function",
]
def mesopic_weighting_function(
wavelength: FloatingOrArrayLike,
L_p: Floating,
source: Union[Literal["Blue Heavy", "Red Heavy"], str] = "Blue Heavy",
method: Union[Literal["MOVE", "LRC"], str] = "MOVE",
photopic_lef: Optional[SpectralDistribution] = None,
scotopic_lef: Optional[SpectralDistribution] = None,
) -> FloatingOrNDArray:
"""
Calculate the mesopic weighting function factor :math:`V_m` at given
wavelength :math:`\\lambda` using the photopic luminance :math:`L_p`.
Parameters
----------
wavelength
Wavelength :math:`\\lambda` to calculate the mesopic weighting function
factor.
L_p
Photopic luminance :math:`L_p`.
source
Light source colour temperature.
method
Method to calculate the weighting factor.
photopic_lef
:math:`V(\\lambda)` photopic luminous efficiency function, default to
the *CIE 1924 Photopic Standard Observer*.
scotopic_lef
:math:`V^\\prime(\\lambda)` scotopic luminous efficiency function,
default to the *CIE 1951 Scotopic Standard Observer*.
Returns
-------
:class:`numpy.floating` or :class:`numpy.ndarray`
Mesopic weighting function factor :math:`V_m`.
References
----------
:cite:`Wikipedia2005d`
Examples
--------
>>> mesopic_weighting_function(500, 0.2) # doctest: +ELLIPSIS
0.7052200...
"""
photopic_lef = cast(
SpectralDistribution,
optional(
photopic_lef,
SDS_LEFS_PHOTOPIC["CIE 1924 Photopic Standard Observer"],
),
)
scotopic_lef = cast(
SpectralDistribution,
optional(
scotopic_lef,
SDS_LEFS_SCOTOPIC["CIE 1951 Scotopic Standard Observer"],
),
)
source = validate_method(
source,
["Blue Heavy", "Red Heavy"],
'"{0}" light source colour temperature is invalid, '
"it must be one of {1}!",
)
method = validate_method(method, ["MOVE", "LRC"])
mesopic_x_luminance_values = sorted(DATA_MESOPIC_X.keys())
index = mesopic_x_luminance_values.index(
closest(mesopic_x_luminance_values, L_p)
)
x = DATA_MESOPIC_X[mesopic_x_luminance_values[index]][source][method]
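    # x is the photopic weight read from the tabulated mesopic data for the luminance
    # closest to L_p; the mesopic curve is then the linear blend
    # V_m(wl) = (1 - x) * V'(wl) + x * V(wl).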
V_m = (1 - x) * scotopic_lef[wavelength] + x * photopic_lef[wavelength]
return V_m
def sd_mesopic_luminous_efficiency_function(
L_p: Floating,
source: Union[Literal["Blue Heavy", "Red Heavy"], str] = "Blue Heavy",
method: Union[Literal["MOVE", "LRC"], str] = "MOVE",
photopic_lef: Optional[SpectralDistribution] = None,
scotopic_lef: Optional[SpectralDistribution] = None,
) -> SpectralDistribution:
"""
Return the mesopic luminous efficiency function :math:`V_m(\\lambda)` for
given photopic luminance :math:`L_p`.
Parameters
----------
L_p
Photopic luminance :math:`L_p`.
source
Light source colour temperature.
method
Method to calculate the weighting factor.
photopic_lef
:math:`V(\\lambda)` photopic luminous efficiency function, default to
the *CIE 1924 Photopic Standard Observer*.
scotopic_lef
:math:`V^\\prime(\\lambda)` scotopic luminous efficiency function,
default to the *CIE 1951 Scotopic Standard Observer*.
Returns
-------
:class:`colour.SpectralDistribution`
Mesopic luminous efficiency function :math:`V_m(\\lambda)`.
References
----------
:cite:`Wikipedia2005d`
Examples
--------
>>> from colour.utilities import numpy_print_options
>>> with numpy_print_options(suppress=True):
... sd_mesopic_luminous_efficiency_function(0.2) # doctest: +ELLIPSIS
SpectralDistribution([[ 380. , 0.000424 ...],
[ 381. , 0.0004781...],
[ 382. , 0.0005399...],
[ 383. , 0.0006122...],
[ 384. , 0.0006961...],
[ 385. , 0.0007929...],
[ 386. , 0.000907 ...],
[ 387. , 0.0010389...],
[ 388. , 0.0011923...],
[ 389. , 0.0013703...],
[ 390. , 0.0015771...],
[ 391. , 0.0018167...],
[ 392. , 0.0020942...],
[ 393. , 0.0024160...],
[ 394. , 0.0027888...],
[ 395. , 0.0032196...],
[ 396. , 0.0037222...],
[ 397. , 0.0042957...],
[ 398. , 0.0049531...],
[ 399. , 0.0057143...],
[ 400. , 0.0065784...],
[ 401. , 0.0075658...],
[ 402. , 0.0086912...],
[ 403. , 0.0099638...],
[ 404. , 0.0114058...],
[ 405. , 0.0130401...],
[ 406. , 0.0148750...],
[ 407. , 0.0169310...],
[ 408. , 0.0192211...],
[ 409. , 0.0217511...],
[ 410. , 0.0245342...],
[ 411. , 0.0275773...],
[ 412. , 0.0309172...],
[ 413. , 0.0345149...],
[ 414. , 0.0383998...],
[ 415. , 0.0425744...],
[ 416. , 0.0471074...],
[ 417. , 0.0519322...],
[ 418. , 0.0570541...],
[ 419. , 0.0625466...],
[ 420. , 0.0683463...],
[ 421. , 0.0745255...],
[ 422. , 0.0809440...],
[ 423. , 0.0877344...],
[ 424. , 0.0948915...],
[ 425. , 0.1022731...],
[ 426. , 0.109877 ...],
[ 427. , 0.1178421...],
[ 428. , 0.1260316...],
[ 429. , 0.1343772...],
[ 430. , 0.143017 ...],
[ 431. , 0.1518128...],
[ 432. , 0.1608328...],
[ 433. , 0.1700088...],
[ 434. , 0.1792726...],
[ 435. , 0.1886934...],
[ 436. , 0.1982041...],
[ 437. , 0.2078032...],
[ 438. , 0.2174184...],
[ 439. , 0.2271147...],
[ 440. , 0.2368196...],
[ 441. , 0.2464623...],
[ 442. , 0.2561153...],
[ 443. , 0.2657160...],
[ 444. , 0.2753387...],
[ 445. , 0.2848520...],
[ 446. , 0.2944648...],
[ 447. , 0.3034902...],
[ 448. , 0.3132347...],
[ 449. , 0.3223257...],
[ 450. , 0.3314513...],
[ 451. , 0.3406129...],
[ 452. , 0.3498117...],
[ 453. , 0.3583617...],
[ 454. , 0.3676377...],
[ 455. , 0.3762670...],
[ 456. , 0.3849392...],
[ 457. , 0.3936540...],
[ 458. , 0.4024077...],
[ 459. , 0.4111965...],
[ 460. , 0.4193298...],
[ 461. , 0.4281803...],
[ 462. , 0.4363804...],
[ 463. , 0.4453117...],
[ 464. , 0.4542949...],
[ 465. , 0.4626509...],
[ 466. , 0.4717570...],
[ 467. , 0.4809300...],
[ 468. , 0.4901776...],
[ 469. , 0.4995075...],
[ 470. , 0.5096145...],
[ 471. , 0.5191293...],
[ 472. , 0.5294259...],
[ 473. , 0.5391316...],
[ 474. , 0.5496217...],
[ 475. , 0.5602103...],
[ 476. , 0.5702197...],
[ 477. , 0.5810207...],
[ 478. , 0.5919093...],
[ 479. , 0.6028683...],
[ 480. , 0.6138806...],
[ 481. , 0.6249373...],
[ 482. , 0.6360619...],
[ 483. , 0.6465989...],
[ 484. , 0.6579538...],
[ 485. , 0.6687841...],
[ 486. , 0.6797939...],
[ 487. , 0.6909887...],
[ 488. , 0.7023827...],
[ 489. , 0.7133032...],
[ 490. , 0.7244513...],
[ 491. , 0.7358470...],
[ 492. , 0.7468118...],
[ 493. , 0.7580294...],
[ 494. , 0.7694964...],
[ 495. , 0.7805225...],
[ 496. , 0.7917805...],
[ 497. , 0.8026123...],
[ 498. , 0.8130793...],
[ 499. , 0.8239297...],
[ 500. , 0.8352251...],
[ 501. , 0.8456342...],
[ 502. , 0.8564818...],
[ 503. , 0.8676921...],
[ 504. , 0.8785021...],
[ 505. , 0.8881489...],
[ 506. , 0.8986405...],
[ 507. , 0.9079322...],
[ 508. , 0.9174255...],
[ 509. , 0.9257739...],
[ 510. , 0.9350656...],
[ 511. , 0.9432365...],
[ 512. , 0.9509063...],
[ 513. , 0.9586931...],
[ 514. , 0.9658413...],
[ 515. , 0.9722825...],
[ 516. , 0.9779924...],
[ 517. , 0.9836106...],
[ 518. , 0.9883465...],
[ 519. , 0.9920964...],
[ 520. , 0.9954436...],
[ 521. , 0.9976202...],
[ 522. , 0.9993457...],
[ 523. , 1. ...],
[ 524. , 0.9996498...],
[ 525. , 0.9990487...],
[ 526. , 0.9975356...],
[ 527. , 0.9957615...],
[ 528. , 0.9930143...],
[ 529. , 0.9899559...],
[ 530. , 0.9858741...],
[ 531. , 0.9814453...],
[ 532. , 0.9766885...],
[ 533. , 0.9709363...],
[ 534. , 0.9648947...],
[ 535. , 0.9585832...],
[ 536. , 0.952012 ...],
[ 537. , 0.9444916...],
[ 538. , 0.9367089...],
[ 539. , 0.9293506...],
[ 540. , 0.9210429...],
[ 541. , 0.9124772...],
[ 542. , 0.9036604...],
[ 543. , 0.8945958...],
[ 544. , 0.8845999...],
[ 545. , 0.8750500...],
[ 546. , 0.8659457...],
[ 547. , 0.8559224...],
[ 548. , 0.8456846...],
[ 549. , 0.8352499...],
[ 550. , 0.8253229...],
[ 551. , 0.8152079...],
[ 552. , 0.8042205...],
[ 553. , 0.7944209...],
[ 554. , 0.7837466...],
[ 555. , 0.7735680...],
[ 556. , 0.7627808...],
[ 557. , 0.7522710...],
[ 558. , 0.7417549...],
[ 559. , 0.7312909...],
[ 560. , 0.7207983...],
[ 561. , 0.7101939...],
[ 562. , 0.6996362...],
[ 563. , 0.6890656...],
[ 564. , 0.6785599...],
[ 565. , 0.6680593...],
[ 566. , 0.6575697...],
[ 567. , 0.6471578...],
[ 568. , 0.6368208...],
[ 569. , 0.6264871...],
[ 570. , 0.6161541...],
[ 571. , 0.6058896...],
[ 572. , 0.5957000...],
[ 573. , 0.5855937...],
[ 574. , 0.5754412...],
[ 575. , 0.5653883...],
[ 576. , 0.5553742...],
[ 577. , 0.5454680...],
[ 578. , 0.5355972...],
[ 579. , 0.5258267...],
[ 580. , 0.5160152...],
[ 581. , 0.5062322...],
[ 582. , 0.4965595...],
[ 583. , 0.4868746...],
[ 584. , 0.4773299...],
[ 585. , 0.4678028...],
[ 586. , 0.4583704...],
[ 587. , 0.4489722...],
[ 588. , 0.4397606...],
[ 589. , 0.4306131...],
[ 590. , 0.4215446...],
[ 591. , 0.4125681...],
[ 592. , 0.4037550...],
[ 593. , 0.3950359...],
[ 594. , 0.3864104...],
[ 595. , 0.3778777...],
[ 596. , 0.3694405...],
[ 597. , 0.3611074...],
[ 598. , 0.3528596...],
[ 599. , 0.3447056...],
[ 600. , 0.3366470...],
[ 601. , 0.3286917...],
[ 602. , 0.3208410...],
[ 603. , 0.3130808...],
[ 604. , 0.3054105...],
[ 605. , 0.2978225...],
[ 606. , 0.2903027...],
[ 607. , 0.2828727...],
[ 608. , 0.2755311...],
[ 609. , 0.2682900...],
[ 610. , 0.2611478...],
[ 611. , 0.2541176...],
[ 612. , 0.2471885...],
[ 613. , 0.2403570...],
[ 614. , 0.2336057...],
[ 615. , 0.2269379...],
[ 616. , 0.2203527...],
[ 617. , 0.2138465...],
[ 618. , 0.2073946...],
[ 619. , 0.2009789...],
[ 620. , 0.1945818...],
[ 621. , 0.1881943...],
[ 622. , 0.1818226...],
[ 623. , 0.1754987...],
[ 624. , 0.1692476...],
[ 625. , 0.1630876...],
[ 626. , 0.1570257...],
[ 627. , 0.151071 ...],
[ 628. , 0.1452469...],
[ 629. , 0.1395845...],
[ 630. , 0.1341087...],
[ 631. , 0.1288408...],
[ 632. , 0.1237666...],
[ 633. , 0.1188631...],
[ 634. , 0.1141075...],
[ 635. , 0.1094766...],
[ 636. , 0.1049613...],
[ 637. , 0.1005679...],
[ 638. , 0.0962924...],
[ 639. , 0.0921296...],
[ 640. , 0.0880778...],
[ 641. , 0.0841306...],
[ 642. , 0.0802887...],
[ 643. , 0.0765559...],
[ 644. , 0.0729367...],
[ 645. , 0.0694345...],
[ 646. , 0.0660491...],
[ 647. , 0.0627792...],
[ 648. , 0.0596278...],
[ 649. , 0.0565970...],
[ 650. , 0.0536896...],
[ 651. , 0.0509068...],
[ 652. , 0.0482444...],
[ 653. , 0.0456951...],
[ 654. , 0.0432510...],
[ 655. , 0.0409052...],
[ 656. , 0.0386537...],
[ 657. , 0.0364955...],
[ 658. , 0.0344285...],
[ 659. , 0.0324501...],
[ 660. , 0.0305579...],
[ 661. , 0.0287496...],
[ 662. , 0.0270233...],
[ 663. , 0.0253776...],
[ 664. , 0.0238113...],
[ 665. , 0.0223226...],
[ 666. , 0.0209086...],
[ 667. , 0.0195688...],
[ 668. , 0.0183056...],
[ 669. , 0.0171216...],
[ 670. , 0.0160192...],
[ 671. , 0.0149986...],
[ 672. , 0.0140537...],
[ 673. , 0.0131784...],
[ 674. , 0.0123662...],
[ 675. , 0.0116107...],
[ 676. , 0.0109098...],
[ 677. , 0.0102587...],
[ 678. , 0.0096476...],
[ 679. , 0.0090665...],
[ 680. , 0.0085053...],
[ 681. , 0.0079567...],
[ 682. , 0.0074229...],
[ 683. , 0.0069094...],
[ 684. , 0.0064213...],
[ 685. , 0.0059637...],
[ 686. , 0.0055377...],
[ 687. , 0.0051402...],
[ 688. , 0.00477 ...],
[ 689. , 0.0044263...],
[ 690. , 0.0041081...],
[ 691. , 0.0038149...],
[ 692. , 0.0035456...],
[ 693. , 0.0032984...],
[ 694. , 0.0030718...],
[ 695. , 0.0028639...],
[ 696. , 0.0026738...],
[ 697. , 0.0025000...],
[ 698. , 0.0023401...],
[ 699. , 0.0021918...],
[ 700. , 0.0020526...],
[ 701. , 0.0019207...],
[ 702. , 0.001796 ...],
[ 703. , 0.0016784...],
[ 704. , 0.0015683...],
[ 705. , 0.0014657...],
[ 706. , 0.0013702...],
[ 707. , 0.001281 ...],
[ 708. , 0.0011976...],
[ 709. , 0.0011195...],
[ 710. , 0.0010464...],
[ 711. , 0.0009776...],
[ 712. , 0.0009131...],
[ 713. , 0.0008525...],
[ 714. , 0.0007958...],
[ 715. , 0.0007427...],
[ 716. , 0.0006929...],
[ 717. , 0.0006462...],
[ 718. , 0.0006026...],
[ 719. , 0.0005619...],
[ 720. , 0.0005240...],
[ 721. , 0.0004888...],
[ 722. , 0.0004561...],
[ 723. , 0.0004255...],
[ 724. , 0.0003971...],
[ 725. , 0.0003704...],
[ 726. , 0.0003455...],
[ 727. , 0.0003221...],
[ 728. , 0.0003001...],
[ 729. , 0.0002796...],
[ 730. , 0.0002604...],
[ 731. , 0.0002423...],
[ 732. , 0.0002254...],
[ 733. , 0.0002095...],
[ 734. , 0.0001947...],
[ 735. , 0.0001809...],
[ 736. , 0.0001680...],
[ 737. , 0.0001560...],
[ 738. , 0.0001449...],
[ 739. , 0.0001345...],
[ 740. , 0.0001249...],
[ 741. , 0.0001159...],
[ 742. , 0.0001076...],
[ 743. , 0.0000999...],
[ 744. , 0.0000927...],
[ 745. , 0.0000862...],
[ 746. , 0.0000801...],
[ 747. , 0.0000745...],
[ 748. , 0.0000693...],
[ 749. , 0.0000646...],
[ 750. , 0.0000602...],
[ 751. , 0.0000561...],
[ 752. , 0.0000523...],
[ 753. , 0.0000488...],
[ 754. , 0.0000456...],
[ 755. , 0.0000425...],
[ 756. , 0.0000397...],
[ 757. , 0.0000370...],
[ 758. , 0.0000346...],
[ 759. , 0.0000322...],
[ 760. , 0.0000301...],
[ 761. , 0.0000281...],
[ 762. , 0.0000262...],
[ 763. , 0.0000244...],
[ 764. , 0.0000228...],
[ 765. , 0.0000213...],
[ 766. , 0.0000198...],
[ 767. , 0.0000185...],
[ 768. , 0.0000173...],
[ 769. , 0.0000161...],
[ 770. , 0.0000150...],
[ 771. , 0.0000140...],
[ 772. , 0.0000131...],
[ 773. , 0.0000122...],
[ 774. , 0.0000114...],
[ 775. , 0.0000106...],
[ 776. , 0.0000099...],
[ 777. , 0.0000092...],
[ 778. , 0.0000086...],
[ 779. , 0.0000080...],
[ 780. , 0.0000075...]],
interpolator=SpragueInterpolator,
interpolator_kwargs={},
extrapolator=Extrapolator,
extrapolator_kwargs={...})
"""
photopic_lef = cast(
SpectralDistribution,
optional(
photopic_lef,
SDS_LEFS_PHOTOPIC["CIE 1924 Photopic Standard Observer"],
),
)
scotopic_lef = cast(
SpectralDistribution,
optional(
scotopic_lef,
SDS_LEFS_SCOTOPIC["CIE 1951 Scotopic Standard Observer"],
),
)
shape = SpectralShape(
max([photopic_lef.shape.start, scotopic_lef.shape.start]),
min([photopic_lef.shape.end, scotopic_lef.shape.end]),
max([photopic_lef.shape.interval, scotopic_lef.shape.interval]),
)
wavelengths = shape.range()
sd = SpectralDistribution(
mesopic_weighting_function(
wavelengths, L_p, source, method, photopic_lef, scotopic_lef
),
wavelengths,
name=f"{L_p} Lp Mesopic Luminous Efficiency Function",
)
return sd.normalise()
| colour-science/colour | colour/colorimetry/lefs.py | Python | bsd-3-clause | 30,125 |
import qt
class CollapsibleMultilineText(qt.QTextEdit):
"""Text field that expands when it gets the focus and remain collapsed otherwise"""
def __init__(self):
super(CollapsibleMultilineText, self).__init__()
self.minHeight = 20
self.maxHeight = 50
self.setFixedHeight(self.minHeight)
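    # Minimal usage sketch (illustrative only; assumes the Slicer-provided qt bindings and
    # a host form layout named parentFormLayout):
    #   multilineText = CollapsibleMultilineText()
    #   parentFormLayout.addRow("Comment:", multilineText)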
def focusInEvent(self, event):
        # super(CollapsibleMultilineText, self).focusInEvent(event)
self.setFixedHeight(self.maxHeight)
def focusOutEvent(self, event):
        # super(CollapsibleMultilineText, self).focusOutEvent(event)
        self.setFixedHeight(self.minHeight)
| acil-bwh/SlicerCIP | Scripted/CIP_/CIP/ui/CollapsibleMultilineText.py | Python | bsd-3-clause | 595 |
import hashlib
import tempfile
import unittest
import shutil
import os
import sys
from testfixtures import LogCapture
from scrapy.dupefilters import RFPDupeFilter
from scrapy.http import Request
from scrapy.core.scheduler import Scheduler
from scrapy.utils.python import to_bytes
from scrapy.utils.job import job_dir
from scrapy.utils.test import get_crawler
from tests.spiders import SimpleSpider
class FromCrawlerRFPDupeFilter(RFPDupeFilter):
@classmethod
def from_crawler(cls, crawler):
debug = crawler.settings.getbool('DUPEFILTER_DEBUG')
df = cls(job_dir(crawler.settings), debug)
df.method = 'from_crawler'
return df
class FromSettingsRFPDupeFilter(RFPDupeFilter):
@classmethod
def from_settings(cls, settings):
debug = settings.getbool('DUPEFILTER_DEBUG')
df = cls(job_dir(settings), debug)
df.method = 'from_settings'
return df
class DirectDupeFilter(object):
method = 'n/a'
class RFPDupeFilterTest(unittest.TestCase):
def test_df_from_crawler_scheduler(self):
settings = {'DUPEFILTER_DEBUG': True,
'DUPEFILTER_CLASS': __name__ + '.FromCrawlerRFPDupeFilter'}
crawler = get_crawler(settings_dict=settings)
scheduler = Scheduler.from_crawler(crawler)
self.assertTrue(scheduler.df.debug)
self.assertEqual(scheduler.df.method, 'from_crawler')
def test_df_from_settings_scheduler(self):
settings = {'DUPEFILTER_DEBUG': True,
'DUPEFILTER_CLASS': __name__ + '.FromSettingsRFPDupeFilter'}
crawler = get_crawler(settings_dict=settings)
scheduler = Scheduler.from_crawler(crawler)
self.assertTrue(scheduler.df.debug)
self.assertEqual(scheduler.df.method, 'from_settings')
def test_df_direct_scheduler(self):
settings = {'DUPEFILTER_CLASS': __name__ + '.DirectDupeFilter'}
crawler = get_crawler(settings_dict=settings)
scheduler = Scheduler.from_crawler(crawler)
self.assertEqual(scheduler.df.method, 'n/a')
def test_filter(self):
dupefilter = RFPDupeFilter()
dupefilter.open()
r1 = Request('http://scrapytest.org/1')
r2 = Request('http://scrapytest.org/2')
r3 = Request('http://scrapytest.org/2')
assert not dupefilter.request_seen(r1)
assert dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
assert dupefilter.request_seen(r3)
dupefilter.close('finished')
def test_dupefilter_path(self):
r1 = Request('http://scrapytest.org/1')
r2 = Request('http://scrapytest.org/2')
path = tempfile.mkdtemp()
try:
df = RFPDupeFilter(path)
try:
df.open()
assert not df.request_seen(r1)
assert df.request_seen(r1)
finally:
df.close('finished')
df2 = RFPDupeFilter(path)
try:
df2.open()
assert df2.request_seen(r1)
assert not df2.request_seen(r2)
assert df2.request_seen(r2)
finally:
df2.close('finished')
finally:
shutil.rmtree(path)
def test_request_fingerprint(self):
"""Test if customization of request_fingerprint method will change
output of request_seen.
"""
r1 = Request('http://scrapytest.org/index.html')
r2 = Request('http://scrapytest.org/INDEX.html')
dupefilter = RFPDupeFilter()
dupefilter.open()
assert not dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
dupefilter.close('finished')
class CaseInsensitiveRFPDupeFilter(RFPDupeFilter):
def request_fingerprint(self, request):
fp = hashlib.sha1()
fp.update(to_bytes(request.url.lower()))
return fp.hexdigest()
case_insensitive_dupefilter = CaseInsensitiveRFPDupeFilter()
case_insensitive_dupefilter.open()
assert not case_insensitive_dupefilter.request_seen(r1)
assert case_insensitive_dupefilter.request_seen(r2)
case_insensitive_dupefilter.close('finished')
def test_seenreq_newlines(self):
""" Checks against adding duplicate \r to
line endings on Windows platforms. """
r1 = Request('http://scrapytest.org/1')
path = tempfile.mkdtemp()
try:
df = RFPDupeFilter(path)
df.open()
df.request_seen(r1)
df.close('finished')
with open(os.path.join(path, 'requests.seen'), 'rb') as seen_file:
line = next(seen_file).decode()
assert not line.endswith('\r\r\n')
if sys.platform == 'win32':
assert line.endswith('\r\n')
else:
assert line.endswith('\n')
finally:
shutil.rmtree(path)
def test_log(self):
with LogCapture() as l:
settings = {'DUPEFILTER_DEBUG': False,
'DUPEFILTER_CLASS': __name__ + '.FromCrawlerRFPDupeFilter'}
crawler = get_crawler(SimpleSpider, settings_dict=settings)
scheduler = Scheduler.from_crawler(crawler)
spider = SimpleSpider.from_crawler(crawler)
dupefilter = scheduler.df
dupefilter.open()
r1 = Request('http://scrapytest.org/index.html')
r2 = Request('http://scrapytest.org/index.html')
dupefilter.log(r1, spider)
dupefilter.log(r2, spider)
assert crawler.stats.get_value('dupefilter/filtered') == 2
l.check_present(('scrapy.dupefilters', 'DEBUG',
('Filtered duplicate request: <GET http://scrapytest.org/index.html>'
' - no more duplicates will be shown'
' (see DUPEFILTER_DEBUG to show all duplicates)')))
dupefilter.close('finished')
def test_log_debug(self):
with LogCapture() as l:
settings = {'DUPEFILTER_DEBUG': True,
'DUPEFILTER_CLASS': __name__ + '.FromCrawlerRFPDupeFilter'}
crawler = get_crawler(SimpleSpider, settings_dict=settings)
scheduler = Scheduler.from_crawler(crawler)
spider = SimpleSpider.from_crawler(crawler)
dupefilter = scheduler.df
dupefilter.open()
r1 = Request('http://scrapytest.org/index.html')
r2 = Request('http://scrapytest.org/index.html',
headers={'Referer': 'http://scrapytest.org/INDEX.html'}
)
dupefilter.log(r1, spider)
dupefilter.log(r2, spider)
assert crawler.stats.get_value('dupefilter/filtered') == 2
l.check_present(('scrapy.dupefilters', 'DEBUG',
('Filtered duplicate request: <GET http://scrapytest.org/index.html>'
' (referer: None)')))
l.check_present(('scrapy.dupefilters', 'DEBUG',
('Filtered duplicate request: <GET http://scrapytest.org/index.html>'
' (referer: http://scrapytest.org/INDEX.html)')))
dupefilter.close('finished')
| eLRuLL/scrapy | tests/test_dupefilters.py | Python | bsd-3-clause | 7,297 |
# -*- coding: utf-8 -*-
"""Provides vertical object."""
from __future__ import absolute_import
from ..entity import Entity
class Vertical(Entity):
"""docstring for Vertical."""
collection = 'verticals'
resource = 'vertical'
_relations = {
'advertiser',
}
_pull = {
'id': int,
'name': None,
'created_on': Entity._strpt,
'updated_on': Entity._strpt,
'version': int,
}
_push = _pull
def __init__(self, session, properties=None, **kwargs):
super(Vertical, self).__init__(session, properties, **kwargs)
| Cawb07/t1-python | terminalone/models/vertical.py | Python | bsd-3-clause | 596 |
import pytest
import bauble.db as db
from bauble.model.family import Family
from bauble.model.genus import Genus, GenusSynonym, GenusNote
import test.api as api
@pytest.fixture
def setup(organization, session):
setup.organization = session.merge(organization)
setup.user = setup.organization.owners[0]
setup.session = session
db.set_session_schema(session, setup.organization.pg_schema)
return setup
def test_genus_json(setup):
session = setup.session
family = Family(family=api.get_random_name())
genus_name = api.get_random_name()
genus = Genus(family=family, genus=genus_name)
note = GenusNote(genus=genus, note="this is a test")
syn = GenusSynonym(genus=genus, synonym=genus)
session.add_all([family, genus, note, syn])
session.commit()
genus_json = genus.json()
assert 'id' in genus_json
assert genus_json['id'] == genus.id
assert 'genus' in genus_json
assert 'str' in genus_json
assert 'qualifier' in genus_json
note_json = note.json()
assert 'id' in note_json
assert 'genus_id' in note_json
assert note_json['genus_id'] == genus.id
syn_json = syn.json()
assert 'id' in syn_json
assert syn_json['genus_id'] == genus.id
assert syn_json['synonym_id'] == genus.id
session.delete(genus)
session.commit()
session.close()
def test_server(setup):
"""
    Test that the server properly handles /genus resources
"""
user = setup.user
family = api.create_resource('/family', {'family': api.get_random_name()}, user)
# create a genus
first_genus = api.create_resource('/genus', {'genus': api.get_random_name(), 'family': family},
user)
# create another genus and use the first as a synonym
data = {'genus': api.get_random_name(),
'family': family,
'notes': [{'user': 'me', 'category': 'test', 'date': '2001-1-1',
'note': 'test note'},
{'user': 'me', 'category': 'test', 'date': '2002-2-2',
'note': 'test note2'}],
'synonyms': [first_genus]
#'synonyms': [{'synonym': first_genus}]
}
second_genus = api.create_resource('/genus', data, user)
assert 'id' in second_genus # created
# update the genus
second_genus['genus'] = api.get_random_name()
second_id = second_genus['id']
second_genus = api.update_resource('/genus/' + str(second_id), second_genus, user=user)
assert second_genus['id'] == second_id # make sure they have the same id after the update
# get the genus
first_genus = api.get_resource('/genus/' + str(first_genus['id']), user=user)
# query for genera and make sure the second genus is in the results
genera = api.query_resource('/genus', q=second_genus['genus'], user=user)
# TODO: ** shouldn't len(genera) be 1 since the name should be unique
#assert second_genus['ref'] in [genus['ref'] for genus in genera]
assert second_genus['id'] in [genus['id'] for genus in genera]
# test getting the genus relative to its family
# ** TODO: now we just embed the relation in the /genera/:id
# ** request....need to create a test to make sure it's happening
# genera = api.get_resource('/family/' + str(family['id']) + "/genera", user=user)
# assert first_genus['id'] in [genus['id'] for genus in genera]
# test getting a family with its genera relations
# ** TODO: now we just embed the relation in the /genera/:id
# ** request....need to create a test to make sure it's happening
#response_json = api.query_resource('/family', q=family['family'], relations="genera,notes", user=user)
#families = response_json
# TODO: *** i don't know if we still support returning relations like this...do
# we need to
# print(families[0]['genera'])
# assert first_genus['ref'] in [genus['ref'] for genus in families[0]['genera']]
# count the number of genera on a family
# TODO: ** count is temporarily disabled
# count = api.count_resource(family['ref'] + "/genera")
# assert count == "2"
# delete the created resources
api.delete_resource('/genus/' + str(first_genus['id']), user)
api.delete_resource('/genus/' + str(second_genus['id']), user)
api.delete_resource('/family/' + str(family['id']), user)
| Bauble/bauble.api | test/spec/test_genus.py | Python | bsd-3-clause | 4,372 |
from rdkit import Chem
from rdkit import rdBase
from rdkit.Chem import rdMolDescriptors as rdMD
from rdkit.Chem import AllChem
from rdkit.Chem.EState import EStateIndices
from rdkit.Chem.EState import AtomTypes
import time
print rdBase.rdkitVersion
print rdBase.boostVersion
def getEState(mol):
return EStateIndices(mol)
def localopt(mol, steps = 500):
if mol.GetNumConformers() == 0:
mol=make3D(mol)
AllChem.MMFFOptimizeMolecule(mol, maxIters = steps)
return mol
def make3D(mol, steps = 50):
mol = Chem.AddHs(mol)
success = AllChem.EmbedMolecule(mol)
if success == -1: # Failed
success = AllChem.EmbedMolecule(mol, useRandomCoords = True)
if success == -1:
            raise ValueError("Embedding failed!")
mol = localopt(mol, steps)
return mol
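# Usage sketch (illustrative only): make3D adds hydrogens, embeds a 3D conformer (falling
# back to random coordinates when the default embedding fails) and MMFF-optimises it, e.g.
#   mol3d = make3D(Chem.MolFromSmiles('CCO'))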
def get3D(m,is3d):
if not is3d:
m = Chem.AddHs(m)
AllChem.EmbedMolecule(m)
AllChem.MMFFOptimizeMolecule(m)
r= rdMD.CalcAUTOCORR3D(m)+rdMD.CalcRDF(m)+rdMD.CalcMORSE(m)+rdMD.CalcWHIM(m)+rdMD.CalcGETAWAY(m)
return r
def generateALL():
m = Chem.MolFromSmiles('Cc1ccccc1')
thefile = open('testAC.txt', 'w')
filename="/Users/mbp/Github/rdkit_mine/Code/GraphMol/Descriptors/test_data/PBF_egfr.sdf"
suppl = Chem.SDMolSupplier(filename,removeHs=False)
mols = [x for x in suppl]
start = time.time()
for m in mols:
r= get3D(m,True)
for item in r:
thefile.write("%.3f," % item)
thefile.write("\n")
end = time.time()
print end - start
thefile = open('testSMWHIM.txt', 'w')
writer = Chem.SDWriter('3Dsmallmol.sdf')
A=['[H][H]','B','O=O','C','CC','CCC','CCCC','CCCCC','CCCCCC','CO','CCO','CCCO','CCCCO','CCCCCO','CCCCCCO','CCl','CCCl','CCCCl','CCCCCl','CCCCCCl','CCCCCCCl','CBr','CCBr','CCCBr','CCCCBr','CCCCCBr','CCCCCCBr','CI','CCI','CCCI','CCCCI','CCCCCI','CCCCCCI','CF','CCF','CCCF','CCCCF','CCCCCF','CCCCCCF','CS','CCS','CCCS','CCCCS','CCCCCS','CCCCCCS','CN','CCN','CCCN','CCCCN','CCCCCN','CCCCCCN']
for smi in A:
m = Chem.MolFromSmiles(smi)
m=localopt(m,100)
#r=get3D(m,True)
print smi
print "---------"
r=rdMD.CalcWHIM(m)
print "Ei:"+str(r[0])+ "," + str(r[1]) + "," + str(r[2])+ "\n"
print "Gi:"+str(r[5])+ "," + str(r[6]) + "," + str(r[7])+ "\n"
print "SI:"+str(rdMD.CalcSpherocityIndex(m))
print "AS:"+str(rdMD.CalcAsphericity(m))
print "EX:"+str(rdMD.CalcEccentricity(m))
for item in r:
thefile.write("%.3f," % item)
thefile.write("\n")
#m.SetProp("smi", smi)
#writer.write(m)
thefile = open('testBPA.txt', 'w')
writer = Chem.SDWriter('3DBPAmol.sdf')
B=['CN(C)CC(Br)c1ccccc1','CN(C)CC(Br)c1ccc(F)cc1','CN(C)CC(Br)c1ccc(Cl)cc1','CN(C)CC(Br)c1ccc(Cl)cc1','CN(C)CC(Br)c1ccc(I)cc1','CN(C)CC(Br)c1ccc(C)cc1','CN(C)CC(Br)c1cccc(F)c1','CN(C)CC(Br)c1cccc(Cl)c1','CN(C)CC(Br)c1cccc(Br)c1','CN(C)CC(Br)c1cccc(I)c1','CN(C)CC(Br)c1cccc(C)c1','CN(C)CC(Br)c1ccc(F)c(Cl)c1','CN(C)CC(Br)c1ccc(F)c(Br)c1','CN(C)CC(Br)c1ccc(F)c(C)c1','CN(C)CC(Br)c1ccc(Cl)c(Cl)c1','CN(C)CC(Br)c1ccc(Cl)c(Br)c1','CN(C)CC(Br)c1ccc(Cl)c(C)c1','CN(C)CC(Br)c1ccc(Br)c(Cl)c1','CN(C)CC(Br)c1ccc(Br)c(Br)c1','CN(C)CC(Br)c1ccc(Br)c(C)c1','CN(C)CC(Br)c1ccc(C)c(C)c1','CN(C)CC(Br)c1ccc(C)c(Br)c1']
for smi in B:
m = Chem.MolFromSmiles(smi)
m=localopt(m,100)
#r=get3D(m,True)
r=rdMD.CalcWHIM(m)
for item in r:
thefile.write("%.3f," % item)
thefile.write("\n")
#m.SetProp("smi", smi)
#writer.write(m)
A="G1w,G2w,G3w,Gw"
print dir(rdMD)
| rdkit/rdkit | Code/GraphMol/Descriptors/test3D_old.py | Python | bsd-3-clause | 3,537 |
"""
WSGI config for invoices project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
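# A minimal, hypothetical sketch of the middleware pattern mentioned above; the
# wrapper name and behaviour are illustrative and not part of this project.
# def force_https(app):
#     def wrapper(environ, start_response):
#         environ['wsgi.url_scheme'] = 'https'
#         return app(environ, start_response)
#     return wrapper
# application = force_https(application)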
| sztosz/invoices | config/wsgi.py | Python | bsd-3-clause | 1,618 |
"""
Sampling along tracks
---------------------
The :func:`pygmt.grdtrack` function samples a raster grid's value along specified
points. We will need to input a 2D raster to ``grid`` which can be an
:class:`xarray.DataArray`. The argument passed to the ``points`` parameter can be a
:class:`pandas.DataFrame` table where the first two columns are x and y (or longitude
and latitude). Note also that there is a ``newcolname`` parameter that will be used to
name the new column of values sampled from the grid.
Alternatively, a NetCDF file path can be passed to ``grid``. An ASCII file path can
also be accepted for ``points``. To save an output ASCII file, a file name argument
needs to be passed to the ``outfile`` parameter.
"""
import pygmt
# Load sample grid and point datasets
grid = pygmt.datasets.load_earth_relief()
points = pygmt.datasets.load_ocean_ridge_points()
# Sample the bathymetry along the world's ocean ridges at specified track points
track = pygmt.grdtrack(points=points, grid=grid, newcolname="bathymetry")
fig = pygmt.Figure()
# Plot the earth relief grid on Cylindrical Stereographic projection, masking land areas
fig.basemap(region="g", projection="Cyl_stere/150/-20/15c", frame=True)
fig.grdimage(grid=grid, cmap="gray")
fig.coast(land="#666666")
# Plot the sampled bathymetry points using circles (c) of 0.15 cm size
# Points are colored using elevation values (normalized for visual purposes)
fig.plot(
x=track.longitude,
y=track.latitude,
style="c0.15c",
cmap="terra",
color=(track.bathymetry - track.bathymetry.mean()) / track.bathymetry.std(),
)
fig.show()
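# A hedged sketch of the file-based workflow described in the module docstring;
# the file names below are hypothetical.
# pygmt.grdtrack(
#     points="ridge_points.txt",  # ASCII table with lon/lat in the first two columns
#     grid="earth_relief.nc",  # NetCDF grid file
#     outfile="sampled_bathymetry.txt",  # write an ASCII table instead of returning one
# )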
| GenericMappingTools/gmt-python | examples/gallery/images/track_sampling.py | Python | bsd-3-clause | 1,614 |
"""
A sub-package for efficiently dealing with polynomials.
Within the documentation for this sub-package, a "finite power series,"
i.e., a polynomial (also referred to simply as a "series") is represented
by a 1-D numpy array of the polynomial's coefficients, ordered from lowest
order term to highest. For example, array([1,2,3]) represents
``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial
applicable to the specific module in question, e.g., `polynomial` (which
"wraps" the "standard" basis) or `chebyshev`. For optimal performance,
all operations on polynomials, including evaluation at an argument, are
implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
"""
from polynomial import *
from chebyshev import *
from polyutils import *
from numpy.testing import Tester
test = Tester(__file__).test
bench = Tester(__file__).bench
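# Illustrative example of the coefficient convention described in the docstring
# above (kept as a comment so importing the package stays side-effect free):
# >>> polyval(2.0, [1, 2, 3])  # 1 + 2*2 + 3*2**2 with the standard basis
# 17.0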
| teoliphant/numpy-refactor | numpy/polynomial/__init__.py | Python | bsd-3-clause | 951 |
from django.core.management.base import BaseCommand
from dojo.models import System_Settings
class Command(BaseCommand):
help = 'Updates product grade calculation'
def handle(self, *args, **options):
code = """def grade_product(crit, high, med, low):
health=100
if crit > 0:
health = 40
health = health - ((crit - 1) * 5)
if high > 0:
if health == 100:
health = 60
health = health - ((high - 1) * 3)
if med > 0:
if health == 100:
health = 80
health = health - ((med - 1) * 2)
if low > 0:
if health == 100:
health = 95
health = health - low
if health < 5:
health = 5
return health
"""
system_settings = System_Settings.objects.get(id=1)
system_settings.product_grade = code
system_settings.save()
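        # Worked examples of the stored formula (illustrative only):
        #   grade_product(1, 0, 0, 0) -> 40  (a single critical caps health at 40)
        #   grade_product(0, 0, 0, 2) -> 93  (lows drop health to 95, then -1 per low)
        #   grade_product(10, 0, 0, 0) -> 5  (the result is floored at 5)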
| rackerlabs/django-DefectDojo | dojo/management/commands/system_settings.py | Python | bsd-3-clause | 1,040 |
"""
Author: Dr. John T. Hwang <[email protected]>
This package is distributed under New BSD license.
Full-factorial sampling.
"""
import numpy as np
from smt.sampling_methods.sampling_method import SamplingMethod
class FullFactorial(SamplingMethod):
def _initialize(self):
self.options.declare(
"weights",
values=None,
types=(list, np.ndarray),
desc="relative sampling weights for each nx dimensions",
)
self.options.declare(
"clip",
default=False,
types=bool,
desc="round number of samples to the sampling number product of each nx dimensions (> asked nt)",
)
def _compute(self, nt):
"""
Compute the requested number of sampling points.
Arguments
---------
nt : int
Number of points requested.
Returns
-------
ndarray[nt, nx]
The sampling locations in the input space.
"""
xlimits = self.options["xlimits"]
nx = xlimits.shape[0]
if self.options["weights"] is None:
weights = np.ones(nx) / nx
else:
weights = np.atleast_1d(self.options["weights"])
weights /= np.sum(weights)
num_list = np.ones(nx, int)
while np.prod(num_list) < nt:
ind = np.argmax(weights - num_list / np.sum(num_list))
num_list[ind] += 1
lins_list = [np.linspace(0.0, 1.0, num_list[kx]) for kx in range(nx)]
x_list = np.meshgrid(*lins_list, indexing="ij")
if self.options["clip"]:
nt = np.prod(num_list)
x = np.zeros((nt, nx))
for kx in range(nx):
x[:, kx] = x_list[kx].reshape(np.prod(num_list))[:nt]
return x
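# Hedged usage sketch (not part of the module): full-factorial samples on a 2-D box.
# import numpy as np
# from smt.sampling_methods import FullFactorial
# xlimits = np.array([[0.0, 1.0], [0.0, 10.0]])
# sampling = FullFactorial(xlimits=xlimits)
# x = sampling(9)  # ndarray of shape (9, 2), laid out on a 3 x 3 grid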
| bouhlelma/smt | smt/sampling_methods/full_factorial.py | Python | bsd-3-clause | 1,806 |
"""
Dealing with SFT tests.
"""
import logging
import sft_meta
import sft_schema as schema
from sft.utils.helpers import strip_args
class SFTPool():
""" This class defines all site functional tests (SFT)s
that shall be executed.
"""
def __init__(self):
self.log = logging.getLogger(__name__)
self.session=sft_meta.Session()
self.log.debug("Initialization finished")
def __del__(self):
self.session.close()
@strip_args
    def add_sft(self, name, cluster_grp, vo_grp, test_suit):
        """ Adding a new SFT to the 'global' pool of SFTs.
params: name - the name of the SFT, must be unique
cluster_grp - the name of the cluster group (see ClusterGroupPool)
to which SFTs shall apply
vo_grp - the name of the VO group (see VOGroupPool), to
which SFT shall apply
test_suit - the suit of tests the SFT consists of
            Notice: the execution time must be set via the set_exectime method
Notice: XXX checks whether cluster_grp, vo_grp and test_suit exist are currently missing
"""
sft = self.session.query(schema.SFTTest).filter_by(name=name).first()
if sft:
self.log.info("SFT test '%s' exists already, overwriting" % name)
else:
self.log.debug("Adding SFT '%s'." % name)
sft = schema.SFTTest()
sft.name = name
sft.cluster_group = cluster_grp
sft.vo_group = vo_grp
sft.test_suit = test_suit
self.session.add(sft)
self.session.commit()
@strip_args
def set_exectime(self, name, minute='0', hour='*',
day='*', month='*', weekday='*'):
""" Setting execution time of the SFT.
params: name - name of the SFT
minute - minute 0-59, default 0
hour - hour 0-23, default *
day - day 1-31, default *
month - month 1-12, default *
weekday - day of week 0-6, Sunday=0, default *
Notice: for each param, you can use crontab notation, e.g. '*', '1-3', '*/5', etc.
"""
sft = self.session.query(schema.SFTTest).filter_by(name=name).first()
if sft:
sft.minute = minute
sft.hour = hour
sft.day = day
sft.month= month
sft.weekday = weekday
self.session.commit()
@strip_args
def remove_sft(self, name):
""" removing SFT from SFT pool.
params: name - name of SFT to remove
"""
sft = self.session.query(schema.SFTTest).filter_by(name=name).first()
if sft:
self.log.info("Removing sft '%s'." % name)
self.session.delete(sft)
self.session.commit()
def list_sfts(self):
""" Listing of all existing SFTs in pool.
returns list of SFT objects
"""
return self.session.query(schema.SFTTest).all()
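# Hedged usage sketch (the group and suite names below are invented):
# pool = SFTPool()
# pool.add_sft('daily_sft', 'all_clusters', 'atlas_vo', 'basic_suite')
# pool.set_exectime('daily_sft', minute='0', hour='*/6')  # run every six hours
# names = [sft.name for sft in pool.list_sfts()]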
| placiflury/gridmonitor-sft | sft/db/sft_handler.py | Python | bsd-3-clause | 3,142 |
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from ..forms import AnonymousUserShippingForm, ShippingAddressesForm
from ...userprofile.forms import get_address_form
from ...userprofile.models import Address
from ...teamstore.utils import get_team
def anonymous_user_shipping_address_view(request, checkout):
team = get_team(request.session['team'])
if team.group_shipping:
address_form, preview = get_address_form(
request.POST or None, country_code=request.country.code,
autocomplete_type='shipping',
initial={'country': request.country.code},
instance=team.shipping_address)
else:
address_form, preview = get_address_form(
request.POST or None, country_code=request.country.code,
autocomplete_type='shipping',
initial={'country': request.country.code},
instance=checkout.shipping_address)
user_form = AnonymousUserShippingForm(
not preview and request.POST or None, initial={'email': checkout.email}
if not preview else request.POST.dict())
if team.group_shipping and user_form.is_valid():
checkout.shipping_address = team.shipping_address
checkout.email = user_form.cleaned_data['email']
return redirect('checkout:shipping-method')
elif all([user_form.is_valid(), address_form.is_valid()]):
checkout.shipping_address = address_form.instance
checkout.email = user_form.cleaned_data['email']
return redirect('checkout:shipping-method')
return TemplateResponse(
request, 'checkout/shipping_address.html', context={
'address_form': address_form, 'user_form': user_form,
'group_shipping': team.group_shipping, 'checkout': checkout})
def user_shipping_address_view(request, checkout):
data = request.POST or None
additional_addresses = request.user.addresses.all()
checkout.email = request.user.email
shipping_address = checkout.shipping_address
if shipping_address is not None and shipping_address.id:
address_form, preview = get_address_form(
data, country_code=request.country.code,
initial={'country': request.country})
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses,
initial={'address': shipping_address.id})
elif shipping_address:
address_form, preview = get_address_form(
data, country_code=shipping_address.country.code,
instance=shipping_address)
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses)
else:
address_form, preview = get_address_form(
data, initial={'country': request.country},
country_code=request.country.code)
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses)
if addresses_form.is_valid() and not preview:
if addresses_form.cleaned_data['address'] != ShippingAddressesForm.NEW_ADDRESS:
address_id = addresses_form.cleaned_data['address']
checkout.shipping_address = Address.objects.get(id=address_id)
return redirect('checkout:shipping-method')
elif address_form.is_valid():
checkout.shipping_address = address_form.instance
return redirect('checkout:shipping-method')
return TemplateResponse(
request, 'checkout/shipping_address.html', context={
'address_form': address_form, 'user_form': addresses_form,
'checkout': checkout, 'additional_addresses': additional_addresses})
| jonathanmeier5/teamstore | saleor/checkout/views/shipping.py | Python | bsd-3-clause | 3,737 |
from httpx import AsyncClient
# Runtime import to avoid syntax errors in samples on Python < 3.5 and reach top-dir
import os
_TOP_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../',
)),
)
_SAMPLES_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../samples/',
)),
)
import sys
sys.path.append(_TOP_DIR)
sys.path.append(_SAMPLES_DIR)
from asyncutils import AsyncTestCase
from wiringfastapi import web
class WiringFastAPITest(AsyncTestCase):
client: AsyncClient
def setUp(self) -> None:
super().setUp()
self.client = AsyncClient(app=web.app, base_url='http://test')
def tearDown(self) -> None:
self._run(self.client.aclose())
super().tearDown()
def test_depends_marker_injection(self):
class ServiceMock:
async def process(self):
return 'Foo'
with web.container.service.override(ServiceMock()):
response = self._run(self.client.get('/'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'result': 'Foo'})
def test_depends_injection(self):
response = self._run(self.client.get('/auth', auth=('john_smith', 'secret')))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'username': 'john_smith', 'password': 'secret'})
| rmk135/objects | tests/unit/wiring/test_wiringfastapi_py36.py | Python | bsd-3-clause | 1,426 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
# Sentinel to avoid the situation where `None` *is* the default value.
NoDefault = collections.namedtuple('NoDefault', [])()
class ValueNotFoundException(Exception):
"""Raised when a value cannot be found. Used for control-flow only."""
class Handler:
def __init__(self, name, prefix='', default=NoDefault, description=None):
# e.g. my_option_name
self.name = name
# e.g. p_my_option_name
self.click_name = prefix + name
self.default = default
self.description = description
self.missing = []
@property
def cli_name(self):
import q2cli.util
# e.g. p-my-option-name
return q2cli.util.to_cli_name(self.click_name)
def get_click_options(self):
"""Should yield 1 or more click.Options"""
raise NotImplementedError()
def get_value(self, arguments, fallback=None):
"""Should find 1 or more arguments and convert to a single API value"""
raise NotImplementedError()
def _locate_value(self, arguments, fallback, multiple=False):
"""Default lookup procedure to find a click.Option provided by user"""
# TODO revisit this interaction between _locate_value, single vs.
# multiple options, and fallbacks. Perhaps handlers should always
# use tuples to store values, even for single options, in order to
# normalize single-vs-multiple option handling. Probably not worth
# revisiting until there are more unit + integration tests of q2cli
# since there's the potential to break things.
# Is it in args?
v = arguments[self.click_name]
missing_value = () if multiple else None
if v != missing_value:
return v
# Does our fallback know about it?
if fallback is not None:
try:
fallback_value = fallback(self.name, self.cli_name)
except ValueNotFoundException:
pass
else:
# TODO fallbacks don't know whether they're handling a single
# vs. multiple option, so the current expectation is that
# fallbacks will always return a single value. Revisit this
# expectation in the future; perhaps fallbacks should be aware
# of single-vs-multiple options, or perhaps they could always
# return a tuple.
if multiple:
fallback_value = (fallback_value,)
return fallback_value
# Do we have a default?
if self.default is not NoDefault:
return self.default
# Give up
self.missing.append(self.cli_name)
raise ValueNotFoundException()
def _parse_boolean(self, string):
"""Parse string representing a boolean into Python bool type.
Supported values match `configparser.ConfigParser.getboolean`.
"""
trues = ['1', 'yes', 'true', 'on']
falses = ['0', 'no', 'false', 'off']
string_lower = string.lower()
if string_lower in trues:
return True
elif string_lower in falses:
return False
else:
import itertools
import click
msg = (
"Error: unrecognized value for --%s flag: %s\n"
"Supported values (case-insensitive): %s" %
(self.cli_name, string,
', '.join(itertools.chain(trues, falses)))
)
click.secho(msg, err=True, fg='red', bold=True)
ctx = click.get_current_context()
ctx.exit(1)
def _add_description(self, option, requirement):
def pretty_cat(a, b, space=1):
if a:
return a + (' ' * space) + b
return b
if self.description:
option.help = pretty_cat(option.help, self.description)
option.help = pretty_cat(option.help, requirement, space=2)
return option
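# Hedged sketch of the subclass contract described in Handler above; this class
# is purely illustrative and not part of q2cli.
# class ExampleFlagHandler(Handler):
#     def get_click_options(self):
#         import q2cli
#         yield q2cli.Option(['--' + self.cli_name], is_flag=True, default=None,
#                            help='Illustrative flag.')
#     def get_value(self, arguments, fallback=None):
#         return self._locate_value(arguments, fallback)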
class VerboseHandler(Handler):
"""Handler for verbose output (--verbose flag)."""
def __init__(self):
super().__init__('verbose', default=False)
def get_click_options(self):
import q2cli
# `is_flag` will set the default to `False`, but `self._locate_value`
# needs to distinguish between the presence or absence of the flag
# provided by the user.
yield q2cli.Option(
['--' + self.cli_name], is_flag=True, default=None,
help='Display verbose output to stdout and/or stderr during '
'execution of this action. [default: %s]' % self.default)
def get_value(self, arguments, fallback=None):
value = self._locate_value(arguments, fallback)
# Value may have been specified in --cmd-config (or another source in
# the future). If we don't have a bool type yet, attempt to interpret a
# string representing a boolean.
if type(value) is not bool:
value = self._parse_boolean(value)
return value
class QuietHandler(Handler):
"""Handler for quiet output (--quiet flag)."""
def __init__(self):
super().__init__('quiet', default=False)
def get_click_options(self):
import q2cli
# `is_flag` will set the default to `False`, but `self._locate_value`
# needs to distinguish between the presence or absence of the flag
# provided by the user.
yield q2cli.Option(
['--' + self.cli_name], is_flag=True, default=None,
help='Silence output if execution is successful '
'(silence is golden). [default: %s]' % self.default)
def get_value(self, arguments, fallback=None):
value = self._locate_value(arguments, fallback)
# Value may have been specified in --cmd-config (or another source in
# the future). If we don't have a bool type yet, attempt to interpret a
# string representing a boolean.
if type(value) is not bool:
value = self._parse_boolean(value)
return value
class OutputDirHandler(Handler):
"""Meta handler which returns a fallback function as its value."""
def __init__(self):
super().__init__('output_dir')
def get_click_options(self):
import click
import q2cli
yield q2cli.Option(
['--' + self.cli_name],
type=click.Path(exists=False, dir_okay=True, file_okay=False,
writable=True),
help='Output unspecified results to a directory')
def get_value(self, arguments, fallback=None):
import os
import os.path
import click
try:
path = self._locate_value(arguments, fallback=fallback)
# TODO: do we want a --force like flag?
if os.path.exists(path):
click.secho("Error: --%s directory already exists, won't "
"overwrite." % self.cli_name, err=True, fg='red',
bold=True)
ctx = click.get_current_context()
ctx.exit(1)
os.makedirs(path)
def fallback_(name, cli_name):
return os.path.join(path, name)
return fallback_
except ValueNotFoundException:
# Always fail to find a value as this handler doesn't exist.
def fail(*_):
raise ValueNotFoundException()
return fail
class CommandConfigHandler(Handler):
"""Meta handler which returns a fallback function as its value."""
def __init__(self, cli_plugin, cli_action):
self.cli_plugin = cli_plugin
self.cli_action = cli_action
super().__init__('cmd_config')
def get_click_options(self):
import click
import q2cli
yield q2cli.Option(
['--' + self.cli_name],
type=click.Path(exists=True, dir_okay=False, file_okay=True,
readable=True),
help='Use config file for command options')
def get_value(self, arguments, fallback=None):
import configparser
import warnings
try:
path = self._locate_value(arguments, fallback=fallback)
config = configparser.ConfigParser()
config.read(path)
try:
config_section = config['.'.join([
self.cli_plugin, self.cli_action
])]
except KeyError:
warnings.warn("Config file does not contain a section"
" for %s"
% '.'.join([self.cli_plugin, self.cli_action]),
UserWarning)
raise ValueNotFoundException()
def fallback_(name, cli_name):
try:
return config_section[cli_name]
except KeyError:
raise ValueNotFoundException()
return fallback_
except ValueNotFoundException:
# Always fail to find a value as this handler doesn't exist.
def fail(*_):
raise ValueNotFoundException()
return fail
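# Hedged sketch of the --cmd-config file consumed above: an INI file whose section
# names are "<plugin>.<action>" and whose keys are CLI option names. The plugin,
# action and values below are invented for illustration.
#   [example-plugin.example-action]
#   p-some-parameter = 42
#   o-some-output = result.qza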
class GeneratedHandler(Handler):
def __init__(self, name, repr, ast, default=NoDefault, description=None):
super().__init__(name, prefix=self.prefix, default=default,
description=description)
self.repr = repr
self.ast = ast
class CollectionHandler(GeneratedHandler):
view_map = {
'List': list,
'Set': set
}
def __init__(self, inner_handler, **kwargs):
self.inner_handler = inner_handler
# inner_handler needs to be set first so the prefix lookup works
super().__init__(**kwargs)
self.view_type = self.view_map[self.ast['name']]
@property
def prefix(self):
return self.inner_handler.prefix
def get_click_options(self):
import q2cli.core
for option in self.inner_handler.get_click_options():
option.multiple = True
# validation happens on a callback for q2cli.core.Option, so unset
# it because we need standard click behavior for multi-options
# without this, the result of not-passing a value is `None` instead
# of `()` which confuses ._locate_value
option.callback = None
option.type = q2cli.core.MultipleType(option.type)
yield option
def get_value(self, arguments, fallback=None):
args = self._locate_value(arguments, fallback, multiple=True)
if args is None:
return None
decoded_values = []
for arg in args:
# Use an empty dict because we don't need the inner handler to
# look for anything; that's our job. We just need it to decode
# whatever it was we found.
empty = collections.defaultdict(lambda: None)
decoded = self.inner_handler.get_value(empty,
fallback=lambda *_: arg)
decoded_values.append(decoded)
value = self.view_type(decoded_values)
if len(value) != len(decoded_values):
self._error_with_duplicate_in_set(decoded_values)
return value
def _error_with_duplicate_in_set(self, elements):
import click
import collections
counter = collections.Counter(elements)
dups = {name for name, count in counter.items() if count > 1}
ctx = click.get_current_context()
click.echo(ctx.get_usage() + '\n', err=True)
click.secho("Error: Option --%s was given these values: %r more than "
"one time, values passed should be unique."
% (self.cli_name, dups), err=True, fg='red', bold=True)
ctx.exit(1)
class ArtifactHandler(GeneratedHandler):
prefix = 'i_'
def get_click_options(self):
import q2cli
import q2cli.core
type = q2cli.core.ResultPath(repr=self.repr, exists=True,
file_okay=True, dir_okay=False,
readable=True)
if self.default is None:
requirement = '[optional]'
else:
requirement = '[required]'
option = q2cli.Option(['--' + self.cli_name], type=type, help="")
yield self._add_description(option, requirement)
def get_value(self, arguments, fallback=None):
import qiime2
path = self._locate_value(arguments, fallback)
if path is None:
return None
else:
return qiime2.Artifact.load(path)
class ResultHandler(GeneratedHandler):
prefix = 'o_'
def get_click_options(self):
import q2cli
type = q2cli.core.ResultPath(self.repr, exists=False, file_okay=True,
dir_okay=False, writable=True)
option = q2cli.Option(['--' + self.cli_name], type=type, help="")
yield self._add_description(
option, '[required if not passing --output-dir]')
def get_value(self, arguments, fallback=None):
return self._locate_value(arguments, fallback)
def parameter_handler_factory(name, repr, ast, default=NoDefault,
description=None):
if ast['name'] == 'Metadata':
return MetadataHandler(name, default=default, description=description)
elif ast['name'] == 'MetadataCategory':
return MetadataCategoryHandler(name, default=default,
description=description)
else:
return RegularParameterHandler(name, repr, ast, default=default,
description=description)
class MetadataHandler(Handler):
def __init__(self, name, default=NoDefault, description=None):
if default is not NoDefault and default is not None:
raise TypeError(
"The only supported default value for Metadata is `None`. "
"Found this default value: %r" % (default,))
super().__init__(name, prefix='m_', default=default,
description=description)
self.click_name += '_file'
def get_click_options(self):
import click
import q2cli
import q2cli.core
name = '--' + self.cli_name
type = click.Path(exists=True, file_okay=True, dir_okay=False,
readable=True)
type = q2cli.core.MultipleType(type)
help = ('Metadata file or artifact viewable as metadata. This '
'option may be supplied multiple times to merge metadata.')
if self.default is None:
requirement = '[optional]'
else:
requirement = '[required]'
option = q2cli.Option([name], type=type, help=help, multiple=True)
yield self._add_description(option, requirement)
def get_value(self, arguments, fallback=None):
import os
import qiime2
import q2cli.util
paths = self._locate_value(arguments, fallback, multiple=True)
if paths is None:
return paths
metadata = []
for path in paths:
try:
# check to see if path is an artifact
artifact = qiime2.Artifact.load(path)
except Exception:
try:
metadata.append(qiime2.Metadata.load(path))
except Exception as e:
header = ("There was an issue with loading the file %s as "
"metadata:" % path)
with open(os.devnull, 'w') as dev_null:
q2cli.util.exit_with_error(
e, header=header, file=dev_null,
suppress_footer=True)
else:
try:
metadata.append(qiime2.Metadata.from_artifact(artifact))
except Exception as e:
header = ("There was an issue with viewing the artifact "
"%s as metadata:" % path)
with open(os.devnull, 'w') as dev_null:
q2cli.util.exit_with_error(
e, header=header, file=dev_null,
suppress_footer=True)
return metadata[0].merge(*metadata[1:])
class MetadataCategoryHandler(Handler):
def __init__(self, name, default=NoDefault, description=None):
if default is not NoDefault and default is not None:
raise TypeError(
"The only supported default value for MetadataCategory is "
"`None`. Found this default value: %r" % (default,))
super().__init__(name, prefix='m_', default=default,
description=description)
self.click_name += '_category'
# Not passing `description` to metadata handler because `description`
# applies to the metadata category (`self`).
self.metadata_handler = MetadataHandler(name, default=default)
def get_click_options(self):
import q2cli
name = '--' + self.cli_name
type = str
help = ('Category from metadata file or artifact viewable as '
'metadata.')
if self.default is None:
requirement = '[optional]'
else:
requirement = '[required]'
option = q2cli.Option([name], type=type, help=help)
yield from self.metadata_handler.get_click_options()
yield self._add_description(option, requirement)
def get_value(self, arguments, fallback=None):
# Attempt to find all options before erroring so that all handlers'
# missing options can be displayed to the user.
try:
metadata_value = self.metadata_handler.get_value(arguments,
fallback=fallback)
except ValueNotFoundException:
pass
try:
category_value = self._locate_value(arguments, fallback)
except ValueNotFoundException:
pass
missing = self.metadata_handler.missing + self.missing
if missing:
self.missing = missing
raise ValueNotFoundException()
# If metadata category is optional, there is a chance for metadata to
# be provided without a metadata category, or vice versa.
if metadata_value is None and category_value is not None:
self.missing.append(self.metadata_handler.cli_name)
raise ValueNotFoundException()
elif metadata_value is not None and category_value is None:
self.missing.append(self.cli_name)
raise ValueNotFoundException()
if metadata_value is None and category_value is None:
return None
else:
return metadata_value.get_category(category_value)
class RegularParameterHandler(GeneratedHandler):
prefix = 'p_'
def __init__(self, name, repr, ast, default=NoDefault, description=None):
import q2cli.util
super().__init__(name, repr, ast, default=default,
description=description)
# TODO: just create custom click.ParamType to avoid this silliness
if ast['type'] == 'collection':
ast, = ast['fields']
self.type = q2cli.util.convert_primitive(ast)
def get_click_options(self):
import q2cli
import q2cli.util
if self.type is bool:
no_name = self.prefix + 'no_' + self.name
cli_no_name = q2cli.util.to_cli_name(no_name)
name = '--' + self.cli_name + '/--' + cli_no_name
# click.Option type is determined implicitly for flags with
# secondary options, and explicitly passing type=bool results in a
# TypeError, so we pass type=None (the default).
option_type = None
else:
name = '--' + self.cli_name
option_type = self.type
if self.default is NoDefault:
requirement = '[required]'
elif self.default is None:
requirement = '[optional]'
else:
requirement = '[default: %s]' % self.default
# Pass `default=None` and `show_default=False` to `click.Option`
# because the handlers are responsible for resolving missing values and
# supplying defaults. Telling Click about the default value here makes
# it impossible to determine whether the user supplied or omitted a
# value once the handlers are invoked.
option = q2cli.Option([name], type=option_type, default=None,
show_default=False, help='')
yield self._add_description(option, requirement)
def get_value(self, arguments, fallback=None):
value = self._locate_value(arguments, fallback)
if value is None:
return None
elif self.type is bool:
# TODO: should we defer to the Bool primitive? It only allows
# 'true' and 'false'.
if type(value) is not bool:
value = self._parse_boolean(value)
return value
else:
import qiime2.sdk
primitive = qiime2.sdk.parse_type(self.repr, expect='primitive')
# TODO/HACK: the repr is the primitive used, but since there's a
# collection handler managing the set/list this get_value should
# handle only the pieces. This is super gross, but would be
# unecessary if click.ParamTypes were implemented for each
# kind of QIIME 2 input.
if self.ast['type'] == 'collection':
primitive, = primitive.fields
return primitive.decode(value)
| gregcaporaso/q2cli | q2cli/handlers.py | Python | bsd-3-clause | 22,305 |
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| flightgong/scikit-learn | examples/plot_roc.py | Python | bsd-3-clause | 3,681 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry import value
from telemetry.value import improvement_direction
from telemetry.value import none_values
from telemetry.value import scalar
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir,
name='http://www.bar.com/'))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set, story_set.base_dir,
name='http://www.baz.com/'))
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set, story_set.base_dir,
name='http://www.foo.com/'))
self.story_set = story_set
@property
def pages(self):
return self.story_set.stories
class ValueTest(TestBase):
def testRepr(self):
page0 = self.pages[0]
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
description='desc', tir_label='my_ir',
improvement_direction=improvement_direction.DOWN)
expected = ('ScalarValue(http://www.bar.com/, x, unit, 3, important=True, '
'description=desc, tir_label=my_ir, '
'improvement_direction=down, grouping_keys={}')
self.assertEquals(expected, str(v))
def testBuildbotValueType(self):
page0 = self.pages[0]
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
improvement_direction=improvement_direction.DOWN)
self.assertEquals('default', v.GetBuildbotDataType(
value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
self.assertEquals([3], v.GetBuildbotValue())
self.assertEquals(('x', page0.name),
v.GetChartAndTraceNameForPerPageResult())
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=False,
improvement_direction=improvement_direction.DOWN)
self.assertEquals(
'unimportant',
v.GetBuildbotDataType(value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
def testScalarSamePageMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
description='important metric',
improvement_direction=improvement_direction.UP)
v1 = scalar.ScalarValue(page0, 'x', 'unit', 2,
description='important metric',
improvement_direction=improvement_direction.UP)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals('important metric', vM.description)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
self.assertEquals(improvement_direction.UP, vM.improvement_direction)
def testScalarDifferentPageMerging(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
description='important metric',
improvement_direction=improvement_direction.UP)
v1 = scalar.ScalarValue(page1, 'x', 'unit', 2,
description='important metric',
improvement_direction=improvement_direction.UP)
vM = scalar.ScalarValue.MergeLikeValuesFromDifferentPages([v0, v1])
self.assertEquals(None, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals('important metric', vM.description)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
self.assertEquals(improvement_direction.UP, vM.improvement_direction)
def testScalarWithNoneValueMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(
page0, 'x', 'unit', 1, improvement_direction=improvement_direction.DOWN)
v1 = scalar.ScalarValue(page0, 'x', 'unit', None, none_value_reason='n',
improvement_direction=improvement_direction.DOWN)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(None, vM.values)
expected_none_value_reason = (
'Merging values containing a None value results in a None value. '
'None values: [ScalarValue(http://www.bar.com/, x, unit, None, '
'important=True, description=None, tir_label=None, '
'improvement_direction=down, grouping_keys={}]')
self.assertEquals(expected_none_value_reason, vM.none_value_reason)
def testScalarWithNoneValueMustHaveNoneReason(self):
page0 = self.pages[0]
self.assertRaises(none_values.NoneValueMissingReason,
lambda: scalar.ScalarValue(
page0, 'x', 'unit', None,
improvement_direction=improvement_direction.UP))
def testScalarWithNoneReasonMustHaveNoneValue(self):
page0 = self.pages[0]
self.assertRaises(none_values.ValueMustHaveNoneValue,
lambda: scalar.ScalarValue(
page0, 'x', 'unit', 1, none_value_reason='n',
improvement_direction=improvement_direction.UP))
def testAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', 42, important=False,
improvement_direction=improvement_direction.DOWN)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {'value': 42})
def testNoneValueAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', None, important=False,
none_value_reason='n',
improvement_direction=improvement_direction.DOWN)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {'value': None, 'none_value_reason': 'n'})
def testFromDictInt(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42,
'improvement_direction': improvement_direction.DOWN,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42)
self.assertEquals(v.improvement_direction, improvement_direction.DOWN)
def testFromDictFloat(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42.4,
'improvement_direction': improvement_direction.UP,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42.4)
def testFromDictWithoutImprovementDirection(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertIsNone(v.improvement_direction)
def testFromDictNoneValue(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': None,
'none_value_reason': 'n',
'improvement_direction': improvement_direction.UP,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, None)
self.assertEquals(v.none_value_reason, 'n')
| catapult-project/catapult-csm | telemetry/telemetry/value/scalar_unittest.py | Python | bsd-3-clause | 7,682 |
#!/usr/bin/env python3
"""Creates training data for the BERT network training
(noisified + masked gold predictions) using the input corpus.
The masked Gold predictions use Neural Monkey's PAD_TOKEN to indicate
tokens that should not be classified during training.
We only leave `coverage` percent of symbols for classification. Each selected
symbol is replaced by the `mask_token` with probability `mask_prob`, by a random
vocabulary token with probability `replace_prob`, and left unchanged on input
otherwise (i.e. with probability `1 - mask_prob - replace_prob`).
"""
import argparse
import os
import numpy as np
from neuralmonkey.logging import log as _log
from neuralmonkey.vocabulary import (
Vocabulary, PAD_TOKEN, UNK_TOKEN, from_wordlist)
def log(message: str, color: str = "blue") -> None:
_log(message, color)
def main() -> None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--input_file", type=str, default="/dev/stdin")
parser.add_argument("--vocabulary", type=str, required=True)
parser.add_argument("--output_prefix", type=str, default=None)
parser.add_argument("--mask_token", type=str, default=UNK_TOKEN,
help="token used to mask the tokens")
parser.add_argument("--coverage", type=float, default=0.15,
help=("percentage of tokens that should be left "
"for classification during training"))
parser.add_argument("--mask_prob", type=float, default=0.8,
help=("probability of the classified token being "
"replaced by a different token on input"))
parser.add_argument("--replace_prob", type=float, default=0.1,
help=("probability of the classified token being "
"replaced by a random token instead of "
"mask_token"))
parser.add_argument("--vocab_contains_header", type=bool, default=True)
parser.add_argument("--vocab_contains_frequencies",
type=bool, default=True)
args = parser.parse_args()
assert (args.coverage <= 1 and args.coverage >= 0)
assert (args.mask_prob <= 1 and args.mask_prob >= 0)
assert (args.replace_prob <= 1 and args.replace_prob >= 0)
log("Loading vocabulary.")
vocabulary = from_wordlist(
args.vocabulary,
contains_header=args.vocab_contains_header,
contains_frequencies=args.vocab_contains_frequencies)
mask_prob = args.mask_prob
replace_prob = args.replace_prob
keep_prob = 1 - mask_prob - replace_prob
sample_probs = (keep_prob, mask_prob, replace_prob)
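    # With the defaults (coverage=0.15, mask_prob=0.8, replace_prob=0.1), roughly
    # 15% of the tokens in a sentence are selected for prediction; each selected
    # token is kept unchanged with probability 0.1, replaced by mask_token with
    # probability 0.8, and replaced by a random vocabulary token with probability 0.1.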
output_prefix = args.output_prefix
if output_prefix is None:
output_prefix = args.input_file
out_f_noise = "{}.noisy".format(output_prefix)
out_f_mask = "{}.mask".format(output_prefix)
out_noise_h = open(out_f_noise, "w", encoding="utf-8")
out_mask_h = open(out_f_mask, "w", encoding="utf-8")
log("Processing data.")
with open(args.input_file, "r", encoding="utf-8") as input_h:
# TODO: performance optimizations
for line in input_h:
line = line.strip().split(" ")
num_samples = int(args.coverage * len(line))
sampled_indices = np.random.choice(len(line), num_samples, False)
output_noisy = list(line)
output_masked = [PAD_TOKEN] * len(line)
for i in sampled_indices:
random_token = np.random.choice(vocabulary.index_to_word[4:])
new_token = np.random.choice(
[line[i], args.mask_token, random_token], p=sample_probs)
output_noisy[i] = new_token
output_masked[i] = line[i]
out_noise_h.write(str(" ".join(output_noisy)) + "\n")
out_mask_h.write(str(" ".join(output_masked)) + "\n")
if __name__ == "__main__":
main()
| ufal/neuralmonkey | scripts/preprocess_bert.py | Python | bsd-3-clause | 3,940 |
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase
class StrTests(TranspileTestCase):
def test_setattr(self):
self.assertCodeExecution("""
x = "Hello, world"
x.attr = 42
print('Done.')
""")
def test_endswith(self):
self.assertCodeExecution("""
s = "abracadabra"
suffix = "abra"
            print(s.endswith(suffix))
""")
self.assertCodeExecution("""
s = "abracadabra"
suffix = "ABRA"
            print(s.endswith(suffix))
""")
self.assertCodeExecution("""
s = "ABRACADABRA"
suffix = "abra"
            print(s.endswith(suffix))
""")
# self.assertCodeExecution("""
# print('abracadabra'.endswith('abra'))
# """)
def test_getattr(self):
self.assertCodeExecution("""
x = "Hello, world"
print(x.attr)
print('Done.')
""")
def test_getitem(self):
# Simple positive index
self.assertCodeExecution("""
x = "12345"
print(x[2])
""")
# Simple negative index
self.assertCodeExecution("""
x = "12345"
print(x[-2])
""")
# Positive index out of range
self.assertCodeExecution("""
x = "12345"
print(x[10])
""")
# Negative index out of range
self.assertCodeExecution("""
x = "12345"
print(x[-10])
""")
def test_slice(self):
# Full slice
self.assertCodeExecution("""
x = "12345"
print(x[:])
""")
# Left bound slice
self.assertCodeExecution("""
x = "12345"
print(x[1:])
""")
# Right bound slice
self.assertCodeExecution("""
x = "12345"
print(x[:4])
""")
# Slice bound in both directions
self.assertCodeExecution("""
x = "12345"
print(x[1:4])
""")
# Slice bound in both directions with end out of bounds
self.assertCodeExecution("""
x = "12345"
print(x[1:6])
""")
# Slice bound in both directions with start out of bounds
self.assertCodeExecution("""
x = "12345"
print(x[6:7])
""")
def test_case_changes(self):
self.assertCodeExecution("""
for s in ['hello, world', 'HEllo, WORLD', 'átomo', '']:
print(s.capitalize())
print(s.lower())
# print(s.swap())
print(s.title())
print(s.upper())
""")
def test_index(self):
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('world'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', 1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', 1, 3))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', 1, 100))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', 1, -1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', -4))
""")
def test_count(self):
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('e'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('a'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll', 3))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll', 3, 4))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll', 0, 4))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll', 0, 100))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('hell', 1, -1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('hell', -4))
""")
def test_find(self):
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('world'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', 1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', 1, 3))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', 1, 100))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', 1, -1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', -4))
""")
def test_expand(self):
self.assertCodeExecution("""
print('\\t'.expandtabs())
print('a\\t'.expandtabs())
print('aa\\t'.expandtabs())
print('aaa\\t'.expandtabs())
print('aaaaaaaa\\t'.expandtabs())
print('a\\naa\\t'.expandtabs())
print('\\t'.expandtabs(3))
print('a\\t'.expandtabs(3))
print('aa\\t'.expandtabs(7))
print('aaa\\t'.expandtabs(4))
print('aaaaaaaa\\t'.expandtabs(4))
print('a\\naa\\t'.expandtabs(4))
""")
def test_title(self):
self.assertCodeExecution("""
s = ' foo bar baz '
print(s.title())
""")
def test_len(self):
self.assertCodeExecution("""
s = ' foo bar baz '
print(len(s))
""")
class UnaryStrOperationTests(UnaryOperationTestCase, TranspileTestCase):
data_type = 'str'
not_implemented = [
]
class BinaryStrOperationTests(BinaryOperationTestCase, TranspileTestCase):
data_type = 'str'
not_implemented = [
'test_add_class',
'test_add_frozenset',
'test_and_class',
'test_and_frozenset',
'test_eq_class',
'test_eq_frozenset',
'test_floor_divide_class',
'test_floor_divide_complex',
'test_floor_divide_frozenset',
'test_ge_class',
'test_ge_frozenset',
'test_gt_class',
'test_gt_frozenset',
'test_le_class',
'test_le_frozenset',
'test_lshift_class',
'test_lshift_frozenset',
'test_lt_class',
'test_lt_frozenset',
'test_modulo_bool',
'test_modulo_bytes',
'test_modulo_bytearray',
'test_modulo_class',
'test_modulo_complex',
'test_modulo_dict',
'test_modulo_float',
'test_modulo_frozenset',
'test_modulo_slice',
'test_modulo_int',
'test_modulo_list',
'test_modulo_None',
'test_modulo_NotImplemented',
'test_modulo_range',
'test_modulo_set',
'test_modulo_str',
'test_modulo_tuple',
'test_multiply_class',
'test_multiply_frozenset',
'test_ne_class',
'test_ne_frozenset',
'test_or_class',
'test_or_frozenset',
'test_power_class',
'test_power_frozenset',
'test_rshift_class',
'test_rshift_frozenset',
'test_subscr_bool',
'test_subscr_class',
'test_subscr_frozenset',
'test_subscr_slice',
'test_subtract_class',
'test_subtract_frozenset',
'test_true_divide_class',
'test_true_divide_frozenset',
'test_xor_class',
'test_xor_frozenset',
]
class InplaceStrOperationTests(InplaceOperationTestCase, TranspileTestCase):
data_type = 'str'
not_implemented = [
'test_add_class',
'test_add_frozenset',
'test_and_class',
'test_and_frozenset',
'test_floor_divide_class',
'test_floor_divide_complex',
'test_floor_divide_frozenset',
'test_lshift_class',
'test_lshift_frozenset',
'test_modulo_bool',
'test_modulo_bytes',
'test_modulo_bytearray',
'test_modulo_class',
'test_modulo_complex',
'test_modulo_dict',
'test_modulo_float',
'test_modulo_frozenset',
'test_modulo_slice',
'test_modulo_int',
'test_modulo_list',
'test_modulo_None',
'test_modulo_NotImplemented',
'test_modulo_range',
'test_modulo_set',
'test_modulo_str',
'test_modulo_tuple',
'test_multiply_class',
'test_multiply_frozenset',
'test_or_class',
'test_or_frozenset',
'test_power_class',
'test_power_frozenset',
'test_rshift_class',
'test_rshift_frozenset',
'test_subtract_class',
'test_subtract_frozenset',
'test_true_divide_class',
'test_true_divide_frozenset',
'test_xor_class',
'test_xor_frozenset',
]
| Felix5721/voc | tests/datatypes/test_str.py | Python | bsd-3-clause | 9,931 |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import pgettext_lazy
from .base import Product
from .variants import (ProductVariant, PhysicalProduct, ColoredVariant,
StockedProduct)
class Bag(PhysicalProduct, Product, ColoredVariant):
class Meta:
app_label = 'product'
class Shirt(PhysicalProduct, Product, ColoredVariant):
class Meta:
app_label = 'product'
class BagVariant(ProductVariant, StockedProduct):
product = models.ForeignKey(Bag, related_name='variants')
class Meta:
app_label = 'product'
@python_2_unicode_compatible
class ShirtVariant(ProductVariant, StockedProduct):
SIZE_CHOICES = (
('xs', pgettext_lazy('Variant size', 'XS')),
('s', pgettext_lazy('Variant size', 'S')),
('m', pgettext_lazy('Variant size', 'M')),
('l', pgettext_lazy('Variant size', 'L')),
('xl', pgettext_lazy('Variant size', 'XL')),
('xxl', pgettext_lazy('Variant size', 'XXL')))
product = models.ForeignKey(Shirt, related_name='variants')
size = models.CharField(
pgettext_lazy('Variant field', 'size'), choices=SIZE_CHOICES,
max_length=3)
class Meta:
app_label = 'product'
def __str__(self):
return '%s (%s)' % (self.product.name, self.size)
| hongquan/saleor | saleor/product/models/products.py | Python | bsd-3-clause | 1,423 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUVNFThresholdPolicy(NURESTObject):
""" Represents a VNFThresholdPolicy in the VSD
Notes:
VNF Threshold Policy represents thresholds for resources consumed by VNF instance running on NS Gateway and action to be taken when resource utilization crosses configured thresholds.
"""
__rest_name__ = "vnfthresholdpolicy"
__resource_name__ = "vnfthresholdpolicies"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ACTION_SHUTOFF = "SHUTOFF"
CONST_ACTION_NONE = "NONE"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a VNFThresholdPolicy instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> vnfthresholdpolicy = NUVNFThresholdPolicy(id=u'xxxx-xxx-xxx-xxx', name=u'VNFThresholdPolicy')
>>> vnfthresholdpolicy = NUVNFThresholdPolicy(data=my_dict)
"""
super(NUVNFThresholdPolicy, self).__init__()
# Read/Write Attributes
self._cpu_threshold = None
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._action = None
self._memory_threshold = None
self._description = None
self._min_occurrence = None
self._embedded_metadata = None
self._entity_scope = None
self._monit_interval = None
self._creation_date = None
self._assoc_entity_type = None
self._storage_threshold = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="cpu_threshold", remote_name="CPUThreshold", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="action", remote_name="action", attribute_type=str, is_required=False, is_unique=False, choices=[u'NONE', u'SHUTOFF'])
self.expose_attribute(local_name="memory_threshold", remote_name="memoryThreshold", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="min_occurrence", remote_name="minOccurrence", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="monit_interval", remote_name="monitInterval", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="storage_threshold", remote_name="storageThreshold", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def cpu_threshold(self):
""" Get cpu_threshold value.
Notes:
Threshold for CPU usage
This attribute is named `CPUThreshold` in VSD API.
"""
return self._cpu_threshold
@cpu_threshold.setter
def cpu_threshold(self, value):
""" Set cpu_threshold value.
Notes:
Threshold for CPU usage
This attribute is named `CPUThreshold` in VSD API.
"""
self._cpu_threshold = value
@property
def name(self):
""" Get name value.
Notes:
Name of VNF agent policy
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of VNF agent policy
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def action(self):
""" Get action value.
Notes:
Action to be taken on threshold crossover
"""
return self._action
@action.setter
def action(self, value):
""" Set action value.
Notes:
Action to be taken on threshold crossover
"""
self._action = value
@property
def memory_threshold(self):
""" Get memory_threshold value.
Notes:
Threshold for memory usage
This attribute is named `memoryThreshold` in VSD API.
"""
return self._memory_threshold
@memory_threshold.setter
def memory_threshold(self, value):
""" Set memory_threshold value.
Notes:
Threshold for memory usage
This attribute is named `memoryThreshold` in VSD API.
"""
self._memory_threshold = value
@property
def description(self):
""" Get description value.
Notes:
Description of VNF agent policy
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of VNF agent policy
"""
self._description = value
@property
def min_occurrence(self):
""" Get min_occurrence value.
Notes:
                Minimum number of threshold crossover occurrences during the monitoring interval before taking the specified action
This attribute is named `minOccurrence` in VSD API.
"""
return self._min_occurrence
@min_occurrence.setter
def min_occurrence(self, value):
""" Set min_occurrence value.
Notes:
                Minimum number of threshold crossover occurrences during the monitoring interval before taking the specified action
This attribute is named `minOccurrence` in VSD API.
"""
self._min_occurrence = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only up to a maximum number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only up to a maximum number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def monit_interval(self):
""" Get monit_interval value.
Notes:
Monitoring interval (minutes) for threshold crossover occurrences to be considered
This attribute is named `monitInterval` in VSD API.
"""
return self._monit_interval
@monit_interval.setter
def monit_interval(self, value):
""" Set monit_interval value.
Notes:
Monitoring interval (minutes) for threshold crossover occurrences to be considered
This attribute is named `monitInterval` in VSD API.
"""
self._monit_interval = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def assoc_entity_type(self):
""" Get assoc_entity_type value.
Notes:
                Type of the entity to which the Metadata is associated.
This attribute is named `assocEntityType` in VSD API.
"""
return self._assoc_entity_type
@assoc_entity_type.setter
def assoc_entity_type(self, value):
""" Set assoc_entity_type value.
Notes:
                Type of the entity to which the Metadata is associated.
This attribute is named `assocEntityType` in VSD API.
"""
self._assoc_entity_type = value
@property
def storage_threshold(self):
""" Get storage_threshold value.
Notes:
Threshold for storage usage
This attribute is named `storageThreshold` in VSD API.
"""
return self._storage_threshold
@storage_threshold.setter
def storage_threshold(self, value):
""" Set storage_threshold value.
Notes:
Threshold for storage usage
This attribute is named `storageThreshold` in VSD API.
"""
self._storage_threshold = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
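# -----------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the generated SDK). It exercises
# just the class defined above. Pushing such an object to VSD would additionally
# require an authenticated bambou/vspk session, which is assumed to exist
# elsewhere and is not shown here.
#
#     policy = NUVNFThresholdPolicy(name=u'vnf-resource-guard')
#     policy.cpu_threshold = 80
#     policy.memory_threshold = 75
#     policy.storage_threshold = 90
#     policy.min_occurrence = 3
#     policy.monit_interval = 10
#     policy.action = NUVNFThresholdPolicy.CONST_ACTION_SHUTOFF
#     # parent.create_child(policy)  # hypothetical parent object in a live session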
| nuagenetworks/vspk-python | vspk/v6/nuvnfthresholdpolicy.py | Python | bsd-3-clause | 16,674 |
from __future__ import division, print_function, absolute_import
#from pnet.vzlog import default as vz
import numpy as np
import amitgroup as ag
import itertools as itr
import sys
import os
#import gv
import pnet
import time
def test(ims, labels, net):
yhat = net.classify(ims)
return yhat == labels
if pnet.parallel.main(__name__):
print("1")
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument('seed', metavar='<seed>', type=int, help='Random seed')
parser.add_argument('param', metavar='<param>', type=float)
args0 = parser.parse_args()
param = args0.param
#for i in xrange(1, 7):
# print(make_support(i, 4).astype(np.uint8))
#params = randomize_layers_parameters(args0.seed)
#print(params)
unsup_training_times = []
sup_training_times = []
testing_times = []
error_rates = []
all_num_parts = []
maxdepth = 7
print("2")
# Switch which experiment here
#from pnet.mnist_danny import parse_background_random as loadf
from pnet.mnist_danny import parse_background_images as loadf
print("Loading...")
mnist_data = loadf()
print("Done.")
for training_seed in xrange(1):
layers = [
#pnet.IntensityThresholdLayer(),
pnet.EdgeLayer(k=5, radius=1, spread='orthogonal', minimum_contrast=0.05),#, pre_blurring=1.0),
#pnet.IntensityThresholdLayer(),
#pnet.IntensityThresholdLayer(),
pnet.PartsLayer(250, (7, 7), settings=dict(outer_frame=1,
em_seed=training_seed,
threshold=2,
samples_per_image=60,
max_samples=200000,
train_limit=10000,
min_prob=0.00005,
)),
pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)),
]
if 0:
layers += [
pnet.RandomForestPartsLayer(256, (5, 5), settings=dict(outer_frame=1,
em_seed=training_seed,
threshold=2,
samples_per_image=40,
max_samples=200000,
train_limit=10000,
min_prob=0.0005,
trees=10,
max_depth=3,
)),
pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)),
]
elif 0:
layers += [
pnet.GaussianPartsLayer(100, (5, 5), settings=dict(
em_seed=training_seed,
samples_per_image=40,
max_samples=200000,
train_limit=100000,
)),
pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)),
]
elif 0:
layers += [
pnet.PartsLayer(1000, (5, 5), settings=dict(outer_frame=1,
em_seed=training_seed,
threshold=2,
samples_per_image=40,
max_samples=200000,
train_limit=100000,
min_prob=0.0005,
)),
pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)),
]
elif 0:
layers += [
pnet.BinaryTreePartsLayer(maxdepth, (5, 5), settings=dict(outer_frame=1,
em_seed=training_seed,
threshold=2,
samples_per_image=40,
max_samples=200000,
train_limit=10000,
min_prob=0.005,
#keypoint_suppress_radius=1,
min_samples_per_part=50,
split_criterion='IG',
split_entropy=0.2,
min_information_gain=0.01,
)),
pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)),
]
elif 0:
layers += [
pnet.BinaryTreePartsLayer(maxdepth, (5, 5), settings=dict(outer_frame=1,
em_seed=training_seed,
threshold=2,
samples_per_image=40,
max_samples=200000,
train_limit=10000,
min_prob=0.0005,
#keypoint_suppress_radius=1,
min_samples_per_part=50,
split_criterion=split_criterion,
split_entropy=split_entropy,
min_information_gain=split_entropy,
)),
pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)),
]
        # NOTE: the following list literal is neither assigned nor appended to
        # ``layers``; it is effectively disabled (dead) experimental code.
        [
pnet.BinaryTreePartsLayer(10, (1, 1), settings=dict(outer_frame=0,
em_seed=training_seed+1,
threshold=1,
samples_per_image=200,
max_samples=1000000,
train_limit=10000,
#min_information_gain=0.05,
split_entropy=0.05,
min_prob=0.0005
)),
pnet.PoolingLayer(shape=(1, 1), strides=(1, 1)),
]
layers += [
pnet.MixtureClassificationLayer(n_components=1, min_prob=1e-5),
#pnet.SVMClassificationLayer(C=None),
]
net = pnet.PartsNet(layers)
TRAIN_SAMPLES = 10000
#TRAIN_SAMPLES = 1200
print(training_seed)
digits = range(10)
#ims = ag.io.load_mnist('training', selection=slice(0 + 3000 * training_seed, TRAIN_SAMPLES + 3000 * training_seed), return_labels=False)
ims = mnist_data['training_image'][0 + 1000 * training_seed : TRAIN_SAMPLES + 1000 * training_seed]
ims_label = mnist_data['training_label'][0 + 1000 * training_seed : TRAIN_SAMPLES + 1000 * training_seed]
validation_ims = mnist_data['training_image'][10000:12000]
validation_label = mnist_data['training_label'][10000:12000]
#print(net.sizes(X[[0]]))
print(ims.shape)
start0 = time.time()
net.train(ims)
end0 = time.time()
N = 1000
sup_ims = []
sup_labels = []
# Load supervised training data
for d in digits:
if N is None:
ims0 = ims[ims_label == d]
else:
#ims0 = ag.io.load_mnist('training', [d], selection=slice(N*training_seed, N*(1+training_seed)), return_labels=False)
ims0 = ims[ims_label == d]
sup_ims.append(ims0)
sup_labels.append(d * np.ones(len(ims0), dtype=np.int64))
sup_ims = np.concatenate(sup_ims, axis=0)
sup_labels = np.concatenate(sup_labels, axis=0)
#print('labels', np.bincount(sup_labels, minlength=10))
start1 = time.time()
net.train(sup_ims, sup_labels)
end1 = time.time()
#print("Now testing...")
### Test ######################################################################
corrects = 0
total = 0
test_ims, test_labels = mnist_data['test_image'], mnist_data['test_label']
test_ims = validation_ims
test_labels = validation_label
# TEMP
if 0:
test_ims = test_ims[:1000]
test_labels = test_labels[:1000]
#with gv.Timer("Split to batches"):
ims_batches = np.array_split(test_ims, 200)
labels_batches = np.array_split(test_labels, 200)
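        # Illustrative sizes (assuming the full [10000:12000] slice is available):
        # the 2000 validation images are split into 200 batches of 10, and each
        # (images, labels) batch is classified by ``test`` via
        # ``pnet.parallel.starmap`` below.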
def format_error_rate(pr):
return "{:.2f}%".format(100*(1-pr))
#import gv
#with gv.Timer('Testing'):
start2 = time.time()
args = (tup+(net,) for tup in itr.izip(ims_batches, labels_batches))
for i, res in enumerate(pnet.parallel.starmap(test, args)):
corrects += res.sum()
total += res.size
pr = corrects / total
end2 = time.time()
error_rate = 1.0 - pr
num_parts = 0#net.layers[1].num_parts
error_rates.append(error_rate)
print(training_seed, 'error rate', error_rate * 100, 'num parts', num_parts)#, 'num parts 2', net.layers[3].num_parts)
unsup_training_times.append(end0 - start0)
sup_training_times.append(end1 - start1)
testing_times.append(end2 - start2)
#print('times', end0-start0, end1-start1, end2-start2)
all_num_parts.append(num_parts)
#vz.section('MNIST')
#gv.img.save_image(vz.generate_filename(), test_ims[0])
#gv.img.save_image(vz.generate_filename(), test_ims[1])
#gv.img.save_image(vz.generate_filename(), test_ims[2])
# Vz
#net.infoplot(vz)
#vz.flush()
net.save('tmp{}.npy'.format(training_seed))
print(r"{ppl} & {depth} & {num_parts} & {unsup_time:.1f} & {test_time:.1f} & ${rate:.2f} \pm {std:.2f}$ \\".format(
ppl=2,
depth=maxdepth,
num_parts=r'${:.0f} \pm {:.0f}$'.format(np.mean(all_num_parts), np.std(all_num_parts)),
unsup_time=np.median(unsup_training_times) / 60,
#sup_time=np.median(sup_training_times),
test_time=np.median(testing_times) / 60,
rate=100*np.mean(error_rates),
std=100*np.std(error_rates)))
print(r"{ppl} {depth} {num_parts} {unsup_time} {test_time} {rate} {std}".format(
ppl=2,
depth=maxdepth,
num_parts=r'${:.0f} \pm {:.0f}$'.format(np.mean(all_num_parts), np.std(all_num_parts)),
unsup_time=np.median(unsup_training_times) / 60,
#sup_time=np.median(sup_training_times),
test_time=np.median(testing_times) / 60,
rate=100*np.mean(error_rates),
std=100*np.std(error_rates)))
#np.savez('gdata2-{}-{}-{}.npz'.format(maxdepth, split_criterion, split_entropy), all_num_parts=all_num_parts, unsup_time=unsup_training_times, test_time=testing_times, rates=error_rates)
print('mean error rate', np.mean(error_rates) * 100)
#net.save(args.model)
| amitgroup/parts-net | scripts/train_and_test5.py | Python | bsd-3-clause | 12,277 |
# proxy module
from pyface.qt.QtScript import *
| enthought/etsproxy | enthought/qt/QtScript.py | Python | bsd-3-clause | 48 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/frames/tree_reduce.py
# Helper module to "Reduce" tree using frames
# @see Ostap::DataFrame
# @see ROOT::RDataFrame
# @author Vanya BELYAEV [email protected]
# @date 2018-06-16
# =============================================================================
"""Helper module to ``reduce'' tree using frames
- see Ostap.DataFrame
- see ROOT.ROOT.RDataFrame
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "Vanya BELYAEV [email protected]"
__date__ = "2011-06-07"
__all__ = (
'ReduceTree' ,
'reduce' ,
)
# =============================================================================
import ROOT, os
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger( 'ostap.frames.tree_reduce' )
else : logger = getLogger( __name__ )
# =============================================================================
logger.debug ( "``Reduce'' TTree using ROOT::RDataFrame object")
# =============================================================================
import ostap.trees.trees
from ostap.core.core import cpp, Ostap
from ostap.utils.cleanup import CleanUp
# =============================================================================
## @class ReduceTree
#  Reduce TTree object using an intermediate (temporary) file
# @code
# tree = ...
# r = ReduceTree ( tree , cuts , [ 'px', 'py', 'pz' ] , 'new_file.root' )
# reduced = t.tree
# @endcode
class ReduceTree(CleanUp):
"""Reduce ROOT.TTree object
>>> tree = ...
    >>> r = ReduceTree ( tree , cuts , [ 'px', 'py', 'pz' ] )
>>> reduced = r.tree
"""
def __init__ ( self ,
chain , ## input TChain/TTree
selection = {} , ## selection/cuts
save_vars = () , ## list of variables to save
new_vars = {} , ## new variables
no_vars = () , ## exclude these variables
##
output = '' , ## output file name
name = '' , ## the name
                   addselvars = False , ## add variables from selections?
tmp_keep = False , ## keep the temporary file
silent = False ): ## silent processing
from ostap.frames.frames import DataFrame
frame = DataFrame ( chain )
report = None
self.__frame_main = frame
if not silent :
pbar = frame.ProgressBar ( len ( chain ) )
nvars = []
## new variables
for nv in new_vars :
frame = frame.Define ( nv , new_vars [ nv] )
nvars.append ( nv )
from ostap.core.ostap_types import ( string_types ,
listlike_types ,
dictlike_types )
cut_types = string_types + ( ROOT.TCut , )
Lmax = 30
selections = []
if selection and isinstance ( selection , cut_types ) :
ss = str ( selection ).strip()
if len ( ss ) < Lmax : filter_name = ss
else : filter_name = 'SELECTION'
frame = frame.Filter ( ss , filter_name )
selections.append ( ss )
elif selection and isinstance ( selection , dictlike_types ) :
for filter_name in selection :
s = selection [ filter_name ]
assert isinstance ( s , cut_types ),\
'Invalid selection type %s/%s' % ( s , type ( s ) )
ss = str ( s ).strip()
frame = frame.Filter ( ss , str ( filter_name ) )
selections.append ( ss )
elif selection and isinstance ( selection , listlike_types ) :
for i , s in enumerate ( selection ) :
assert isinstance ( s , cut_types ),\
'Invalid selection type %s/%s' % ( s , type ( s ) )
ss = str( s ).strip()
##
if len ( ss ) < Lmax : filter_name = ss
else : filter_name = 'SELECTION%d' % i
#
frame = frame.Filter ( ss , filter_name )
selections.append ( ss )
elif selection :
raise TypeError('Invalid selection type %s/%s' % ( selection , type ( selection ) ) )
if not output :
output = self.tempfile ( prefix = 'ostap-frame-' , suffix = '.root' )
## logger.debug ( 'ReduceTree: output file is %s' % output )
if not tmp_keep : self.trash.add ( output )
## if selections : report = frame.Report()
if selections and addselvars :
bvars = chain.the_variables ( selections )
save_vars = list ( bvars ) + [ v for v in save_vars if not v in bvars ]
save_vars = tuple ( save_vars )
## exclude some variables
if no_vars and not save_vars :
bvars = list ( chain.branches () )
all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ]
save_vars = tuple ( [ v for v in all_vars if not v in no_vars ] )
elif no_vars :
bvars = chain.the_variables ( *save_vars )
all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ]
save_vars = tuple ( [ v for v in all_vars if not v in no_vars ] )
nb_ = len ( chain.branches () )
ne_ = len ( chain )
## chain name:
## FIXME!
# cname = chain.GetName() ## produces ROOT error
if not name :
_ , _ , cname = chain.GetName().rpartition ( '/' )
name = '%s_reduced' % cname
self.__name = name
if not save_vars :
snapshot = frame.Snapshot ( name , output )
else :
bvars = chain.the_variables ( *save_vars )
all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ]
from ostap.core.core import strings as _strings
all_vars = _strings ( all_vars )
snapshot = frame.Snapshot ( name , output , all_vars )
assert os.path.exists ( output ) and\
               os.path.isfile ( output ) , 'Invalid file %s' % output
self.__chain = ROOT.TChain ( name )
self.__chain.Add ( output )
self.__output = output
self.__report = 'Tree -> Frame -> Tree filter/transformation'
self.__table = []
if report :
from ostap.frames.frames import report_print, report_as_table
title = self.__report
self.__report += '\n%s' % report_print ( report , title , '# ')
self.__table = report_as_table ( report )
fs = os.path.getsize ( self.__output )
gb , r = divmod ( fs , 1024 * 1024 * 1024 )
mb , r = divmod ( r , 1024 * 1024 )
kb , r = divmod ( r , 1024 )
if gb : fs = '%.1fGB' % ( float ( fs ) / 1024 / 1024 / 1024 )
elif mb : fs = '%.1fMB' % ( float ( fs ) / 1024 / 1024 )
elif kb : fs = '%.1fkB' % ( float ( fs ) / 1024 )
else : fs = '%sB' % fs
nb = len ( self.__chain.branches () )
ne = len ( self.__chain )
self.__report += '\n# Reduce %d -> %d branches, %d -> %d entries' % ( nb_ , nb , ne_ , ne )
self.__report += '\n# Output:%s size:%s' % ( self.__output , fs )
self.__report += '\n# %s' % str ( self.__chain )
del self.__frame_main
def __str__ ( self ) : return self.__report
def __repr__ ( self ) : return self.__report
@property
def output ( self ) :
"""``output'' : the output file name"""
return self.__output
@property
def chain ( self ) :
"""``chain'': the reduced chain/tree (same as tree)"""
return self.__chain
@property
def name ( self ) :
"""``name'' : the output chain name"""
return self.__name
@property
def tree ( self ) :
"""``tree'': the reduced chain/tree (same as chain)"""
return self.__chain
@property
def table ( self ) :
"""``table'' : get the statitics as table"""
return self.__table
@property
def report ( self ) :
"""``report'' : get the statitics report"""
return self.__report
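# =============================================================================
# Illustrative sketch (not part of the original module): besides the single
# string/TCut shown in the class docstring, the ``selection`` argument of
# ReduceTree also accepts a dictionary of named cuts or a list of cuts.
# The variable and file names below are placeholders.
#
#     r = ReduceTree ( tree ,
#                      selection = { 'PT-cut' : 'pt > 1' ,
#                                    'Q-cut'  : 'q  > 0' } ,
#                      save_vars = ( 'pt' , 'q' , 'mass' ) ,
#                      new_vars  = { 'pt2' : 'pt*pt' }     ,
#                      output    = 'reduced.root'          )
#     reduced = r.tree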
# ===============================================================================
## Powerful method to reduce/transform the tree/chain.
#  It relies on Ostap.DataFrame ( alias for ROOT.ROOT.RDataFrame) and allows
#  - filter entries from TTree/TChain
#  - add new columns
#  - remove unnecessary columns
# @code
# tree = ....
# reduced1 = tree.reduce ( 'pt>1' )
# reduced2 = tree.reduce ( 'pt>1' , save_vars = [ 'p', 'pt' ,'q' ] )
# reduced3 = tree.reduce ( 'pt>1' , no_vars = [ 'Q', 'z' ,'x' ] )
# reduced4 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } )
# reduced5 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } , output = 'OUTPUT.root' )
# @endcode
# @see Ostap::DataFrame
# @see ROOT::RDataFrame
def reduce ( tree ,
selection ,
save_vars = () ,
new_vars = {} ,
no_vars = () ,
output = '' ,
name = '' ,
addselvars = False ,
silent = False ) :
""" Powerful method to reduce/tranform the tree/chain.
It relies on Ostap.DataFrame ( alias for ROOT.ROOT.DataFrame) and allows
- filter entries from TTree/TChain
- add new colums
- remove unnesessary columns
>>> tree = ....
>>> reduced1 = tree.reduce ( 'pt>1' )
    >>> reduced2 = tree.reduce ( 'pt>1' , save_vars = [ 'p', 'pt' ,'q' ] )
>>> reduced3 = tree.reduce ( 'pt>1' , no_vars = [ 'Q', 'z' ,'x' ] )
>>> reduced4 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } )
>>> reduced5 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } , output = 'OUTPUT.root' )
"""
nb0 = len ( tree.branches() )
ne0 = len ( tree )
reduced = ReduceTree ( tree ,
selection = selection ,
save_vars = save_vars ,
new_vars = new_vars ,
no_vars = no_vars ,
output = output ,
name = name ,
addselvars = addselvars ,
tmp_keep = True ,
silent = silent )
from ostap.trees.trees import Chain
result = Chain ( reduced.chain )
if not output : result.trash.add ( reduced.output )
if silent :
nb = len ( result.chain.branches() )
ne = len ( result.chain )
f = float ( nb0 * ne0 ) / ( nb * ne )
logger.info ( 'reduce: (%dx%d) -> (%dx%d) %.1f (branches x entries) ' % ( nb0 , ne0 , nb , ne , f ) )
return result
ROOT.TTree. reduce = reduce
# =============================================================================
_decorated_classes_ = (
ROOT.TTree ,
)
_new_methods_ = (
ROOT.TTree.reduce ,
)
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
# The END
# =============================================================================
| OstapHEP/ostap | ostap/frames/tree_reduce.py | Python | bsd-3-clause | 12,435 |
"""
Tests for values coercion in setitem-like operations on DataFrame.
For the most part, these should be multi-column DataFrames, otherwise
we would share the tests with Series.
"""
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
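# A minimal illustration of the coercion behavior these tests exercise (the
# exact resulting dtypes are version-dependent; the outcomes below are
# indicative only):
#
#     df = DataFrame({"A": [1, 2, 3]})   # int64 column
#     df.loc[1, "A"] = 1.5               # may upcast "A" to float64
#     df.loc[2, "A"] = "x"               # may upcast "A" to object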
class TestDataFrameSetitemCoercion:
@pytest.mark.xfail(reason="Unnecessary cast.")
@pytest.mark.parametrize("consolidate", [True, False])
def test_loc_setitem_multiindex_columns(self, consolidate):
# GH#18415 Setting values in a single column preserves dtype,
# while setting them in multiple columns did unwanted cast.
# Note that A here has 2 blocks, below we do the same thing
# with a consolidated frame.
A = DataFrame(np.zeros((6, 5), dtype=np.float32))
A = pd.concat([A, A], axis=1, keys=[1, 2])
if consolidate:
A = A._consolidate()
A.loc[2:3, (1, slice(2, 3))] = np.ones((2, 2), dtype=np.float32)
assert (A.dtypes == np.float32).all()
A.loc[0:5, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32)
assert (A.dtypes == np.float32).all()
A.loc[:, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32)
assert (A.dtypes == np.float32).all()
# TODO: i think this isn't about MultiIndex and could be done with iloc?
def test_37477():
# fixed by GH#45121
orig = DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
expected = DataFrame({"A": [1, 2, 3], "B": [3, 1.2, 5]})
df = orig.copy()
df.at[1, "B"] = 1.2
tm.assert_frame_equal(df, expected)
df = orig.copy()
df.loc[1, "B"] = 1.2
tm.assert_frame_equal(df, expected)
df = orig.copy()
df.iat[1, 1] = 1.2
tm.assert_frame_equal(df, expected)
df = orig.copy()
df.iloc[1, 1] = 1.2
tm.assert_frame_equal(df, expected)
def test_6942(indexer_al):
# check that the .at __setitem__ after setting "Live" actually sets the data
start = Timestamp("2014-04-01")
t1 = Timestamp("2014-04-23 12:42:38.883082")
t2 = Timestamp("2014-04-24 01:33:30.040039")
dti = date_range(start, periods=1)
orig = DataFrame(index=dti, columns=["timenow", "Live"])
df = orig.copy()
indexer_al(df)[start, "timenow"] = t1
df["Live"] = True
df.at[start, "timenow"] = t2
assert df.iloc[0, 0] == t2
def test_26395(indexer_al):
# .at case fixed by GH#45121 (best guess)
df = DataFrame(index=["A", "B", "C"])
df["D"] = 0
indexer_al(df)["C", "D"] = 2
expected = DataFrame({"D": [0, 0, 2]}, index=["A", "B", "C"], dtype=np.int64)
tm.assert_frame_equal(df, expected)
indexer_al(df)["C", "D"] = 44.5
expected = DataFrame({"D": [0, 0, 44.5]}, index=["A", "B", "C"], dtype=np.float64)
tm.assert_frame_equal(df, expected)
indexer_al(df)["C", "D"] = "hello"
expected = DataFrame({"D": [0, 0, "hello"]}, index=["A", "B", "C"], dtype=object)
tm.assert_frame_equal(df, expected)
@pytest.mark.xfail(reason="unwanted upcast")
def test_15231():
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
df.loc[2] = Series({"a": 5, "b": 6})
assert (df.dtypes == np.int64).all()
df.loc[3] = Series({"a": 7})
# df["a"] doesn't have any NaNs, should not have been cast
exp_dtypes = Series([np.int64, np.float64], dtype=object, index=["a", "b"])
tm.assert_series_equal(df.dtypes, exp_dtypes)
@pytest.mark.xfail(reason="Unnecessarily upcasts to float64")
def test_iloc_setitem_unnecesssary_float_upcasting():
# GH#12255
df = DataFrame(
{
0: np.array([1, 3], dtype=np.float32),
1: np.array([2, 4], dtype=np.float32),
2: ["a", "b"],
}
)
orig = df.copy()
values = df[0].values.reshape(2, 1)
df.iloc[:, 0:1] = values
tm.assert_frame_equal(df, orig)
@pytest.mark.xfail(reason="unwanted casting to dt64")
def test_12499():
    # TODO: OP in GH#12499 used np.datetime64("NaT") instead of pd.NaT,
# which has consequences for the expected df["two"] (though i think at
# the time it might not have because of a separate bug). See if it makes
# a difference which one we use here.
ts = Timestamp("2016-03-01 03:13:22.98986", tz="UTC")
data = [{"one": 0, "two": ts}]
orig = DataFrame(data)
df = orig.copy()
df.loc[1] = [np.nan, NaT]
expected = DataFrame(
{"one": [0, np.nan], "two": Series([ts, NaT], dtype="datetime64[ns, UTC]")}
)
tm.assert_frame_equal(df, expected)
data = [{"one": 0, "two": ts}]
df = orig.copy()
df.loc[1, :] = [np.nan, NaT]
tm.assert_frame_equal(df, expected)
@pytest.mark.xfail(reason="Too many columns cast to float64")
def test_20476():
mi = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]])
df = DataFrame(-1, index=range(3), columns=mi)
filler = DataFrame([[1, 2, 3.0]] * 3, index=range(3), columns=["a", "b", "c"])
df["A"] = filler
expected = DataFrame(
{
0: [1, 1, 1],
1: [2, 2, 2],
2: [3.0, 3.0, 3.0],
3: [-1, -1, -1],
4: [-1, -1, -1],
5: [-1, -1, -1],
}
)
expected.columns = mi
exp_dtypes = Series(
[np.dtype(np.int64)] * 2 + [np.dtype(np.float64)] + [np.dtype(np.int64)] * 3,
index=mi,
)
tm.assert_series_equal(df.dtypes, exp_dtypes)
| pandas-dev/pandas | pandas/tests/frame/indexing/test_coercion.py | Python | bsd-3-clause | 5,463 |
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from selection.algorithms.lasso import instance
from selection.algorithms.forward_step import forward_stepwise, info_crit_stop, sequential, data_carving_IC
def test_FS(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y, covariance=0.5**2 * np.identity(n))
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3)
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
print FS.model_quadratic(3)
def test_FS_unknown(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y)
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for last variable of 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
def test_subset(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
subset = np.ones(n, np.bool)
subset[-10:] = 0
FS = forward_stepwise(X, Y, subset=subset,
covariance=0.5**2 * np.identity(n))
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for last variable of 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3, saturated=True)
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
FS = forward_stepwise(X, Y, subset=subset)
for i in range(k):
FS.next()
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
def test_BIC(k=10, do_sample=True):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = info_crit_stop(Y, X, 0.5, cost=np.log(n))
final_model = len(FS.variables) - 1
if do_sample:
return [p[-1] for p in FS.model_pivots(final_model, saturated=False, burnin=5000, ndraw=5000)]
else:
saturated_pivots = FS.model_pivots(final_model)
return [p[-1] for p in saturated_pivots]
def test_sequential(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
print sequential(X, Y, sigma=0.5, saturated=True)[1]
print sequential(X, Y, sigma=0.5, saturated=False, ndraw=5000, burnin=5000)[1]
print sequential(X, Y, saturated=False, ndraw=5000, burnin=5000)[1]
# now use a subset of cases
subset = np.ones(n, np.bool)
subset[-10:] = 0
print sequential(X, Y, sigma=0.5, saturated=False, ndraw=5000, burnin=5000,
subset=subset)[1]
print sequential(X, Y, saturated=False, ndraw=5000, burnin=5000, subset=subset)[1]
def simulate_null(saturated=True):
n, p = 100, 40
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y, covariance=0.5**2 * np.identity(n))
for i in range(5):
FS.next()
return [p[-1] for p in FS.model_pivots(3, saturated=saturated,
use_new=False)]
def test_ecdf(nsim=1000, BIC=False,
saturated=True):
P = []
for _ in range(nsim):
if not BIC:
P.extend(simulate_null(saturated=saturated))
else:
P.extend(test_BIC(do_sample=True))
P = np.array(P)
ecdf = sm.distributions.ECDF(P)
plt.clf()
plt.plot(ecdf.x, ecdf.y, linewidth=4, color='black')
plt.show()
def test_data_carving_IC(n=100,
p=200,
s=7,
sigma=5,
rho=0.3,
snr=7.,
split_frac=0.9,
ndraw=5000,
burnin=1000,
df=np.inf,
coverage=0.90,
compute_intervals=False):
counter = 0
while True:
counter += 1
X, y, beta, active, sigma = instance(n=n,
p=p,
s=s,
sigma=sigma,
rho=rho,
snr=snr,
df=df)
mu = np.dot(X, beta)
splitn = int(n*split_frac)
indices = np.arange(n)
np.random.shuffle(indices)
stage_one = indices[:splitn]
FS = info_crit_stop(y, X, sigma, cost=np.log(n), subset=stage_one)
if set(range(s)).issubset(FS.active):
results, FS = data_carving_IC(y, X, sigma,
stage_one=stage_one,
splitting=True,
ndraw=ndraw,
burnin=burnin,
coverage=coverage,
compute_intervals=compute_intervals,
cost=np.log(n))
carve = [r[1] for r in results]
split = [r[3] for r in results]
Xa = X[:,FS.variables[:-1]]
truth = np.dot(np.linalg.pinv(Xa), mu)
split_coverage = []
carve_coverage = []
for result, t in zip(results, truth):
_, _, ci, _, si = result
carve_coverage.append((ci[0] < t) * (t < ci[1]))
split_coverage.append((si[0] < t) * (t < si[1]))
return ([carve[j] for j, i in enumerate(FS.active) if i >= s],
[split[j] for j, i in enumerate(FS.active) if i >= s],
[carve[j] for j, i in enumerate(FS.active) if i < s],
[split[j] for j, i in enumerate(FS.active) if i < s],
counter, carve_coverage, split_coverage)
def test_full_pvals(n=100, p=40, rho=0.3, snr=4):
X, y, beta, active, sigma = instance(n=n, p=p, snr=snr, rho=rho)
FS = forward_stepwise(X, y, covariance=sigma**2 * np.identity(n))
from scipy.stats import norm as ndist
pval = []
completed_yet = False
for i in range(min(n, p)):
FS.next()
var_select, pval_select = FS.model_pivots(i+1, alternative='twosided',
which_var=[FS.variables[-1]],
saturated=False,
burnin=2000,
ndraw=8000)[0]
pval_saturated = FS.model_pivots(i+1, alternative='twosided',
which_var=[FS.variables[-1]],
saturated=True)[0][1]
# now, nominal ones
LSfunc = np.linalg.pinv(FS.X[:,FS.variables])
Z = np.dot(LSfunc[-1], FS.Y) / (np.linalg.norm(LSfunc[-1]) * sigma)
pval_nominal = 2 * ndist.sf(np.fabs(Z))
pval.append((var_select, pval_select, pval_saturated, pval_nominal))
if set(active).issubset(np.array(pval)[:,0]) and not completed_yet:
completed_yet = True
completion_index = i + 1
return X, y, beta, active, sigma, np.array(pval), completion_index
| stefanv/selective-inference | selection/algorithms/tests/test_forward_step.py | Python | bsd-3-clause | 8,482 |
# proxy module
from __future__ import absolute_import
from codetools.blocks.analysis import *
| enthought/etsproxy | enthought/blocks/analysis.py | Python | bsd-3-clause | 94 |
from django.conf.urls.defaults import patterns, url
from snippets.base import views
urlpatterns = patterns('',
url(r'^$', views.index, name='base.index'),
url(r'^(?P<startpage_version>[^/]+)/(?P<name>[^/]+)/(?P<version>[^/]+)/'
'(?P<appbuildid>[^/]+)/(?P<build_target>[^/]+)/(?P<locale>[^/]+)/'
'(?P<channel>[^/]+)/(?P<os_version>[^/]+)/(?P<distribution>[^/]+)/'
'(?P<distribution_version>[^/]+)/$', views.fetch_snippets,
name='view_snippets'),
url(r'^admin/base/snippet/preview/', views.preview_empty,
name='base.admin.preview_empty'),
url(r'^admin/base/snippet/(\d+)/preview/', views.preview_snippet,
name='base.admin.preview_snippet'),
url(r'^admin/base/snippettemplate/(\d+)/variables/',
views.admin_template_json, name='base.admin.template_json'),
)
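# Illustrative only: a request path matching the ``view_snippets`` pattern above
# (all values below are made up) would look like
#
#     /1/Firefox/24.0/20130910160258/Darwin_Universal-gcc3/en-US/release/10.8/default/default_version/
#
# i.e. startpage_version/name/version/appbuildid/build_target/locale/channel/
# os_version/distribution/distribution_version, each captured as a keyword
# argument passed to ``views.fetch_snippets``.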
| Osmose/snippets-service-prototype | snippets/base/urls.py | Python | bsd-3-clause | 836 |
import os
import numpy as np
import tables
import galry.pyplot as plt
from galry import Visual, process_coordinates, get_next_color, get_color
from qtools import inthread
MAXSIZE = 5000
CHANNEL_HEIGHT = .25
class MultiChannelVisual(Visual):
def initialize(self, x=None, y=None, color=None, point_size=1.0,
position=None, nprimitives=None, index=None,
color_array_index=None, channel_height=CHANNEL_HEIGHT,
options=None, autocolor=None):
position, shape = process_coordinates(x=x, y=y)
# register the size of the data
self.size = np.prod(shape)
# there is one plot per row
if not nprimitives:
nprimitives = shape[0]
nsamples = shape[1]
else:
nsamples = self.size // nprimitives
# register the bounds
if nsamples <= 1:
self.bounds = [0, self.size]
else:
self.bounds = np.arange(0, self.size + 1, nsamples)
# automatic color with color map
if autocolor is not None:
if nprimitives <= 1:
color = get_next_color(autocolor)
else:
color = np.array([get_next_color(i + autocolor) for i in xrange(nprimitives)])
# set position attribute
self.add_attribute("position0", ndim=2, data=position, autonormalizable=True)
index = np.array(index)
self.add_index("index", data=index)
if color_array_index is None:
color_array_index = np.repeat(np.arange(nprimitives), nsamples)
color_array_index = np.array(color_array_index)
ncolors = color.shape[0]
ncomponents = color.shape[1]
color = color.reshape((1, ncolors, ncomponents))
dx = 1. / ncolors
offset = dx / 2.
self.add_texture('colormap', ncomponents=ncomponents, ndim=1, data=color)
self.add_attribute('index', ndim=1, vartype='int', data=color_array_index)
self.add_varying('vindex', vartype='int', ndim=1)
self.add_uniform('nchannels', vartype='float', ndim=1, data=float(nprimitives))
self.add_uniform('channel_height', vartype='float', ndim=1, data=channel_height)
self.add_vertex_main("""
vec2 position = position0;
position.y = channel_height * position.y + .9 * (2 * index - (nchannels - 1)) / (nchannels - 1);
vindex = index;
""")
self.add_fragment_main("""
float coord = %.5f + vindex * %.5f;
vec4 color = texture1D(colormap, coord);
out_color = color;
""" % (offset, dx))
# add point size uniform (when it's not specified, there might be some
# bugs where its value is obtained from other datasets...)
self.add_uniform("point_size", data=point_size)
self.add_vertex_main("""gl_PointSize = point_size;""")
def get_view(total_size, xlim, freq):
"""Return the slice of the data.
Arguments:
* xlim: (x0, x1) of the window currently displayed.
"""
# Viewport.
x0, x1 = xlim
d = x1 - x0
dmax = duration
zoom = max(dmax / d, 1)
view_size = total_size / zoom
step = int(np.ceil(view_size / MAXSIZE))
# Extended viewport for data.
x0ex = np.clip(x0 - 3 * d, 0, dmax)
x1ex = np.clip(x1 + 3 * d, 0, dmax)
i0 = np.clip(int(np.round(x0ex * freq)), 0, total_size)
i1 = np.clip(int(np.round(x1ex * freq)), 0, total_size)
return (x0ex, x1ex), slice(i0, i1, step)
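# Worked example for get_view (illustrative numbers, assuming freq = 20000 Hz
# and a 60 s recording, i.e. total_size = 1200000 samples): a view of
# xlim = (10., 11.) gives d = 1 s, zoom = 60, view_size = 20000 samples and
# step = ceil(20000 / MAXSIZE) = 4; the extended viewport is (7., 14.), so the
# returned slice is approximately slice(140000, 280000, 4).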
def get_undersampled_data(data, xlim, slice):
"""
Arguments:
* data: a HDF5 dataset of size Nsamples x Nchannels.
* xlim: (x0, x1) of the current data view.
"""
# total_size = data.shape[0]
# Get the view slice.
# x0ex, x1ex = xlim
# x0d, x1d = x0ex / (duration_initial) * 2 - 1, x1ex / (duration_initial) * 2 - 1
# Extract the samples from the data (HDD access).
samples = data[slice, :]
# Convert the data into floating points.
samples = np.array(samples, dtype=np.float32)
# Normalize the data.
samples *= (1. / 65535)
# samples *= .25
# Size of the slice.
nsamples, nchannels = samples.shape
# Create the data array for the plot visual.
M = np.empty((nsamples * nchannels, 2))
samples = samples.T# + np.linspace(-1., 1., nchannels).reshape((-1, 1))
M[:, 1] = samples.ravel()
# Generate the x coordinates.
x = np.arange(slice.start, slice.stop, slice.step) / float(total_size - 1)
# [0, 1] -> [-1, 2*duration.duration_initial - 1]
x = x * 2 * duration / duration_initial - 1
M[:, 0] = np.tile(x, nchannels)
# Update the bounds.
bounds = np.arange(nchannels + 1) * nsamples
size = bounds[-1]
return M, bounds, size
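# Shape bookkeeping for get_undersampled_data (illustrative): if the slice
# yields nsamples = 1000 rows over nchannels = 32 channels, M has shape
# (32000, 2) with channel 0 in rows 0..999, channel 1 in rows 1000..1999, etc.,
# and bounds = [0, 1000, 2000, ..., 32000] with size = 32000.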
@inthread
class DataUpdater(object):
info = {}
def update(self, data, xlimex, slice):
samples, bounds, size = get_undersampled_data(data, xlimex, slice)
nsamples = samples.shape[0]
color_array_index = np.repeat(np.arange(nchannels), nsamples / nchannels)
self.info = dict(position0=samples, bounds=bounds, size=size,
index=color_array_index)
dir = os.path.dirname(os.path.abspath(__file__))
try:
filename = r"test_data/n6mab031109.h5"
f = tables.openFile(os.path.join(dir, filename))
except:
filename = r"test_data/n6mab031109.trim.h5"
f = tables.openFile(os.path.join(dir, filename))
try:
data = f.root.RawData
except:
data = f.root.raw_data
nsamples, nchannels = data.shape
total_size = nsamples
freq = 20000.
dt = 1. / freq
duration = (data.shape[0] - 1) * dt
duration_initial = 5.
x = np.tile(np.linspace(0., duration, nsamples // MAXSIZE), (nchannels, 1))
y = np.zeros_like(x)+ np.linspace(-.9, .9, nchannels).reshape((-1, 1))
plt.figure(toolbar=False, show_grid=True)
plt.visual(MultiChannelVisual, x=x, y=y)
updater = DataUpdater(impatient=True)
SLICE = None
def change_channel_height(figure, parameter):
global CHANNEL_HEIGHT
CHANNEL_HEIGHT *= (1 + parameter)
figure.set_data(channel_height=CHANNEL_HEIGHT)
def pan(figure, parameter):
figure.process_interaction('Pan', parameter)
def anim(figure, parameter):
# Constrain the zoom.
nav = figure.get_processor('navigation')
nav.constrain_navigation = True
nav.xmin = -1
nav.xmax = 2 * duration / duration_initial
nav.sxmin = 1.
zoom = nav.sx
box = nav.get_viewbox()
xlim = ((box[0] + 1) / 2. * (duration_initial), (box[2] + 1) / 2. * (duration_initial))
xlimex, slice = get_view(data.shape[0], xlim, freq)
# Paging system.
dur = xlim[1] - xlim[0]
index = int(np.floor(xlim[0] / dur))
zoom_index = int(np.round(duration_initial / dur))
i = (index, zoom_index)
global SLICE
if i != SLICE:
SLICE = i
updater.update(data, xlimex, slice)
if updater.info:
figure.set_data(**updater.info)
updater.info.clear()
plt.animate(anim, dt=.01)
plt.action('Wheel', change_channel_height, key_modifier='Control',
param_getter=lambda p: p['wheel'] * .001)
plt.action('Wheel', pan, key_modifier='Shift',
param_getter=lambda p: (p['wheel'] * .002, 0))
plt.action('DoubleClick', 'ResetZoom')
plt.xlim(0., duration_initial)
plt.show()
f.close()
| rossant/spiky | experimental/ephyview.py | Python | bsd-3-clause | 7,411 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Fare'
db.create_table('gtfs_fare', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gtfs.Source'], null=True)),
('fare_id', self.gf('django.db.models.fields.CharField')(max_length=20, db_index=True)),
('price', self.gf('django.db.models.fields.FloatField')()),
('currency_type', self.gf('django.db.models.fields.CharField')(max_length=3)),
('payment_method', self.gf('django.db.models.fields.IntegerField')()),
('transfers', self.gf('django.db.models.fields.IntegerField')(null=True)),
('transfer_duration', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('gtfs', ['Fare'])
# Adding unique constraint on 'Fare', fields ['source', 'fare_id']
db.create_unique('gtfs_fare', ['source_id', 'fare_id'])
# Adding unique constraint on 'Shape', fields ['source', 'shape_id']
db.create_unique('gtfs_shape', ['source_id', 'shape_id'])
# Adding field 'Zone.source'
db.add_column('gtfs_zone', 'source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gtfs.Source'], null=True), keep_default=False)
# Adding unique constraint on 'Zone', fields ['source', 'zone_id']
db.create_unique('gtfs_zone', ['source_id', 'zone_id'])
# Deleting field 'FareRule.payment_method'
db.delete_column('gtfs_farerule', 'payment_method')
# Deleting field 'FareRule.price'
db.delete_column('gtfs_farerule', 'price')
# Deleting field 'FareRule.currency_type'
db.delete_column('gtfs_farerule', 'currency_type')
# Deleting field 'FareRule.transfer_duration'
db.delete_column('gtfs_farerule', 'transfer_duration')
# Deleting field 'FareRule.transfers'
db.delete_column('gtfs_farerule', 'transfers')
# Deleting field 'FareRule.farerule_id'
db.delete_column('gtfs_farerule', 'farerule_id')
# Deleting field 'FareRule.agency'
db.delete_column('gtfs_farerule', 'agency_id')
# Adding field 'FareRule.fare'
db.add_column('gtfs_farerule', 'fare', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['gtfs.Fare']), keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'Zone', fields ['source', 'zone_id']
db.delete_unique('gtfs_zone', ['source_id', 'zone_id'])
# Removing unique constraint on 'Shape', fields ['source', 'shape_id']
db.delete_unique('gtfs_shape', ['source_id', 'shape_id'])
# Removing unique constraint on 'Fare', fields ['source', 'fare_id']
db.delete_unique('gtfs_fare', ['source_id', 'fare_id'])
# Deleting model 'Fare'
db.delete_table('gtfs_fare')
# Deleting field 'Zone.source'
db.delete_column('gtfs_zone', 'source_id')
# User chose to not deal with backwards NULL issues for 'FareRule.payment_method'
raise RuntimeError("Cannot reverse this migration. 'FareRule.payment_method' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'FareRule.price'
raise RuntimeError("Cannot reverse this migration. 'FareRule.price' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'FareRule.currency_type'
raise RuntimeError("Cannot reverse this migration. 'FareRule.currency_type' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'FareRule.transfer_duration'
raise RuntimeError("Cannot reverse this migration. 'FareRule.transfer_duration' and its values cannot be restored.")
# Adding field 'FareRule.transfers'
db.add_column('gtfs_farerule', 'transfers', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False)
# User chose to not deal with backwards NULL issues for 'FareRule.farerule_id'
raise RuntimeError("Cannot reverse this migration. 'FareRule.farerule_id' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'FareRule.agency'
raise RuntimeError("Cannot reverse this migration. 'FareRule.agency' and its values cannot be restored.")
# Deleting field 'FareRule.fare'
db.delete_column('gtfs_farerule', 'fare_id')
models = {
'gtfs.agency': {
'Meta': {'unique_together': "(('source', 'agency_id'),)", 'object_name': 'Agency'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.TextField', [], {}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'gtfs.block': {
'Meta': {'unique_together': "(('source', 'block_id'),)", 'object_name': 'Block'},
'block_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.calendar': {
'Meta': {'object_name': 'Calendar'},
'end_date': ('django.db.models.fields.DateField', [], {}),
'friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'service': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gtfs.Service']", 'unique': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gtfs.calendardate': {
'Meta': {'object_name': 'CalendarDate'},
'date': ('django.db.models.fields.DateField', [], {}),
'exception_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Service']"})
},
'gtfs.fare': {
'Meta': {'unique_together': "(('source', 'fare_id'),)", 'object_name': 'Fare'},
'currency_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'fare_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method': ('django.db.models.fields.IntegerField', [], {}),
'price': ('django.db.models.fields.FloatField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'transfer_duration': ('django.db.models.fields.IntegerField', [], {}),
'transfers': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'gtfs.farerule': {
'Meta': {'object_name': 'FareRule'},
'contains': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_contains'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'destination': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_destinations'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'fare': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Fare']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_origins'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Route']", 'null': 'True'})
},
'gtfs.frequency': {
'Meta': {'object_name': 'Frequency'},
'end_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'end_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'headway_secs': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Trip']"})
},
'gtfs.route': {
'Meta': {'unique_together': "(('agency', 'route_id'),)", 'object_name': 'Route'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Agency']", 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.TextField', [], {}),
'route_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'route_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'text_color': ('django.db.models.fields.TextField', [], {'max_length': '6', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'})
},
'gtfs.service': {
'Meta': {'unique_together': "(('source', 'service_id'),)", 'object_name': 'Service'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.shape': {
'Meta': {'unique_together': "(('source', 'shape_id'),)", 'object_name': 'Shape'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.contrib.gis.db.models.fields.LineStringField', [], {'null': 'True'}),
'shape_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'gtfs.stop': {
'Meta': {'unique_together': "(('source', 'stop_id'),)", 'object_name': 'Stop'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'location_type': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'parent_station': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Stop']", 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'stop_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Zone']", 'null': 'True'})
},
'gtfs.stoptime': {
'Meta': {'object_name': 'StopTime'},
'arrival_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'arrival_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'departure_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'departure_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'drop_off_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickup_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shape_dist_travelled': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Stop']"}),
'stop_headsign': ('django.db.models.fields.TextField', [], {}),
'stop_sequence': ('django.db.models.fields.IntegerField', [], {}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Trip']"})
},
'gtfs.transfer': {
'Meta': {'object_name': 'Transfer'},
'from_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfer_from_stop'", 'to': "orm['gtfs.Stop']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_transfer_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'to_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfer_to_stop'", 'to': "orm['gtfs.Stop']"}),
'transfer_type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'gtfs.trip': {
'Meta': {'unique_together': "(('service', 'trip_id'), ('route', 'trip_id'))", 'object_name': 'Trip'},
'block': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Block']", 'null': 'True'}),
'direction_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'headsign': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Route']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Service']"}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Shape']", 'null': 'True'}),
'short_name': ('django.db.models.fields.TextField', [], {}),
'trip_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'gtfs.zone': {
'Meta': {'unique_together': "(('source', 'zone_id'),)", 'object_name': 'Zone'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'zone_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'})
}
}
complete_apps = ['gtfs']
| rcoup/traveldash | traveldash/gtfs/migrations/0011_auto__add_fare__add_unique_fare_source_fare_id__add_unique_shape_sourc.py | Python | bsd-3-clause | 16,927 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file is the main file used when running tests with pytest directly,
# in particular if running e.g. ``pytest docs/``.
from importlib.util import find_spec
import os
import pkg_resources
import tempfile
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES
except ImportError:
PYTEST_HEADER_MODULES = {}
import astropy
if find_spec('asdf') is not None:
from asdf import __version__ as asdf_version
if asdf_version >= astropy.__minimum_asdf_version__:
entry_points = []
for entry_point in pkg_resources.iter_entry_points('pytest11'):
entry_points.append(entry_point.name)
if "asdf_schema_tester" not in entry_points:
            pytest_plugins = ['asdf.tests.schema_tester']
PYTEST_HEADER_MODULES['Asdf'] = 'asdf'
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
| MSeifert04/astropy | conftest.py | Python | bsd-3-clause | 1,526 |
#!/usr/bin/python
# Code is executed top-to-bottom on load.
# Variables are defined at the first assignment
a = 2 # defines `a`
b = 2
# 'print' operator, simple form: just prints out human-readable representation
# of the argument. NOTE: no \n!
print a + b
# Types in Python are dynamic!
v = 42 # `v` is an integer
print v
v = 0.42 # now it's a float
print v
v = 2**76 # NEW: Loooong integers are supported!
print v
v = 4 + 0.2j # NEW: complex numbers!
print v
v = "almost but not quite entirely unlike tea" # now it's a string
print v
# 'print' operator, full form.
print "%d %.1f %s" % (42, 4.2, "forty two")
# non-optimal equivalent:
print str(42) + " " + str(4.2) + " forty two"
| denfromufa/mipt-course | demos/python/2_variables_and_types.py | Python | bsd-3-clause | 694 |
"""
fabcloudkit
Functions for managing Nginx.
This module provides functions that check for installation, install, and manage an
installation of, Nginx.
/etc/init.d/nginx:
The "init-script" that allows Nginx to be run automatically at system startup.
The existence of this file is verified, but it's assumed that the script is
installed by the package manager that installed Nginx.
/etc/nginx/nginx.conf:
The main or root Nginx configuration file. This file is loaded by Nginx when
it launches. The file contains an include directive that tells Nginx to
load additional configurations from a different directory.
Currently, this code writes a very basic nginx.conf file.
/etc/nginx/conf.d/:
The directory marked by the include directive in the nginx root configuration
file. Individual server configurations are stored in files in this folder.
    /etc/nginx/conf.d/*.conf:
Individual server configuration files.
<deploy_root>/<name>/logs/ngaccess.log, ngerror.log:
Default location of the access (ngaccess.log) and error (ngerror.log) log files
for a specific server configuration. This location can be overridden in the call
to write_server_config().
For more information on Nginx check out: http://nginx.org, http://wiki.nginx.org
:copyright: (c) 2013 by Rick Bohrer.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# standard
import posixpath as path
# pypi
from fabric.operations import run, sudo
# package
from fabcloudkit import cfg, put_string
from ..internal import *
from ..toolbase import Tool, SimpleTool
class NginxTool(Tool):
def __init__(self):
super(NginxTool,self).__init__()
self._simple = SimpleTool.create('nginx')
def check(self, **kwargs):
return self._simple.check()
def install(self, **kwargs):
# install Nginx using the package manager.
self._simple.install()
start_msg('----- Configuring "Nginx":')
# verify that there's an init-script.
result = run('test -f /etc/init.d/nginx')
if result.failed:
raise HaltError('Uh oh. Package manager did not install an Nginx init-script.')
# write nginx.conf file.
dest = path.join(cfg().nginx_conf, 'nginx.conf')
message('Writing "nginx.conf"')
put_string(_NGINX_CONF, dest, use_sudo=True)
# the Amazon Linux AMI uses chkconfig; the init.d script won't do the job by itself.
# set Nginx so it can be managed by chkconfig; and turn on boot startup.
result = run('which chkconfig')
if result.succeeded:
message('System has chkconfig; configuring.')
result = sudo('chkconfig --add nginx')
if result.failed:
raise HaltError('"chkconfig --add nginx" failed.')
result = sudo('chkconfig nginx on')
if result.failed:
raise HaltError('"chkconfig nginx on" failed.')
succeed_msg('Successfully installed and configured "Nginx".')
return self
def write_config(self, name, server_names, proxy_pass, static_locations='', log_root=None, listen=80):
"""
        Writes an Nginx server configuration file.
        This function writes a specific, fairly common style of configuration in which
        Nginx acts as a reverse proxy for a locally-running (e.g., WSGI) server.
        :param name: identifies the server; used to name the configuration file.
        :param server_names: value for the Nginx "server_name" directive (one or more names, space-separated).
        :param proxy_pass: identifies the local proxy to which Nginx will pass requests.
        :param static_locations: optional additional "location" blocks, inserted into the server block verbatim.
        :param log_root: directory for the access/error log files; defaults to <deploy_root>/<name>/logs.
        :param listen: port for the "listen" directive; defaults to 80.
        """
start_msg('----- Writing Nginx server configuration for "{0}":'.format(name))
# be sure the log directory exists.
if log_root is None:
log_root = path.join(cfg().deploy_root, name, 'logs')
result = sudo('mkdir -p {0}'.format(log_root))
if result.failed:
raise HaltError('Unable to create log directory: "{0}"'.format(log_root))
# generate and write the configuration file.
server_config = _NGINX_SERVER_CONF.format(**locals())
dest = path.join(cfg().nginx_include_conf, '{name}.conf'.format(**locals()))
message('Writing to file: "{0}"'.format(dest))
put_string(server_config, dest, use_sudo=True)
succeed_msg('Wrote conf file for "{0}".'.format(name))
return self
def delete_config(self, name):
start_msg('----- Deleting server configuration for "{0}":'.format(name))
# delete the file, but ignore any errors.
config_name = '{name}.conf'.format(**locals())
result = sudo('rm -f {0}'.format(path.join(cfg().nginx_include_conf, config_name)))
if result.failed:
failed_msg('Ignoring failed attempt to delete configuration "{0}"'.format(config_name))
else:
succeed_msg('Successfully deleted configuration "{0}".'.format(config_name))
return self
def reload(self):
start_msg('----- Telling "Nginx" to reload configuration:')
result = sudo('/etc/init.d/nginx reload')
if result.failed:
raise HaltError('"Nginx" configuration reload failed ({0})'.format(result))
succeed_msg('Successfully reloaded.')
return self
# register.
Tool.__tools__['nginx'] = NginxTool
_NGINX_SERVER_CONF = """
server {{
listen {listen};
server_name {server_names};
access_log {log_root}/ngaccess.log;
error_log {log_root}/ngerror.log;
location / {{
proxy_pass {proxy_pass};
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
}}
{static_locations}
}}
""".lstrip()
_NGINX_CONF = """
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
}
""".lstrip()
| waxkinetic/fabcloudkit | fabcloudkit/tool/nginx.py | Python | bsd-3-clause | 6,961 |
import emission.analysis.modelling.tour_model.data_preprocessing as preprocess
# to determine if the user is valid:
# valid user should have >= 10 trips for further analysis and the proportion of filter_trips is >=50%
def valid_user(filter_trips,trips):
    return len(filter_trips) >= 10 and len(filter_trips) / len(trips) >= 0.5
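# Quick illustration of the rule above with hypothetical trip lists (only the
# list lengths matter): 12 of 20 trips surviving filtering passes both checks,
# while 8 of 20 fails the minimum-trip check.
#
#   valid_user(list(range(12)), list(range(20)))   # True  (12 >= 10 and 0.6 >= 0.5)
#   valid_user(list(range(8)), list(range(20)))    # False (8 < 10)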
# - user_ls: a list of strings representing short user names, such as [user1, user2, user3...]
# - valid_user_ls: a subset of `user_ls` for valid users, so also string representation of user names
# - all_users: a collection of all user ids, in terms of user id objects
def get_user_ls(all_users,radius):
    user_ls = []
    valid_user_ls = []
    for i, user in enumerate(all_users):
        curr_user = 'user' + str(i + 1)
        user_ls.append(curr_user)
        trips = preprocess.read_data(user)
        filter_trips = preprocess.filter_data(trips, radius)
        if valid_user(filter_trips, trips):
            valid_user_ls.append(curr_user)
    return user_ls, valid_user_ls
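# Illustrative call (hypothetical data and radius): for three users of which
# only the first two satisfy valid_user(), the result would look like
#
#   user_ls, valid_user_ls = get_user_ls(all_users, radius=100)
#   # user_ls       -> ['user1', 'user2', 'user3']
#   # valid_user_ls -> ['user1', 'user2']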
| e-mission/e-mission-server | emission/analysis/modelling/tour_model/get_users.py | Python | bsd-3-clause | 1,172 |
from django.shortcuts import render_to_response, get_object_or_404
from django.http import Http404
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.dates import YearArchiveView, MonthArchiveView,\
DateDetailView
from .models import Article, Section
class ArticleListView(ListView):
template = "news/article_list.html"
paginate_by = 5
def get_queryset(self):
return Article.objects.published()
def get_context_data(self, **kwargs):
context = super(ArticleListView, self).get_context_data(**kwargs)
context['section_list'] = Section.objects.all()
return context
class ArticleDateDetailView(DateDetailView):
date_field = "published"
template = "news/article_detail.html"
def get_queryset(self):
return Article.objects.published()
def get_context_data(self, **kwargs):
# import ipdb; ipdb.set_trace()
context = super(ArticleDateDetailView, self).get_context_data(**kwargs)
context['section_list'] = Section.objects.all()
return context
class ArticleDetailView(DetailView):
queryset = Article.objects.published()
template = "news/post_detail.html"
def get_context_data(self, **kwargs):
context = super(ArticleDetailView, self).get_context_data(**kwargs)
context['section_list'] = Section.objects.all()
return context
class SectionListView(ListView):
queryset = Section.objects.all()
template = "news/section_list.html"
class SectionDetailView(DetailView):
queryset = Section.objects.all()
template = "news/section_detail.html"
class ArticleYearArchiveView(YearArchiveView):
queryset = Article.objects.published()
date_field = "published"
make_object_list = True
template = "news/post_archive_year.html"
class ArticleMonthArchiveView(MonthArchiveView):
queryset = Article.objects.all()
date_field = "published"
make_object_list = True
template = "news/post_archive_month.html"
| ilendl2/chrisdev-cookiecutter | {{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/news/views.py | Python | bsd-3-clause | 2,060 |
"""
Module to create topo and qinit data files for this example.
"""
from clawpack.geoclaw import topotools
from pylab import *
def maketopo_hilo():
x = loadtxt('x.txt')
y = loadtxt('y.txt')
z = loadtxt('z.txt')
# modify x and y so that cell size is truly uniform:
dx = 1. / (3.*3600.) # 1/3"
xx = linspace(x[0], x[-1], len(x))
yy = linspace(y[-1], y[0], len(y))
zz = flipud(z)
topo = topotools.Topography()
topo.x = xx
topo.y = yy
topo.Z = zz
topo.write('hilo_flattened.tt2',topo_type=2)
def maketopo_flat():
"""
Output topography file for the entire domain
"""
nxpoints = 201
nypoints = 301
xlower = 204.812
xupper = 205.012
ylower = 19.7
yupper = 20.0
outfile= "flat.tt2"
topotools.topo2writer(outfile,topo_flat,xlower,xupper,ylower,yupper,nxpoints,nypoints)
def topo_flat(x,y):
"""
    Flat topography with a step: z = 30 for x < 204.91213, z = -30 otherwise.
"""
z = where(x < 204.91213, 30., -30.)
return z
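# Sanity check of the step profile above (a sketch, not part of the original
# module): points west of longitude 204.91213 sit at +30 m, points east of it
# at -30 m; the y argument is unused.
#
#   topo_flat(array([204.90, 204.92]), 0.)   # -> array([ 30., -30.])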
def plot_topo_big():
figure(figsize=(8,12))
topo1 = topotools.Topography()
topo1.read('flat.tt2',2)
contourf(topo1.x,topo1.y,topo1.Z,linspace(-30,20,51), extend='both')
topo2 = topotools.Topography()
topo2.read('hilo_flattened.tt2',2)
contourf(topo2.x,topo2.y,topo2.Z,linspace(-30,20,51), extend='both')
x1 = 204.90028
x2 = 204.96509
y1 = 19.71
y2 = 19.95
plot([x1,x2,x2,x1,x1],[y1,y1,y2,y2,y1],'w')
axis('scaled')
colorbar()
def plot_topo():
figure(figsize=(12,8))
topo1 = topotools.Topography()
topo1.read('flat.tt2',2)
contourf(topo1.x,topo1.y,topo1.Z,linspace(-30,20,51), extend='both')
topo2 = topotools.Topography()
topo2.read('hilo_flattened.tt2',2)
contourf(topo2.x,topo2.y,topo2.Z,linspace(-30,20,51), extend='both')
colorbar()
x1 = 204.9
x2 = 204.955
y1 = 19.715
y2 = 19.755
axis([x1,x2,y1,y2])
gca().set_aspect(1./cos(y1*pi/180.))
ticklabel_format(format='plain',useOffset=False)
contour(topo2.x,topo2.y,topo2.Z,[0.],colors='k')
plot([204.9447],[19.7308], 'ko') # from BM description
plot([204.9437],[19.7307], 'ro') # closer to pier
# from <http://tidesandcurrents.noaa.gov/stationhome.html?id=1617760>
# location is listed as: 19 degrees 43.8' N, 155 degrees, 3.3' W
xg = 360 - (155 + 3.3/60.)
yg = 19 + 43.8/60.
plot([xg],[yg], 'bo')
#gauges.append([1125, 204.91802, 19.74517, 0., 1.e9]) #Hilo
#gauges.append([1126, 204.93003, 19.74167, 0., 1.e9]) #Hilo
#gauges.append([3333, 204.93, 19.7576, 0., 1.e9])
if __name__=='__main__':
maketopo_hilo()
maketopo_flat()
| rjleveque/tsunami_benchmarks | nthmp_currents_2015/problem2/maketopo.py | Python | bsd-3-clause | 2,646 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_Difference/trend_Lag1Trend/cycle_7/ar_12/test_artificial_128_Difference_Lag1Trend_7_12_20.py | Python | bsd-3-clause | 266 |
import random
import time
import sys
import Csound
import subprocess
import base64
import hashlib
import matrixmusic
csd = None
oscillator = None
buzzer = None
voice = None
truevoice = None
song_publisher = None
def add_motif(instrument, req):
global csd
time = req.motif_start_time
for note in req.score:
if note != "P":
csd.score(instrument.note(time,
req.note_duration,
note,
req.motif_amplitude))
time += req.internote_delay
def handle_create_song(req):
global csd, oscillator, buzzer, voice
global song_publisher
s = 'temp'
csd = Csound.CSD('%s.csd' % s)
csd.orchestra(oscillator, buzzer, voice)
for motif in req.motifs:
if motif.instrument == 'oscil':
add_motif(oscillator, motif)
elif motif.instrument == 'buzzer':
add_motif(buzzer, motif)
elif motif.instrument == 'voice':
add_motif(voice, motif)
csd.output()
args = ['csound', '-d', '%s.csd' % s]
subprocess.call(args)
f = open('%s.csd' % s)
csd_string = f.read()
f.close()
song_name = '%s.ogg' % req.song_name
args = ['oggenc', '-o', song_name, '%s.wav' % s]
subprocess.call(args)
args = ['vorbiscomment', '-a', song_name,
'-t', "ARTIST=%s" % req.artist,
'-t', "TITLE=%s" % req.song_name,
'-t', "ALBUM=%s" % req.album,
'-t', "GENRE=%s" % 'Electronica',
'-t', "CSOUND=%s" % csd_string]
subprocess.call(args)
args = ['ogg123', song_name]
subprocess.call(args)
class Motif(object):
def __init__(self, motif_start_time, motif_repeat, motif_amplitude, score, note_duration, internote_delay, instrument):
self.motif_start_time = motif_start_time
self.motif_repeat = motif_repeat
self.motif_amplitude = motif_amplitude
self.score = score
self.note_duration = note_duration
self.internote_delay = internote_delay
self.instrument = instrument
class Request(object):
def __init__(self, song_name, artist, album, motifs):
self.song_name = song_name
self.artist = artist
self.album = album
self.motifs = motifs
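# Illustrative construction (not taken from the original script): a one-motif
# request could be assembled as below and handed to handle_create_song() once
# the module-level instruments have been created, as done under __main__.
# The song/artist/album names and note values are made up.
#
#   motif = Motif(motif_start_time=0.0, motif_repeat=12, motif_amplitude=0.2,
#                 score=["A3", "C4", "E4", "P"], note_duration=0.3,
#                 internote_delay=0.1, instrument='oscil')
#   req = Request('demo_song', 'some_artist', 'some_album', [motif])
#   handle_create_song(req)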
def heads():
return (random.random() < 0.5)
def biasedFlip(p):
return (random.random() < p)
def selectInstrument():
if heads():
return 'oscil'
else:
return 'buzzer'
def selectInterval():
return 0.15, 0.05
def triggerCreate(song_name, artist, album, motifs):
handle_create_song(Request(song_name, artist, album, motifs))
def random_note():
bases = ["A", "B", "C", "D", "E", "F", "G"]
unsharpable = ["E", "B"]
unflatable = ["C", "F"]
octaves = map(str, range(2,6))
mods = ["", "#"]
base = random.choice(bases)
mods = [""]
if not base in unsharpable:
mods.append("#")
mod = random.choice(mods)
octave = random.choice(octaves)
return base + mod + octave
def random_motif(start_time):
#notes = " ".join([random_note() for i in range(10)])
#notes = "A3 B3 D4 E4 F#4 A4 B4 D5 E5 F#5 A5 B5 D6 E6 F#6 P".split(" ")
notes = "C3 C#3 E3 F3 G3 G#3 B4 C4 C#4 E4 F4 G4 G#4".split(" ")
score = matrixmusic.create_pair_score(notes, 15) * 5
print("Random score: " + str(score))
opts = [("voice", 1.0, 1.5),
#("oscil", 1.0, 1.5),
("voice", 3.0, 1.5)]
#("oscil", 3.0, 1.5)]
opt = random.choice(opts)
return Motif(start_time, 12, 0.05, score, opt[1], opt[2], opt[0])
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: %s <artist> <album name>" % sys.argv[0]
exit()
else:
artist = sys.argv[1]
album = sys.argv[2]
global song_publisher, oscillator, buzzer, voice
oscillator = Csound.oscil()
buzzer = Csound.buzz()
voice = Csound.fmvoice()
#voice = Csound.voice()
for i in xrange(1, 16384):
song_title = "song_%d" % i
#motifs = [ Motif(0.0, 12, 0.32, "A3 B3 D4 E4 F#4 A4 B4 D5 E5 F#5 A5 B5 D6 E6 F#6", 0.15, 0.05, selectInstrument()) ]
motifs = [random_motif(i*0.8) for i in range(3)]
# if biasedFlip(0.8):
# motifs.append(Motif(3.0, 10, 0.32, "A3 B3 D4 E4 F#4 A4 B4 D5 E5 F#5 A5 B5 D6 E6 F#6", a, b, selectInstrument()))
# if biasedFlip(0.9):
# motifs.append(Motif(6.0, 4, 0.10, "A2 B2 D3 D3 F#3 A3 B3 D4 E4 F#4 A4 B4 D5 E5 F#5", 0.3, 0.1, selectInstrument()))
triggerCreate(song_title, artist, album, motifs)
print "Created song %s" % song_title
time.sleep(10)
| andrewtron3000/jampy | generator_matrix.py | Python | bsd-3-clause | 4,610 |
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from pylons.templating import render_mako_def
from kai.lib.base import BaseController, render
from kai.lib.helpers import textilize
from kai.lib.serialization import render_feed
from kai.model import Comment
log = logging.getLogger(__name__)
class CommentsController(BaseController):
def preview(self):
data = request.POST['content']
return textilize(data)
def create(self, doc_id):
if not c.user:
abort(401)
# Ensure the doc exists
doc = self.db.get(doc_id)
if not doc:
abort(404)
comment = Comment(doc_id=doc_id, displayname=c.user.displayname,
email=c.user.email, human_id=c.user.id,
content=request.POST['content'])
comment.store(self.db)
return ''
def delete(self, id):
if not c.user or not c.user.in_group('admin'):
abort(401)
# Ensure doc exists
doc = self.db.get(id)
if not doc:
abort(404)
# Make sure its a comment
if not doc['type'] == 'Comment':
abort(404)
self.db.delete(doc)
return ''
def index(self, format='html'):
if format == 'html':
abort(404)
elif format in ['atom', 'rss']:
# Pull comments and grab the docs with them for their info
comments = list(Comment.by_anytime(c.db, descending=True, limit=20))
commentdata = []
for comment_doc in comments:
comment = {}
displayname = comment_doc.displayname or 'Anonymous'
comment['created'] = comment_doc.created
id = comment_doc.id
doc = c.db.get(comment_doc.doc_id)
if doc['type'] == 'Traceback':
comment['title'] = '%s: %s' % (doc['exception_type'], doc['exception_value'])
else:
comment['title'] = doc.get('title', '-- No title --')
comment['type'] = doc['type']
comment['link'] = render_mako_def(
'/widgets.mako', 'comment_link', title=comment['title'],
comment_id=comment_doc.id, doc=doc, type=doc['type'],
urlonly=True).strip()
comment['doc_id'] = comment_doc.doc_id
comment['description'] = textilize(comment_doc.content)
commentdata.append(comment)
response.content_type = 'application/atom+xml'
return render_feed(
title="PylonsHQ Comment Feed", link=url.current(qualified=True),
description="Recent PylonsHQ comments", objects=commentdata,
pub_date='created')
| Pylons/kai | kai/controllers/comments.py | Python | bsd-3-clause | 2,956 |
class Gadgets(object):
"""
A Gadgets object providing managing of various gadgets for display on analytics dashboard.
Gadgets are registered with the Gadgets using the register() method.
"""
def __init__(self):
self._registry = {} # gadget hash -> gadget object.
def get_gadget(self, id):
return self._registry[id]
def get_gadgets(self):
return self._registry.values()
def register(self, gadget):
"""
Registers a gadget object.
        If a gadget with the same id is already registered, it is replaced.
"""
self._registry[gadget.id] = gadget
gadgets = Gadgets()
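# Illustrative registration (SignupsGadget is a hypothetical stand-in for a
# real gadget class): any object exposing an 'id' attribute can be registered
# on the module-level 'gadgets' registry and retrieved again by that id.
#
#   class SignupsGadget(object):
#       id = 'signups'
#   gadgets.register(SignupsGadget())
#   gadgets.get_gadget('signups')   # -> the SignupsGadget instance
#   gadgets.get_gadgets()           # -> all registered gadget objects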
| praekelt/django-analytics | analytics/sites.py | Python | bsd-3-clause | 657 |
#!/usr/bin/env python
#
# Written by Chema Garcia (aka sch3m4)
# Contact: [email protected] || http://safetybits.net || @sch3m4
#
import serial.tools.list_ports
from SerialCrypt import Devices
def locateDevice(devid):
'''
Returns the serial port path of the arduino if found, or None if it isn't connected
'''
retval = None
for port in serial.tools.list_ports.comports():
if port[2][:len(devid)] == devid:
retval = port[0]
break
return retval
def main():
print "HSM Device: %s" % locateDevice ( Devices.DEVICE_CRYPT_ID )
print "uToken Device: %s" % locateDevice ( Devices.DEVICE_UTOKEN_ID )
print "Debug Device: %s" % locateDevice ( Devices.DEVICE_DEBUG_ID )
if __name__ == "__main__":
main()
| sch3m4/SerialCrypt | apps/locate.py | Python | bsd-3-clause | 724 |
# -*- coding: utf-8 -*-
"""
Display number of scratchpad windows and urgency hints.
Configuration parameters:
cache_timeout: refresh interval for i3-msg or swaymsg (default 5)
format: display format for this module
(default "\u232b [\?color=scratchpad {scratchpad}]")
thresholds: specify color thresholds to use
(default [(0, "darkgray"), (1, "violet")])
Format placeholders:
{scratchpad} number of scratchpads
{urgent} number of urgent scratchpads
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Optional:
i3ipc: an improved python library to control i3wm and sway
Examples:
```
# hide zero scratchpad
scratchpad {
format = '[\?not_zero \u232b [\?color=scratchpad {scratchpad}]]'
}
# hide non-urgent scratchpad
scratchpad {
format = '[\?not_zero \u232b {urgent}]'
}
# bring up scratchpads on clicks
scratchpad {
on_click 1 = 'scratchpad show'
}
# add more colors
scratchpad {
thresholds = [
(0, "darkgray"), (1, "violet"), (2, "deepskyblue"), (3, "lime"),
(4, "yellow"), (5, "orange"), (6, "red"), (7, "tomato"),
]
}
```
@author shadowprince (counter), cornerman (async)
@license Eclipse Public License (counter), BSD (async)
SAMPLE OUTPUT
[{'full_text': '\u232b '}, {'full_text': u'0', 'color': '#a9a9a9'}]
violet
[{'full_text': '\u232b '}, {'full_text': u'5', 'color': '#ee82ee'}]
urgent
[{'full_text': '\u232b URGENT 1', 'urgent': True}]
"""
STRING_ERROR = "invalid ipc `{}`"
class Ipc:
"""
"""
def __init__(self, parent):
self.parent = parent
self.setup(parent)
class I3ipc(Ipc):
"""
i3ipc - an improved python library to control i3wm and sway
"""
def setup(self, parent):
from threading import Thread
self.parent.cache_timeout = self.parent.py3.CACHE_FOREVER
self.scratchpad_data = {"scratchpad": 0, "urgent": 0}
t = Thread(target=self.start)
t.daemon = True
t.start()
def start(self):
from i3ipc import Connection
i3 = Connection()
self.update(i3)
for event in ["window::move", "window::urgent"]:
i3.on(event, self.update)
i3.main()
def update(self, i3, event=None):
leaves = i3.get_tree().scratchpad().leaves()
temporary = {
"ipc": self.parent.ipc,
"scratchpad": len(leaves),
"urgent": sum(window.urgent for window in leaves),
}
if self.scratchpad_data != temporary:
self.scratchpad_data = temporary
self.parent.py3.update()
def get_scratchpad_data(self):
return self.scratchpad_data
class Msg(Ipc):
"""
i3-msg - send messages to i3 window manager
swaymsg - send messages to sway window manager
"""
def setup(self, parent):
from json import loads
self.json_loads = loads
wm_msg = {"i3msg": "i3-msg"}.get(parent.ipc, parent.ipc)
self.tree_command = [wm_msg, "-t", "get_tree"]
def get_scratchpad_data(self):
tree = self.json_loads(self.parent.py3.command_output(self.tree_command))
leaves = self.find_scratchpad(tree).get("floating_nodes", [])
return {
"ipc": self.parent.ipc,
"scratchpad": len(leaves),
"urgent": sum([window["urgent"] for window in leaves]),
}
def find_scratchpad(self, tree):
if tree.get("name") == "__i3_scratch":
return tree
for x in tree.get("nodes", []):
result = self.find_scratchpad(x)
if result:
return result
return {}
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = "\u232b [\?color=scratchpad {scratchpad}]"
thresholds = [(0, "darkgray"), (1, "violet")]
def post_config_hook(self):
# ipc: specify i3ipc, i3-msg, or swaymsg, otherwise auto
self.ipc = getattr(self, "ipc", "")
if self.ipc in ["", "i3ipc"]:
try:
from i3ipc import Connection # noqa f401
self.ipc = "i3ipc"
except Exception:
if self.ipc:
raise # module not found
self.ipc = (self.ipc or self.py3.get_wm_msg()).replace("-", "")
if self.ipc in ["i3ipc"]:
self.backend = I3ipc(self)
elif self.ipc in ["i3msg", "swaymsg"]:
self.backend = Msg(self)
else:
raise Exception(STRING_ERROR.format(self.ipc))
self.thresholds_init = self.py3.get_color_names_list(self.format)
def scratchpad(self):
scratchpad_data = self.backend.get_scratchpad_data()
for x in self.thresholds_init:
if x in scratchpad_data:
self.py3.threshold_get_color(scratchpad_data[x], x)
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, scratchpad_data),
}
if scratchpad_data["urgent"]:
response["urgent"] = True
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
config = {"format": "\[{ipc}\] [\?color=scratchpad {scratchpad}]"}
module_test(Py3status, config=config)
| Andrwe/py3status | py3status/modules/scratchpad.py | Python | bsd-3-clause | 5,375 |
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import stix
from stix.data_marking import MarkingStructure
import stix.bindings.extensions.marking.tlp as tlp_binding
@stix.register_extension
class TLPMarkingStructure(MarkingStructure):
_binding = tlp_binding
_binding_class = tlp_binding.TLPMarkingStructureType
_namespace = 'http://data-marking.mitre.org/extensions/MarkingStructure#TLP-1'
_XSI_TYPE = "tlpMarking:TLPMarkingStructureType"
def __init__(self, color=None):
super(TLPMarkingStructure, self).__init__()
self.color = color
def to_obj(self, return_obj=None, ns_info=None):
super(TLPMarkingStructure, self).to_obj(return_obj=return_obj, ns_info=ns_info)
if not return_obj:
return_obj = self._binding_class()
MarkingStructure.to_obj(self, return_obj=return_obj, ns_info=ns_info)
return_obj.color = self.color
return return_obj
def to_dict(self):
d = MarkingStructure.to_dict(self)
if self.color:
d['color'] = self.color
return d
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
MarkingStructure.from_obj(obj, return_obj=return_obj)
return_obj.color = obj.color
return return_obj
@classmethod
def from_dict(cls, d, return_obj=None):
if not d:
return None
if not return_obj:
return_obj = cls()
MarkingStructure.from_dict(d, return_obj)
return_obj.color = d.get('color')
return return_obj
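# Illustrative round trip (not part of the original module): the marking
# serializes its TLP color alongside the base MarkingStructure fields.
#
#   marking = TLPMarkingStructure(color='AMBER')
#   d = marking.to_dict()                        # includes {'color': 'AMBER', ...}
#   restored = TLPMarkingStructure.from_dict(d)
#   assert restored.color == 'AMBER'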
| chriskiehl/python-stix | stix/extensions/marking/tlp.py | Python | bsd-3-clause | 1,713 |
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import logging
import os
import pipes
import posixpath
import random
import re
import shlex
import sys
import devil_chromium
from devil import devil_env
from devil.android import apk_helper
from devil.android import device_errors
from devil.android import device_utils
from devil.android import flag_changer
from devil.android.sdk import adb_wrapper
from devil.android.sdk import intent
from devil.android.sdk import version_codes
from devil.utils import run_tests_helper
with devil_env.SysPath(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'colorama', 'src')):
import colorama
from incremental_install import installer
from pylib import constants
from pylib.symbols import deobfuscator
def _Colorize(color, text):
# |color| as a string to avoid pylint's no-member warning :(.
# pylint: disable=no-member
return getattr(colorama.Fore, color) + text + colorama.Fore.RESET
def _InstallApk(devices, apk, install_dict):
def install(device):
if install_dict:
installer.Install(device, install_dict, apk=apk)
else:
device.Install(apk)
logging.info('Installing %sincremental apk.', '' if install_dict else 'non-')
device_utils.DeviceUtils.parallel(devices).pMap(install)
def _UninstallApk(devices, install_dict, package_name):
def uninstall(device):
if install_dict:
installer.Uninstall(device, package_name)
else:
device.Uninstall(package_name)
device_utils.DeviceUtils.parallel(devices).pMap(uninstall)
def _LaunchUrl(devices, input_args, device_args_file, url, apk):
if input_args and device_args_file is None:
raise Exception('This apk does not support any flags.')
if url:
view_activity = apk.GetViewActivityName()
if not view_activity:
raise Exception('APK does not support launching with URLs.')
def launch(device):
# The flags are first updated with input args.
changer = flag_changer.FlagChanger(device, device_args_file)
flags = []
if input_args:
flags = shlex.split(input_args)
changer.ReplaceFlags(flags)
# Then launch the apk.
if url is None:
# Simulate app icon click if no url is present.
cmd = ['monkey', '-p', apk.GetPackageName(), '-c',
'android.intent.category.LAUNCHER', '1']
device.RunShellCommand(cmd, check_return=True)
else:
launch_intent = intent.Intent(action='android.intent.action.VIEW',
activity=view_activity, data=url,
package=apk.GetPackageName())
device.StartActivity(launch_intent)
device_utils.DeviceUtils.parallel(devices).pMap(launch)
def _ChangeFlags(devices, input_args, device_args_file):
if input_args is None:
_DisplayArgs(devices, device_args_file)
else:
flags = shlex.split(input_args)
def update(device):
flag_changer.FlagChanger(device, device_args_file).ReplaceFlags(flags)
device_utils.DeviceUtils.parallel(devices).pMap(update)
def _TargetCpuToTargetArch(target_cpu):
if target_cpu == 'x64':
return 'x86_64'
if target_cpu == 'mipsel':
return 'mips'
return target_cpu
def _RunGdb(device, package_name, output_directory, target_cpu, extra_args,
verbose):
gdb_script_path = os.path.dirname(__file__) + '/adb_gdb'
cmd = [
gdb_script_path,
'--package-name=%s' % package_name,
'--output-directory=%s' % output_directory,
'--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(),
'--device=%s' % device.serial,
# Use one lib dir per device so that changing between devices does require
# refetching the device libs.
'--pull-libs-dir=/tmp/adb-gdb-libs-%s' % device.serial,
]
# Enable verbose output of adb_gdb if it's set for this script.
if verbose:
cmd.append('--verbose')
if target_cpu:
cmd.append('--target-arch=%s' % _TargetCpuToTargetArch(target_cpu))
cmd.extend(extra_args)
logging.warning('Running: %s', ' '.join(pipes.quote(x) for x in cmd))
print _Colorize('YELLOW', 'All subsequent output is from adb_gdb script.')
os.execv(gdb_script_path, cmd)
def _PrintPerDeviceOutput(devices, results, single_line=False):
for d, result in zip(devices, results):
if not single_line and d is not devices[0]:
sys.stdout.write('\n')
sys.stdout.write(
_Colorize('YELLOW', '%s (%s):' % (d, d.build_description)))
sys.stdout.write(' ' if single_line else '\n')
yield result
def _RunMemUsage(devices, package_name):
def mem_usage_helper(d):
ret = []
proc_map = d.GetPids(package_name)
for name, pids in proc_map.iteritems():
for pid in pids:
ret.append((name, pid, d.GetMemoryUsageForPid(pid)))
return ret
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_results = parallel_devices.pMap(mem_usage_helper).pGet(None)
for result in _PrintPerDeviceOutput(devices, all_results):
if not result:
print 'No processes found.'
else:
for name, pid, usage in sorted(result):
print '%s(%s):' % (name, pid)
for k, v in sorted(usage.iteritems()):
print ' %s=%d' % (k, v)
print
def _DuHelper(device, path_spec, run_as=None):
"""Runs "du -s -k |path_spec|" on |device| and returns parsed result.
Args:
device: A DeviceUtils instance.
path_spec: The list of paths to run du on. May contain shell expansions
(will not be escaped).
run_as: Package name to run as, or None to run as shell user. If not None
and app is not android:debuggable (run-as fails), then command will be
run as root.
Returns:
A dict of path->size in kb containing all paths in |path_spec| that exist on
device. Paths that do not exist are silently ignored.
"""
# Example output for: du -s -k /data/data/org.chromium.chrome/{*,.*}
# 144 /data/data/org.chromium.chrome/cache
# 8 /data/data/org.chromium.chrome/files
# <snip>
# du: .*: No such file or directory
# The -d flag works differently across android version, so use -s instead.
cmd_str = 'du -s -k ' + path_spec
lines = device.RunShellCommand(cmd_str, run_as=run_as, shell=True,
check_return=False)
output = '\n'.join(lines)
# run-as: Package 'com.android.chrome' is not debuggable
if output.startswith('run-as:'):
# check_return=False needed for when some paths in path_spec do not exist.
lines = device.RunShellCommand(cmd_str, as_root=True, shell=True,
check_return=False)
ret = {}
try:
for line in lines:
# du: .*: No such file or directory
if line.startswith('du:'):
continue
size, subpath = line.split(None, 1)
ret[subpath] = int(size)
return ret
except ValueError:
    logging.error('Failed to parse du output:\n%s', output)
    raise
def _RunDiskUsage(devices, package_name, verbose):
# Measuring dex size is a bit complicated:
# https://source.android.com/devices/tech/dalvik/jit-compiler
#
# For KitKat and below:
# dumpsys package contains:
# dataDir=/data/data/org.chromium.chrome
# codePath=/data/app/org.chromium.chrome-1.apk
# resourcePath=/data/app/org.chromium.chrome-1.apk
# nativeLibraryPath=/data/app-lib/org.chromium.chrome-1
# To measure odex:
# ls -l /data/dalvik-cache/data@[email protected]@classes.dex
#
# For Android L and M (and maybe for N+ system apps):
# dumpsys package contains:
# codePath=/data/app/org.chromium.chrome-1
# resourcePath=/data/app/org.chromium.chrome-1
# legacyNativeLibraryDir=/data/app/org.chromium.chrome-1/lib
# To measure odex:
# # Option 1:
# /data/dalvik-cache/arm/data@[email protected]@[email protected]
# /data/dalvik-cache/arm/data@[email protected]@[email protected]
# ls -l /data/dalvik-cache/profiles/org.chromium.chrome
# (these profiles all appear to be 0 bytes)
# # Option 2:
# ls -l /data/app/org.chromium.chrome-1/oat/arm/base.odex
#
# For Android N+:
# dumpsys package contains:
# dataDir=/data/user/0/org.chromium.chrome
# codePath=/data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==
# resourcePath=/data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==
# legacyNativeLibraryDir=/data/app/org.chromium.chrome-GUID/lib
# Instruction Set: arm
# path: /data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==/base.apk
# status: /data/.../oat/arm/base.odex[status=kOatUpToDate, compilation_f
# ilter=quicken]
# Instruction Set: arm64
# path: /data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==/base.apk
# status: /data/.../oat/arm64/base.odex[status=..., compilation_filter=q
# uicken]
# To measure odex:
# ls -l /data/app/.../oat/arm/base.odex
# ls -l /data/app/.../oat/arm/base.vdex (optional)
# To measure the correct odex size:
# cmd package compile -m speed org.chromium.chrome # For webview
# cmd package compile -m speed-profile org.chromium.chrome # For others
def disk_usage_helper(d):
package_output = '\n'.join(d.RunShellCommand(
['dumpsys', 'package', package_name], check_return=True))
# Prints a message but does not return error when apk is not installed.
if 'Unable to find package:' in package_output:
return None
# Ignore system apks.
idx = package_output.find('Hidden system packages:')
if idx != -1:
package_output = package_output[:idx]
try:
data_dir = re.search(r'dataDir=(.*)', package_output).group(1)
code_path = re.search(r'codePath=(.*)', package_output).group(1)
lib_path = re.search(r'(?:legacyN|n)ativeLibrary(?:Dir|Path)=(.*)',
package_output).group(1)
except AttributeError:
raise Exception('Error parsing dumpsys output: ' + package_output)
compilation_filters = set()
# Match "compilation_filter=value", where a line break can occur at any spot
# (refer to examples above).
awful_wrapping = r'\s*'.join('compilation_filter=')
for m in re.finditer(awful_wrapping + r'([\s\S]+?)[\],]', package_output):
compilation_filters.add(re.sub(r'\s+', '', m.group(1)))
compilation_filter = ','.join(sorted(compilation_filters))
data_dir_sizes = _DuHelper(d, '%s/{*,.*}' % data_dir, run_as=package_name)
# Measure code_cache separately since it can be large.
code_cache_sizes = {}
code_cache_dir = next(
(k for k in data_dir_sizes if k.endswith('/code_cache')), None)
if code_cache_dir:
data_dir_sizes.pop(code_cache_dir)
code_cache_sizes = _DuHelper(d, '%s/{*,.*}' % code_cache_dir,
run_as=package_name)
apk_path_spec = code_path
if not apk_path_spec.endswith('.apk'):
apk_path_spec += '/*.apk'
apk_sizes = _DuHelper(d, apk_path_spec)
if lib_path.endswith('/lib'):
# Shows architecture subdirectory.
lib_sizes = _DuHelper(d, '%s/{*,.*}' % lib_path)
else:
lib_sizes = _DuHelper(d, lib_path)
# Look at all possible locations for odex files.
odex_paths = []
for apk_path in apk_sizes:
mangled_apk_path = apk_path[1:].replace('/', '@')
apk_basename = posixpath.basename(apk_path)[:-4]
for ext in ('dex', 'odex', 'vdex', 'art'):
# Easier to check all architectures than to determine active ones.
for arch in ('arm', 'arm64', 'x86', 'x86_64', 'mips', 'mips64'):
odex_paths.append(
'%s/oat/%s/%s.%s' % (code_path, arch, apk_basename, ext))
# No app could possibly have more than 6 dex files.
for suffix in ('', '2', '3', '4', '5'):
odex_paths.append('/data/dalvik-cache/%s/%s@classes%s.%s' % (
arch, mangled_apk_path, suffix, ext))
# This path does not have |arch|, so don't repeat it for every arch.
if arch == 'arm':
odex_paths.append('/data/dalvik-cache/%s@classes%s.dex' % (
mangled_apk_path, suffix))
odex_sizes = _DuHelper(d, ' '.join(pipes.quote(p) for p in odex_paths))
return (data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes,
compilation_filter)
def print_sizes(desc, sizes):
print '%s: %dkb' % (desc, sum(sizes.itervalues()))
if verbose:
for path, size in sorted(sizes.iteritems()):
print ' %s: %skb' % (path, size)
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_results = parallel_devices.pMap(disk_usage_helper).pGet(None)
for result in _PrintPerDeviceOutput(devices, all_results):
if not result:
print 'APK is not installed.'
continue
(data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes,
compilation_filter) = result
total = sum(sum(sizes.itervalues()) for sizes in result[:-1])
print_sizes('Apk', apk_sizes)
print_sizes('App Data (non-code cache)', data_dir_sizes)
print_sizes('App Data (code cache)', code_cache_sizes)
print_sizes('Native Libs', lib_sizes)
show_warning = compilation_filter and 'speed' not in compilation_filter
compilation_filter = compilation_filter or 'n/a'
print_sizes('odex (compilation_filter=%s)' % compilation_filter, odex_sizes)
if show_warning:
logging.warning('For a more realistic odex size, run:')
logging.warning(' %s compile-dex [speed|speed-profile]', sys.argv[0])
print 'Total: %skb (%.1fmb)' % (total, total / 1024.0)
def _RunLogcat(device, package_name, verbose, mapping_path):
if mapping_path:
try:
deobfuscate = deobfuscator.Deobfuscator(mapping_path)
except OSError:
sys.stderr.write('Error executing "bin/java_deobfuscate". '
'Did you forget to build it?\n')
sys.exit(1)
def get_my_pids():
my_pids = []
for pids in device.GetPids(package_name).values():
my_pids.extend(pids)
return [int(pid) for pid in my_pids]
def process_line(line, fast=False):
if verbose:
if fast:
return
else:
if not line or line.startswith('------'):
return
tokens = line.split(None, 4)
pid = int(tokens[2])
priority = tokens[4]
if pid in my_pids or (not fast and priority == 'F'):
pass # write
elif pid in not_my_pids:
return
elif fast:
# Skip checking whether our package spawned new processes.
not_my_pids.add(pid)
return
else:
# Check and add the pid if it is a new one from our package.
my_pids.update(get_my_pids())
if pid not in my_pids:
not_my_pids.add(pid)
return
if mapping_path:
line = '\n'.join(deobfuscate.TransformLines([line.rstrip()])) + '\n'
sys.stdout.write(line)
try:
my_pids = set(get_my_pids())
not_my_pids = set()
nonce = 'apk_wrappers.py nonce={}'.format(random.random())
device.RunShellCommand(['log', nonce])
fast = True
for line in device.adb.Logcat(logcat_format='threadtime'):
try:
process_line(line, fast)
except:
sys.stderr.write('Failed to process line: ' + line)
raise
if fast and nonce in line:
fast = False
except KeyboardInterrupt:
pass # Don't show stack trace upon Ctrl-C
finally:
if mapping_path:
deobfuscate.Close()
def _RunPs(devices, package_name):
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_pids = parallel_devices.GetPids(package_name).pGet(None)
for proc_map in _PrintPerDeviceOutput(devices, all_pids):
if not proc_map:
print 'No processes found.'
else:
for name, pids in sorted(proc_map.items()):
print name, ','.join(pids)
def _RunShell(devices, package_name, cmd):
if cmd:
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.RunShellCommand(
cmd, run_as=package_name).pGet(None)
for output in _PrintPerDeviceOutput(devices, outputs):
for line in output:
print line
else:
adb_path = adb_wrapper.AdbWrapper.GetAdbPath()
cmd = [adb_path, '-s', devices[0].serial, 'shell']
# Pre-N devices do not support -t flag.
if devices[0].build_version_sdk >= version_codes.NOUGAT:
cmd += ['-t', 'run-as', package_name]
else:
print 'Upon entering the shell, run:'
print 'run-as', package_name
print
os.execv(adb_path, cmd)
def _RunCompileDex(devices, package_name, compilation_filter):
cmd = ['cmd', 'package', 'compile', '-f', '-m', compilation_filter,
package_name]
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.RunShellCommand(cmd).pGet(None)
for output in _PrintPerDeviceOutput(devices, outputs):
for line in output:
print line
def _GenerateAvailableDevicesMessage(devices):
devices_obj = device_utils.DeviceUtils.parallel(devices)
descriptions = devices_obj.pMap(lambda d: d.build_description).pGet(None)
msg = 'Available devices:\n'
for d, desc in zip(devices, descriptions):
msg += ' %s (%s)\n' % (d, desc)
return msg
# TODO(agrieve): Add "--all" to the MultipleDevicesError message and use it here.
def _GenerateMissingAllFlagMessage(devices):
return ('More than one device available. Use --all to select all devices, ' +
'or use --device to select a device by serial.\n\n' +
_GenerateAvailableDevicesMessage(devices))
def _DisplayArgs(devices, device_args_file):
def flags_helper(d):
changer = flag_changer.FlagChanger(d, device_args_file)
return changer.GetCurrentFlags()
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.pMap(flags_helper).pGet(None)
print 'Existing flags per-device (via /data/local/tmp/%s):' % device_args_file
for flags in _PrintPerDeviceOutput(devices, outputs, single_line=True):
quoted_flags = ' '.join(pipes.quote(f) for f in flags)
print quoted_flags or 'No flags set.'
def _DeviceCachePath(device, output_directory):
file_name = 'device_cache_%s.json' % device.serial
return os.path.join(output_directory, file_name)
def _LoadDeviceCaches(devices, output_directory):
if not output_directory:
return
for d in devices:
cache_path = _DeviceCachePath(d, output_directory)
if os.path.exists(cache_path):
logging.debug('Using device cache: %s', cache_path)
with open(cache_path) as f:
d.LoadCacheData(f.read())
# Delete the cached file so that any exceptions cause it to be cleared.
os.unlink(cache_path)
else:
logging.debug('No cache present for device: %s', d)
def _SaveDeviceCaches(devices, output_directory):
if not output_directory:
return
for d in devices:
cache_path = _DeviceCachePath(d, output_directory)
with open(cache_path, 'w') as f:
f.write(d.DumpCacheData())
logging.info('Wrote device cache: %s', cache_path)
class _Command(object):
name = None
description = None
needs_package_name = False
needs_output_directory = False
needs_apk_path = False
supports_incremental = False
accepts_command_line_flags = False
accepts_args = False
accepts_url = False
all_devices_by_default = False
calls_exec = False
def __init__(self, from_wrapper_script):
self._parser = None
self._from_wrapper_script = from_wrapper_script
self.args = None
self.apk_helper = None
self.install_dict = None
self.devices = None
# Do not support incremental install outside the context of wrapper scripts.
if not from_wrapper_script:
self.supports_incremental = False
def _RegisterExtraArgs(self, subp):
pass
def RegisterArgs(self, parser):
subp = parser.add_parser(self.name, help=self.description)
self._parser = subp
subp.set_defaults(command=self)
subp.add_argument('--all',
action='store_true',
default=self.all_devices_by_default,
help='Operate on all connected devices.',)
subp.add_argument('-d',
'--device',
action='append',
default=[],
dest='devices',
help='Target device for script to work on. Enter '
'multiple times for multiple devices.')
subp.add_argument('-v',
'--verbose',
action='count',
default=0,
dest='verbose_count',
help='Verbose level (multiple times for more)')
group = subp.add_argument_group('%s arguments' % self.name)
if self.needs_package_name:
# Always gleaned from apk when using wrapper scripts.
group.add_argument('--package-name',
help=argparse.SUPPRESS if self._from_wrapper_script else (
"App's package name."))
if self.needs_apk_path or self.needs_package_name:
# Adding this argument to the subparser would override the set_defaults()
# value set by on the parent parser (even if None).
if not self._from_wrapper_script:
group.add_argument('--apk-path',
required=self.needs_apk_path,
help='Path to .apk')
if self.supports_incremental:
group.add_argument('--incremental',
action='store_true',
default=False,
help='Always install an incremental apk.')
group.add_argument('--non-incremental',
action='store_true',
default=False,
help='Always install a non-incremental apk.')
# accepts_command_line_flags and accepts_args are mutually exclusive.
# argparse will throw if they are both set.
if self.accepts_command_line_flags:
group.add_argument('--args', help='Command-line flags.')
if self.accepts_args:
group.add_argument('--args', help='Extra arguments.')
if self.accepts_url:
group.add_argument('url', nargs='?', help='A URL to launch with.')
if not self._from_wrapper_script and self.accepts_command_line_flags:
# Provided by wrapper scripts.
group.add_argument(
'--command-line-flags-file-name',
help='Name of the command-line flags file')
self._RegisterExtraArgs(group)
def ProcessArgs(self, args):
devices = device_utils.DeviceUtils.HealthyDevices(
device_arg=args.devices,
enable_device_files_cache=bool(args.output_directory),
default_retries=0)
self.args = args
self.devices = devices
# TODO(agrieve): Device cache should not depend on output directory.
    # Maybe put it in /tmp?
_LoadDeviceCaches(devices, args.output_directory)
# Ensure these keys always exist. They are set by wrapper scripts, but not
# always added when not using wrapper scripts.
args.__dict__.setdefault('apk_path', None)
args.__dict__.setdefault('incremental_json', None)
try:
if len(devices) > 1:
if self.calls_exec:
self._parser.error(device_errors.MultipleDevicesError(devices))
if not args.all and not args.devices:
self._parser.error(_GenerateMissingAllFlagMessage(devices))
if self.supports_incremental:
if args.incremental and args.non_incremental:
self._parser.error('Must use only one of --incremental and '
'--non-incremental')
elif args.non_incremental:
if not args.apk_path:
self._parser.error('Apk has not been built.')
args.incremental_json = None
elif args.incremental:
if not args.incremental_json:
self._parser.error('Incremental apk has not been built.')
args.apk_path = None
if args.apk_path and args.incremental_json:
self._parser.error('Both incremental and non-incremental apks exist. '
'Select using --incremental or --non-incremental')
if self.needs_apk_path or args.apk_path or args.incremental_json:
if args.incremental_json:
with open(args.incremental_json) as f:
install_dict = json.load(f)
apk_path = os.path.join(args.output_directory,
install_dict['apk_path'])
if os.path.exists(apk_path):
self.install_dict = install_dict
self.apk_helper = apk_helper.ToHelper(
os.path.join(args.output_directory,
self.install_dict['apk_path']))
if not self.apk_helper and args.apk_path:
self.apk_helper = apk_helper.ToHelper(args.apk_path)
if not self.apk_helper:
self._parser.error(
'Neither incremental nor non-incremental apk is built.')
if self.needs_package_name and not args.package_name:
if self.apk_helper:
args.package_name = self.apk_helper.GetPackageName()
elif self._from_wrapper_script:
self._parser.error(
'Neither incremental nor non-incremental apk is built.')
else:
self._parser.error('One of --package-name or --apk-path is required.')
# Save cache now if command will not get a chance to afterwards.
if self.calls_exec:
_SaveDeviceCaches(devices, args.output_directory)
except:
_SaveDeviceCaches(devices, args.output_directory)
raise
class _DevicesCommand(_Command):
name = 'devices'
description = 'Describe attached devices.'
all_devices_by_default = True
def Run(self):
print _GenerateAvailableDevicesMessage(self.devices)
class _InstallCommand(_Command):
name = 'install'
description = 'Installs the APK to one or more devices.'
needs_apk_path = True
supports_incremental = True
def Run(self):
_InstallApk(self.devices, self.apk_helper, self.install_dict)
class _UninstallCommand(_Command):
name = 'uninstall'
  description = 'Removes the APK from one or more devices.'
needs_package_name = True
def Run(self):
_UninstallApk(self.devices, self.install_dict, self.args.package_name)
class _LaunchCommand(_Command):
name = 'launch'
description = ('Sends a launch intent for the APK after first writing the '
'command-line flags file.')
# TODO(agrieve): Launch could be changed to require only package name by
# parsing "dumpsys package" for launch & view activities.
needs_apk_path = True
accepts_command_line_flags = True
accepts_url = True
all_devices_by_default = True
def Run(self):
_LaunchUrl(self.devices, self.args.args, self.args.command_line_flags_file,
self.args.url, self.apk_helper)
class _RunCommand(_Command):
name = 'run'
description = 'Install and then launch.'
needs_apk_path = True
supports_incremental = True
needs_package_name = True
accepts_command_line_flags = True
accepts_url = True
def Run(self):
logging.warning('Installing...')
_InstallApk(self.devices, self.apk_helper, self.install_dict)
logging.warning('Sending launch intent...')
_LaunchUrl(self.devices, self.args.args, self.args.command_line_flags_file,
self.args.url, self.apk_helper)
class _StopCommand(_Command):
name = 'stop'
description = 'Force-stops the app.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
device_utils.DeviceUtils.parallel(self.devices).ForceStop(
self.args.package_name)
class _ClearDataCommand(_Command):
name = 'clear-data'
  description = 'Clears all app data.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
device_utils.DeviceUtils.parallel(self.devices).ClearApplicationState(
self.args.package_name)
class _ArgvCommand(_Command):
name = 'argv'
description = 'Display and optionally update command-line flags file.'
needs_package_name = True
accepts_command_line_flags = True
all_devices_by_default = True
def Run(self):
_ChangeFlags(self.devices, self.args.args,
self.args.command_line_flags_file)
class _GdbCommand(_Command):
name = 'gdb'
description = 'Runs //build/android/adb_gdb with apk-specific args.'
needs_package_name = True
needs_output_directory = True
accepts_args = True
calls_exec = True
def Run(self):
extra_args = shlex.split(self.args.args or '')
_RunGdb(self.devices[0], self.args.package_name, self.args.output_directory,
self.args.target_cpu, extra_args, bool(self.args.verbose_count))
class _LogcatCommand(_Command):
name = 'logcat'
description = 'Runs "adb logcat" filtering to just the current APK processes'
needs_package_name = True
calls_exec = True
def Run(self):
mapping = self.args.proguard_mapping_path
if self.args.no_deobfuscate:
mapping = None
_RunLogcat(self.devices[0], self.args.package_name,
bool(self.args.verbose_count), mapping)
def _RegisterExtraArgs(self, group):
if self._from_wrapper_script:
group.add_argument('--no-deobfuscate', action='store_true',
help='Disables ProGuard deobfuscation of logcat.')
else:
group.set_defaults(no_deobfuscate=False)
group.add_argument('--proguard-mapping-path',
help='Path to ProGuard map (enables deobfuscation)')
class _PsCommand(_Command):
name = 'ps'
description = 'Show PIDs of any APK processes currently running.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunPs(self.devices, self.args.package_name)
class _DiskUsageCommand(_Command):
name = 'disk-usage'
description = 'Show how much device storage is being consumed by the app.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunDiskUsage(self.devices, self.args.package_name,
bool(self.args.verbose_count))
class _MemUsageCommand(_Command):
name = 'mem-usage'
description = 'Show memory usage of currently running APK processes.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunMemUsage(self.devices, self.args.package_name)
class _ShellCommand(_Command):
name = 'shell'
description = ('Same as "adb shell <command>", but runs as the apk\'s uid '
'(via run-as). Useful for inspecting the app\'s data '
'directory.')
needs_package_name = True
@property
def calls_exec(self):
return not self.args.cmd
def _RegisterExtraArgs(self, group):
group.add_argument(
'cmd', nargs=argparse.REMAINDER, help='Command to run.')
def Run(self):
_RunShell(self.devices, self.args.package_name, self.args.cmd)
class _CompileDexCommand(_Command):
name = 'compile-dex'
description = ('Applicable only for Android N+. Forces .odex files to be '
'compiled with the given compilation filter. To see existing '
'filter, use "disk-usage" command.')
needs_package_name = True
all_devices_by_default = True
def _RegisterExtraArgs(self, group):
group.add_argument(
'compilation_filter',
choices=['verify', 'quicken', 'space-profile', 'space',
'speed-profile', 'speed'],
help='For WebView/Monochrome, use "speed". For other apks, use '
'"speed-profile".')
def Run(self):
_RunCompileDex(self.devices, self.args.package_name,
self.args.compilation_filter)
_COMMANDS = [
_DevicesCommand,
_InstallCommand,
_UninstallCommand,
_LaunchCommand,
_RunCommand,
_StopCommand,
_ClearDataCommand,
_ArgvCommand,
_GdbCommand,
_LogcatCommand,
_PsCommand,
_DiskUsageCommand,
_MemUsageCommand,
_ShellCommand,
_CompileDexCommand,
]
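# Each command class above is instantiated once and registered as an argparse
# sub-command by _ParseArgs() below.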
def _ParseArgs(parser, from_wrapper_script):
subparsers = parser.add_subparsers()
commands = [clazz(from_wrapper_script) for clazz in _COMMANDS]
for command in commands:
if from_wrapper_script or not command.needs_output_directory:
command.RegisterArgs(subparsers)
# Show extended help when no command is passed.
argv = sys.argv[1:]
if not argv:
argv = ['--help']
return parser.parse_args(argv)
def _RunInternal(parser, output_directory=None):
colorama.init()
parser.set_defaults(output_directory=output_directory)
from_wrapper_script = bool(output_directory)
args = _ParseArgs(parser, from_wrapper_script)
run_tests_helper.SetLogLevel(args.verbose_count)
args.command.ProcessArgs(args)
args.command.Run()
# Incremental install depends on the cache being cleared when uninstalling.
if args.command.name != 'uninstall':
_SaveDeviceCaches(args.command.devices, output_directory)
# TODO(agrieve): Remove =None from target_cpu on or after October 2017.
# It exists only so that stale wrapper scripts continue to work.
def Run(output_directory, apk_path, incremental_json, command_line_flags_file,
target_cpu, proguard_mapping_path):
"""Entry point for generated wrapper scripts."""
constants.SetOutputDirectory(output_directory)
devil_chromium.Initialize(output_directory=output_directory)
parser = argparse.ArgumentParser()
exists_or_none = lambda p: p if p and os.path.exists(p) else None
parser.set_defaults(
command_line_flags_file=command_line_flags_file,
target_cpu=target_cpu,
apk_path=exists_or_none(apk_path),
incremental_json=exists_or_none(incremental_json),
proguard_mapping_path=proguard_mapping_path)
_RunInternal(parser, output_directory=output_directory)
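# Illustrative only (not from this file): a generated wrapper script would call
# Run() with build-specific values, roughly like:
#
#   apk_operations.Run(
#       output_directory='out/Default',                 # hypothetical path
#       apk_path='out/Default/apks/Example.apk',        # hypothetical path
#       incremental_json=None,
#       command_line_flags_file='example-command-line', # hypothetical name
#       target_cpu='arm64',
#       proguard_mapping_path=None)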
def main():
devil_chromium.Initialize()
_RunInternal(argparse.ArgumentParser(), output_directory=None)
if __name__ == '__main__':
main()
| chrisdickinson/nojs | build/android/apk_operations.py | Python | bsd-3-clause | 34,076 |
import numpy as np
from numpy.testing import (assert_equal, assert_array_almost_equal,
assert_raises)
from skimage.transform._geometric import _stackcopy
from skimage.transform._geometric import GeometricTransform
from skimage.transform import (estimate_transform, matrix_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform,
PiecewiseAffineTransform)
SRC = np.array([
[-12.3705, -10.5075],
[-10.7865, 15.4305],
[8.6985, 10.8675],
[11.4975, -9.5715],
[7.8435, 7.4835],
[-5.3325, 6.5025],
[6.7905, -6.3765],
[-6.1695, -0.8235],
])
DST = np.array([
[0, 0],
[0, 5800],
[4900, 5800],
[4900, 0],
[4479, 4580],
[1176, 3660],
[3754, 790],
[1024, 1931],
])
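# SRC and DST are eight corresponding 2D points used as shared fixtures by the
# estimation tests below.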
def test_stackcopy():
layers = 4
x = np.empty((3, 3, layers))
y = np.eye(3, 3)
_stackcopy(x, y)
for i in range(layers):
assert_array_almost_equal(x[..., i], y)
def test_estimate_transform():
for tform in ('similarity', 'affine', 'projective', 'polynomial'):
estimate_transform(tform, SRC[:2, :], DST[:2, :])
assert_raises(ValueError, estimate_transform, 'foobar',
SRC[:2, :], DST[:2, :])
def test_matrix_transform():
tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
assert_equal(tform(SRC), matrix_transform(SRC, tform._matrix))
def test_similarity_estimation():
# exact solution
tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
assert_array_almost_equal(tform(SRC[:2, :]), DST[:2, :])
assert_equal(tform._matrix[0, 0], tform._matrix[1, 1])
assert_equal(tform._matrix[0, 1], - tform._matrix[1, 0])
# over-determined
tform2 = estimate_transform('similarity', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
assert_equal(tform2._matrix[0, 0], tform2._matrix[1, 1])
assert_equal(tform2._matrix[0, 1], - tform2._matrix[1, 0])
# via estimate method
tform3 = SimilarityTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_similarity_init():
# init with implicit parameters
scale = 0.1
rotation = 1
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = SimilarityTransform(tform._matrix)
assert_array_almost_equal(tform2.scale, scale)
assert_array_almost_equal(tform2.rotation, rotation)
assert_array_almost_equal(tform2.translation, translation)
# test special case for scale if rotation=0
scale = 0.1
rotation = 0
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.translation, translation)
def test_affine_estimation():
# exact solution
tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
assert_array_almost_equal(tform(SRC[:3, :]), DST[:3, :])
# over-determined
tform2 = estimate_transform('affine', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = AffineTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_affine_init():
# init with implicit parameters
scale = (0.1, 0.13)
rotation = 1
shear = 0.1
translation = (1, 1)
tform = AffineTransform(scale=scale, rotation=rotation, shear=shear,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.shear, shear)
assert_array_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = AffineTransform(tform._matrix)
assert_array_almost_equal(tform2.scale, scale)
assert_array_almost_equal(tform2.rotation, rotation)
assert_array_almost_equal(tform2.shear, shear)
assert_array_almost_equal(tform2.translation, translation)
def test_piecewise_affine():
tform = PiecewiseAffineTransform()
tform.estimate(SRC, DST)
# make sure each single affine transform is exactly estimated
assert_array_almost_equal(tform(SRC), DST)
assert_array_almost_equal(tform.inverse(DST), SRC)
def test_projective_estimation():
# exact solution
tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
assert_array_almost_equal(tform(SRC[:4, :]), DST[:4, :])
# over-determined
tform2 = estimate_transform('projective', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = ProjectiveTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_projective_init():
tform = estimate_transform('projective', SRC, DST)
# init with transformation matrix
tform2 = ProjectiveTransform(tform._matrix)
assert_array_almost_equal(tform2._matrix, tform._matrix)
def test_polynomial_estimation():
# over-determined
tform = estimate_transform('polynomial', SRC, DST, order=10)
assert_array_almost_equal(tform(SRC), DST, 6)
# via estimate method
tform2 = PolynomialTransform()
tform2.estimate(SRC, DST, order=10)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_init():
tform = estimate_transform('polynomial', SRC, DST, order=10)
# init with transformation parameters
tform2 = PolynomialTransform(tform._params)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_default_order():
tform = estimate_transform('polynomial', SRC, DST)
tform2 = estimate_transform('polynomial', SRC, DST, order=2)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_inverse():
assert_raises(Exception, PolynomialTransform().inverse, 0)
def test_union():
tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_array_almost_equal(tform._matrix, tform3._matrix)
tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_array_almost_equal(tform._matrix, tform3._matrix)
assert tform.__class__ == ProjectiveTransform
def test_geometric_tform():
tform = GeometricTransform()
assert_raises(NotImplementedError, tform, 0)
assert_raises(NotImplementedError, tform.inverse, 0)
assert_raises(NotImplementedError, tform.__add__, 0)
def test_invalid_input():
assert_raises(ValueError, ProjectiveTransform, np.zeros((2, 3)))
assert_raises(ValueError, AffineTransform, np.zeros((2, 3)))
assert_raises(ValueError, SimilarityTransform, np.zeros((2, 3)))
assert_raises(ValueError, AffineTransform,
matrix=np.zeros((2, 3)), scale=1)
assert_raises(ValueError, SimilarityTransform,
matrix=np.zeros((2, 3)), scale=1)
assert_raises(ValueError, PolynomialTransform, np.zeros((3, 3)))
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
| almarklein/scikit-image | skimage/transform/tests/test_geometric.py | Python | bsd-3-clause | 7,870 |
from django.http import HttpResponse, Http404
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from django.utils.xmlutils import SimplerXMLGenerator
from models import Place, Region
from models import Locality
from models import GlobalRegion
from utils.utils import do_paging, split_list
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
import json
def place_detail(request, place_id):
"""
    Look up a ``Place`` based on its id and paginate its objects.
"""
place = get_object_or_404(Place, pk=place_id)
try:
region = Region.objects.get(name=place.region)
    except ObjectDoesNotExist:
region = None
place_objects = place.museumobject_set.filter(public=True)
objects = do_paging(request, place_objects)
return render(request, "location/place_detail.html",
{'place': place, 'objects': objects,
'region': region})
def place_json(request, encoding='utf-8', mimetype='text/plain'):
places = Locality.objects.exclude(
latitude=None).annotate(Count('museumobject')).values(
'id', 'name', 'latitude', 'longitude',
'museumobject__count')
return HttpResponse(json.dumps(list(places), indent=2))
def place_kml(request, encoding='utf-8', mimetype='text/plain'):
"""
Write out all the known places to KML
"""
# mimetype = "application/vnd.google-earth.kml+xml"
# mimetype = "text/html"
places = Locality.objects.exclude(
latitude=None).annotate(Count('museumobject'))
response = HttpResponse(mimetype=mimetype)
handler = SimplerXMLGenerator(response, encoding)
handler.startDocument()
handler.startElement(u"kml",
{u"xmlns": u"http://www.opengis.net/kml/2.2"})
handler.startElement(u"Document", {})
for place in places:
place_url = request.build_absolute_uri(place.get_absolute_url())
handler.startElement(u"Placemark", {})
handler.addQuickElement(u"name",
"%s (%s)" % (place.name, place.museumobject__count))
handler.addQuickElement(u"description",
'<a href="%s">%s</a>' % (place_url, place.__unicode__()))
handler.startElement(u"Point", {})
handler.addQuickElement(u"coordinates", place.get_kml_coordinates())
handler.endElement(u"Point")
handler.endElement(u"Placemark")
handler.endElement(u"Document")
handler.endElement(u"kml")
return response
def place_duplicates(request):
'''
Used for finding duplicate places, by Geoname ID
'''
places = Place.objects.values(
'gn_id').order_by().annotate(
count=Count('gn_id')).filter(count__gt=1)
return render(request, "location/place_dups_list.html",
{'places': places})
def place_geoname(request, geoname_id):
places = Place.objects.filter(gn_id=geoname_id)
return render(request, "location/place_geoname.html", {'places': places})
def tree_view(request):
global_regions = GlobalRegion.objects.all()
return render(request, "location/tree_view.html",
{'global_regions': global_regions})
def find_location(model_type, id):
element_type = ContentType.objects.get(app_label='location', model=model_type)
return element_type.get_object_for_this_type(id=id)
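# Illustrative call (not from the source): find_location('globalregion', 3) would
# return the GlobalRegion with pk=3, since ContentType model names are the
# lowercased class names.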
def view_places(request):
grs = GlobalRegion.objects.exclude(icon_path="").prefetch_related('children')
d = dict((g.name, g) for g in grs)
grs = [d['Australia'], d['Pacific'], d['Asia'], d['Europe'], d['Americas'], d['Africa'],
d['Middle East']]
kml_url = request.build_absolute_uri(reverse('place_kml'))
return render(request, 'location/map.html',
{'global_regions': grs,
'kml_url': kml_url})
def view_geoloc(request, loctype, id, columns=3):
try:
geolocation = find_location(loctype, id)
except ObjectDoesNotExist:
raise Http404
items = geolocation.museumobject_set.select_related().filter(public=True
).prefetch_related('category', 'country', 'global_region'
).extra(
select={'public_images_count': 'select count(*) from mediaman_artefactrepresentation a WHERE a.artefact_id = cat_museumobject.id AND a.public'}
).order_by('-public_images_count', 'registration_number')
children = []
if hasattr(geolocation, 'children'):
children = geolocation.children.all()
objects = do_paging(request, items)
return render(request, 'location/geolocation.html',
{'geolocation': geolocation,
'objects': objects,
'num_children': len(children),
'children': split_list(children, parts=columns)})
| uq-eresearch/uqam | location/views.py | Python | bsd-3-clause | 4,790 |
from __future__ import division
from PyQt5 import QtCore, QtWidgets
from pycho.gui.widgets import GLPlotWidget
from pycho.world.navigation import DIRECTIONS
from pycho.gui.interaction import QT_KEYS, is_left, is_right, is_up, is_down
from pycho.world.helpers import box_around
import logging
xrange = range
TURN_BASED = 0
# Qt uses camelCase for naming methods,
# hence the camelCase method names used here.
class DefaultWindow(QtWidgets.QMainWindow):
def __init__(self, game,
key_press_handler=None,
mouse_click_handler=None,
mouse_release_handler=None,
tick_time=0,
width=600,
height=400,
key_press_handlers=None,
mouse_click_handlers=None,
mouse_release_handlers=None):
super(DefaultWindow, self).__init__()
self.game = game
self.widget = GLPlotWidget(100, 100, self.game)
self.widget.setGeometry(0, 0, self.widget.width, self.widget.height)
self.setCentralWidget(self.widget)
self.show()
self.paint_timer = QtCore.QBasicTimer()
self.clean_timer = QtCore.QBasicTimer()
self.tick_timer = QtCore.QBasicTimer()
self.callbacks = [self.widget.updateGL, self.game.world.clean_up, self.game.world.tick]
QtCore.QMetaObject.connectSlotsByName(self)
self.paint_timer.start(30, self)
self.clean_timer.start(40, self)
self.timers = [self.paint_timer, self.clean_timer]
self.timer_times = [30, 40]
if tick_time != TURN_BASED:
self.tick_timer.start(tick_time, self)
self.timers.append(self.tick_timer)
self.timer_times.append(tick_time)
self.resize(width, height)
if key_press_handler is None:
key_press_handler = lambda self, event: self._defaultKeyPressHandler(event)
if mouse_click_handler is None:
mouse_click_handler = lambda self, event: self._defaultMousePressHandler(event)
if mouse_release_handler is None:
mouse_release_handler = lambda *a, **kw: None
if key_press_handlers is None:
key_press_handlers = {'*' : key_press_handler}
if mouse_click_handlers is None:
mouse_click_handlers = {'*' : mouse_click_handler}
if mouse_release_handlers is None:
mouse_release_handlers = {'*' : mouse_release_handler}
self.key_press_handlers = key_press_handlers
self.mouse_click_handlers = mouse_click_handlers
self.mouse_release_handlers = mouse_release_handlers
self.is_paused = False
def timerEvent(self, event):
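        # Dispatch to the callback registered for this timer; this assumes
        # QBasicTimer ids are assigned sequentially from 1 in the order the
        # timers were started in __init__ (paint, clean, then tick).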
self.callbacks[event.timerId() - 1]()
def _defaultKeyPressHandler(self, event):
key = event.key()
logging.debug('Key {} was pressed'.format(key))
if is_left(key):
face_movement = DIRECTIONS['left']
elif is_right(key):
face_movement = DIRECTIONS['right']
elif is_up(key):
face_movement = DIRECTIONS['up']
elif is_down(key):
face_movement = DIRECTIONS['down']
elif key == QT_KEYS['Space']:
face_movement = DIRECTIONS['still']
else:
return
logging.debug('Face movement set to {}'.format(face_movement))
logging.debug('Player is facing {}'.format(self.game.player.facing))
self.game.player.facing = face_movement
self.game.world.tick()
def map_point_to_game_world(self, x, y):
i = int((x / self.widget.width) * self.game.world.width)
j = int(((self.widget.height - y) / self.widget.height) * self.game.world.height)
return (i, j)
def _current_handler(self, handlers):
level_id = self.game.world.current_level.id
if level_id not in handlers:
try:
return handlers['*']
except KeyError:
logging.error('No default handler set as *!')
return handlers[level_id]
def _defaultMousePressHandler(self, event, pointer_size=5):
x, y = self.map_point_to_game_world(event.x(), event.y())
        # Gradually grow the search box around the click point to allow
        # greater control over what is selected.
for j in xrange(pointer_size):
try:
obj = self.game.world.colliding_object(None,
box_around(x, y, j, j))
            except Exception:
break
if obj is not None:
logging.error(obj)
break
else:
logging.error("Nothing found!")
def pause(self):
for timer in self.timers:
            timer.stop()
self.is_paused = True
def unpause(self):
for timer, time in zip(self.timers, self.timer_times):
            timer.start(time)
self.is_paused = False
def keyPressEvent(self, event):
self._current_handler(self.key_press_handlers)(self, event)
def mousePressEvent(self, event):
self._current_handler(self.mouse_click_handlers)(self, event)
def mouseReleaseEvent(self, event):
self._current_handler(self.mouse_release_handlers)(self, event)
def closeEvent(self, event):
logging.debug("Dumping to text file")
self.game.world.mind_dump()
| eeue56/pycho | pycho/gui/windows.py | Python | bsd-3-clause | 5,304 |
"""This module contains an interface for using the GPy library in ELFI."""
# TODO: make own general GPRegression and kernel classes
import copy
import logging
import GPy
import numpy as np
logger = logging.getLogger(__name__)
logging.getLogger("GP").setLevel(logging.WARNING) # GPy library logger
class GPyRegression:
"""Gaussian Process regression using the GPy library.
GPy API: https://sheffieldml.github.io/GPy/
"""
def __init__(self,
parameter_names=None,
bounds=None,
optimizer="scg",
max_opt_iters=50,
gp=None,
**gp_params):
"""Initialize GPyRegression.
Parameters
----------
parameter_names : list of str, optional
Names of parameter nodes. If None, sets dimension to 1.
bounds : dict, optional
The region where to estimate the posterior for each parameter in
model.parameters.
`{'parameter_name':(lower, upper), ... }`
If not supplied, defaults to (0, 1) bounds for all dimensions.
optimizer : string, optional
Optimizer for the GP hyper parameters
Alternatives: "scg", "fmin_tnc", "simplex", "lbfgsb", "lbfgs", "sgd"
See also: paramz.Model.optimize()
max_opt_iters : int, optional
gp : GPy.model.GPRegression instance, optional
**gp_params
kernel : GPy.Kern
noise_var : float
mean_function
"""
if parameter_names is None:
input_dim = 1
elif isinstance(parameter_names, (list, tuple)):
input_dim = len(parameter_names)
else:
raise ValueError("Keyword `parameter_names` must be a list of strings")
if bounds is None:
logger.warning('Parameter bounds not specified. Using [0,1] for each parameter.')
bounds = [(0, 1)] * input_dim
elif len(bounds) != input_dim:
raise ValueError(
'Length of `bounds` ({}) does not match the length of `parameter_names` ({}).'
.format(len(bounds), input_dim))
elif isinstance(bounds, dict):
if len(bounds) == 1: # might be the case parameter_names=None
bounds = [bounds[n] for n in bounds.keys()]
else:
# turn bounds dict into a list in the same order as parameter_names
bounds = [bounds[n] for n in parameter_names]
else:
raise ValueError("Keyword `bounds` must be a dictionary "
"`{'parameter_name': (lower, upper), ... }`")
self.input_dim = input_dim
self.bounds = bounds
self.gp_params = gp_params
self.optimizer = optimizer
self.max_opt_iters = max_opt_iters
self._gp = gp
self._rbf_is_cached = False
self.is_sampling = False # set to True once in sampling phase
def __str__(self):
"""Return GPy's __str__."""
return self._gp.__str__()
def __repr__(self):
"""Return GPy's __str__."""
return self.__str__()
def predict(self, x, noiseless=False):
"""Return the GP model mean and variance at x.
Parameters
----------
x : np.array
numpy compatible (n, input_dim) array of points to evaluate
if len(x.shape) == 1 will be cast to 2D with x[None, :]
noiseless : bool
whether to include the noise variance or not to the returned variance
Returns
-------
tuple
GP (mean, var) at x where
mean : np.array
with shape (x.shape[0], 1)
var : np.array
with shape (x.shape[0], 1)
"""
# Ensure it's 2d for GPy
x = np.asanyarray(x).reshape((-1, self.input_dim))
if self._gp is None:
# TODO: return from GP mean function if given
return np.zeros((x.shape[0], 1)), \
np.ones((x.shape[0], 1))
# direct (=faster) implementation for RBF kernel
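        # With the default RBF + bias kernel this evaluates
        #   k(x, X) = var * exp(-||x - X||^2 / (2 * lengthscale^2)) + bias
        # directly and reuses the cached Woodbury vectors for the posterior
        # mean and variance, avoiding a full GPy predict call while sampling.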
if self.is_sampling and self._kernel_is_default:
if not self._rbf_is_cached:
self._cache_RBF_kernel()
r2 = np.sum(x**2., 1)[:, None] + self._rbf_x2sum - 2. * x.dot(self._gp.X.T)
kx = self._rbf_var * np.exp(r2 * self._rbf_factor) + self._rbf_bias
mu = kx.dot(self._rbf_woodbury)
var = self._rbf_var + self._rbf_bias
var -= kx.dot(self._rbf_woodbury_inv.dot(kx.T))
var += self._rbf_noisevar # likelihood
return mu, var
else:
self._rbf_is_cached = False # in case one resumes fitting the GP after sampling
if noiseless:
return self._gp.predict_noiseless(x)
else:
return self._gp.predict(x)
# TODO: find a more general solution
# cache some RBF-kernel-specific values for faster sampling
def _cache_RBF_kernel(self):
self._rbf_var = float(self._gp.kern.rbf.variance)
self._rbf_factor = -0.5 / float(self._gp.kern.rbf.lengthscale)**2
self._rbf_bias = float(self._gp.kern.bias.K(self._gp.X)[0, 0])
self._rbf_noisevar = float(self._gp.likelihood.variance[0])
self._rbf_woodbury = self._gp.posterior.woodbury_vector
self._rbf_woodbury_inv = self._gp.posterior.woodbury_inv
self._rbf_woodbury_chol = self._gp.posterior.woodbury_chol
self._rbf_x2sum = np.sum(self._gp.X**2., 1)[None, :]
self._rbf_is_cached = True
def predict_mean(self, x):
"""Return the GP model mean function at x.
Parameters
----------
x : np.array
numpy compatible (n, input_dim) array of points to evaluate
if len(x.shape) == 1 will be cast to 2D with x[None, :]
Returns
-------
np.array
with shape (x.shape[0], 1)
"""
return self.predict(x)[0]
def predictive_gradients(self, x):
"""Return the gradients of the GP model mean and variance at x.
Parameters
----------
x : np.array
numpy compatible (n, input_dim) array of points to evaluate
if len(x.shape) == 1 will be cast to 2D with x[None, :]
Returns
-------
tuple
GP (grad_mean, grad_var) at x where
grad_mean : np.array
with shape (x.shape[0], input_dim)
grad_var : np.array
with shape (x.shape[0], input_dim)
"""
# Ensure it's 2d for GPy
x = np.asanyarray(x).reshape((-1, self.input_dim))
if self._gp is None:
# TODO: return from GP mean function if given
return np.zeros((x.shape[0], self.input_dim)), \
np.zeros((x.shape[0], self.input_dim))
# direct (=faster) implementation for RBF kernel
if self.is_sampling and self._kernel_is_default:
if not self._rbf_is_cached:
self._cache_RBF_kernel()
r2 = np.sum(x**2., 1)[:, None] + self._rbf_x2sum - 2. * x.dot(self._gp.X.T)
kx = self._rbf_var * np.exp(r2 * self._rbf_factor)
dkdx = 2. * self._rbf_factor * (x - self._gp.X) * kx.T
grad_mu = dkdx.T.dot(self._rbf_woodbury).T
v = np.linalg.solve(self._rbf_woodbury_chol, kx.T + self._rbf_bias)
dvdx = np.linalg.solve(self._rbf_woodbury_chol, dkdx)
grad_var = -2. * dvdx.T.dot(v).T
else:
grad_mu, grad_var = self._gp.predictive_gradients(x)
grad_mu = grad_mu[:, :, 0] # Assume 1D output (distance in ABC)
return grad_mu, grad_var
def predictive_gradient_mean(self, x):
"""Return the gradient of the GP model mean at x.
Parameters
----------
x : np.array
numpy compatible (n, input_dim) array of points to evaluate
if len(x.shape) == 1 will be cast to 2D with x[None, :]
Returns
-------
np.array
with shape (x.shape[0], input_dim)
"""
return self.predictive_gradients(x)[0]
def _init_gp(self, x, y):
self._kernel_is_default = False
if self.gp_params.get('kernel') is None:
kernel = self._default_kernel(x, y)
if self.gp_params.get('noise_var') is None and self.gp_params.get(
'mean_function') is None:
self._kernel_is_default = True
else:
kernel = self.gp_params.get('kernel')
noise_var = self.gp_params.get('noise_var') or np.max(y)**2. / 100.
mean_function = self.gp_params.get('mean_function')
self._gp = self._make_gpy_instance(
x, y, kernel=kernel, noise_var=noise_var, mean_function=mean_function)
def _default_kernel(self, x, y):
# Some heuristics to choose kernel parameters based on the initial data
length_scale = (np.max(self.bounds) - np.min(self.bounds)) / 3.
kernel_var = (np.max(y) / 3.)**2.
bias_var = kernel_var / 4.
# Construct a default kernel
kernel = GPy.kern.RBF(input_dim=self.input_dim)
# Set the priors
kernel.lengthscale.set_prior(
GPy.priors.Gamma.from_EV(length_scale, length_scale), warning=False)
kernel.variance.set_prior(GPy.priors.Gamma.from_EV(kernel_var, kernel_var), warning=False)
# If no mean function is specified, add a bias term to the kernel
if 'mean_function' not in self.gp_params:
bias = GPy.kern.Bias(input_dim=self.input_dim)
bias.set_prior(GPy.priors.Gamma.from_EV(bias_var, bias_var), warning=False)
kernel += bias
return kernel
def _make_gpy_instance(self, x, y, kernel, noise_var, mean_function):
return GPy.models.GPRegression(
X=x, Y=y, kernel=kernel, noise_var=noise_var, mean_function=mean_function)
def update(self, x, y, optimize=False):
"""Update the GP model with new data.
Parameters
----------
x : np.array
y : np.array
optimize : bool, optional
Whether to optimize hyperparameters.
"""
# Must cast these as 2d for GPy
x = x.reshape((-1, self.input_dim))
y = y.reshape((-1, 1))
if self._gp is None:
self._init_gp(x, y)
else:
# Reconstruct with new data
x = np.r_[self._gp.X, x]
y = np.r_[self._gp.Y, y]
# It seems that GPy will do some optimization unless you make copies of everything
kernel = self._gp.kern.copy() if self._gp.kern else None
noise_var = self._gp.Gaussian_noise.variance[0]
mean_function = self._gp.mean_function.copy() if self._gp.mean_function else None
self._gp = self._make_gpy_instance(
x, y, kernel=kernel, noise_var=noise_var, mean_function=mean_function)
if optimize:
self.optimize()
def optimize(self):
"""Optimize GP hyperparameters."""
logger.debug("Optimizing GP hyperparameters")
try:
self._gp.optimize(self.optimizer, max_iters=self.max_opt_iters)
except np.linalg.linalg.LinAlgError:
logger.warning("Numerical error in GP optimization. Stopping optimization")
@property
def n_evidence(self):
"""Return the number of observed samples."""
if self._gp is None:
return 0
return self._gp.num_data
@property
def X(self):
"""Return input evidence."""
return self._gp.X
@property
def Y(self):
"""Return output evidence."""
return self._gp.Y
def copy(self):
"""Return a copy of current instance."""
kopy = copy.copy(self)
if self._gp:
kopy._gp = self._gp.copy()
if 'kernel' in self.gp_params:
kopy.gp_params['kernel'] = self.gp_params['kernel'].copy()
if 'mean_function' in self.gp_params:
kopy.gp_params['mean_function'] = self.gp_params['mean_function'].copy()
return kopy
def __copy__(self):
"""Return a copy of current instance."""
return self.copy()
| lintusj1/elfi | elfi/methods/bo/gpy_regression.py | Python | bsd-3-clause | 12,384 |
import os
WAGTAIL_ROOT = os.path.dirname(__file__)
STATIC_ROOT = os.path.join(WAGTAIL_ROOT, 'test-static')
MEDIA_ROOT = os.path.join(WAGTAIL_ROOT, 'test-media')
MEDIA_URL = '/media/'
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.environ.get('DATABASE_NAME', 'wagtail'),
'USER': os.environ.get('DATABASE_USER', None),
'PASSWORD': os.environ.get('DATABASE_PASS', None),
'HOST': os.environ.get('DATABASE_HOST', None),
'TEST': {
'NAME': os.environ.get('DATABASE_NAME', None),
}
}
}
SECRET_KEY = 'not needed'
ROOT_URLCONF = 'wagtail.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = STATIC_ROOT
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
USE_TZ = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'wagtail.tests.context_processors.do_not_use_static_url',
'wagtail.contrib.settings.context_processors.settings',
],
},
},
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {
'extensions': [
'wagtail.wagtailcore.jinja2tags.core',
'wagtail.wagtailadmin.jinja2tags.userbar',
'wagtail.wagtailimages.jinja2tags.images',
],
},
},
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
INSTALLED_APPS = (
# Install wagtailredirects with its appconfig
    # There's nothing special about wagtailredirects; we just need to have one
# app which uses AppConfigs to test that hooks load properly
'wagtail.wagtailredirects.apps.WagtailRedirectsAppConfig',
'wagtail.tests.testapp',
'wagtail.tests.demosite',
'wagtail.tests.customuser',
'wagtail.tests.snippets',
'wagtail.tests.routablepage',
'wagtail.tests.search',
'wagtail.contrib.wagtailstyleguide',
'wagtail.contrib.wagtailsitemaps',
'wagtail.contrib.wagtailroutablepage',
'wagtail.contrib.wagtailfrontendcache',
'wagtail.contrib.wagtailapi',
'wagtail.contrib.wagtailsearchpromotions',
'wagtail.contrib.settings',
'wagtail.wagtailforms',
'wagtail.wagtailsearch',
'wagtail.wagtailembeds',
'wagtail.wagtailimages',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailsnippets',
'wagtail.wagtaildocs',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'taggit',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
# Using DatabaseCache to make sure that the cache is cleared between tests.
# This prevents false-positives in some wagtail core tests where we are
# changing the 'wagtail_root_paths' key which may cause future tests to fail.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cache',
}
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher', # don't use the intentionally slow default password hasher
)
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.db',
}
}
AUTH_USER_MODEL = 'customuser.CustomUser'
if 'ELASTICSEARCH_URL' in os.environ:
WAGTAILSEARCH_BACKENDS['elasticsearch'] = {
'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch',
'URLS': [os.environ['ELASTICSEARCH_URL']],
'TIMEOUT': 10,
'max_retries': 1,
'AUTO_UPDATE': False,
}
WAGTAIL_SITE_NAME = "Test Site"
| inonit/wagtail | wagtail/tests/settings.py | Python | bsd-3-clause | 4,667 |
from datetime import (
datetime,
time,
)
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestBetweenTime:
@td.skip_if_has_locale
def test_between_time_formats(self, frame_or_series):
# GH#11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
ts = tm.get_obj(ts, frame_or_series)
strings = [
("2:00", "2:30"),
("0200", "0230"),
("2:00am", "2:30am"),
("0200am", "0230am"),
("2:00:00", "2:30:00"),
("020000", "023000"),
("2:00:00am", "2:30:00am"),
("020000am", "023000am"),
]
expected_length = 28
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_between_time(self, tzstr, frame_or_series):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
ts_local = ts.tz_localize(tzstr)
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1, t2).tz_localize(tzstr)
tm.assert_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_between_time_types(self, frame_or_series):
# GH11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
obj = DataFrame({"A": 0}, index=rng)
obj = tm.get_obj(obj, frame_or_series)
msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"
with pytest.raises(ValueError, match=msg):
obj.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
def test_between_time(self, inclusive_endpoints_fixture, frame_or_series):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
ts = tm.get_obj(ts, frame_or_series)
stime = time(0, 0)
etime = time(1, 0)
inclusive = inclusive_endpoints_fixture
filtered = ts.between_time(stime, etime, inclusive=inclusive)
exp_len = 13 * 4 + 1
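        # 00:00-01:00 sampled every 5 minutes yields 13 points on each of the
        # four full days plus the lone 00:00 sample on 1/5; excluding the left
        # endpoint drops five 00:00 samples and excluding the right drops four
        # 01:00 samples.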
if inclusive in ["right", "neither"]:
exp_len -= 5
if inclusive in ["left", "neither"]:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inclusive in ["left", "both"]:
assert t >= stime
else:
assert t > stime
if inclusive in ["right", "both"]:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
ts = tm.get_obj(ts, frame_or_series)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inclusive=inclusive)
exp_len = (12 * 11 + 1) * 4 + 1
if inclusive in ["right", "neither"]:
exp_len -= 4
if inclusive in ["left", "neither"]:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inclusive in ["left", "both"]:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inclusive in ["right", "both"]:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self, frame_or_series):
# GH#20725
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
obj = tm.get_obj(obj, frame_or_series)
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, frame_or_series):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
stime, etime = ("08:00:00", "09:00:00")
expected_length = 7
assert len(ts.between_time(stime, etime)) == expected_length
assert len(ts.between_time(stime, etime, axis=0)) == expected_length
msg = f"No axis named {ts.ndim} for object type {type(ts).__name__}"
with pytest.raises(ValueError, match=msg):
ts.between_time(stime, etime, axis=ts.ndim)
def test_between_time_axis_aliases(self, axis):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ("08:00:00", "09:00:00")
msg = "Index must be DatetimeIndex"
if axis in ["columns", 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ["index", 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
def test_between_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq="30min")
df = DataFrame(np.random.randn(len(index), 5), index=index)
bkey = slice(time(13, 0, 0), time(14, 0, 0))
binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
result = df.between_time(bkey.start, bkey.stop)
expected = df.loc[bkey]
expected2 = df.iloc[binds]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, expected2)
assert len(result) == 12
@pytest.mark.parametrize("include_start", [True, False])
@pytest.mark.parametrize("include_end", [True, False])
def test_between_time_warn(self, include_start, include_end, frame_or_series):
# GH40245
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
ts = tm.get_obj(ts, frame_or_series)
stime = time(0, 0)
etime = time(1, 0)
match = (
"`include_start` and `include_end` "
"are deprecated in favour of `inclusive`."
)
with tm.assert_produces_warning(FutureWarning, match=match):
_ = ts.between_time(stime, etime, include_start, include_end)
def test_between_time_incorr_arg_inclusive(self):
# GH40245
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inclusive = "bad_string"
msg = "Inclusive has to be either 'both', 'neither', 'left' or 'right'"
with pytest.raises(ValueError, match=msg):
ts.between_time(stime, etime, inclusive=inclusive)
@pytest.mark.parametrize(
"include_start, include_end", [(True, None), (True, True), (None, True)]
)
def test_between_time_incompatiable_args_given(self, include_start, include_end):
# GH40245
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
msg = (
"Deprecated arguments `include_start` and `include_end` cannot be "
"passed if `inclusive` has been given."
)
with pytest.raises(ValueError, match=msg):
ts.between_time(stime, etime, include_start, include_end, inclusive="left")
def test_between_time_same_functionality_old_and_new_args(self):
# GH40245
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
match = (
"`include_start` and `include_end` "
"are deprecated in favour of `inclusive`."
)
result = ts.between_time(stime, etime)
expected = ts.between_time(stime, etime, inclusive="both")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=match):
result = ts.between_time(stime, etime, include_start=False)
expected = ts.between_time(stime, etime, inclusive="right")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=match):
result = ts.between_time(stime, etime, include_end=False)
expected = ts.between_time(stime, etime, inclusive="left")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=match):
result = ts.between_time(
stime, etime, include_start=False, include_end=False
)
expected = ts.between_time(stime, etime, inclusive="neither")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=match):
result = ts.between_time(stime, etime, include_start=True, include_end=True)
expected = ts.between_time(stime, etime, inclusive="both")
tm.assert_frame_equal(result, expected)
| pandas-dev/pandas | pandas/tests/frame/methods/test_between_time.py | Python | bsd-3-clause | 10,811 |
from django.conf.urls import include, url
from django.views.generic import TemplateView
urlpatterns = [
url(r"^home/", TemplateView.as_view(template_name="no-ie.html"), name="home"),
url(r"^", include("formly.urls", namespace="formly")),
]
| eldarion/formly | formly/tests/urls.py | Python | bsd-3-clause | 249 |