repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (class, 990 values) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (class, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
v-iam/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/patch_route_filter_rule.py | 2 | 2974 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class PatchRouteFilterRule(SubResource):
"""Route Filter Rule Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param access: The access type of the rule. Valid values are: 'Allow',
'Deny'. Possible values include: 'Allow', 'Deny'
:type access: str or :class:`Access
<azure.mgmt.network.v2017_03_01.models.Access>`
:ivar route_filter_rule_type: The rule type of the rule. Valid value is:
'Community'. Default value: "Community" .
:vartype route_filter_rule_type: str
:param communities: The collection for bgp community values to filter on.
e.g. ['12076:5010','12076:5020']
:type communities: list of str
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
:param tags: Resource tags.
:type tags: dict
"""
_validation = {
'access': {'required': True},
'route_filter_rule_type': {'required': True, 'constant': True},
'communities': {'required': True},
'provisioning_state': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'access': {'key': 'properties.access', 'type': 'str'},
'route_filter_rule_type': {'key': 'properties.routeFilterRuleType', 'type': 'str'},
'communities': {'key': 'properties.communities', 'type': '[str]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
route_filter_rule_type = "Community"
def __init__(self, access, communities, id=None, tags=None):
super(PatchRouteFilterRule, self).__init__(id=id)
self.access = access
self.communities = communities
self.provisioning_state = None
self.name = None
self.etag = None
self.tags = tags
| mit | -2,235,292,810,136,055,600 | 38.653333 | 91 | 0.601547 | false |
sysown/proxysql-0.2 | docker/scenarios/repl1/test1_.py | 6 | 1806 | #!/usr/bin/env python
import MySQLdb
import os
import sys
import thread
import time
import signal
import threading
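# Shared counters for issued/successful/failed queries; each has its own lock
# because the worker threads below update them concurrently.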
qcounter = 0
qcounter_lock = threading.Lock()
qOK = 0
qOK_lock = threading.Lock()
qERR = 0
qERR_lock = threading.Lock()
# config
mysqluser = os.getenv('MYSQL_USER')
mysqlpass = os.getenv('MYSQL_PASS')
def sigterm_handler(_signo, _stack_frame):
# Raises SystemExit(0):
sys.exit(0)
def print_exception(e):
line_number = sys.exc_info()[2].tb_lineno
print "Line: " + str(line_number)
print e
def thread_client_conn():
conn = None
try:
conn = MySQLdb.connect(host="127.0.0.1", port=6033, user=mysqluser, passwd=mysqlpass)
#conn = MySQLdb.connect(host="172.17.0.139", port=6033, user=mysqluser, passwd=mysqlpass)
cur = conn.cursor()
while True:
# for x in range(0, 100):
global qcounter
global qOK
global qERR
qcounter_lock.acquire()
qcounter += 1
x = qcounter
qcounter_lock.release()
time.sleep(1)
query = "SELECT " + str(x)
try:
cur.execute(query)
res = cur.fetchone()
# print res[0]
qOK_lock.acquire()
qOK += 1
qOK_lock.release()
except Exception, e:
qERR_lock.acquire()
qERR += 1
qERR_lock.release()
#print "Query failed"
except Exception, e:
print "Failed to connect"
print_exception(e)
finally:
if conn:
conn.close()
def main():
signal.signal(signal.SIGTERM, sigterm_handler)
if not mysqluser or not mysqlpass:
sys.exit("environment incorrectly configured; aborting!")
try:
threads = [threading.Thread(target=thread_client_conn) for t in range(10)]
for t in threads:
t.setDaemon(True)
t.start()
# for t in threads:
# t.join()
while True:
time.sleep(1)
finally:
print "Queries result. OK: " , qOK , " ERR: " , qERR
if __name__ == '__main__':
main()
| gpl-3.0 | 5,936,771,669,012,207,000 | 19.292135 | 91 | 0.661683 | false |
redhat-cip/hardware | hardware/diskinfo.py | 2 | 5601 | # Copyright (C) 2013-2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import sys
from hardware import detect_utils
from hardware import smart_utils
def sizeingb(size):
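    # Sector counts from /sys/block are in 512-byte units; convert to decimal gigabytes.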
return int((size * 512) / (1000 * 1000 * 1000))
def disksize(name):
size = open('/sys/block/' + name + '/size').read(-1)
return sizeingb(int(size))
def disknames():
names = []
for name in os.listdir('/sys/block'):
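        # Keep classic disk devices (sd*, hd*, vd*) plus NVMe namespaces (nvme*).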
if (name[1] == 'd' and name[0] in 'shv') or name.startswith('nvme'):
names.append(name)
return names
def get_disk_info(name, sizes, hw_lst):
hw_lst.append(('disk', name, 'size', str(sizes[name])))
info_list = ['device/vendor', 'device/model', 'device/rev',
'queue/optimal_io_size', 'queue/physical_block_size',
'queue/rotational', 'queue/nr_requests']
for info in info_list:
disk_sys_info = '/sys/block/%s/%s' % (name, info)
# revision can be explicitly named
if not os.path.exists(disk_sys_info) and info == 'device/rev':
info = 'device/revision'
# for nvme devices we can have a nested device dir
if not os.path.exists(disk_sys_info):
disk_sys_info = '/sys/block/%s/device/%s' % (name, info)
try:
with open(disk_sys_info, 'r') as dev:
hw_lst.append(('disk', name, info.split('/')[1],
dev.readline().rstrip('\n').strip()))
except Exception as exc:
sys.stderr.write(
'Failed retrieving disk information %s for %s: %s\n' % (
info, name, str(exc)))
try:
with open('/sys/block/%s/queue/scheduler' % name, 'r') as dev:
s_line = dev.readline().rstrip('\n').strip()
sched = re.findall(r'\[(.*?)\]', s_line)
if sched:
hw_lst.append(('disk', name, 'scheduler', sched[0]))
except Exception as exc:
sys.stderr.write('Cannot extract scheduler for disk %s: %s' % (
name, exc))
def get_disk_cache(name, hw_lst):
# WCE & RCD from sysfs
# https://www.kernel.org/doc/Documentation/scsi/sd-parameters.txt
device_path = '/sys/block/{0}/device'.format(name)
try:
_link_info = os.readlink(device_path)
_scsi_addr = _link_info.rsplit('/', 1)[1]
device_path = (device_path + '/scsi_disk/{0}/cache_type').format(
_scsi_addr)
with open(device_path, 'r') as cache_info:
my_text = cache_info.readline().rstrip('\n').strip()
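            # Defaults correspond to 'write back': write cache enabled (WCE=1), read cache enabled (RCD=0).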
_wce = '1'
_rcd = '0'
if my_text == 'write through':
_wce = '0'
elif my_text == 'none':
_wce = '0'
_rcd = '1'
elif 'daft' in my_text:
_rcd = '1'
hw_lst.append(('disk', name, 'Write Cache Enable', _wce))
hw_lst.append(('disk', name, 'Read Cache Disable', _rcd))
except Exception as exc:
sys.stderr.write(
'Failed at getting disk information at %s: %s\n' %
(device_path, str(exc)))
def get_disk_id(name, hw_lst):
# In some VMs, the disk-by id doesn't exists
if os.path.exists('/dev/disk/by-id/'):
for entry in os.listdir('/dev/disk/by-id/'):
idp = os.path.realpath('/dev/disk/by-id/' + entry).split('/')
if idp[-1] == name:
id_name = "id"
if entry.startswith('wwn'):
id_name = "wwn-id"
elif entry.startswith('scsi'):
id_name = "scsi-id"
hw_lst.append(('disk', name, id_name, entry))
def parse_hdparm_output(output):
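    # 'hdparm -t' typically ends its report with a line such as
    # " Timing buffered disk reads: 616 MB in  3.00 seconds = 205.03 MB/sec";
    # split on ' = ' and take the number just before the unit.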
res = output.split(' = ')
if len(res) != 2:
return 0.0
try:
mbsec = res[1].split(' ')[-2]
return float(mbsec)
except (ValueError, KeyError):
return 0.0
def diskperfs(names):
return dict((name, parse_hdparm_output(
detect_utils.cmd('hdparm -t /dev/%s' % name))) for name in names)
def disksizes(names):
return dict((name, disksize(name)) for name in names)
def detect():
"""Detect disks."""
hw_lst = []
names = disknames()
sizes = disksizes(names)
disks = [name for name, size in sizes.items() if size > 0]
hw_lst.append(('disk', 'logical', 'count', str(len(disks))))
for name in disks:
get_disk_info(name, sizes, hw_lst)
# nvme devices do not need standard cache mechanisms
if not name.startswith('nvme'):
get_disk_cache(name, hw_lst)
get_disk_id(name, hw_lst)
# smartctl support
# run only if smartctl command is there
if detect_utils.which("smartctl"):
if name.startswith('nvme'):
sys.stderr.write('Reading SMART for nvme\n')
smart_utils.read_smart_nvme(hw_lst, name)
else:
smart_utils.read_smart(hw_lst, "/dev/%s" % name)
else:
sys.stderr.write("Cannot find smartctl, exiting\n")
return hw_lst
| apache-2.0 | -6,860,660,782,328,157,000 | 33.361963 | 76 | 0.560436 | false |
eonpatapon/neutron | neutron/plugins/embrane/common/constants.py | 47 | 2749 | # Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heleosapi import exceptions as h_exc
from neutron.plugins.common import constants
# Router specific constants
UTIF_LIMIT = 7
QUEUE_TIMEOUT = 300
class Status(object):
# Transient
CREATING = constants.PENDING_CREATE
UPDATING = constants.PENDING_UPDATE
DELETING = constants.PENDING_DELETE
# Final
ACTIVE = constants.ACTIVE
ERROR = constants.ERROR
READY = constants.INACTIVE
DELETED = "DELETED" # not visible
class Events(object):
CREATE_ROUTER = "create_router"
UPDATE_ROUTER = "update_router"
DELETE_ROUTER = "delete_router"
GROW_ROUTER_IF = "grow_router_if"
SHRINK_ROUTER_IF = "shrink_router_if"
SET_NAT_RULE = "set_nat_rule"
RESET_NAT_RULE = "reset_nat_rule"
_DVA_PENDING_ERROR_MSG = _("Dva is pending for the following reason: %s")
_DVA_NOT_FOUNT_ERROR_MSG = _("Dva can't be found to execute the operation, "
"probably was cancelled through the heleos UI")
_DVA_BROKEN_ERROR_MSG = _("Dva seems to be broken for reason %s")
_DVA_BROKEN_INTERFACE_ERROR_MSG = _("Dva interface seems to be broken "
"for reason %s")
_DVA_CREATION_FAILED_ERROR_MSG = _("Dva creation failed reason %s")
_DVA_CREATION_PENDING_ERROR_MSG = _("Dva creation is in pending state "
"for reason %s")
_CFG_FAILED_ERROR_MSG = _("Dva configuration failed for reason %s")
_DVA_DEL_FAILED_ERROR_MSG = _("Failed to delete the backend "
"router for reason %s. Please remove "
"it manually through the heleos UI")
error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG,
h_exc.DvaNotFound: _DVA_NOT_FOUNT_ERROR_MSG,
h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG,
h_exc.BrokenInterface: _DVA_BROKEN_INTERFACE_ERROR_MSG,
h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG,
h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG,
h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG,
h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG}
| apache-2.0 | -8,579,062,419,240,772,000 | 39.426471 | 78 | 0.656602 | false |
binghongcha08/pyQMD | GWP/2D/1.1.0/harm2.py | 5 | 1458 | ##!/usr/bin/python
import numpy as np
import pylab as plt
import subprocess
import seaborn as sns
#sns.set_context('poster')
plt.subplot(1,1,1)
t,dat0,dat1 = np.genfromtxt(fname='cor2.dat',usecols=(0,1,2),unpack=True)
#for x in range(1,data.shape[-1]):
#plt.plot(data[:,0],data[:,1],lw=2,label='Re')
#plt.plot(data[:,0],data[:,2],lw=2,label='Im')
#plt.plot(data[:,0],data[:,3],lw=2,label='$|C(t)|$')
#dat = np.genfromtxt(fname='../spo/1.0.2/corr')
#plt.plot(dat[:,0],dat[:,1],'--',label='Re, QM')
#plt.plot(dat[:,0],dat[:,2],'--',label='Im, QM')
#x = np.linspace(0,4,100)
#y = -np.sin(x)
#plt.plot(x,y,lw=2,label='sin(x)')
#plt.xlabel('$Time$')
#plt.ylabel('$C(t)$')
#dat0 = ' '.join(map(str, corr.tolist()))
#dat1 = ' '.join(map(str, cori.tolist()))
dat = ''
str1 = ''
str2 = ''
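# Build the data string piped into harminv below: each complex correlation
# sample is written as 're+imi', separated by spaces.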
for i in range(0,len(dat0)):
dat = dat + str(dat0[i])+'+'+str(dat1[i])+'i '
# dat = dat+str(dat0[i])+' '
#plt.subplot(2,1,2)
#dat = np.genfromtxt(fname='/home/bing/gwp/spo_2d/1.0.0/cor1')
f = open('harm_cor2.dat', 'w')
f.write(str(dat))
dt = t[1]-t[0]
#cmd = 'harminv -t'+str(dt)+'0-3 < harm_cor.dat'
print('harminv -t 0.004 0-1 < harm_cor2.dat')
#subprocess.Popen("harminv -t 0.004 0-1 < harm_cor.dat",shell=True)
#plt.plot(dat[:,0],dat[:,2],'--',label='$\Im(C(t))$',lw=2)
#z = np.sqrt(data[:,1]**2+data[:,2]**2)
#plt.plot(data[:,0],z,label='$|C(t)|$',lw=1)
#plt.ylim(-0.2,0.2)
#plt.legend()
#plt.xlim(0,36)
#plt.savefig('cor.pdf')
#plt.show()
| gpl-3.0 | 7,410,507,648,625,719,000 | 26 | 74 | 0.579561 | false |
artwr/airflow | tests/sensors/test_s3_prefix_sensor.py | 7 | 1532 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
import unittest
from airflow.sensors.s3_prefix_sensor import S3PrefixSensor
class S3PrefixSensorTests(unittest.TestCase):
@mock.patch('airflow.hooks.S3_hook.S3Hook')
def test_poke(self, mock_hook):
s = S3PrefixSensor(
task_id='s3_prefix',
bucket_name='bucket',
prefix='prefix')
mock_hook.return_value.check_for_prefix.return_value = False
self.assertFalse(s.poke(None))
mock_hook.return_value.check_for_prefix.assert_called_with(
prefix='prefix',
delimiter='/',
bucket_name='bucket')
mock_hook.return_value.check_for_prefix.return_value = True
self.assertTrue(s.poke(None))
| apache-2.0 | 5,546,866,808,866,343,000 | 34.627907 | 68 | 0.70235 | false |
jayvdb/coala | tests/coalaDebugTest.py | 4 | 4372 | import os
import sys
import unittest
from unittest.mock import patch
from coalib.coala_main import run_coala
from coalib.output.printers.LogPrinter import LogPrinter
from coalib import coala
from pyprint.ConsolePrinter import ConsolePrinter
from coala_utils.ContextManagers import prepare_file
from coalib.output.Logging import configure_logging
from tests.TestUtilities import execute_coala, bear_test_module
class coalaDebugTest(unittest.TestCase):
def setUp(self):
self.old_argv = sys.argv
def tearDown(self):
sys.argv = self.old_argv
def test_coala_main_bear__init__raises(self):
with bear_test_module():
with prepare_file(['#fixme '], None) as (lines, filename):
with self.assertRaisesRegex(RuntimeError,
r'^The bear ErrorTestBear does '
r'not fulfill all requirements\. '
r"'I_do_not_exist' is not "
r'installed\.$'):
execute_coala(
coala.main, 'coala',
'-c', os.devnull,
'-f', filename,
'-b', 'ErrorTestBear',
debug=True)
def test_run_coala_bear__init__raises(self):
configure_logging()
with bear_test_module():
with prepare_file(['#fixme '], None) as (lines, filename):
with self.assertRaisesRegex(RuntimeError,
r'^The bear ErrorTestBear does '
r'not fulfill all requirements\. '
r"'I_do_not_exist' is not "
r'installed\.$'):
run_coala(
console_printer=ConsolePrinter(),
log_printer=LogPrinter(),
arg_list=(
'-c', os.devnull,
'-f', filename,
'-b', 'ErrorTestBear'
),
debug=True)
def test_coala_main_bear_run_raises(self):
with bear_test_module():
with prepare_file(['#fixme '], None) as (lines, filename):
with self.assertRaisesRegex(RuntimeError,
r"^That's all the RaiseTestBear "
r'can do\.$'):
execute_coala(
coala.main, 'coala',
'-c', os.devnull,
'-f', filename,
'-b', 'RaiseTestBear',
debug=True)
def test_run_coala_bear_run_raises(self):
configure_logging()
with bear_test_module():
with prepare_file(['#fixme '], None) as (lines, filename):
with self.assertRaisesRegex(RuntimeError,
r"^That's all the RaiseTestBear "
r'can do\.$'):
run_coala(
console_printer=ConsolePrinter(),
log_printer=LogPrinter(),
arg_list=(
'-c', os.devnull,
'-f', filename,
'-b', 'RaiseTestBear'
),
debug=True)
@patch('coalib.coala_modes.mode_json')
def test_coala_main_mode_json_raises(self, mocked_mode_json):
mocked_mode_json.side_effect = RuntimeError('Mocked mode_json fails.')
with bear_test_module():
with prepare_file(['#fixme '], None) as (lines, filename):
with self.assertRaisesRegex(RuntimeError,
r'^Mocked mode_json fails\.$'):
# additionally use RaiseTestBear to verify independency from
# failing bears
execute_coala(
coala.main, 'coala', '--json',
'-c', os.devnull,
'-f', filename,
'-b', 'RaiseTestBear',
debug=True)
| agpl-3.0 | 2,853,897,202,652,480,000 | 41.038462 | 80 | 0.442818 | false |
googlefonts/TachyFont | run_time/src/gae_server/third_party/fonttools/Lib/fontTools/ttLib/tables/_g_a_s_p.py | 11 | 1689 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import struct
GASP_SYMMETRIC_GRIDFIT = 0x0004
GASP_SYMMETRIC_SMOOTHING = 0x0008
GASP_DOGRAY = 0x0002
GASP_GRIDFIT = 0x0001
class table__g_a_s_p(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
self.version, numRanges = struct.unpack(">HH", data[:4])
assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
data = data[4:]
self.gaspRange = {}
for i in range(numRanges):
rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
data = data[4:]
assert not data, "too much data"
def compile(self, ttFont):
version = 0 # ignore self.version
numRanges = len(self.gaspRange)
data = b""
items = sorted(self.gaspRange.items())
for rangeMaxPPEM, rangeGaspBehavior in items:
data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
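			# behaviors beyond GRIDFIT/DOGRAY (the symmetric gridfit/smoothing
			# flags) only exist in 'gasp' table version 1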
if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
version = 1
data = struct.pack(">HH", version, numRanges) + data
return data
def toXML(self, writer, ttFont):
items = sorted(self.gaspRange.items())
for rangeMaxPPEM, rangeGaspBehavior in items:
writer.simpletag("gaspRange", [
("rangeMaxPPEM", rangeMaxPPEM),
("rangeGaspBehavior", rangeGaspBehavior)])
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name != "gaspRange":
return
if not hasattr(self, "gaspRange"):
self.gaspRange = {}
self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"])
| apache-2.0 | 7,639,828,070,683,123,000 | 31.480769 | 88 | 0.708703 | false |
qilicun/python | python2/PyMOTW-1.132/PyMOTW/BaseHTTPServer/BaseHTTPServer_errors.py | 1 | 1531 | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""GET handler with BaseHTTPServer illustrating error handling
"""
__version__ = "$Id$"
#end_pymotw_header
from BaseHTTPServer import BaseHTTPRequestHandler
class ErrorHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_error(404)
return
if __name__ == '__main__':
from BaseHTTPServer import HTTPServer
server = HTTPServer(('localhost', 8080), ErrorHandler)
print 'Starting server, use <Ctrl-C> to stop'
server.serve_forever()
| gpl-3.0 | 3,415,180,245,810,100,000 | 32.282609 | 70 | 0.743958 | false |
sileht/deb-openstack-quantum | quantum/tests/unit/test_flags.py | 3 | 7245 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gflags
import os
import tempfile
import unittest
from quantum.common import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('flags_unittest', 'foo', 'for testing purposes only')
class FlagsTestCase(unittest.TestCase):
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.iteritems():
setattr(FLAGS, k, v)
def reset_flags(self):
"""Resets all flag variables for the test.
Runs after each test.
"""
FLAGS.Reset()
for k, v in self._original_flags.iteritems():
setattr(FLAGS, k, v)
def setUp(self):
super(FlagsTestCase, self).setUp()
self.FLAGS = flags.FlagValues()
self.global_FLAGS = flags.FLAGS
self._original_flags = FLAGS.FlagValuesDict()
def test_define(self):
self.assert_('string' not in self.FLAGS)
self.assert_('int' not in self.FLAGS)
self.assert_('false' not in self.FLAGS)
self.assert_('true' not in self.FLAGS)
flags.DEFINE_string('string', 'default', 'desc',
flag_values=self.FLAGS)
flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS)
flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS)
flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS)
self.assert_(self.FLAGS['string'])
self.assert_(self.FLAGS['int'])
self.assert_(self.FLAGS['false'])
self.assert_(self.FLAGS['true'])
self.assertEqual(self.FLAGS.string, 'default')
self.assertEqual(self.FLAGS.int, 1)
self.assertEqual(self.FLAGS.false, False)
self.assertEqual(self.FLAGS.true, True)
argv = ['flags_test',
'--string', 'foo',
'--int', '2',
'--false',
'--notrue']
self.FLAGS(argv)
self.assertEqual(self.FLAGS.string, 'foo')
self.assertEqual(self.FLAGS.int, 2)
self.assertEqual(self.FLAGS.false, True)
self.assertEqual(self.FLAGS.true, False)
def test_define_float(self):
flags.DEFINE_float('float', 6.66, 'desc', flag_values=self.FLAGS)
self.assertEqual(self.FLAGS.float, 6.66)
def test_define_multistring(self):
flags.DEFINE_multistring('multi', [], 'desc', flag_values=self.FLAGS)
argv = ['flags_test', '--multi', 'foo', '--multi', 'bar']
self.FLAGS(argv)
self.assertEqual(self.FLAGS.multi, ['foo', 'bar'])
def test_define_list(self):
flags.DEFINE_list('list', ['foo'], 'desc', flag_values=self.FLAGS)
self.assert_(self.FLAGS['list'])
self.assertEqual(self.FLAGS.list, ['foo'])
argv = ['flags_test', '--list=a,b,c,d']
self.FLAGS(argv)
self.assertEqual(self.FLAGS.list, ['a', 'b', 'c', 'd'])
def test_error(self):
flags.DEFINE_integer('error', 1, 'desc', flag_values=self.FLAGS)
self.assertEqual(self.FLAGS.error, 1)
argv = ['flags_test', '--error=foo']
self.assertRaises(gflags.IllegalFlagValue, self.FLAGS, argv)
def test_declare(self):
self.assert_('answer' not in self.global_FLAGS)
flags.DECLARE('answer', 'quantum.tests.unit.declare_flags')
self.assert_('answer' in self.global_FLAGS)
self.assertEqual(self.global_FLAGS.answer, 42)
# Make sure we don't overwrite anything
self.global_FLAGS.answer = 256
self.assertEqual(self.global_FLAGS.answer, 256)
flags.DECLARE('answer', 'quantum.tests.unit.declare_flags')
self.assertEqual(self.global_FLAGS.answer, 256)
def test_getopt_non_interspersed_args(self):
self.assert_('runtime_answer' not in self.global_FLAGS)
argv = ['flags_test', 'extra_arg', '--runtime_answer=60']
args = self.global_FLAGS(argv)
self.assertEqual(len(args), 3)
self.assertEqual(argv, args)
def test_runtime_and_unknown_flags(self):
self.assert_('runtime_answer' not in self.global_FLAGS)
argv = ['flags_test', '--runtime_answer=60', 'extra_arg']
args = self.global_FLAGS(argv)
self.assertEqual(len(args), 2)
self.assertEqual(args[1], 'extra_arg')
self.assert_('runtime_answer' not in self.global_FLAGS)
import quantum.tests.unit.runtime_flags
self.assert_('runtime_answer' in self.global_FLAGS)
self.assertEqual(self.global_FLAGS.runtime_answer, 60)
def test_flag_overrides(self):
self.assertEqual(FLAGS.flags_unittest, 'foo')
self.flags(flags_unittest='bar')
self.assertEqual(FLAGS.flags_unittest, 'bar')
self.assertEqual(FLAGS['flags_unittest'].value, 'bar')
self.assertEqual(FLAGS.FlagValuesDict()['flags_unittest'], 'bar')
self.reset_flags()
self.assertEqual(FLAGS.flags_unittest, 'foo')
self.assertEqual(FLAGS['flags_unittest'].value, 'foo')
self.assertEqual(FLAGS.FlagValuesDict()['flags_unittest'], 'foo')
def test_flagfile(self):
flags.DEFINE_string('string', 'default', 'desc',
flag_values=self.FLAGS)
flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS)
flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS)
flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS)
(fd, path) = tempfile.mkstemp(prefix='nova', suffix='.flags')
try:
os.write(fd, '--string=foo\n--int=2\n--false\n--notrue\n')
os.close(fd)
self.FLAGS(['flags_test', '--flagfile=' + path])
self.assertEqual(self.FLAGS.string, 'foo')
self.assertEqual(self.FLAGS.int, 2)
self.assertEqual(self.FLAGS.false, True)
self.assertEqual(self.FLAGS.true, False)
finally:
os.remove(path)
def test_defaults(self):
flags.DEFINE_string('foo', 'bar', 'help', flag_values=self.FLAGS)
self.assertEqual(self.FLAGS.foo, 'bar')
self.FLAGS['foo'].SetDefault('blaa')
self.assertEqual(self.FLAGS.foo, 'blaa')
def test_templated_values(self):
flags.DEFINE_string('foo', 'foo', 'help', flag_values=self.FLAGS)
flags.DEFINE_string('bar', 'bar', 'help', flag_values=self.FLAGS)
flags.DEFINE_string('blaa', '$foo$bar', 'help', flag_values=self.FLAGS)
self.assertEqual(self.FLAGS.blaa, 'foobar')
| apache-2.0 | -629,454,031,431,070,500 | 35.77665 | 79 | 0.621394 | false |
Yukarumya/Yukarum-Redfoxes | testing/mozharness/configs/single_locale/linux.py | 2 | 5605 | import os
config = {
"platform": "linux",
"stage_product": "firefox",
"update_platform": "Linux_x86-gcc3",
"mozconfig": "%(branch)s/browser/config/mozconfigs/linux32/l10n-mozconfig",
"bootstrap_env": {
"MOZ_OBJDIR": "obj-l10n",
"EN_US_BINARY_URL": "%(en_us_binary_url)s",
"LOCALE_MERGEDIR": "%(abs_merge_dir)s/",
"MOZ_UPDATE_CHANNEL": "%(update_channel)s",
"DIST": "%(abs_objdir)s",
"LOCALE_MERGEDIR": "%(abs_merge_dir)s/",
"L10NBASEDIR": "../../l10n",
"MOZ_MAKE_COMPLETE_MAR": "1",
'TOOLTOOL_CACHE': '/builds/tooltool_cache',
'TOOLTOOL_HOME': '/builds',
'EN_US_PACKAGE_NAME': 'target.tar.bz2',
},
"ssh_key_dir": "/home/mock_mozilla/.ssh",
"log_name": "single_locale",
"objdir": "obj-l10n",
"js_src_dir": "js/src",
"vcs_share_base": "/builds/hg-shared",
# tooltool
'tooltool_url': 'https://api.pub.build.mozilla.org/tooltool/',
'tooltool_script': ["/builds/tooltool.py"],
'tooltool_bootstrap': "setup.sh",
'tooltool_manifest_src': 'browser/config/tooltool-manifests/linux32/releng.manifest',
# balrog credential file:
'balrog_credentials_file': 'oauth.txt',
# l10n
"ignore_locales": ["en-US", "ja-JP-mac"],
"l10n_dir": "l10n",
"locales_file": "%(branch)s/browser/locales/all-locales",
"locales_dir": "browser/locales",
"hg_l10n_tag": "default",
"merge_locales": True,
# MAR
"previous_mar_dir": "dist/previous",
"current_mar_dir": "dist/current",
"update_mar_dir": "dist/update", # sure?
"previous_mar_filename": "previous.mar",
"current_work_mar_dir": "current.work",
"package_base_dir": "dist/l10n-stage",
"application_ini": "application.ini",
"buildid_section": 'App',
"buildid_option": "BuildID",
"unpack_script": "tools/update-packaging/unwrap_full_update.pl",
"incremental_update_script": "tools/update-packaging/make_incremental_update.sh",
"balrog_release_pusher_script": "scripts/updates/balrog-release-pusher.py",
"update_packaging_dir": "tools/update-packaging",
"local_mar_tool_dir": "dist/host/bin",
"mar": "mar",
"mbsdiff": "mbsdiff",
"current_mar_filename": "firefox-%(version)s.%(locale)s.linux-i686.complete.mar",
"complete_mar": "firefox-%(version)s.en-US.linux-i686.complete.mar",
"localized_mar": "firefox-%(version)s.%(locale)s.linux-i686.complete.mar",
"partial_mar": "firefox-%(version)s.%(locale)s.linux-i686.partial.%(from_buildid)s-%(to_buildid)s.mar",
'installer_file': "firefox-%(version)s.en-US.linux-i686.tar.bz2",
# Mock
'mock_target': 'mozilla-centos6-x86_64',
'mock_packages': [
'autoconf213', 'python', 'mozilla-python27', 'zip', 'mozilla-python27-mercurial',
'git', 'ccache', 'perl-Test-Simple', 'perl-Config-General',
'yasm', 'wget',
'mpfr', # required for system compiler
'xorg-x11-font*', # fonts required for PGO
'imake', # required for makedepend!?!
### <-- from releng repo
'gcc45_0moz3', 'gcc454_0moz1', 'gcc472_0moz1', 'gcc473_0moz1',
'yasm', 'ccache',
###
'valgrind',
######## 32 bit specific ###########
'glibc-static.i686', 'libstdc++-static.i686',
'gtk2-devel.i686', 'libnotify-devel.i686',
'alsa-lib-devel.i686', 'libcurl-devel.i686',
'wireless-tools-devel.i686', 'libX11-devel.i686',
'libXt-devel.i686', 'mesa-libGL-devel.i686',
'gnome-vfs2-devel.i686', 'GConf2-devel.i686',
'pulseaudio-libs-devel.i686',
'gstreamer-devel.i686', 'gstreamer-plugins-base-devel.i686',
# Packages already installed in the mock environment, as x86_64
# packages.
'glibc-devel.i686', 'libgcc.i686', 'libstdc++-devel.i686',
# yum likes to install .x86_64 -devel packages that satisfy .i686
# -devel packages dependencies. So manually install the dependencies
# of the above packages.
'ORBit2-devel.i686', 'atk-devel.i686', 'cairo-devel.i686',
'check-devel.i686', 'dbus-devel.i686', 'dbus-glib-devel.i686',
'fontconfig-devel.i686', 'glib2-devel.i686',
'hal-devel.i686', 'libICE-devel.i686', 'libIDL-devel.i686',
'libSM-devel.i686', 'libXau-devel.i686', 'libXcomposite-devel.i686',
'libXcursor-devel.i686', 'libXdamage-devel.i686',
'libXdmcp-devel.i686', 'libXext-devel.i686', 'libXfixes-devel.i686',
'libXft-devel.i686', 'libXi-devel.i686', 'libXinerama-devel.i686',
'libXrandr-devel.i686', 'libXrender-devel.i686',
'libXxf86vm-devel.i686', 'libdrm-devel.i686', 'libidn-devel.i686',
'libpng-devel.i686', 'libxcb-devel.i686', 'libxml2-devel.i686',
'pango-devel.i686', 'perl-devel.i686', 'pixman-devel.i686',
'zlib-devel.i686',
# Freetype packages need to be installed be version, because a newer
# version is available, but we don't want it for Firefox builds.
'freetype-2.3.11-6.el6_1.8.i686',
'freetype-devel-2.3.11-6.el6_1.8.i686',
'freetype-2.3.11-6.el6_1.8.x86_64',
######## 32 bit specific ###########
],
'mock_files': [
('/home/cltbld/.ssh', '/home/mock_mozilla/.ssh'),
('/home/cltbld/.hgrc', '/builds/.hgrc'),
('/home/cltbld/.boto', '/builds/.boto'),
('/builds/gapi.data', '/builds/gapi.data'),
('/builds/relengapi.tok', '/builds/relengapi.tok'),
('/tools/tooltool.py', '/builds/tooltool.py'),
('/usr/local/lib/hgext', '/usr/local/lib/hgext'),
],
}
| mpl-2.0 | -5,740,142,525,272,263,000 | 44.201613 | 107 | 0.603568 | false |
jeremiahyan/odoo | addons/website_event_track/controllers/event_track.py | 1 | 25617 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from ast import literal_eval
from datetime import timedelta
from pytz import timezone, utc
from werkzeug.exceptions import Forbidden, NotFound
import babel
import babel.dates
import base64
import pytz
from odoo import exceptions, http, fields, tools, _
from odoo.http import request
from odoo.osv import expression
from odoo.tools import is_html_empty, plaintext2html
from odoo.tools.misc import babel_locale_parse
class EventTrackController(http.Controller):
def _get_event_tracks_base_domain(self, event):
""" Base domain for displaying tracks. Restrict to accepted or published
tracks for people not managing events. Unpublished tracks may be displayed
but not reachable for teasing purpose. """
search_domain_base = [
('event_id', '=', event.id),
]
if not request.env.user.has_group('event.group_event_registration_desk'):
search_domain_base = expression.AND([
search_domain_base,
['|', ('is_published', '=', True), ('is_accepted', '=', True)]
])
return search_domain_base
# ------------------------------------------------------------
# TRACK LIST VIEW
# ------------------------------------------------------------
@http.route([
'''/event/<model("event.event"):event>/track''',
'''/event/<model("event.event"):event>/track/tag/<model("event.track.tag"):tag>'''
], type='http', auth="public", website=True, sitemap=False)
def event_tracks(self, event, tag=None, **searches):
""" Main route
:param event: event whose tracks are about to be displayed;
:param tag: deprecated: search for a specific tag
:param searches: frontend search dict, containing
* 'search': search string;
* 'tags': list of tag IDs for filtering;
"""
return request.render(
"website_event_track.tracks_session",
self._event_tracks_get_values(event, tag=tag, **searches)
)
def _event_tracks_get_values(self, event, tag=None, **searches):
# init and process search terms
searches.setdefault('search', '')
searches.setdefault('search_wishlist', '')
searches.setdefault('tags', '')
search_domain = self._get_event_tracks_base_domain(event)
# search on content
if searches.get('search'):
search_domain = expression.AND([
search_domain,
[('name', 'ilike', searches['search'])]
])
# search on tags
search_tags = self._get_search_tags(searches['tags'])
if not search_tags and tag: # backward compatibility
search_tags = tag
if search_tags:
# Example: You filter on age: 10-12 and activity: football.
# Doing it this way allows to only get events who are tagged "age: 10-12" AND "activity: football".
# Add another tag "age: 12-15" to the search and it would fetch the ones who are tagged:
# ("age: 10-12" OR "age: 12-15") AND "activity: football
grouped_tags = dict()
for search_tag in search_tags:
grouped_tags.setdefault(search_tag.category_id, list()).append(search_tag)
search_domain_items = [
[('tag_ids', 'in', [tag.id for tag in grouped_tags[group]])]
for group in grouped_tags
]
search_domain = expression.AND([
search_domain,
*search_domain_items
])
# fetch data to display with TZ set for both event and tracks
now_tz = utc.localize(fields.Datetime.now().replace(microsecond=0), is_dst=False).astimezone(timezone(event.date_tz))
today_tz = now_tz.date()
event = event.with_context(tz=event.date_tz or 'UTC')
tracks_sudo = event.env['event.track'].sudo().search(search_domain, order='date asc')
tag_categories = request.env['event.track.tag.category'].sudo().search([])
# filter on wishlist (as post processing due to costly search on is_reminder_on)
if searches.get('search_wishlist'):
tracks_sudo = tracks_sudo.filtered(lambda track: track.is_reminder_on)
# organize categories for display: announced, live, soon and day-based
tracks_announced = tracks_sudo.filtered(lambda track: not track.date)
tracks_wdate = tracks_sudo - tracks_announced
date_begin_tz_all = list(set(
dt.date()
for dt in self._get_dt_in_event_tz(tracks_wdate.mapped('date'), event)
))
date_begin_tz_all.sort()
tracks_sudo_live = tracks_wdate.filtered(lambda track: track.is_published and track.is_track_live)
tracks_sudo_soon = tracks_wdate.filtered(lambda track: track.is_published and not track.is_track_live and track.is_track_soon)
tracks_by_day = []
for display_date in date_begin_tz_all:
matching_tracks = tracks_wdate.filtered(lambda track: self._get_dt_in_event_tz([track.date], event)[0].date() == display_date)
tracks_by_day.append({'date': display_date, 'name': display_date, 'tracks': matching_tracks})
if tracks_announced:
tracks_announced = tracks_announced.sorted('wishlisted_by_default', reverse=True)
tracks_by_day.append({'date': False, 'name': _('Coming soon'), 'tracks': tracks_announced})
for tracks_group in tracks_by_day:
# the tracks group is folded if all tracks are done (and if it's not "today")
tracks_group['default_collapsed'] = (today_tz != tracks_group['date']) and all(
track.is_track_done and not track.is_track_live
for track in tracks_group['tracks']
)
# return rendering values
return {
# event information
'event': event,
'main_object': event,
# tracks display information
'tracks': tracks_sudo,
'tracks_by_day': tracks_by_day,
'tracks_live': tracks_sudo_live,
'tracks_soon': tracks_sudo_soon,
'today_tz': today_tz,
# search information
'searches': searches,
'search_key': searches['search'],
'search_wishlist': searches['search_wishlist'],
'search_tags': search_tags,
'tag_categories': tag_categories,
# environment
'is_html_empty': is_html_empty,
'hostname': request.httprequest.host.split(':')[0],
'is_event_user': request.env.user.has_group('event.group_event_user'),
}
# ------------------------------------------------------------
# AGENDA VIEW
# ------------------------------------------------------------
@http.route(['''/event/<model("event.event"):event>/agenda'''], type='http', auth="public", website=True, sitemap=False)
def event_agenda(self, event, tag=None, **post):
event = event.with_context(tz=event.date_tz or 'UTC')
vals = {
'event': event,
'main_object': event,
'tag': tag,
'is_event_user': request.env.user.has_group('event.group_event_user'),
}
vals.update(self._prepare_calendar_values(event))
return request.render("website_event_track.agenda_online", vals)
def _prepare_calendar_values(self, event):
"""
Override that should completely replace original method in v14.
        This method splits the day (max end time - min start time) into 15-minute time slots.
        For each time slot, we assign the tracks that start at this specific time slot, and we add the number
        of time slots that the track covers (track duration / 15 min)
The calendar will be divided into rows of 15 min, and the talks will cover the corresponding number of rows
(15 min slots).
"""
event = event.with_context(tz=event.date_tz or 'UTC')
local_tz = pytz.timezone(event.date_tz or 'UTC')
lang_code = request.env.context.get('lang')
event_track_ids = self._event_agenda_get_tracks(event)
locations = list(set(track.location_id for track in event_track_ids))
locations.sort(key=lambda x: x.id)
# First split day by day (based on start time)
time_slots_by_tracks = {track: self._split_track_by_days(track, local_tz) for track in event_track_ids}
# extract all the tracks time slots
track_time_slots = set().union(*(time_slot.keys() for time_slot in [time_slots for time_slots in time_slots_by_tracks.values()]))
# extract unique days
days = list(set(time_slot.date() for time_slot in track_time_slots))
days.sort()
# Create the dict that contains the tracks at the correct time_slots / locations coordinates
tracks_by_days = dict.fromkeys(days, 0)
time_slots_by_day = dict((day, dict(start=set(), end=set())) for day in days)
tracks_by_rounded_times = dict((time_slot, dict((location, {}) for location in locations)) for time_slot in track_time_slots)
for track, time_slots in time_slots_by_tracks.items():
start_date = fields.Datetime.from_string(track.date).replace(tzinfo=pytz.utc).astimezone(local_tz)
end_date = start_date + timedelta(hours=(track.duration or 0.25))
for time_slot, duration in time_slots.items():
tracks_by_rounded_times[time_slot][track.location_id][track] = {
'rowspan': duration, # rowspan
'start_date': self._get_locale_time(start_date, lang_code),
'end_date': self._get_locale_time(end_date, lang_code),
'occupied_cells': self._get_occupied_cells(track, duration, locations, local_tz)
}
# get all the time slots by day to determine the max duration of a day.
day = time_slot.date()
time_slots_by_day[day]['start'].add(time_slot)
time_slots_by_day[day]['end'].add(time_slot+timedelta(minutes=15*duration))
tracks_by_days[day] += 1
# split days into 15 minutes time slots
global_time_slots_by_day = dict((day, {}) for day in days)
for day, time_slots in time_slots_by_day.items():
start_time_slot = min(time_slots['start'])
end_time_slot = max(time_slots['end'])
time_slots_count = int(((end_time_slot - start_time_slot).total_seconds() / 3600) * 4)
current_time_slot = start_time_slot
for i in range(0, time_slots_count + 1):
global_time_slots_by_day[day][current_time_slot] = tracks_by_rounded_times.get(current_time_slot, {})
global_time_slots_by_day[day][current_time_slot]['formatted_time'] = self._get_locale_time(current_time_slot, lang_code)
current_time_slot = current_time_slot + timedelta(minutes=15)
# count the number of tracks by days
tracks_by_days = dict.fromkeys(days, 0)
for track in event_track_ids:
track_day = fields.Datetime.from_string(track.date).replace(tzinfo=pytz.utc).astimezone(local_tz).date()
tracks_by_days[track_day] += 1
return {
'days': days,
'tracks_by_days': tracks_by_days,
'time_slots': global_time_slots_by_day,
'locations': locations
}
def _event_agenda_get_tracks(self, event):
tracks_sudo = event.sudo().track_ids.filtered(lambda track: track.date)
if not request.env.user.has_group('event.group_event_manager'):
tracks_sudo = tracks_sudo.filtered(lambda track: track.is_published or track.stage_id.is_accepted)
return tracks_sudo
def _get_locale_time(self, dt_time, lang_code):
""" Get locale time from datetime object
:param dt_time: datetime object
:param lang_code: language code (eg. en_US)
"""
locale = babel_locale_parse(lang_code)
return babel.dates.format_time(dt_time, format='short', locale=locale)
def time_slot_rounder(self, time, rounded_minutes):
""" Rounds to nearest hour by adding a timedelta hour if minute >= rounded_minutes
E.g. : If rounded_minutes = 15 -> 09:26:00 becomes 09:30:00
09:17:00 becomes 09:15:00
"""
return (time.replace(second=0, microsecond=0, minute=0, hour=time.hour)
+ timedelta(minutes=rounded_minutes * (time.minute // rounded_minutes)))
def _split_track_by_days(self, track, local_tz):
"""
Based on the track start_date and the duration,
split the track duration into :
            start_time by day : number of time slots (15 minutes) that the track takes on that day.
E.g. : start date = 01-01-2000 10:00 PM and duration = 3 hours
return {
01-01-2000 10:00:00 PM: 8 (2 * 4),
01-02-2000 00:00:00 AM: 4 (1 * 4)
}
Also return a set of all the time slots
"""
start_date = fields.Datetime.from_string(track.date).replace(tzinfo=pytz.utc).astimezone(local_tz)
start_datetime = self.time_slot_rounder(start_date, 15)
end_datetime = self.time_slot_rounder(start_datetime + timedelta(hours=(track.duration or 0.25)), 15)
time_slots_count = int(((end_datetime - start_datetime).total_seconds() / 3600) * 4)
time_slots_by_day_start_time = {start_datetime: 0}
for i in range(0, time_slots_count):
# If the new time slot is still on the current day
next_day = (start_datetime + timedelta(days=1)).date()
if (start_datetime + timedelta(minutes=15*i)).date() <= next_day:
time_slots_by_day_start_time[start_datetime] += 1
else:
start_datetime = next_day.datetime()
time_slots_by_day_start_time[start_datetime] = 0
return time_slots_by_day_start_time
def _get_occupied_cells(self, track, rowspan, locations, local_tz):
"""
        In order to use the cells that the tracks will occupy only once, we need to reserve those cells'
        (time_slot, location) coordinates. Those coordinates will be given to the template to avoid adding
        blank cells that are already occupied by a track.
"""
occupied_cells = []
start_date = fields.Datetime.from_string(track.date).replace(tzinfo=pytz.utc).astimezone(local_tz)
start_date = self.time_slot_rounder(start_date, 15)
for i in range(0, rowspan):
time_slot = start_date + timedelta(minutes=15*i)
if track.location_id:
occupied_cells.append((time_slot, track.location_id))
# when no location, reserve all locations
else:
occupied_cells += [(time_slot, location) for location in locations if location]
return occupied_cells
# ------------------------------------------------------------
# TRACK PAGE VIEW
# ------------------------------------------------------------
@http.route('''/event/<model("event.event", "[('website_track', '=', True)]"):event>/track/<model("event.track", "[('event_id', '=', event.id)]"):track>''',
type='http', auth="public", website=True, sitemap=True)
def event_track_page(self, event, track, **options):
track = self._fetch_track(track.id, allow_is_accepted=False)
return request.render(
"website_event_track.event_track_main",
self._event_track_page_get_values(event, track.sudo(), **options)
)
def _event_track_page_get_values(self, event, track, **options):
track = track.sudo()
option_widescreen = options.get('widescreen', False)
option_widescreen = bool(option_widescreen) if option_widescreen != '0' else False
# search for tracks list
tracks_other = track._get_track_suggestions(
restrict_domain=self._get_event_tracks_base_domain(track.event_id),
limit=10
)
return {
# event information
'event': event,
'main_object': track,
'track': track,
# sidebar
'tracks_other': tracks_other,
# options
'option_widescreen': option_widescreen,
# environment
'is_html_empty': is_html_empty,
'hostname': request.httprequest.host.split(':')[0],
'is_event_user': request.env.user.has_group('event.group_event_user'),
}
@http.route("/event/track/toggle_reminder", type="json", auth="public", website=True)
def track_reminder_toggle(self, track_id, set_reminder_on):
""" Set a reminder a track for current visitor. Track visitor is created or updated
if it already exists. Exception made if un-favoriting and no track_visitor
record found (should not happen unless manually done).
:param boolean set_reminder_on:
If True, set as a favorite, otherwise un-favorite track;
If the track is a Key Track (wishlisted_by_default):
if set_reminder_on = False, blacklist the track_partner
otherwise, un-blacklist the track_partner
"""
track = self._fetch_track(track_id, allow_is_accepted=True)
force_create = set_reminder_on or track.wishlisted_by_default
event_track_partner = track._get_event_track_visitors(force_create=force_create)
visitor_sudo = event_track_partner.visitor_id
if not track.wishlisted_by_default:
if not event_track_partner or event_track_partner.is_wishlisted == set_reminder_on: # ignore if new state = old state
return {'error': 'ignored'}
event_track_partner.is_wishlisted = set_reminder_on
else:
if not event_track_partner or event_track_partner.is_blacklisted != set_reminder_on: # ignore if new state = old state
return {'error': 'ignored'}
event_track_partner.is_blacklisted = not set_reminder_on
result = {'reminderOn': set_reminder_on}
if request.httprequest.cookies.get('visitor_uuid', '') != visitor_sudo.access_token:
result['visitor_uuid'] = visitor_sudo.access_token
return result
# ------------------------------------------------------------
# TRACK PROPOSAL
# ------------------------------------------------------------
@http.route(['''/event/<model("event.event"):event>/track_proposal'''], type='http', auth="public", website=True, sitemap=False)
def event_track_proposal(self, event, **post):
return request.render("website_event_track.event_track_proposal", {'event': event, 'main_object': event})
@http.route(['''/event/<model("event.event"):event>/track_proposal/post'''], type='http', auth="public", methods=['POST'], website=True)
def event_track_proposal_post(self, event, **post):
if not event.can_access_from_current_website():
raise NotFound()
# Only accept existing tag indices. Use search instead of browse + exists:
# this prevents users to register colorless tags if not allowed to (ACL).
input_tag_indices = [int(tag_id) for tag_id in post['tags'].split(',') if tag_id]
valid_tag_indices = request.env['event.track.tag'].search([('id', 'in', input_tag_indices)]).ids
contact = request.env['res.partner']
visitor_partner = request.env['website.visitor']._get_visitor_from_request().partner_id
# Contact name is required. Therefore, empty contacts are not considered here. At least one of contact_phone
# and contact_email must be filled. Email is verified. If the post tries to create contact with no valid entry,
# raise exception. If normalized email is the same as logged partner, use its partner_id on track instead.
# This prevents contact duplication. Otherwise, create new contact with contact additional info of post.
if post.get('add_contact_information'):
valid_contact_email = tools.email_normalize(post.get('contact_email'))
# Here, the phone is not formatted. To format it, one needs a country. Based on a country, from geoip for instance.
# The problem is that one could propose a track in country A with phone number of country B. Validity is therefore
# quite tricky. We accept any format of contact_phone. Could be improved with select country phone widget.
if valid_contact_email or post.get('contact_phone'):
if visitor_partner and valid_contact_email == visitor_partner.email_normalized:
contact = visitor_partner
else:
contact = request.env['res.partner'].sudo().create({
'email': valid_contact_email,
'name': post.get('contact_name'),
'phone': post.get('contact_phone'),
})
else:
raise exceptions.ValidationError(_("Format Error : please enter a valid contact phone or contact email."))
# If the speaker email is the same as logged user's, then also uses its partner on track, same as above.
else:
valid_speaker_email = tools.email_normalize(post['partner_email'])
if visitor_partner and valid_speaker_email == visitor_partner.email_normalized:
contact = visitor_partner
track = request.env['event.track'].with_context({'mail_create_nosubscribe': True}).sudo().create({
'name': post['track_name'],
'partner_id': contact.id,
'partner_name': post['partner_name'],
'partner_email': post['partner_email'],
'partner_phone': post['partner_phone'],
'partner_function': post['partner_function'],
'contact_phone': contact.phone,
'contact_email': contact.email,
'event_id': event.id,
'tag_ids': [(6, 0, valid_tag_indices)],
'description': plaintext2html(post['description']),
'partner_biography': plaintext2html(post['partner_biography']),
'user_id': False,
'image': base64.b64encode(post['image'].read()) if post.get('image') else False,
})
if request.env.user != request.website.user_id:
track.sudo().message_subscribe(partner_ids=request.env.user.partner_id.ids)
return request.redirect('/event/%s/track_proposal/success/%s' % (event.id, track.id))
@http.route(['/event/<model("event.event"):event>/track_proposal/success/<int:track_id>'], type='http', auth="public", methods=['GET'], website=True, sitemap=False)
def event_track_proposal_success(self, event, track_id):
track = request.env['event.track'].sudo().search([
('id', '=', track_id),
('partner_id', '=', request.env['website.visitor']._get_visitor_from_request().partner_id.id),
('event_id', '=', event.id),
])
if not event.can_access_from_current_website() or not track:
raise NotFound()
return request.render("website_event_track.event_track_proposal", {'track': track, 'event': event})
# ACL : This route is necessary since rpc search_read method in js is not accessible to all users (e.g. public user).
@http.route(['''/event/track_tag/search_read'''], type='json', auth="public", website=True)
def website_event_track_fetch_tags(self, domain, fields):
return request.env['event.track.tag'].search_read(domain, fields)
# ------------------------------------------------------------
# TOOLS
# ------------------------------------------------------------
def _fetch_track(self, track_id, allow_is_accepted=False):
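        """ Fetch the track and check read access on both the track and its event.
        If the current user cannot read the track, fall back to sudo when the track
        is accepted and ``allow_is_accepted`` is set (used by the reminder toggle). """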
track = request.env['event.track'].browse(track_id).exists()
if not track:
raise NotFound()
try:
track.check_access_rights('read')
track.check_access_rule('read')
except exceptions.AccessError:
track_sudo = track.sudo()
if allow_is_accepted and track_sudo.is_accepted:
track = track_sudo
else:
raise Forbidden()
event = track.event_id
# JSON RPC have no website in requests
if hasattr(request, 'website_id') and not event.can_access_from_current_website():
raise NotFound()
try:
event.check_access_rights('read')
event.check_access_rule('read')
except exceptions.AccessError:
raise Forbidden()
return track
def _get_search_tags(self, tag_search):
# TDE FIXME: make me generic (slides, event, ...)
try:
tag_ids = literal_eval(tag_search)
except Exception:
tags = request.env['event.track.tag'].sudo()
else:
# perform a search to filter on existing / valid tags implicitly
tags = request.env['event.track.tag'].sudo().search([('id', 'in', tag_ids)])
return tags
def _get_dt_in_event_tz(self, datetimes, event):
tz_name = event.date_tz
return [
utc.localize(dt, is_dst=False).astimezone(timezone(tz_name))
for dt in datetimes
]
| gpl-3.0 | 7,416,369,309,159,150,000 | 47.98088 | 168 | 0.591482 | false |
moijes12/oh-mainline | vendor/packages/scrapy/scrapy/contrib/downloadermiddleware/stats.py | 19 | 1370 | from scrapy.exceptions import NotConfigured
from scrapy.utils.request import request_httprepr
from scrapy.utils.response import response_httprepr
from scrapy.stats import stats
from scrapy.conf import settings
class DownloaderStats(object):
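    """Downloader middleware recording request/response/exception counts,
    per-method and per-status breakdowns, and transferred byte totals in the
    global stats collector."""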
def __init__(self):
if not settings.getbool('DOWNLOADER_STATS'):
raise NotConfigured
def process_request(self, request, spider):
stats.inc_value('downloader/request_count', spider=spider)
stats.inc_value('downloader/request_method_count/%s' % request.method, spider=spider)
reqlen = len(request_httprepr(request))
stats.inc_value('downloader/request_bytes', reqlen, spider=spider)
def process_response(self, request, response, spider):
stats.inc_value('downloader/response_count', spider=spider)
stats.inc_value('downloader/response_status_count/%s' % response.status, spider=spider)
reslen = len(response_httprepr(response))
stats.inc_value('downloader/response_bytes', reslen, spider=spider)
return response
def process_exception(self, request, exception, spider):
ex_class = "%s.%s" % (exception.__class__.__module__, exception.__class__.__name__)
stats.inc_value('downloader/exception_count', spider=spider)
stats.inc_value('downloader/exception_type_count/%s' % ex_class, spider=spider)
| agpl-3.0 | 1,570,326,803,235,319,800 | 46.241379 | 95 | 0.705109 | false |
berquist/cclib | cclib/method/fragments.py | 4 | 5452 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Fragment analysis based on parsed ADF data."""
import logging
import random
import numpy
numpy.inv = numpy.linalg.inv
from cclib.method.calculationmethod import Method
class FragmentAnalysis(Method):
"""Convert a molecule's basis functions from atomic-based to fragment MO-based"""
def __init__(self, data, progress=None, loglevel=logging.INFO,
logname="FragmentAnalysis of"):
# Call the __init__ method of the superclass.
super(FragmentAnalysis, self).__init__(data, progress, loglevel, logname)
self.parsed = False
def __str__(self):
"""Return a string representation of the object."""
return "Fragment molecule basis of %s" % (self.data)
def __repr__(self):
"""Return a representation of the object."""
return 'Fragment molecular basis("%s")' % (self.data)
def calculate(self, fragments, cupdate=0.05):
nFragBasis = 0
nFragAlpha = 0
nFragBeta = 0
self.fonames = []
unrestricted = ( len(self.data.mocoeffs) == 2 )
self.logger.info("Creating attribute fonames[]")
# Collect basis info on the fragments.
for j in range(len(fragments)):
nFragBasis += fragments[j].nbasis
nFragAlpha += fragments[j].homos[0] + 1
if unrestricted and len(fragments[j].homos) == 1:
nFragBeta += fragments[j].homos[0] + 1 #assume restricted fragment
elif unrestricted and len(fragments[j].homos) == 2:
nFragBeta += fragments[j].homos[1] + 1 #assume unrestricted fragment
#assign fonames based on fragment name and MO number
for i in range(fragments[j].nbasis):
if hasattr(fragments[j],"name"):
self.fonames.append("%s_%i"%(fragments[j].name,i+1))
else:
self.fonames.append("noname%i_%i"%(j,i+1))
nBasis = self.data.nbasis
nAlpha = self.data.homos[0] + 1
if unrestricted:
nBeta = self.data.homos[1] + 1
# Check to make sure calcs have the right properties.
if nBasis != nFragBasis:
self.logger.error("Basis functions don't match")
return False
if nAlpha != nFragAlpha:
self.logger.error("Alpha electrons don't match")
return False
if unrestricted and nBeta != nFragBeta:
self.logger.error("Beta electrons don't match")
return False
if len(self.data.atomcoords) != 1:
self.logger.warning("Molecule calc appears to be an optimization")
for frag in fragments:
if len(frag.atomcoords) != 1:
msg = "One or more fragment appears to be an optimization"
self.logger.warning(msg)
break
last = 0
for frag in fragments:
size = frag.natom
if self.data.atomcoords[0][last:last+size].tolist() != \
frag.atomcoords[0].tolist():
self.logger.error("Atom coordinates aren't aligned")
return False
if self.data.atomnos[last:last+size].tolist() != \
frag.atomnos.tolist():
self.logger.error("Elements don't match")
return False
last += size
# And let's begin!
self.mocoeffs = []
self.logger.info("Creating mocoeffs in new fragment MO basis: mocoeffs[]")
for spin in range(len(self.data.mocoeffs)):
blockMatrix = numpy.zeros((nBasis,nBasis), "d")
pos = 0
# Build up block-diagonal matrix from fragment mocoeffs.
# Need to switch ordering from [mo,ao] to [ao,mo].
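# For example (sizes are illustrative): two fragments with 5 and 3 basis functions
# give an 8x8 blockMatrix whose 5x5 upper-left and 3x3 lower-right blocks hold the
# transposed fragment MO coefficients, with zeros everywhere else.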
for i in range(len(fragments)):
size = fragments[i].nbasis
if len(fragments[i].mocoeffs) == 1:
temp = numpy.transpose(fragments[i].mocoeffs[0])
blockMatrix[pos:pos+size, pos:pos+size] = temp
else:
temp = numpy.transpose(fragments[i].mocoeffs[spin])
blockMatrix[pos:pos+size, pos:pos+size] = temp
pos += size
# Invert and multiply to express the result in the fragment MO basis.
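# i.e. results = dot(inv(blockMatrix), mocoeffs[spin].T).T, the molecular MOs
# re-expressed in the fragment-MO basis.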
iBlockMatrix = numpy.inv(blockMatrix)
temp = numpy.transpose(self.data.mocoeffs[spin])
results = numpy.transpose(numpy.dot(iBlockMatrix, temp))
self.mocoeffs.append(results)
if hasattr(self.data, "aooverlaps"):
tempMatrix = numpy.dot(self.data.aooverlaps, blockMatrix)
tBlockMatrix = numpy.transpose(blockMatrix)
if spin == 0:
self.fooverlaps = numpy.dot(tBlockMatrix, tempMatrix)
self.logger.info("Creating fooverlaps: array[x,y]")
elif spin == 1:
self.fooverlaps2 = numpy.dot(tBlockMatrix, tempMatrix)
self.logger.info("Creating fooverlaps (beta): array[x,y]")
else:
self.logger.warning("Overlap matrix missing")
self.parsed = True
self.nbasis = nBasis
self.homos = self.data.homos
return True
| bsd-3-clause | -975,815,224,822,829,200 | 36.088435 | 85 | 0.569516 | false |
vikas1885/test1 | lms/djangoapps/certificates/migrations/0024_auto__add_certificatetemplate__add_unique_certificatetemplate_organiza.py | 52 | 15622 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CertificateTemplate'
db.create_table('certificates_certificatetemplate', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('template', self.gf('django.db.models.fields.TextField')()),
('organization_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('course_key', self.gf('xmodule_django.models.CourseKeyField')(db_index=True, max_length=255, null=True, blank=True)),
('mode', self.gf('django.db.models.fields.CharField')(default='honor', max_length=125, null=True, blank=True)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('certificates', ['CertificateTemplate'])
# Adding unique constraint on 'CertificateTemplate', fields ['organization_id', 'course_key', 'mode']
db.create_unique('certificates_certificatetemplate', ['organization_id', 'course_key', 'mode'])
# Adding model 'CertificateTemplateAsset'
db.create_table('certificates_certificatetemplateasset', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('asset', self.gf('django.db.models.fields.files.FileField')(max_length=255)),
))
db.send_create_signal('certificates', ['CertificateTemplateAsset'])
def backwards(self, orm):
# Removing unique constraint on 'CertificateTemplate', fields ['organization_id', 'course_key', 'mode']
db.delete_unique('certificates_certificatetemplate', ['organization_id', 'course_key', 'mode'])
# Deleting model 'CertificateTemplate'
db.delete_table('certificates_certificatetemplate')
# Deleting model 'CertificateTemplateAsset'
db.delete_table('certificates_certificatetemplateasset')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.badgeassertion': {
'Meta': {'unique_together': "(('course_id', 'user', 'mode'),)", 'object_name': 'BadgeAssertion'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'certificates.badgeimageconfiguration': {
'Meta': {'object_name': 'BadgeImageConfiguration'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '125'})
},
'certificates.certificategenerationconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'CertificateGenerationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificategenerationcoursesetting': {
'Meta': {'object_name': 'CertificateGenerationCourseSetting'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'certificates.certificatehtmlviewconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'CertificateHtmlViewConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'configuration': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificatetemplate': {
'Meta': {'unique_together': "(('organization_id', 'course_key', 'mode'),)", 'object_name': 'CertificateTemplate'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '125', 'null': 'True', 'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {})
},
'certificates.certificatetemplateasset': {
'Meta': {'object_name': 'CertificateTemplateAsset'},
'asset': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'certificates.certificatewhitelist': {
'Meta': {'object_name': 'CertificateWhitelist'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'certificates.examplecertificate': {
'Meta': {'object_name': 'ExampleCertificate'},
'access_key': ('django.db.models.fields.CharField', [], {'default': "'f14d7721cd154a57a4fb52b9d4b4bc75'", 'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
'error_reason': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'example_cert_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['certificates.ExampleCertificateSet']"}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "u'John Do\\xeb'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '255'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'789810b9a54b4dd5bae3feec5b4e9fdb'", 'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'certificates.examplecertificateset': {
'Meta': {'object_name': 'ExampleCertificateSet'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates'] | agpl-3.0 | -5,492,538,675,353,509,000 | 78.304569 | 182 | 0.580207 | false |
minhphung171093/GreenERP_V9 | openerp/addons/gamification/__openerp__.py | 36 | 1512 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Gamification',
'version': '1.0',
'sequence': 160,
'category': 'Human Resources',
'website' : 'https://www.odoo.com/page/gamification',
'depends': ['mail', 'web_kanban_gauge'],
'description': """
Gamification process
====================
The Gamification module provides ways to evaluate and motivate the users of OpenERP.
Users can be evaluated using goals and numerical objectives to reach.
**Goals** are assigned through **challenges** to evaluate and compare members of a team with each other and over time.
For non-numerical achievements, **badges** can be granted to users. From a simple "thank you" to an exceptional achievement, a badge is an easy way to express gratitude to a user for their good work.
Both goals and badges are flexible and can be adapted to a large range of modules and actions. When installed, this module creates simple goals to help new users discover OpenERP and configure their user profile.
""",
'data': [
'wizard/update_goal.xml',
'wizard/grant_badge.xml',
'views/badge.xml',
'views/challenge.xml',
'views/goal.xml',
'data/cron.xml',
'security/gamification_security.xml',
'security/ir.model.access.csv',
'data/goal_base.xml',
'data/badge.xml',
'views/gamification.xml',
],
'application': False,
'auto_install': False,
}
| gpl-3.0 | -6,531,797,504,724,503,000 | 38.789474 | 214 | 0.666667 | false |
ramaxlo/cerbero | cerbero/packages/wix.py | 23 | 22025 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import uuid
import shutil
from cerbero.utils import etree, to_winepath, shell
from cerbero.errors import FatalError
from cerbero.config import Platform, Architecture
from cerbero.packages import PackageType
from cerbero.packages.package import Package, SDKPackage, App, InstallerPackage
WIX_SCHEMA = "http://schemas.microsoft.com/wix/2006/wi"
class VSTemplatePackage(Package):
'''
A Package for Visual Studio templates
@cvar vs_template_name: name of the template
@type vs_template_name: string
@cvar vs_template_dir: directory of the template files
@type vs_template_dir: string
@cvar vs_wizard_dir: directory of the wizard files
@type vs_wizard_dir: string
'''
vs_template_dir = None
vs_wizard_dir = None
vs_template_name = None
def __init__(self, config, store, cookbook):
Package.__init__(self, config, store, cookbook)
def devel_files_list(self):
files = []
for f in [self.vs_template_dir, self.vs_wizard_dir]:
files += shell.ls_dir(os.path.join(self.config.prefix, f),
self.config.prefix)
return files
class WixBase():
def __init__(self, config, package):
self.config = config
self.package = package
self.platform = config.platform
self.target_platform = config.target_platform
self._with_wine = self.platform != Platform.WINDOWS
self.prefix = config.prefix
self.filled = False
self.id_count = 0
self.ids = {}
def fill(self):
if self.filled:
return
self._fill()
self.filled = True
def write(self, filepath):
self.fill()
tree = etree.ElementTree(self.root)
tree.write(filepath, encoding='utf-8', pretty_print=True)
def _format_level(self, selected):
return selected and '1' or '4'
def _format_absent(self, required):
return required and 'disallow' or 'allow'
def _add_root(self):
self.root = etree.Element("Wix", xmlns=WIX_SCHEMA)
def _format_id(self, string, replace_dots=False):
ret = string
ret = ret.replace('_', '__')
for r in ['/', '-', ' ', '@', '+']:
ret = ret.replace(r, '_')
if replace_dots:
ret = ret.replace('.', '')
# For directories starting with a number
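# Example (illustrative input): _format_id('gst-plugins 1.0') returns '_gst_plugins_1.0'.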
return '_' + ret
def _format_path_id(self, path, replace_dots=False):
ret = self._format_id(os.path.split(path)[1], replace_dots)
ret = ret.lower()
if ret not in self.ids:
self.ids[ret] = 0
else:
self.ids[ret] += 1
if self.ids[ret] != 0:
ret = '%s_%s' % (ret, self.ids[ret])
return ret
def _get_uuid(self):
return "%s" % uuid.uuid1()
def _format_version(self, version):
# major and minor must be less than 256 on windows,
# so 2012.5 must be changed to 20.12.5
versions = version.split('.')
tversions = []
for version in versions:
i = int(version)
if i > 9999:
raise FatalError("Unsupported version number, mayor and minor "
"must be less than 9999")
elif i > 255:
tversions.append(version[:-2])
tversions.append(version[-2:])
else:
tversions.append(version)
return '.'.join(tversions)
class MergeModule(WixBase):
'''
Creates WiX merge modules from cerbero packages
@ivar package: package with the info to build the merge package
@type package: L{cerbero.packages.package.Package}
'''
def __init__(self, config, files_list, package):
WixBase.__init__(self, config, package)
self.files_list = files_list
self._dirnodes = {}
def _fill(self):
self._add_root()
self._add_module()
self._add_package()
self._add_root_dir()
self._add_files()
def _add_module(self):
self.module = etree.SubElement(self.root, "Module",
Id=self._format_id(self.package.name),
Version=self._format_version(self.package.version),
Language='1033')
def _add_package(self):
self.pkg = etree.SubElement(self.module, "Package",
Id=self.package.uuid or self._get_uuid(),
Description=self.package.shortdesc,
Comments=self.package.longdesc,
Manufacturer=self.package.vendor)
def _add_root_dir(self):
self.rdir = etree.SubElement(self.module, "Directory",
Id='TARGETDIR', Name='SourceDir')
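# Map the empty relative path to this root directory so the recursion in _add_directory() terminates.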
self._dirnodes[''] = self.rdir
def _add_files(self):
for f in self.files_list:
self._add_file(f)
def _add_directory(self, dirpath):
if dirpath in self._dirnodes:
return
parentpath = os.path.split(dirpath)[0]
# os.path.split() returns '' for a path with no parent, and '' already maps to the
# root directory added in _add_root_dir(), so the recursion below stops there.
if parentpath not in self._dirnodes:
self._add_directory(parentpath)
parent = self._dirnodes[parentpath]
dirnode = etree.SubElement(parent, "Directory",
Id=self._format_path_id(dirpath),
Name=os.path.split(dirpath)[1])
self._dirnodes[dirpath] = dirnode
def _add_file(self, filepath):
dirpath, filename = os.path.split(filepath)
self._add_directory(dirpath)
dirnode = self._dirnodes[dirpath]
component = etree.SubElement(dirnode, 'Component',
Id=self._format_path_id(filepath), Guid=self._get_uuid())
filepath = os.path.join(self.prefix, filepath)
p_id = self._format_path_id(filepath, True)
if self._with_wine:
filepath = to_winepath(filepath)
etree.SubElement(component, 'File', Id=p_id, Name=filename,
Source=filepath)
class VSMergeModule(MergeModule):
'''
Creates a Merge Module for Visual Studio templates
@ivar package: package with the info to build the merge package
@type package: L{cerbero.packages.package.Package}
'''
def __init__(self, config, files_list, package):
MergeModule.__init__(self, config, files_list, package)
def _add_root_dir(self):
MergeModule._add_root_dir(self)
self._add_vs_templates()
def _add_vs_templates(self):
etree.SubElement(self.module, 'PropertyRef',
Id='VS_PROJECTTEMPLATES_DIR')
etree.SubElement(self.module, 'PropertyRef',
Id='VS_WIZARDS_DIR')
etree.SubElement(self.module, 'CustomActionRef',
Id='VS2010InstallVSTemplates')
etree.SubElement(self.module, 'CustomActionRef',
Id='VC2010InstallVSTemplates')
prop = etree.SubElement(self.module, 'SetProperty',
Id="VSPROJECTTEMPLATESDIR", After="AppSearch",
Value="[VS_PROJECTTEMPLATES_DIR]\\%s" % \
(self.package.vs_template_name or ""))
prop.text = "VS_PROJECTTEMPLATES_DIR"
prop = etree.SubElement(self.module, 'SetProperty',
Id="VSWIZARDSDIR", After="AppSearch",
Value="[VS_WIZARDS_DIR]\\%s" % \
os.path.split(self.package.vs_template_dir)[1])
prop.text = "VS_WIZARDS_DIR"
self._wizard_dir = etree.SubElement(self.rdir, 'Directory',
Id='VSPROJECTTEMPLATESDIR')
self._tpl_dir = etree.SubElement(self.rdir, 'Directory',
Id='VSWIZARDSDIR')
self._dirnodes[self.package.vs_template_dir] = self._tpl_dir
self._dirnodes[self.package.vs_wizard_dir] = self._wizard_dir
class WixConfig(WixBase):
wix_config = 'wix/Config.wxi'
def __init__(self, config, package):
self.config_path = os.path.join(config.data_dir, self.wix_config)
self.arch = config.target_arch
self.package = package
if isinstance(self.package, App):
self.ui_type = 'WixUI_InstallDir'
else:
self.ui_type = 'WixUI_Mondo'
def write(self, output_dir):
config_out_path = os.path.join(output_dir,
os.path.basename(self.wix_config))
shutil.copy(self.config_path, os.path.join(output_dir,
os.path.basename(self.wix_config)))
replacements = {
"@ProductID@": '*',
"@UpgradeCode@": self.package.get_wix_upgrade_code(),
"@Language@": '1033',
"@Manufacturer@": self.package.vendor,
"@Version@": self._format_version(self.package.version),
"@PackageComments@": self.package.longdesc,
"@Description@": self.package.shortdesc,
"@ProjectURL": self.package.url,
"@ProductName@": self._product_name(),
"@ProgramFilesFolder@": self._program_folder(),
"@Platform@": self._platform(),
"@UIType@": self.ui_type
}
shell.replace(config_out_path, replacements)
return config_out_path
def _product_name(self):
return '%s' % self.package.shortdesc
def _program_folder(self):
if self.arch == Architecture.X86:
return 'ProgramFilesFolder'
return 'ProgramFiles64Folder'
def _platform(self):
if self.arch == Architecture.X86_64:
return 'x64'
return 'x86'
class MSI(WixBase):
'''Creates an installer package from a
L{cerbero.packages.package.MetaPackage}
@ivar package: meta package used to create the installer package
@type package: L{cerbero.packages.package.MetaPackage}
'''
wix_sources = 'wix/installer.wxs'
REG_ROOT = 'HKLM'
BANNER_BMP = 'banner.bmp'
DIALOG_BMP = 'dialog.bmp'
LICENSE_RTF = 'license.rtf'
ICON = 'icon.ico'
def __init__(self, config, package, packages_deps, wix_config, store):
WixBase.__init__(self, config, package)
self.packages_deps = packages_deps
self.store = store
self.wix_config = wix_config
self._parse_sources()
self._add_include()
self._customize_ui()
self.product = self.root.find(".//Product")
self._add_vs_properties()
def _parse_sources(self):
sources_path = self.package.resources_wix_installer or \
os.path.join(self.config.data_dir, self.wix_sources)
with open(sources_path, 'r') as f:
self.root = etree.fromstring(f.read())
for element in self.root.iter():
element.tag = element.tag[len(WIX_SCHEMA) + 2:]
self.root.set('xmlns', WIX_SCHEMA)
self.product = self.root.find('Product')
def _add_include(self):
if self._with_wine:
self.wix_config = to_winepath(self.wix_config)
inc = etree.PI('include %s' % self.wix_config)
self.root.insert(0, inc)
def _fill(self):
self._add_install_dir()
if isinstance(self.package, App):
self._add_application_merge_module()
else:
self._add_merge_modules()
if isinstance(self.package, SDKPackage):
if self.package.package_mode == PackageType.RUNTIME:
self._add_registry_install_dir()
self._add_sdk_root_env_variable()
if isinstance(self.package, App):
self._add_start_menu_shortcuts()
self._add_get_install_dir_from_registry()
def _add_application_merge_module(self):
self.main_feature = etree.SubElement(self.product, "Feature",
Id=self._format_id(self.package.name + '_app'),
Title=self.package.title, Level='1', Display="expand",
AllowAdvertise="no", ConfigurableDirectory="INSTALLDIR")
self._add_merge_module(self.package, True, True, [])
etree.SubElement(self.installdir, 'Merge',
Id=self._package_id(self.package.name), Language='1033',
SourceFile=self.packages_deps[self.package], DiskId='1')
def _add_merge_modules(self):
self.main_feature = etree.SubElement(self.product, "Feature",
Id=self._format_id(self.package.name),
Title=self.package.title, Level='1', Display="expand",
AllowAdvertise="no", ConfigurableDirectory="INSTALLDIR")
packages = [(self.store.get_package(x[0]), x[1], x[2]) for x in
self.package.packages]
# Remove empty packages
packages = [x for x in packages if x[0] in self.packages_deps.keys()]
if len(packages) == 0:
raise FatalError("All packages are empty: %s" %
[x[0] for x in self.package.packages])
# Fill the list of required packages, which are the ones installed by
# a package that is always installed
req = [x[0] for x in packages if x[1] == True]
required_packages = req[:]
for p in req:
required_packages.extend(self.store.get_package_deps(p, True))
for package, required, selected in packages:
if package in self.packages_deps:
self._add_merge_module(package, required, selected,
required_packages)
# Add a merge module ref for all the packages
for package, path in self.packages_deps.iteritems():
etree.SubElement(self.installdir, 'Merge',
Id=self._package_id(package.name), Language='1033',
SourceFile=path, DiskId='1')
def _add_dir(self, parent, dir_id, name):
tdir = etree.SubElement(parent, "Directory",
Id=dir_id, Name=name)
return tdir
def _add_install_dir(self):
self.target_dir = self._add_dir(self.product, 'TARGETDIR', 'SourceDir')
# FIXME: Add a way to install to ProgramFilesFolder
if isinstance(self.package, App):
installdir = self._add_dir(self.target_dir,
'$(var.PlatformProgramFilesFolder)', 'ProgramFilesFolder')
self.installdir = self._add_dir(installdir, 'INSTALLDIR',
'$(var.ProductName)')
self.bindir = self._add_dir(self.installdir, 'INSTALLBINDIR', 'bin')
else:
installdir = self._add_dir(self.target_dir, 'INSTALLDIR',
self.package.get_install_dir())
versiondir = self._add_dir(installdir, "Version", self.package.sdk_version)
archdir = self._add_dir(versiondir, 'Architecture',
self.config.target_arch)
self.installdir = self._add_dir(archdir, 'SDKROOTDIR', '.')
def _package_id(self, package_name):
return self._format_id(package_name)
def _package_var(self):
package_type = self.package.package_mode
self.package.set_mode(PackageType.RUNTIME)
name = self.package.shortdesc
self.package.set_mode(package_type)
return name
def _registry_key(self, name):
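# e.g. _registry_key('MySDK') returns r'Software\MySDK\x86_64' when targeting x86_64
# (the SDK name is illustrative).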
return 'Software\\%s\\%s' % (name, self.config.target_arch)
def _customize_ui(self):
# Banner Dialog and License
for path, var in [(self.BANNER_BMP, 'BannerBmp'),
(self.DIALOG_BMP, 'DialogBmp'),
(self.LICENSE_RTF, 'LicenseRtf')]:
path = self.package.relative_path(path)
if self._with_wine:
path = to_winepath(path)
if os.path.exists(path):
etree.SubElement(self.product, 'WixVariable',
Id='WixUI%s' % var, Value=path)
# Icon
path = self.package.relative_path(self.ICON)
if self._with_wine:
path = to_winepath(path)
if os.path.exists(path):
etree.SubElement(self.product, 'Icon',
Id='MainIcon', SourceFile=path)
def _add_sdk_root_env_variable(self):
envcomponent = etree.SubElement(self.installdir, 'Component',
Id='EnvironmentVariables', Guid=self._get_uuid())
env = etree.SubElement(envcomponent, 'Environment', Id="SdkRootEnv",
Action="set", Part="all", Name=self.package.get_root_env_var(),
Permanent="no", Value='[SDKROOTDIR]')
etree.SubElement(self.main_feature, 'ComponentRef',
Id='EnvironmentVariables')
def _add_registry_install_dir(self):
# Get the package name. Both devel and runtime will share the same
# installation folder
name = self._package_var().replace(' ', '')
# Add INSTALLDIR in the registry only for the runtime package
if self.package.package_mode == PackageType.RUNTIME:
regcomponent = etree.SubElement(self.installdir, 'Component',
Id='RegistryInstallDir', Guid=self._get_uuid())
regkey = etree.SubElement(regcomponent, 'RegistryKey',
Id='RegistryInstallDirRoot',
Action='createAndRemoveOnUninstall',
Key=self._registry_key(name),
Root=self.REG_ROOT)
etree.SubElement(regkey, 'RegistryValue',
Id='RegistryInstallDirValue',
Type='string', Name='InstallDir', Value='[INSTALLDIR]')
etree.SubElement(regkey, 'RegistryValue',
Id='RegistryVersionValue',
Type='string', Name='Version',
Value=self.package.version)
etree.SubElement(regkey, 'RegistryValue',
Id='RegistrySDKVersionValue',
Type='string', Name='SdkVersion',
Value=self.package.sdk_version)
etree.SubElement(self.main_feature, 'ComponentRef',
Id='RegistryInstallDir')
def _add_get_install_dir_from_registry(self):
name = self._package_var().replace(' ', '')
if isinstance(self.package, InstallerPackage):
name = self.package.windows_sdk_reg or name
key = self._registry_key(name)
# Get INSTALLDIR from the registry key
installdir_prop = etree.SubElement(self.product, 'Property',
Id='INSTALLDIR')
etree.SubElement(installdir_prop, 'RegistrySearch', Id=name,
Type="raw", Root=self.REG_ROOT, Key=key, Name='InstallDir')
def _add_merge_module(self, package, required, selected,
required_packages):
# Create a new feature for this package
feature = etree.SubElement(self.main_feature, 'Feature',
Id=self._format_id(package.name), Title=package.shortdesc,
Level=self._format_level(selected),
Display='expand', Absent=self._format_absent(required))
deps = self.store.get_package_deps(package, True)
# Add all the merge modules required by this package, but excluding
# all the ones that are forced to be installed
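# A required (always installed) feature only references dependencies that are
# themselves mandatory; optional features reference the remaining dependencies.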
if not required:
mergerefs = list(set(deps) - set(required_packages))
else:
mergerefs = [x for x in deps if x in required_packages]
# don't add empty packages
mergerefs = [x for x in mergerefs if x in self.packages_deps.keys()]
for p in mergerefs:
etree.SubElement(feature, "MergeRef",
Id=self._package_id(p.name))
etree.SubElement(feature, "MergeRef",
Id=self._package_id(package.name))
if isinstance(package, VSTemplatePackage):
c = etree.SubElement(feature, "Condition", Level="0")
c.text = "NOT VS2010DEVENV AND NOT VC2010EXPRESS_IDE"
def _add_start_menu_shortcuts(self):
# Create a folder with the application name in the Start Menu folder
programs = etree.SubElement(self.target_dir, 'Directory',
Id='ProgramMenuFolder')
etree.SubElement(programs, 'Directory', Id='ApplicationProgramsFolder',
Name='$(var.ProductName)')
# Add the shortcut to the installer package
appf = etree.SubElement(self.product, 'DirectoryRef',
Id='ApplicationProgramsFolder')
apps = etree.SubElement(appf, 'Component', Id='ApplicationShortcut',
Guid=self._get_uuid())
for desc, path, _, _ in self.package.commands[self.config.target_platform]:
etree.SubElement(apps, 'Shortcut',
Id='ApplicationStartMenuShortcut', Name=desc,
Description=desc, Target='[INSTALLBINDIR]' + path,
WorkingDirectory='INSTALLBINDIR',
Icon='MainIcon')
etree.SubElement(apps, 'RemoveFolder', Id='ApplicationProgramsFolder',
On='uninstall')
etree.SubElement(apps, 'RegistryValue', Root='HKCU',
Key='Software\Microsoft\%s' % self.package.name,
Name='installed', Type='integer', Value='1', KeyPath='yes')
# Ref it in the main feature
etree.SubElement(self.main_feature, 'ComponentRef',
Id='ApplicationShortcut')
def _add_vs_properties(self):
etree.SubElement(self.product, 'PropertyRef', Id='VS2010DEVENV')
etree.SubElement(self.product, 'PropertyRef', Id='VC2010EXPRESS_IDE')
| lgpl-2.1 | -8,070,839,893,560,263,000 | 38.26025 | 87 | 0.595823 | false |
sigma-geosistemas/django-tenants | django_tenants/routers.py | 1 | 1528 | from django.conf import settings
from django.apps import apps as django_apps
class TenantSyncRouter(object):
"""
A router to control which applications will be synced,
depending on whether we are syncing the shared apps or the tenant apps.
"""
def app_in_list(self, app_label, apps_list):
"""
Is 'app_label' present in 'apps_list'?
apps_list is either settings.SHARED_APPS or settings.TENANT_APPS, a
list of app names.
We check the presence of the app's name or the full path to the app's
AppConfig class.
https://docs.djangoproject.com/en/1.8/ref/applications/#configuring-applications
"""
appconfig = django_apps.get_app_config(app_label)
appconfig_full_name = '{}.{}'.format(
appconfig.__module__, appconfig.__class__.__name__)
return (appconfig.name in apps_list) or (appconfig_full_name in apps_list)
def allow_migrate(self, db, app_label, model_name=None, **hints):
# the imports below need to be done here else django <1.5 goes crazy
# https://code.djangoproject.com/ticket/20704
from django.db import connection
from django_tenants.utils import get_public_schema_name
if connection.schema_name == get_public_schema_name():
if not self.app_in_list(app_label, settings.SHARED_APPS):
return False
else:
if not self.app_in_list(app_label, settings.TENANT_APPS):
return False
return None
| mit | -3,270,951,564,121,082,400 | 37.2 | 88 | 0.640052 | false |
Beit-Hatfutsot/dbs-back | bhs_api/models.py | 1 | 4042 | import hashlib
from bson.objectid import ObjectId
from mongoengine import (ListField, StringField, EmbeddedDocumentField,
EmbeddedDocumentListField, EmbeddedDocument,
GenericEmbeddedDocumentField, BooleanField,
DateTimeField, ReferenceField)
from flask import current_app, abort
from flask.ext.mongoengine import Document
from flask.ext.security import UserMixin, RoleMixin
from .utils import dictify, get_referrer_host_url
class Role(Document, RoleMixin):
name = StringField(max_length=80, unique=True)
description = StringField(max_length=255)
class StoryLine(EmbeddedDocument):
id = StringField(max_length=512, unique=True)
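# One flag per story branch; four branches are assumed throughout (matching the
# 4-element default of User.story_branches below).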
in_branch = ListField(BooleanField(), default=4*[False])
class UserName(EmbeddedDocument):
en = StringField(max_length=64)
he = StringField(max_length=64)
class User(Document, UserMixin):
email = StringField(max_length=255)
password = StringField(max_length=255, default="lookmanopassword")
name = EmbeddedDocumentField(UserName)
active = BooleanField(default=True)
confirmed_at = DateTimeField()
roles = ListField(ReferenceField(Role))
story_items = EmbeddedDocumentListField(StoryLine)
story_branches = ListField(field=StringField(max_length=64),
default=4*[''])
next = StringField(max_length=1023, default='/mjs')
hash = StringField(max_length=255, default='')
username = StringField(max_length=255)
meta = {
'indexes': ['email', 'username', 'hash']
}
safe_keys = ('email', 'name', 'confirmed_at', 'next', 'hash')
def handle(self, request):
method = request.method
r = None  # ensure unsupported methods fall through to the abort() below
referrer = request.referrer
if referrer:
referrer_host_url = get_referrer_host_url(referrer)
else:
referrer_host_url = None
if method == 'GET':
r = self.render()
elif method == 'PUT':
try:
data = request.get_json()
if not isinstance(data, dict):
abort(
400,
'Only dict like objects are supported for user management')
except ValueError:
e_message = 'Could not decode JSON from data'
current_app.logger.debug(e_message)
abort(400, e_message)
if not data:
abort(400, 'No data provided')
r = self.update(data)
if not r:
abort(500, 'User handler accepts only GET, PUT or DELETE')
return r
def update(self, user_dict):
if 'email' in user_dict:
self.email = user_dict['email']
if 'name' in user_dict:
if not self.name:
self.name = UserName()
for k,v in user_dict['name'].items():
setattr(self.name, k, v)
self.save()
return self.render()
def render(self):
# some old users might not have a hash, saving will generate one
if not self.hash:
self.save()
user_dict = dictify(self)
ret = {}
for key in self.safe_keys:
ret[key] = user_dict.get(key, None)
ret.update(self.get_mjs())
return ret
def is_admin(self):
if self.has_role('admin'):
return True
else:
return False
def get_mjs(self):
return {'story_items': [{'id': o.id, 'in_branch': o.in_branch} for o in self.story_items],
'story_branches': self.story_branches}
def clean(self):
''' this method is called by MongoEngine just before saving '''
if not self.hash:
# make sure we have a public hash
self.hash = hashlib.md5(self.email.lower()).hexdigest()
# Prevent a nasty bug where next points to a login link causing login
# to fail
if self.next.startswith('/login'):
self.next = current_app.config['DEFAULT_NEXT']
| agpl-3.0 | 7,560,715,236,348,787,000 | 32.131148 | 98 | 0.586838 | false |
palisadoes/switchmap-ng | switchmap/test/test_mib_ciscovtp.py | 2 | 9795 | #!/usr/bin/env python3
"""Test the mib_if module."""
import os
import sys
import binascii
import unittest
from mock import Mock
# Try to create a working PYTHONPATH
TEST_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
SWITCHMAP_DIRECTORY = os.path.abspath(os.path.join(TEST_DIRECTORY, os.pardir))
ROOT_DIRECTORY = os.path.abspath(os.path.join(SWITCHMAP_DIRECTORY, os.pardir))
if TEST_DIRECTORY.endswith('/switchmap-ng/switchmap/test') is True:
sys.path.append(ROOT_DIRECTORY)
else:
print(
'This script is not installed in the "switchmap-ng/switchmap/test" directory. '
'Please fix.')
sys.exit(2)
from switchmap.snmp.cisco import mib_ciscovtp as testimport
class Query(object):
"""Class for snmp_manager.Query mock.
A detailed tutorial about Python mocks can be found here:
http://www.drdobbs.com/testing/using-mocks-in-python/240168251
"""
def query(self):
"""Do an SNMP query."""
pass
def oid_exists(self):
"""Determine existence of OID on device."""
pass
def swalk(self):
"""Do a failsafe SNMPwalk."""
pass
def walk(self):
"""Do a failable SNMPwalk."""
pass
class KnownValues(unittest.TestCase):
"""Checks all functions and methods."""
#########################################################################
# General object setup
#########################################################################
# SNMPwalk results used by Mocks.
# Normalized walk returning integers
nwalk_results_integer = {
100: 1234,
200: 5678
}
# Set the stage for SNMPwalk for integer results
snmpobj_integer = Mock(spec=Query)
mock_spec_integer = {
'swalk.return_value': nwalk_results_integer,
'walk.return_value': nwalk_results_integer,
}
snmpobj_integer.configure_mock(**mock_spec_integer)
# Normalized walk returning integers for the ifIndex
nwalk_results_ifindex = {
100: 100,
200: 200
}
# Set the stage for SNMPwalk for integer results for the ifIndex
snmpobj_ifindex = Mock(spec=Query)
mock_spec_ifindex = {
'swalk.return_value': nwalk_results_ifindex,
'walk.return_value': nwalk_results_ifindex,
}
snmpobj_ifindex.configure_mock(**mock_spec_ifindex)
# Normalized walk returning strings
nwalk_results_bytes = {
100: b'1234',
200: b'5678'
}
# Set the stage for SNMPwalk for string results
snmpobj_bytes = Mock(spec=Query)
mock_spec_bytes = {
'swalk.return_value': nwalk_results_bytes,
'walk.return_value': nwalk_results_bytes,
}
snmpobj_bytes.configure_mock(**mock_spec_bytes)
# Normalized walk returning binary data
nwalk_results_binary = {
100: binascii.unhexlify('1234'),
200: binascii.unhexlify('5678')
}
# Set the stage for SNMPwalk for binary results
snmpobj_binary = Mock(spec=Query)
mock_spec_binary = {
'swalk.return_value': nwalk_results_binary,
'walk.return_value': nwalk_results_binary,
}
snmpobj_binary.configure_mock(**mock_spec_binary)
# Initializing key variables
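# Keys 100 and 200 mirror the indexes returned by the mocked SNMP walks; each nested
# dict holds the value every tested MIB method is expected to report for that index.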
expected_dict = {
100: {
'vlanTrunkPortDynamicState': 1234,
'vlanTrunkPortDynamicStatus': 1234,
'vlanTrunkPortNativeVlan': 1234,
'vlanTrunkPortEncapsulationType': 1234,
'vlanTrunkPortVlansEnabled': 1234,
'vtpVlanType': 1234,
'vtpVlanName': '1234',
'vtpVlanState': 1234
},
200: {
'vlanTrunkPortDynamicState': 5678,
'vlanTrunkPortDynamicStatus': 5678,
'vlanTrunkPortNativeVlan': 5678,
'vlanTrunkPortEncapsulationType': 5678,
'vlanTrunkPortVlansEnabled': 5678,
'vtpVlanType': 5678,
'vtpVlanName': '5678',
'vtpVlanState': 5678
}
}
def test_get_query(self):
"""Testing function get_query."""
pass
def test_init_query(self):
"""Testing function init_query."""
pass
def test___init__(self):
"""Testing function __init__."""
pass
def test_layer2(self):
"""Testing function layer2."""
# Layer 2 testing only seems to work when all the methods return
# the same type of results (eg. int, string, hex)
pass
def test_layer1(self):
"""Testing function layer1."""
# Layer 1 testing only seems to work when all the methods return
# the same type of results (eg. int, string, hex)
pass
def test_vlantrunkportencapsulationtype(self):
"""Testing function vlantrunkportencapsulationtype."""
# Initialize key variables
oid_key = 'vlanTrunkPortEncapsulationType'
oid = '.1.3.6.1.4.1.9.9.46.1.6.1.1.3'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.vlantrunkportencapsulationtype()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.vlantrunkportencapsulationtype(oidonly=True)
self.assertEqual(results, oid)
def test_vlantrunkportnativevlan(self):
"""Testing function vlantrunkportnativevlan."""
# Initialize key variables
oid_key = 'vlanTrunkPortNativeVlan'
oid = '.1.3.6.1.4.1.9.9.46.1.6.1.1.5'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.vlantrunkportnativevlan()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.vlantrunkportnativevlan(oidonly=True)
self.assertEqual(results, oid)
def test_vlantrunkportdynamicstatus(self):
"""Testing function vlantrunkportdynamicstatus."""
# Initialize key variables
oid_key = 'vlanTrunkPortDynamicStatus'
oid = '.1.3.6.1.4.1.9.9.46.1.6.1.1.14'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.vlantrunkportdynamicstatus()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.vlantrunkportdynamicstatus(oidonly=True)
self.assertEqual(results, oid)
def test_vlantrunkportdynamicstate(self):
"""Testing function vlantrunkportdynamicstate."""
# Initialize key variables
oid_key = 'vlanTrunkPortDynamicState'
oid = '.1.3.6.1.4.1.9.9.46.1.6.1.1.13'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.vlantrunkportdynamicstate()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.vlantrunkportdynamicstate(oidonly=True)
self.assertEqual(results, oid)
def test_vtpvlanname(self):
"""Testing function vtpvlanname."""
# Initialize key variables
oid_key = 'vtpVlanName'
oid = '.1.3.6.1.4.1.9.9.46.1.3.1.1.4'
# Get results
testobj = testimport.init_query(self.snmpobj_bytes)
results = testobj.vtpvlanname()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.vtpvlanname(oidonly=True)
self.assertEqual(results, oid)
def test_vtpvlantype(self):
"""Testing function vtpvlantype."""
# Initialize key variables
oid_key = 'vtpVlanType'
oid = '.1.3.6.1.4.1.9.9.46.1.3.1.1.3'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.vtpvlantype()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.vtpvlantype(oidonly=True)
self.assertEqual(results, oid)
def test_vtpvlanstate(self):
"""Testing function vtpvlanstate."""
# Initialize key variables
oid_key = 'vtpVlanState'
oid = '.1.3.6.1.4.1.9.9.46.1.3.1.1.2'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.vtpvlanstate()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.vtpvlanstate(oidonly=True)
self.assertEqual(results, oid)
def test_vlantrunkportvlansenabled(self):
"""Testing function vlantrunkportvlansenabled."""
pass
if __name__ == '__main__':
# Do the unit test
unittest.main()
| apache-2.0 | -9,014,452,226,342,369,000 | 31.220395 | 78 | 0.61511 | false |
cpennington/edx-platform | cms/djangoapps/contentstore/views/tests/test_library.py | 3 | 15032 | """
Unit tests for contentstore.views.library
More important high-level tests are in contentstore/tests/test_libraries.py
"""
import ddt
import mock
from django.conf import settings
from mock import patch
from opaque_keys.edx.locator import CourseKey, LibraryLocator
from six import binary_type, text_type
from six.moves import range
from contentstore.tests.utils import AjaxEnabledTestClient, CourseTestCase, parse_json
from contentstore.utils import reverse_course_url, reverse_library_url
from contentstore.views.component import get_component_templates
from contentstore.views.library import get_library_creator_status
from course_creators.views import add_user_with_status_granted as grant_course_creator_status
from student.roles import LibraryUserRole
from xmodule.modulestore.tests.factories import LibraryFactory
LIBRARY_REST_URL = '/library/' # URL for GET/POST requests involving libraries
def make_url_for_lib(key):
""" Get the RESTful/studio URL for testing the given library """
if isinstance(key, LibraryLocator):
key = text_type(key)
return LIBRARY_REST_URL + key
@ddt.ddt
@mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_COURSE_CREATION': False})
class UnitTestLibraries(CourseTestCase):
"""
Unit tests for library views
"""
def setUp(self):
super(UnitTestLibraries, self).setUp()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password=self.user_password)
######################################################
# Tests for /library/ - list and create libraries:
@mock.patch("contentstore.views.library.LIBRARIES_ENABLED", False)
def test_library_creator_status_libraries_not_enabled(self):
_, nostaff_user = self.create_non_staff_authed_user_client()
self.assertEqual(get_library_creator_status(nostaff_user), False)
@mock.patch("contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_is_staff_user(self):
self.assertEqual(get_library_creator_status(self.user), True)
@mock.patch("contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_course_creator_role(self):
_, nostaff_user = self.create_non_staff_authed_user_client()
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
grant_course_creator_status(self.user, nostaff_user)
self.assertEqual(get_library_creator_status(nostaff_user), True)
@mock.patch("contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_no_course_creator_role(self):
_, nostaff_user = self.create_non_staff_authed_user_client()
self.assertEqual(get_library_creator_status(nostaff_user), True)
@ddt.data(
(False, False, True),
(False, True, False),
(True, False, True),
(True, True, False),
(True, None, False),
(False, None, True)
)
@ddt.unpack
def test_library_creator_status_settings(self, disable_course, disable_library, expected_status):
"""
Ensure that the setting DISABLE_LIBRARY_CREATION overrides DISABLE_COURSE_CREATION as expected.
"""
_, nostaff_user = self.create_non_staff_authed_user_client()
with mock.patch("contentstore.views.library.LIBRARIES_ENABLED", True):
with mock.patch.dict(
"django.conf.settings.FEATURES",
{
"DISABLE_COURSE_CREATION": disable_course,
"DISABLE_LIBRARY_CREATION": disable_library
}
):
self.assertEqual(get_library_creator_status(nostaff_user), expected_status)
@mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_COURSE_CREATION': True})
@mock.patch("contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_no_course_creator_role_and_disabled_nonstaff_course_creation(self):
"""
Ensure that `DISABLE_COURSE_CREATION` feature works with libraries as well.
"""
nostaff_client, nostaff_user = self.create_non_staff_authed_user_client()
self.assertFalse(get_library_creator_status(nostaff_user))
# To be explicit, this user can GET, but not POST
get_response = nostaff_client.get_json(LIBRARY_REST_URL)
post_response = nostaff_client.ajax_post(LIBRARY_REST_URL, {
'org': 'org', 'library': 'lib', 'display_name': "New Library",
})
self.assertEqual(get_response.status_code, 200)
self.assertEqual(post_response.status_code, 403)
@patch("contentstore.views.library.LIBRARIES_ENABLED", False)
def test_with_libraries_disabled(self):
"""
The library URLs should return 404 if libraries are disabled.
"""
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 404)
def test_list_libraries(self):
"""
Test that we can GET /library/ to list all libraries visible to the current user.
"""
# Create some more libraries
libraries = [LibraryFactory.create() for _ in range(3)]
lib_dict = dict([(lib.location.library_key, lib) for lib in libraries])
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 200)
lib_list = parse_json(response)
self.assertEqual(len(lib_list), len(libraries))
for entry in lib_list:
self.assertIn("library_key", entry)
self.assertIn("display_name", entry)
key = CourseKey.from_string(entry["library_key"])
self.assertIn(key, lib_dict)
self.assertEqual(entry["display_name"], lib_dict[key].display_name)
del lib_dict[key] # To ensure no duplicates are matched
@ddt.data("delete", "put")
def test_bad_http_verb(self, verb):
"""
We should get an error if we do weird requests to /library/
"""
response = getattr(self.client, verb)(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 405)
def test_create_library(self):
""" Create a library. """
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org',
'library': 'lib',
'display_name': "New Library",
})
self.assertEqual(response.status_code, 200)
# That's all we check. More detailed tests are in contentstore.tests.test_libraries...
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True})
def test_lib_create_permission(self):
"""
Users who are given course creator roles should be able to create libraries.
"""
self.client.logout()
ns_user, password = self.create_non_staff_user()
self.client.login(username=ns_user.username, password=password)
grant_course_creator_status(self.user, ns_user)
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org', 'library': 'lib', 'display_name': "New Library",
})
self.assertEqual(response.status_code, 200)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': False})
def test_lib_create_permission_no_course_creator_role_and_no_course_creator_group(self):
"""
Users who are not given course creator roles should still be able to create libraries
if ENABLE_CREATOR_GROUP is not enabled.
"""
self.client.logout()
ns_user, password = self.create_non_staff_user()
self.client.login(username=ns_user.username, password=password)
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org', 'library': 'lib', 'display_name': "New Library",
})
self.assertEqual(response.status_code, 200)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True})
def test_lib_create_permission_no_course_creator_role_and_course_creator_group(self):
"""
Users who are not given course creator roles should not be able to create libraries
if ENABLE_CREATOR_GROUP is enabled.
"""
self.client.logout()
ns_user, password = self.create_non_staff_user()
self.client.login(username=ns_user.username, password=password)
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org', 'library': 'lib', 'display_name': "New Library",
})
self.assertEqual(response.status_code, 403)
@ddt.data(
{},
{'org': 'org'},
{'library': 'lib'},
{'org': 'C++', 'library': 'lib', 'display_name': 'Lib with invalid characters in key'},
{'org': 'Org', 'library': 'Wh@t?', 'display_name': 'Lib with invalid characters in key'},
)
def test_create_library_invalid(self, data):
"""
Make sure we are prevented from creating libraries with invalid keys/data
"""
response = self.client.ajax_post(LIBRARY_REST_URL, data)
self.assertEqual(response.status_code, 400)
def test_no_duplicate_libraries(self):
"""
We should not be able to create multiple libraries with the same key
"""
lib = LibraryFactory.create()
lib_key = lib.location.library_key
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': lib_key.org,
'library': lib_key.library,
'display_name': "A Duplicate key, same as 'lib'",
})
self.assertIn('already a library defined', parse_json(response)['ErrMsg'])
self.assertEqual(response.status_code, 400)
######################################################
# Tests for /library/:lib_key/ - get a specific library as JSON or HTML editing view
def test_get_lib_info(self):
"""
Test that we can get data about a library (in JSON format) using /library/:key/
"""
# Create a library
lib_key = LibraryFactory.create().location.library_key
# Re-load the library from the modulestore, explicitly including version information:
lib = self.store.get_library(lib_key, remove_version=False, remove_branch=False)
version = lib.location.library_key.version_guid
self.assertNotEqual(version, None)
response = self.client.get_json(make_url_for_lib(lib_key))
self.assertEqual(response.status_code, 200)
info = parse_json(response)
self.assertEqual(info['display_name'], lib.display_name)
self.assertEqual(info['library_id'], text_type(lib_key))
self.assertEqual(info['previous_version'], None)
self.assertNotEqual(info['version'], None)
self.assertNotEqual(info['version'], '')
self.assertEqual(info['version'], text_type(version))
def test_get_lib_edit_html(self):
"""
Test that we can get the studio view for editing a library using /library/:key/
"""
lib = LibraryFactory.create()
response = self.client.get(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<html")
self.assertContains(response, lib.display_name)
@ddt.data('library-v1:Nonexistent+library', 'course-v1:Org+Course', 'course-v1:Org+Course+Run', 'invalid')
def test_invalid_keys(self, key_str):
"""
        Check that various nonexistent/invalid keys give 404 errors
"""
response = self.client.get_json(make_url_for_lib(key_str))
self.assertEqual(response.status_code, 404)
def test_bad_http_verb_with_lib_key(self):
"""
        We should get an error if we do weird requests to /library/:lib_key/
"""
lib = LibraryFactory.create()
for verb in ("post", "delete", "put"):
response = getattr(self.client, verb)(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 405)
def test_no_access(self):
user, password = self.create_non_staff_user()
self.client.login(username=user, password=password)
lib = LibraryFactory.create()
response = self.client.get(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 403)
def test_get_component_templates(self):
"""
Verify that templates for adding discussion and advanced components to
content libraries are not provided.
"""
lib = LibraryFactory.create()
lib.advanced_modules = ['lti']
lib.save()
templates = [template['type'] for template in get_component_templates(lib, library=True)]
self.assertIn('problem', templates)
self.assertNotIn('discussion', templates)
self.assertNotIn('advanced', templates)
def test_advanced_problem_types(self):
"""
Verify that advanced problem types are not provided in problem component for libraries.
"""
lib = LibraryFactory.create()
lib.save()
problem_type_templates = next(
(component['templates'] for component in get_component_templates(lib, library=True) if component['type'] == 'problem'),
[]
)
        # Each problem template has a category which shows whether the problem is a plain
        # 'problem' or one of the advanced problem types (e.g. drag-and-drop-v2).
problem_type_categories = [problem_template['category'] for problem_template in problem_type_templates]
for advance_problem_type in settings.ADVANCED_PROBLEM_TYPES:
self.assertNotIn(advance_problem_type['component'], problem_type_categories)
def test_manage_library_users(self):
"""
Simple test that the Library "User Access" view works.
Also tests that we can use the REST API to assign a user to a library.
"""
library = LibraryFactory.create()
extra_user, _ = self.create_non_staff_user()
manage_users_url = reverse_library_url('manage_library_users', text_type(library.location.library_key))
response = self.client.get(manage_users_url)
self.assertEqual(response.status_code, 200)
# extra_user has not been assigned to the library so should not show up in the list:
self.assertNotContains(response, extra_user.username)
# Now add extra_user to the library:
user_details_url = reverse_course_url(
'course_team_handler',
library.location.library_key, kwargs={'email': extra_user.email}
)
edit_response = self.client.ajax_post(user_details_url, {"role": LibraryUserRole.ROLE})
self.assertIn(edit_response.status_code, (200, 204))
        # Now extra_user should appear in the list:
response = self.client.get(manage_users_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, extra_user.username)
| agpl-3.0 | 6,763,238,492,196,737,000 | 42.697674 | 131 | 0.645756 | false |
dchirikov/luna | luna/switch.py | 1 | 5119 | '''
Written by Dmitry Chirikov <[email protected]>
This file is part of Luna, cluster provisioning tool
https://github.com/dchirikov/luna
This file is part of Luna.
Luna is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Luna is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Luna. If not, see <http://www.gnu.org/licenses/>.
'''
from config import *
import logging
from bson.dbref import DBRef
from luna import utils
from luna.base import Base
from luna.cluster import Cluster
from luna.network import Network
class Switch(Base):
"""Class for operating with switch records"""
log = logging.getLogger(__name__)
def __init__(self, name=None, mongo_db=None, create=False,
id=None, network=None, ip=None, read='public',
rw='private', oid=None, comment=''):
"""
ip - ip of the switch
read - read community
rw - rw community
oid - could be, for instance
.1.3.6.1.2.1.17.7.1.2.2.1.2
.1.3.6.1.2.1.17.4.3.1.2
.1.3.6.1.2.1.17.7.1.2.2
.1.3.6.1.2.1.17.4.3.1.2
"""
self.log.debug("function args {}".format(self._debug_function()))
# Define the schema used to represent switch objects
self._collection_name = 'switch'
self._keylist = {
'read': type(''),
'rw': type(''),
'oid': type(''),
'comment': type(''),
}
# Check if this switch is already present in the datastore
# Read it if that is the case
switch = self._get_object(name, mongo_db, create, id)
if create:
cluster = Cluster(mongo_db=self._mongo_db)
if not network:
err_msg = "Network must be provided"
self.log.error(err_msg)
raise RuntimeError, err_msg
if not name:
err_msg = "Name must be provided"
self.log.error(err_msg)
raise RuntimeError, err_msg
net = Network(name=network, mongo_db=self._mongo_db)
ip = net.reserve_ip(ip)
if not ip:
err_msg = "Could not acquire ip for switch"
self.log.error(err_msg)
raise RuntimeError, err_msg
# Store the new switch in the datastore
switch = {'name': name, 'network': net.DBRef, 'ip': ip,
'read': read, 'rw': rw, 'oid': oid, 'comment': comment}
self.log.debug("Saving switch '{}' to the datastore"
.format(switch))
self.store(switch)
# Link this switch to its dependencies and the current cluster
self.link(cluster)
self.link(net)
self.log = logging.getLogger('switch.' + self._name)
def get(self, key):
if key == 'ip':
net_dbref = self._json['network']
if not net_dbref:
return None
net = Network(id=net_dbref.id, mongo_db=self._mongo_db)
return utils.ip.reltoa(
net._json['NETWORK'], self._json['ip'], net.version)
if key == 'network':
net_dbref = self._json['network']
if not net_dbref:
return None
net = Network(id=net_dbref.id, mongo_db=self._mongo_db)
return net.name
return super(Switch, self).get(key)
def get_rel_ip(self):
return self._json['ip']
def set(self, key, value):
if key == 'ip':
net = Network(id=self._json['network'].id, mongo_db=self._mongo_db)
if self._json['ip']:
net.release_ip(self._json['ip'])
ip = net.reserve_ip(value)
ret = super(Switch, self).set('ip', ip)
return ret
elif key == 'network':
net = Network(id=self._json['network'].id, mongo_db=self._mongo_db)
ip = self._json['ip']
new_net = Network(name=value, mongo_db=self._mongo_db)
if net.DBRef == new_net.DBRef:
return None
new_ip = ip
if not new_net.reserve_ip(new_ip):
return None
net.release_ip(ip)
self.unlink(net)
ret = super(Switch, self).set('network', new_net.DBRef)
self.link(new_net)
return ret
else:
return super(Switch, self).set(key, value)
def release_resources(self):
net_dbref = self._json['network']
net = Network(id=net_dbref.id, mongo_db=self._mongo_db)
net.release_ip(self.get('ip'))
return True
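# Usage sketch (added for illustration, not part of the original module). It assumes a
# reachable MongoDB backend and an already defined Network named 'cluster-net'; the
# switch name and addresses below are made up.
#
#   sw = Switch(name='switch01', network='cluster-net', ip='10.141.255.254',
#               read='public', rw='private', create=True)
#   sw.set('ip', '10.141.255.253')   # releases the old IP and reserves the new one
#   sw.release_resources()           # returns the reserved IP to the network pool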
| gpl-3.0 | -2,030,051,419,974,347,000 | 28.589595 | 79 | 0.549131 | false |
IntelLabs/hpat | examples/series/series_ge.py | 1 | 1747 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_ge():
s1 = pd.Series([5, 4, 3, 2, 1])
s2 = pd.Series([0, 2, 3, 6, 8])
return s1.ge(s2) # Expect series of True, True, True, False, False
print(series_ge())
| bsd-2-clause | 856,753,110,064,090,800 | 43.794872 | 79 | 0.685747 | false |
sedders123/phial | docs/source/conf.py | 1 | 5027 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# phial documentation build configuration file, created by
# sphinx-quickstart on Sat Jul 1 21:23:47 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import phial
import os
import sys
sys.path.insert(0, os.path.abspath("../../"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx_autodoc_typehints",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "phial"
copyright = "2017-2019, James Seden Smith"
author = "James Seden Smith"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = phial.__version__
# The full version, including alpha/beta/rc tags.
release = phial.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "phialdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "phial.tex", "phial Documentation", "James Seden Smith", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "phial", "phial Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"phial",
"phial Documentation",
author,
"phial",
"One line description of project.",
"Miscellaneous",
)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
| mit | -5,040,459,451,588,161,000 | 29.840491 | 83 | 0.671176 | false |
partofthething/home-assistant | tests/components/risco/test_config_flow.py | 6 | 7254 | """Test the Risco config flow."""
from unittest.mock import PropertyMock, patch
import pytest
import voluptuous as vol
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.risco.config_flow import (
CannotConnectError,
UnauthorizedError,
)
from homeassistant.components.risco.const import DOMAIN
from tests.common import MockConfigEntry
TEST_SITE_NAME = "test-site-name"
TEST_DATA = {
"username": "test-username",
"password": "test-password",
"pin": "1234",
}
TEST_RISCO_TO_HA = {
"arm": "armed_away",
"partial_arm": "armed_home",
"A": "armed_home",
"B": "armed_home",
"C": "armed_night",
"D": "armed_night",
}
TEST_HA_TO_RISCO = {
"armed_away": "arm",
"armed_home": "partial_arm",
"armed_night": "C",
}
TEST_OPTIONS = {
"scan_interval": 10,
"code_arm_required": True,
"code_disarm_required": True,
}
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.risco.config_flow.RiscoAPI.login",
return_value=True,
), patch(
"homeassistant.components.risco.config_flow.RiscoAPI.site_name",
new_callable=PropertyMock(return_value=TEST_SITE_NAME),
), patch(
"homeassistant.components.risco.config_flow.RiscoAPI.close"
) as mock_close, patch(
"homeassistant.components.risco.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.risco.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_SITE_NAME
assert result2["data"] == TEST_DATA
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
mock_close.assert_awaited_once()
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.risco.config_flow.RiscoAPI.login",
side_effect=UnauthorizedError,
), patch("homeassistant.components.risco.config_flow.RiscoAPI.close") as mock_close:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
mock_close.assert_awaited_once()
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.risco.config_flow.RiscoAPI.login",
side_effect=CannotConnectError,
), patch("homeassistant.components.risco.config_flow.RiscoAPI.close") as mock_close:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
mock_close.assert_awaited_once()
async def test_form_exception(hass):
"""Test we handle unknown exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.risco.config_flow.RiscoAPI.login",
side_effect=Exception,
), patch("homeassistant.components.risco.config_flow.RiscoAPI.close") as mock_close:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
mock_close.assert_awaited_once()
async def test_form_already_exists(hass):
"""Test that a flow with an existing username aborts."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_DATA["username"],
data=TEST_DATA,
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_options_flow(hass):
"""Test options flow."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_DATA["username"],
data=TEST_DATA,
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_OPTIONS,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "risco_to_ha"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_RISCO_TO_HA,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "ha_to_risco"
with patch("homeassistant.components.risco.async_setup_entry", return_value=True):
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_HA_TO_RISCO,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert entry.options == {
**TEST_OPTIONS,
"risco_states_to_ha": TEST_RISCO_TO_HA,
"ha_states_to_risco": TEST_HA_TO_RISCO,
}
async def test_ha_to_risco_schema(hass):
"""Test that the schema for the ha-to-risco mapping step is generated properly."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_DATA["username"],
data=TEST_DATA,
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_OPTIONS,
)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_RISCO_TO_HA,
)
# Test an HA state that isn't used
with pytest.raises(vol.error.MultipleInvalid):
await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**TEST_HA_TO_RISCO, "armed_custom_bypass": "D"},
)
# Test a combo that can't be selected
with pytest.raises(vol.error.MultipleInvalid):
await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**TEST_HA_TO_RISCO, "armed_night": "A"},
)
| mit | 8,848,748,024,369,649,000 | 29.737288 | 88 | 0.641301 | false |
heran7/edx-platform | lms/djangoapps/verify_student/views.py | 2 | 10546 | """
Views for the verification flow
"""
import json
import logging
import decimal
from mitxmako.shortcuts import render_to_response
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.http import urlencode
from django.contrib.auth.decorators import login_required
from course_modes.models import CourseMode
from student.models import CourseEnrollment
from student.views import course_from_id
from shoppingcart.models import Order, CertificateItem
from shoppingcart.processors.CyberSource import (
get_signed_purchase_params, get_purchase_endpoint
)
from verify_student.models import SoftwareSecurePhotoVerification
import ssencrypt
log = logging.getLogger(__name__)
class VerifyView(View):
@method_decorator(login_required)
def get(self, request, course_id):
"""
Displays the main verification view, which contains three separate steps:
- Taking the standard face photo
- Taking the id photo
- Confirming that the photos and payment price are correct
before proceeding to payment
"""
# If the user has already been verified within the given time period,
# redirect straight to the payment -- no need to verify again.
if SoftwareSecurePhotoVerification.user_has_valid_or_pending(request.user):
return redirect(
reverse('verify_student_verified',
kwargs={'course_id': course_id}))
elif CourseEnrollment.enrollment_mode_for_user(request.user, course_id) == 'verified':
return redirect(reverse('dashboard'))
else:
# If they haven't completed a verification attempt, we have to
# restart with a new one. We can't reuse an older one because we
# won't be able to show them their encrypted photo_id -- it's easier
# bookkeeping-wise just to start over.
progress_state = "start"
verify_mode = CourseMode.mode_for_course(course_id, "verified")
# if the course doesn't have a verified mode, we want to kick them
# from the flow
if not verify_mode:
return redirect(reverse('dashboard'))
if course_id in request.session.get("donation_for_course", {}):
chosen_price = request.session["donation_for_course"][course_id]
else:
chosen_price = verify_mode.min_price
course = course_from_id(course_id)
context = {
"progress_state": progress_state,
"user_full_name": request.user.profile.name,
"course_id": course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"purchase_endpoint": get_purchase_endpoint(),
"suggested_prices": [
decimal.Decimal(price)
for price in verify_mode.suggested_prices.split(",")
],
"currency": verify_mode.currency.upper(),
"chosen_price": chosen_price,
"min_price": verify_mode.min_price,
}
return render_to_response('verify_student/photo_verification.html', context)
class VerifiedView(View):
"""
View that gets shown once the user has already gone through the
verification flow
"""
@method_decorator(login_required)
def get(self, request, course_id):
"""
Handle the case where we have a get request
"""
if CourseEnrollment.enrollment_mode_for_user(request.user, course_id) == 'verified':
return redirect(reverse('dashboard'))
verify_mode = CourseMode.mode_for_course(course_id, "verified")
if course_id in request.session.get("donation_for_course", {}):
chosen_price = request.session["donation_for_course"][course_id]
else:
chosen_price = verify_mode.min_price.format("{:g}")
course = course_from_id(course_id)
context = {
"course_id": course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"purchase_endpoint": get_purchase_endpoint(),
"currency": verify_mode.currency.upper(),
"chosen_price": chosen_price,
}
return render_to_response('verify_student/verified.html', context)
@login_required
def create_order(request):
"""
Submit PhotoVerification and create a new Order for this verified cert
"""
if not SoftwareSecurePhotoVerification.user_has_valid_or_pending(request.user):
attempt = SoftwareSecurePhotoVerification(user=request.user)
b64_face_image = request.POST['face_image'].split(",")[1]
b64_photo_id_image = request.POST['photo_id_image'].split(",")[1]
attempt.upload_face_image(b64_face_image.decode('base64'))
attempt.upload_photo_id_image(b64_photo_id_image.decode('base64'))
attempt.mark_ready()
attempt.save()
course_id = request.POST['course_id']
donation_for_course = request.session.get('donation_for_course', {})
current_donation = donation_for_course.get(course_id, decimal.Decimal(0))
contribution = request.POST.get("contribution", donation_for_course.get(course_id, 0))
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not valid number."))
if amount != current_donation:
donation_for_course[course_id] = amount
request.session['donation_for_course'] = donation_for_course
verified_mode = CourseMode.modes_for_course_dict(course_id).get('verified', None)
# make sure this course has a verified mode
if not verified_mode:
return HttpResponseBadRequest(_("This course doesn't support verified certificates"))
if amount < verified_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
# I know, we should check this is valid. All kinds of stuff missing here
cart = Order.get_cart_for_user(request.user)
cart.clear()
CertificateItem.add_to_order(cart, course_id, amount, 'verified')
params = get_signed_purchase_params(cart)
return HttpResponse(json.dumps(params), content_type="text/json")
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body)
except ValueError:
log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))
if not isinstance(body_dict, dict):
log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
sig_valid = ssencrypt.has_valid_signature(
"POST",
headers,
body_dict,
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
)
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
#if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id {}, but not found".format(receipt_id))
return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))
if result == "PASS":
log.debug("Approving verification for {}".format(receipt_id))
attempt.approve()
elif result == "FAIL":
log.debug("Denying verification for {}".format(receipt_id))
attempt.deny(json.dumps(reason), error_code=error_code)
elif result == "SYSTEM FAIL":
log.debug("System failure for {} -- resetting to must_retry".format(receipt_id))
attempt.system_error(json.dumps(reason), error_code=error_code)
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result {}".format(result))
return HttpResponseBadRequest(
"Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
)
return HttpResponse("OK!")
@login_required
def show_requirements(request, course_id):
"""
Show the requirements necessary for the verification flow.
"""
if CourseEnrollment.enrollment_mode_for_user(request.user, course_id) == 'verified':
return redirect(reverse('dashboard'))
course = course_from_id(course_id)
context = {
"course_id": course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"is_not_active": not request.user.is_active,
}
return render_to_response("verify_student/show_requirements.html", context)
| agpl-3.0 | -3,901,589,151,012,413,000 | 39.40613 | 108 | 0.66556 | false |
LeMaker/LNcommon | setup.py | 1 | 1227 | import sys
import errno
import subprocess
from distutils.core import setup
VERSION_FILE = 'LNcommon/version.py'
PY3 = sys.version_info[0] >= 3
def get_version():
if PY3:
version_vars = {}
with open(VERSION_FILE) as f:
code = compile(f.read(), VERSION_FILE, 'exec')
exec(code, None, version_vars)
return version_vars['__version__']
else:
execfile(VERSION_FILE)
return __version__
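# VERSION_FILE is expected to define a module-level __version__ string, e.g.:
#   __version__ = '0.1.0'   # illustrative contents of LNcommon/version.py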
setup(
name='LNcommon',
version=get_version(),
description='The LN Digitals common function modules.',
author='Lemaker',
author_email='[email protected]',
license='GPLv3+',
url='https://github.com/LeMaker/LNcommon',
packages=['LNcommon'],
long_description=open('README.md').read(),
classifiers=[
"License :: OSI Approved :: GNU Affero General Public License v3 or "
"later (AGPLv3+)",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='LN Digitals common functions bananapi',
)
| gpl-3.0 | 1,538,194,309,092,823,800 | 27.534884 | 77 | 0.618582 | false |
iriark01/greentea | mbed_greentea/cmake_handlers.py | 1 | 3571 | """
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <[email protected]>
"""
import re
import os
import os.path
from mbed_greentea_log import gt_log
from mbed_greentea_log import gt_log_tab
def load_ctest_testsuite(link_target, binary_type='.bin', verbose=False):
"""! Loads CMake.CTest formatted data about tests from test directory
@return Dictionary of { test_case : test_case_path } pairs
@details Example path with CTestTestFile.cmake:
c:/temp/xxx/mbed-sdk-private/build/frdm-k64f-gcc/test/
Example format of CTestTestFile.cmake:
# CMake generated Testfile for
# Source directory: c:/temp/xxx/mbed-sdk-private/build/frdm-k64f-gcc/test
# Build directory: c:/temp/xxx/mbed-sdk-private/build/frdm-k64f-gcc/test
#
# This file includes the relevant testing commands required for
# testing this directory and lists subdirectories to be tested as well.
add_test(mbed-test-stdio "mbed-test-stdio")
add_test(mbed-test-call_before_main "mbed-test-call_before_main")
add_test(mbed-test-dev_null "mbed-test-dev_null")
add_test(mbed-test-div "mbed-test-div")
add_test(mbed-test-echo "mbed-test-echo")
add_test(mbed-test-ticker "mbed-test-ticker")
add_test(mbed-test-hello "mbed-test-hello")
"""
result = {}
add_test_pattern = '[adtesADTES_]{8}\([\w\d_-]+ \"([\w\d_-]+)\"'
re_ptrn = re.compile(add_test_pattern)
if link_target is not None:
ctest_path = os.path.join(link_target, 'test', 'CTestTestfile.cmake')
try:
with open(ctest_path) as ctest_file:
for line in ctest_file:
if line.lower().startswith('add_test'):
m = re_ptrn.search(line)
if m and len(m.groups()) > 0:
if verbose:
print m.group(1) + binary_type
result[m.group(1)] = os.path.join(link_target, 'test', m.group(1) + binary_type)
except:
pass # Return empty list if path is not found
return result
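# Illustrative call (hypothetical build directory and test names, not from the original
# source); the values are joined with os.path.join, so separators depend on the platform:
#
#   tests = load_ctest_testsuite('./build/frdm-k64f-gcc', binary_type='.bin')
#   # -> {'mbed-test-hello': './build/frdm-k64f-gcc/test/mbed-test-hello.bin', ...}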
def list_binaries_for_targets(build_dir='./build'):
"""! Prints tests in target directories, only if tests exist.
@details Skips empty / no tests for target directories.
"""
dir = build_dir
sub_dirs = [os.path.join(dir, o) for o in os.listdir(dir) if os.path.isdir(os.path.join(dir, o))]
gt_log("available tests for built targets")
gt_log_tab("location '%s'"% os.path.abspath(build_dir))
for sub_dir in sub_dirs:
test_list = load_ctest_testsuite(sub_dir, binary_type='')
if len(test_list):
print "target '%s':" % sub_dir.split(os.sep)[-1]
for test in test_list:
gt_log_tab("test '%s'"% test)
print
print "Example: execute 'mbedgt -t TARGET_NAME -n TEST_NAME' to run test TEST_NAME for target TARGET_NAME"
| apache-2.0 | -5,794,636,865,098,836,000 | 42.54878 | 110 | 0.626435 | false |
edmond-chhung/linkchecker | tests/test_linkchecker.py | 9 | 1301 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import sys
from . import linkchecker_cmd, run_checked
def run_with_options(options, cmd=linkchecker_cmd):
"""Run a command with given options."""
run_checked([sys.executable, cmd] + options)
class TestLinkchecker (unittest.TestCase):
"""Test the linkchecker commandline client."""
def test_linkchecker(self):
# test some single options
for option in ("-V", "--version", "-h", "--help", "--list-plugins", "-Dall"):
run_with_options([option])
# unknown option
self.assertRaises(OSError, run_with_options, ['--imadoofus'])
| gpl-2.0 | 3,650,746,558,767,632,400 | 37.264706 | 85 | 0.704842 | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/python/debug/__init__.py | 11 | 1519 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public Python API of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.debug.debug_data import DebugDumpDir
from tensorflow.python.debug.debug_data import DebugTensorDatum
from tensorflow.python.debug.debug_data import has_inf_or_nan
from tensorflow.python.debug.debug_data import load_tensor_from_event_file
from tensorflow.python.debug.debug_utils import add_debug_tensor_watch
from tensorflow.python.debug.debug_utils import watch_graph
from tensorflow.python.debug.debug_utils import watch_graph_with_blacklists
from tensorflow.python.debug.wrappers.hooks import LocalCLIDebugHook
from tensorflow.python.debug.wrappers.local_cli_wrapper import LocalCLIDebugWrapperSession
| agpl-3.0 | -5,599,582,004,262,932,000 | 46.46875 | 90 | 0.760369 | false |
carlos-jenkins/presentations | themes/hauntr/minify.py | 1 | 1465 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Carlos Jenkins <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Custom CSS minifier using the jsmin library.
"""
from os.path import dirname, abspath, join
from jsmin import jsmin
IMPORTS = 'imports.css'
INPUT = [
's5-core.css',
'framing.css',
'pretty.css',
'docutils.css',
'hauntr.css',
'pygments.css',
]
OUTPUT = 'slides.css'
def main():
"""
Main minifying function.
"""
path = lambda p: join(abspath(dirname(__file__)), p)
minified = []
# Prepend imports
with open(path(IMPORTS)) as fd:
minified.append(fd.read())
# Read inputs
for css in INPUT:
with open(path(css)) as fd:
minified.append(jsmin(fd.read()))
# Write output
with open(path(OUTPUT), 'w') as fd:
fd.write('\n'.join(minified))
print(path(OUTPUT))
if __name__ == '__main__':
main()
| apache-2.0 | -5,711,047,150,555,947,000 | 21.890625 | 66 | 0.648464 | false |
BahtiyarB/the-backdoor-factory | intel/LinuxIntelELF32.py | 2 | 5647 | '''
Copyright (c) 2013-2014, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import struct
import sys
class linux_elfI32_shellcode():
"""
    Linux ELF Intel x32 shellcode class
"""
def __init__(self, HOST, PORT, e_entry, SUPPLIED_SHELLCODE=None):
        # could take HOST/PORT out of here and put them into each shellcode function
self.HOST = HOST
self.PORT = PORT
self.e_entry = e_entry
self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE
self.shellcode = ""
def pack_ip_addresses(self):
hostocts = []
if self.HOST is None:
print "This shellcode requires a HOST parameter -H"
sys.exit(1)
for i, octet in enumerate(self.HOST.split('.')):
hostocts.append(int(octet))
self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
hostocts[2], hostocts[3])
return self.hostip
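    # For example (illustration only), HOST='192.168.1.10' packs to '\xc0\xa8\x01\x0a';
    # the reverse-shell builders below splice these four bytes into the payload right
    # after the \x68 (push) opcode that loads the connect-back address.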
def returnshellcode(self):
return self.shellcode
def reverse_shell_tcp(self, CavesPicked={}):
"""
Modified from metasploit payload/linux/x86/shell_reverse_tcp
        to correctly fork the shellcode payload and continue normal execution.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += ("\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80"
"\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68")
#HOST
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += "\x68\x02\x00"
#PORT
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += ("\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1"
"\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3"
"\x52\x53\x89\xe1\xb0\x0b\xcd\x80")
self.shellcode = self.shellcode1
return (self.shellcode1)
def reverse_tcp_stager(self, CavesPicked={}):
"""
FOR USE WITH STAGER TCP PAYLOADS INCLUDING METERPRETER
Modified metasploit payload/linux/x64/shell/reverse_tcp
        to correctly fork the shellcode payload and continue normal execution.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += ("\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\xb0\x66\x89\xe1\xcd\x80"
"\x97\x5b\x68")
#HOST
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += "\x68\x02\x00"
#PORT
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += ("\x89\xe1\x6a"
"\x66\x58\x50\x51\x57\x89\xe1\x43\xcd\x80\xb2\x07\xb9\x00\x10"
"\x00\x00\x89\xe3\xc1\xeb\x0c\xc1\xe3\x0c\xb0\x7d\xcd\x80\x5b"
"\x89\xe1\x99\xb6\x0c\xb0\x03\xcd\x80\xff\xe1")
self.shellcode = self.shellcode1
return (self.shellcode1)
def user_supplied_shellcode(self, CavesPicked={}):
"""
For user supplied shellcode
"""
if self.SUPPLIED_SHELLCODE is None:
print "[!] User must provide shellcode for this module (-U)"
sys.exit(0)
else:
supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += supplied_shellcode
self.shellcode = self.shellcode1
return (self.shellcode1)
| bsd-3-clause | 4,344,884,307,893,095,400 | 39.335714 | 90 | 0.632548 | false |
eayunstack/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py | 1 | 4236 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants as p_const
# Special vlan_id value in ovs_vlan_allocations table indicating flat network
FLAT_VLAN_ID = -1
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
# Name prefixes for veth device or patch port pair linking the integration
# bridge with the physical bridge for a physical network
PEER_INTEGRATION_PREFIX = 'int-'
PEER_PHYSICAL_PREFIX = 'phy-'
# Nonexistent peer used to create patch ports without associating them, it
# allows to define flows before association
NONEXISTENT_PEER = 'nonexistent-peer'
# The different types of tunnels
TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN,
p_const.TYPE_GENEVE]
### OpenFlow table IDs
## Integration bridge (int_br)
LOCAL_SWITCHING = 0
# Various tables for DVR use of integration bridge flows
DVR_TO_SRC_MAC = 1
DVR_TO_SRC_MAC_VLAN = 2
CANARY_TABLE = 23
# Table for ARP poison/spoofing prevention rules
ARP_SPOOF_TABLE = 24
# Table for MAC spoof filtering
MAC_SPOOF_TABLE = 25
# Table to decide whether further filtering is needed
TRANSIENT_TABLE = 60
# Tables used for ovs firewall
BASE_EGRESS_TABLE = 71
RULES_EGRESS_TABLE = 72
ACCEPT_OR_INGRESS_TABLE = 73
BASE_INGRESS_TABLE = 81
RULES_INGRESS_TABLE = 82
OVS_FIREWALL_TABLES = (
BASE_EGRESS_TABLE,
RULES_EGRESS_TABLE,
ACCEPT_OR_INGRESS_TABLE,
BASE_INGRESS_TABLE,
RULES_INGRESS_TABLE,
)
## Tunnel bridge (tun_br)
# Various tables for tunneling flows
DVR_PROCESS = 1
PATCH_LV_TO_TUN = 2
GRE_TUN_TO_LV = 3
VXLAN_TUN_TO_LV = 4
GENEVE_TUN_TO_LV = 6
DVR_NOT_LEARN = 9
LEARN_FROM_TUN = 10
UCAST_TO_TUN = 20
ARP_RESPONDER = 21
FLOOD_TO_TUN = 22
## Physical Bridges (phys_brs)
# Various tables for DVR use of physical bridge flows
DVR_PROCESS_VLAN = 1
LOCAL_VLAN_TRANSLATION = 2
DVR_NOT_LEARN_VLAN = 3
### end of OpenFlow table IDs
# type for ARP reply in ARP header
ARP_REPLY = '0x2'
# Map tunnel types to tables number
TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV,
p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV,
p_const.TYPE_GENEVE: GENEVE_TUN_TO_LV}
# The default respawn interval for the ovsdb monitor
DEFAULT_OVSDBMON_RESPAWN = 30
# Represent invalid OF Port
OFPORT_INVALID = -1
ARP_RESPONDER_ACTIONS = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
'mod_dl_src:%(mac)s,'
'load:0x2->NXM_OF_ARP_OP[],'
'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
'load:%(mac)#x->NXM_NX_ARP_SHA[],'
'load:%(ip)#x->NXM_OF_ARP_SPA[],'
'in_port')
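# Illustrative rendering (made-up values): with a MAC/IP pair substituted in, '%(mac)s'
# becomes the string form (e.g. fa:16:3e:01:02:03) while '%(mac)#x' and '%(ip)#x' become
# the integer forms (e.g. 0xfa163e010203 and 0xa000005 for 10.0.0.5).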
# Represent ovs status
OVS_RESTARTED = 0
OVS_NORMAL = 1
OVS_DEAD = 2
EXTENSION_DRIVER_TYPE = 'ovs'
# ovs datapath types
OVS_DATAPATH_SYSTEM = 'system'
OVS_DATAPATH_NETDEV = 'netdev'
OVS_DPDK_VHOST_USER = 'dpdkvhostuser'
OVS_DPDK_VHOST_USER_CLIENT = 'dpdkvhostuserclient'
# default ovs vhost-user socket location
VHOST_USER_SOCKET_DIR = '/var/run/openvswitch'
MAX_DEVICE_RETRIES = 5
# OpenFlow version constants
OPENFLOW10 = "OpenFlow10"
OPENFLOW11 = "OpenFlow11"
OPENFLOW12 = "OpenFlow12"
OPENFLOW13 = "OpenFlow13"
OPENFLOW14 = "OpenFlow14"
# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
# callback resource for setting 'bridge_name' in the 'binding:vif_details'
OVS_BRIDGE_NAME = 'ovs_bridge_name'
# callback resource for notifying to ovsdb handler
OVSDB_RESOURCE = 'ovsdb'
# Used in ovs port 'external_ids' in order mark it for no cleanup when
# ovs_cleanup script is used.
SKIP_CLEANUP = 'skip_cleanup'
| apache-2.0 | -3,984,599,570,439,235,600 | 25.810127 | 77 | 0.695231 | false |
c72578/poedit | deps/boost/tools/build/test/explicit.py | 7 | 1284 | #!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
explicit hello2 ;
""")
t.write("hello.cpp", "int main() {}\n")
t.run_build_system()
t.ignore("*.tds")
t.expect_addition(BoostBuild.List("bin/$toolset/debug*/hello") * \
[".exe", ".obj"])
t.expect_nothing_more()
t.run_build_system(["hello2"])
t.expect_addition("bin/$toolset/debug*/hello2.exe")
t.rm(".")
# Test that 'explicit' used in a helper rule applies to the current project, and
# not to the Jamfile where the helper rule is defined.
t.write("jamroot.jam", """\
rule myinstall ( name : target )
{
install $(name)-bin : $(target) ;
explicit $(name)-bin ;
alias $(name) : $(name)-bin ;
}
""")
t.write("sub/a.cpp", "\n")
t.write("sub/jamfile.jam", "myinstall dist : a.cpp ;")
t.run_build_system(subdir="sub")
t.expect_addition("sub/dist-bin/a.cpp")
t.rm("sub/dist-bin")
t.write("sub/jamfile.jam", """\
myinstall dist : a.cpp ;
explicit dist ;
""")
t.run_build_system(subdir="sub")
t.expect_nothing_more()
t.cleanup()
| mit | 296,002,761,922,972,600 | 21.137931 | 81 | 0.661994 | false |
NcLang/vimrc | sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/python-future/discover_tests.py | 10 | 1696 | """
Simple auto test discovery.
From http://stackoverflow.com/a/17004409
"""
import os
import sys
import unittest
if not hasattr(unittest.defaultTestLoader, 'discover'):
try:
import unittest2 as unittest
except ImportError:
raise ImportError('The unittest2 module is required to run tests on Python 2.6')
def additional_tests():
setup_file = sys.modules['__main__'].__file__
setup_dir = os.path.abspath(os.path.dirname(setup_file))
test_dir = os.path.join(setup_dir, 'tests')
test_suite = unittest.defaultTestLoader.discover(test_dir)
blacklist = []
if '/home/travis' in __file__:
# Skip some tests that fail on travis-ci
blacklist.append('test_command')
return exclude_tests(test_suite, blacklist)
class SkipCase(unittest.TestCase):
def skeleton_run_test(self):
raise unittest.SkipTest("Test fails spuriously on travis-ci")
def exclude_tests(suite, blacklist):
"""
Example:
blacklist = [
'test_some_test_that_should_be_skipped',
'test_another_test_that_should_be_skipped'
]
"""
new_suite = unittest.TestSuite()
for test_group in suite._tests:
for test in test_group:
if not hasattr(test, '_tests'):
# e.g. ModuleImportFailure
new_suite.addTest(test)
continue
for subtest in test._tests:
method = subtest._testMethodName
if method in blacklist:
setattr(test,
method,
getattr(SkipCase(), 'skeleton_run_test'))
new_suite.addTest(test)
return new_suite
| mit | -3,249,484,671,108,228,600 | 28.241379 | 88 | 0.602594 | false |
wesleykerr/scrapy-games | scrapy_games/spiders/metacritic.py | 1 | 3606 | # -*- coding: utf-8 -*-
import scrapy
from scrapy_games import items
from scrapy_games.models import models
MAX_URLS = -1
BASE_URL = 'http://www.metacritic.com'
class MetacriticSpider(scrapy.Spider):
name = "metacritic"
pipelines = [
'ReviewsPipeline'
]
def __init__(self, *args, **kwargs):
super(MetacriticSpider, self).__init__(*args, **kwargs)
def start_requests(self):
requests = []
for game in models.Steam.select().where(models.Steam.metacritic_url != 'NULL'):
request = scrapy.Request(game.metacritic_url, callback=self.parse)
request.meta['steam_id'] = game.steam_id
requests.append(request)
if MAX_URLS != -1 and len(requests) >= MAX_URLS:
break
return requests
def parse(self, response):
critic_reviews = response.xpath('//div[@class="module reviews_module critic_reviews_module"]')
all_reviews_url = critic_reviews.xpath('.//p[@class="see_all"]/a/@href').extract()
if all_reviews_url:
request = scrapy.Request(BASE_URL + all_reviews_url[0],
callback=self.parse_metacritic_reviews)
request.meta['metacritic_url'] = response.url
request.meta['steam_id'] = response.request.meta['steam_id']
yield request
else:
request = scrapy.Request(response.url, callback=self.parse_metacritic_reviews)
request.meta['metacritic_url'] = response.url
request.meta['steam_id'] = response.request.meta['steam_id']
yield request
def parse_metacritic_reviews(self, response):
subselect = response.xpath('//div[@class="module reviews_module critic_reviews_module"]')
reviews = subselect.xpath('.//div[@class="review_content"]')
for selector in reviews:
review_item = items.ReviewItem()
review_item['steam_id'] = response.request.meta['steam_id']
review_item['metacritic_url'] = response.request.meta['metacritic_url']
source_text = selector.xpath('.//div[@class="source"]/a/text()').extract()
if source_text:
review_item['reviewer'] = source_text[0].strip()
body_text = selector.xpath('.//div[@class="review_body"]/text()').extract()
if body_text:
review_item['description'] = body_text[0].strip()
score_text = selector.xpath('.//div[@class="review_grade"]/div/text()').extract()
if score_text:
review_item['score'] = score_text[0].strip()
url = selector.xpath('.//li[@class="review_action full_review"]/a/@href').extract()
if url:
review_item['review_url'] = url[0]
yield review_item
request = scrapy.Request(url[0], callback=self.parse_review)
request.meta['steam_id'] = response.request.meta['steam_id']
request.meta['metacritic_url'] = response.request.meta['metacritic_url']
yield request
def parse_review(self, response):
if response.status == 404:
self.log("Received 404: %s" % response.request.url)
return
item = items.ReviewHtmlItem()
item['steam_id'] = response.request.meta['steam_id']
item['metacritic_url'] = response.request.meta['metacritic_url']
item['orig_review_url'] = response.request.url
item['review_url'] = response.url
item['html'] = response.body.decode(response.encoding)
yield item
| apache-2.0 | -3,815,290,967,755,813,400 | 38.626374 | 102 | 0.587354 | false |
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/vm_snapshots/paths/xc_path7.py | 1 | 1707 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', 'flag=ceph'],
[TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_volume_backup, 'vm1-root', 'vm1-root-backup1'],
[TestAction.detach_volume, 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot8'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot9'],
[TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
[TestAction.use_volume_snapshot, 'volume3-snapshot1'],
[TestAction.reboot_vm, 'vm1'],
[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
])
'''
The final status:
Running:['vm1']
Stopped:[]
Enabled:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'vm1-root-snapshot8', 'vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'vm1-root-backup1', 'vm1-image1']
attached:['volume1', 'volume2']
Detached:['volume3']
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5']---vm1@volume1_volume2
vm_snap3:['vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9']---vm1@volume1_volume2
'''
| apache-2.0 | -8,237,004,725,984,210,000 | 40.634146 | 183 | 0.70826 | false |
lgeiger/ide-python | dist/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydev_ipython/qt_loaders.py | 28 | 7837 | """
This module contains factory functions that attempt
to return Qt submodules from the various python Qt bindings.
It also protects against double-importing Qt with different
bindings, which is unstable and likely to crash
This is used primarily by qt and qt_for_kernel, and shouldn't
be accessed directly from the outside
"""
import sys
from functools import partial
from pydev_ipython.version import check_version
# Available APIs.
QT_API_PYQT = 'pyqt'
QT_API_PYQTv1 = 'pyqtv1'
QT_API_PYQT_DEFAULT = 'pyqtdefault' # don't set SIP explicitly
QT_API_PYSIDE = 'pyside'
QT_API_PYQT5 = 'pyqt5'
class ImportDenier(object):
"""Import Hook that will guard against bad Qt imports
once IPython commits to a specific binding
"""
def __init__(self):
self.__forbidden = set()
def forbid(self, module_name):
sys.modules.pop(module_name, None)
self.__forbidden.add(module_name)
def find_module(self, fullname, path=None):
if path:
return
if fullname in self.__forbidden:
return self
def load_module(self, fullname):
raise ImportError("""
Importing %s disabled by IPython, which has
already imported an Incompatible QT Binding: %s
""" % (fullname, loaded_api()))
ID = ImportDenier()
sys.meta_path.append(ID)
def commit_api(api):
"""Commit to a particular API, and trigger ImportErrors on subsequent
dangerous imports"""
if api == QT_API_PYSIDE:
ID.forbid('PyQt4')
ID.forbid('PyQt5')
else:
ID.forbid('PySide')
def loaded_api():
"""Return which API is loaded, if any
If this returns anything besides None,
importing any other Qt binding is unsafe.
Returns
-------
None, 'pyside', 'pyqt', or 'pyqtv1'
"""
if 'PyQt4.QtCore' in sys.modules:
if qtapi_version() == 2:
return QT_API_PYQT
else:
return QT_API_PYQTv1
elif 'PySide.QtCore' in sys.modules:
return QT_API_PYSIDE
elif 'PyQt5.QtCore' in sys.modules:
return QT_API_PYQT5
return None
def has_binding(api):
"""Safely check for PyQt4 or PySide, without importing
submodules
Parameters
----------
api : str [ 'pyqtv1' | 'pyqt' | 'pyside' | 'pyqtdefault']
Which module to check for
Returns
-------
True if the relevant module appears to be importable
"""
# we can't import an incomplete pyside and pyqt4
# this will cause a crash in sip (#1431)
# check for complete presence before importing
module_name = {QT_API_PYSIDE: 'PySide',
QT_API_PYQT: 'PyQt4',
QT_API_PYQTv1: 'PyQt4',
QT_API_PYQT_DEFAULT: 'PyQt4',
QT_API_PYQT5: 'PyQt5',
}
module_name = module_name[api]
import imp
try:
#importing top level PyQt4/PySide module is ok...
mod = __import__(module_name)
#...importing submodules is not
imp.find_module('QtCore', mod.__path__)
imp.find_module('QtGui', mod.__path__)
imp.find_module('QtSvg', mod.__path__)
#we can also safely check PySide version
if api == QT_API_PYSIDE:
return check_version(mod.__version__, '1.0.3')
else:
return True
except ImportError:
return False
def qtapi_version():
"""Return which QString API has been set, if any
Returns
-------
The QString API version (1 or 2), or None if not set
"""
try:
import sip
except ImportError:
return
try:
return sip.getapi('QString')
except ValueError:
return
def can_import(api):
"""Safely query whether an API is importable, without importing it"""
if not has_binding(api):
return False
current = loaded_api()
if api == QT_API_PYQT_DEFAULT:
return current in [QT_API_PYQT, QT_API_PYQTv1, QT_API_PYQT5, None]
else:
return current in [api, None]
def import_pyqt4(version=2):
"""
Import PyQt4
Parameters
----------
version : 1, 2, or None
Which QString/QVariant API to use. Set to None to use the system
default
ImportErrors raised within this function are non-recoverable
"""
# The new-style string API (version=2) automatically
# converts QStrings to Unicode Python strings. Also, automatically unpacks
# QVariants to their underlying objects.
import sip
if version is not None:
sip.setapi('QString', version)
sip.setapi('QVariant', version)
from PyQt4 import QtGui, QtCore, QtSvg
if not check_version(QtCore.PYQT_VERSION_STR, '4.7'):
raise ImportError("IPython requires PyQt4 >= 4.7, found %s" %
QtCore.PYQT_VERSION_STR)
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
# query for the API version (in case version == None)
version = sip.getapi('QString')
api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
return QtCore, QtGui, QtSvg, api
def import_pyqt5():
"""
Import PyQt5
ImportErrors raised within this function are non-recoverable
"""
from PyQt5 import QtGui, QtCore, QtSvg
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
return QtCore, QtGui, QtSvg, QT_API_PYQT5
def import_pyside():
"""
Import PySide
ImportErrors raised within this function are non-recoverable
"""
from PySide import QtGui, QtCore, QtSvg # @UnresolvedImport
return QtCore, QtGui, QtSvg, QT_API_PYSIDE
def load_qt(api_options):
"""
Attempt to import Qt, given a preference list
of permissible bindings
It is safe to call this function multiple times.
Parameters
----------
api_options: List of strings
The order of APIs to try. Valid items are 'pyside',
'pyqt', and 'pyqtv1'
Returns
-------
A tuple of QtCore, QtGui, QtSvg, QT_API
The first three are the Qt modules. The last is the
string indicating which module was loaded.
Raises
------
ImportError, if it isn't possible to import any requested
bindings (either becaues they aren't installed, or because
an incompatible library has already been installed)
"""
loaders = {QT_API_PYSIDE: import_pyside,
QT_API_PYQT: import_pyqt4,
QT_API_PYQTv1: partial(import_pyqt4, version=1),
QT_API_PYQT_DEFAULT: partial(import_pyqt4, version=None),
QT_API_PYQT5: import_pyqt5,
}
for api in api_options:
if api not in loaders:
raise RuntimeError(
"Invalid Qt API %r, valid values are: %r, %r, %r, %r, %r" %
(api, QT_API_PYSIDE, QT_API_PYQT,
QT_API_PYQTv1, QT_API_PYQT_DEFAULT, QT_API_PYQT5))
if not can_import(api):
continue
#cannot safely recover from an ImportError during this
result = loaders[api]()
api = result[-1] # changed if api = QT_API_PYQT_DEFAULT
commit_api(api)
return result
else:
raise ImportError("""
Could not load requested Qt binding. Please ensure that
PyQt4 >= 4.7 or PySide >= 1.0.3 is available,
and only one is imported per session.
Currently-imported Qt library: %r
PyQt4 installed: %s
PyQt5 installed: %s
PySide >= 1.0.3 installed: %s
Tried to load: %r
""" % (loaded_api(),
has_binding(QT_API_PYQT),
has_binding(QT_API_PYQT5),
has_binding(QT_API_PYSIDE),
api_options))
| mit | 7,256,599,512,802,346,000 | 26.88968 | 78 | 0.61031 | false |
mcauser/micropython | tests/basics/string_compare.py | 9 | 1156 | print("" == "")
print("" > "")
print("" < "")
print("" == "1")
print("1" == "")
print("" > "1")
print("1" > "")
print("" < "1")
print("1" < "")
print("" >= "1")
print("1" >= "")
print("" <= "1")
print("1" <= "")
print("1" == "1")
print("1" != "1")
print("1" == "2")
print("1" == "10")
print("1" > "1")
print("1" > "2")
print("2" > "1")
print("10" > "1")
print("1/" > "1")
print("1" > "10")
print("1" > "1/")
print("1" < "1")
print("2" < "1")
print("1" < "2")
print("1" < "10")
print("1" < "1/")
print("10" < "1")
print("1/" < "1")
print("1" >= "1")
print("1" >= "2")
print("2" >= "1")
print("10" >= "1")
print("1/" >= "1")
print("1" >= "10")
print("1" >= "1/")
print("1" <= "1")
print("2" <= "1")
print("1" <= "2")
print("1" <= "10")
print("1" <= "1/")
print("10" <= "1")
print("1/" <= "1")
# this tests an internal string that doesn't have a hash with a string
# that does have a hash, but the lengths of the two strings are different
try:
import usys as sys
except ImportError:
import sys
print(sys.version == 'a long string that has a hash')
# this special string would have a hash of 0 but is incremented to 1
print('Q+?' == 'Q' + '+?')
| mit | 2,133,074,550,128,650,500 | 17.95082 | 73 | 0.486159 | false |
peak6/st2 | st2common/st2common/util/misc.py | 3 | 4027 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import collections
import six
__all__ = [
'prefix_dict_keys',
'compare_path_file_name',
'lowercase_value'
]
def prefix_dict_keys(dictionary, prefix='_'):
"""
Prefix dictionary keys with a provided prefix.
:param dictionary: Dictionary whose keys to prefix.
:type dictionary: ``dict``
:param prefix: Key prefix.
:type prefix: ``str``
:rtype: ``dict``:
"""
result = {}
for key, value in six.iteritems(dictionary):
result['%s%s' % (prefix, key)] = value
return result
def compare_path_file_name(file_path_a, file_path_b):
"""
Custom compare function which compares full absolute file paths just using
the file name.
This function can be used with ``sorted`` or ``list.sort`` function.
"""
file_name_a = os.path.basename(file_path_a)
file_name_b = os.path.basename(file_path_b)
return file_name_a < file_name_b
def strip_shell_chars(input_str):
"""
Strips the last '\r' or '\n' or '\r\n' string at the end of
the input string. This is typically used to strip ``stdout``
and ``stderr`` streams of those characters.
:param input_str: Input string to be stripped.
:type input_str: ``str``
:rtype: ``str``
"""
stripped_str = rstrip_last_char(input_str, '\n')
stripped_str = rstrip_last_char(stripped_str, '\r')
return stripped_str
def rstrip_last_char(input_str, char_to_strip):
"""
Strips the last `char_to_strip` from input_str if
input_str ends with `char_to_strip`.
:param input_str: Input string to be stripped.
:type input_str: ``str``
:rtype: ``str``
"""
if not input_str:
return input_str
if not char_to_strip:
return input_str
if input_str.endswith(char_to_strip):
return input_str[:-len(char_to_strip)]
return input_str
def deep_update(d, u):
"""
Perform deep merge / update of the target dict.
"""
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = deep_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_normalized_file_path(file_path):
"""
Return a full normalized file path for the provided path string.
:rtype: ``str``
"""
if hasattr(sys, 'frozen'): # support for py2exe
file_path = 'logging%s__init__%s' % (os.sep, file_path[-4:])
elif file_path[-4:].lower() in ['.pyc', '.pyo']:
file_path = file_path[:-4] + '.py'
else:
file_path = file_path
file_path = os.path.normcase(file_path)
return file_path
def lowercase_value(value):
"""
Lowercase the provided value.
In case of a list, all the string item values are lowercases and in case of a dictionary, all
of the string keys and values are lowercased.
"""
if isinstance(value, six.string_types):
result = value.lower()
elif isinstance(value, (list, tuple)):
result = [str(item).lower() for item in value]
elif isinstance(value, dict):
result = {}
for key, value in six.iteritems(value):
result[key.lower()] = str(value).lower()
else:
result = value
return result
| apache-2.0 | -4,933,971,602,678,351,000 | 25.846667 | 97 | 0.634964 | false |
xxshutong/openerp-7.0 | openerp/addons/portal_project/project.py | 50 | 1747 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-TODAY OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class portal_project(osv.Model):
""" Update of mail_mail class, to add the signin URL to notifications. """
_inherit = 'project.project'
def _get_visibility_selection(self, cr, uid, context=None):
""" Override to add portal option. """
selection = super(portal_project, self)._get_visibility_selection(cr, uid, context=context)
idx = [item[0] for item in selection].index('public')
selection.insert((idx + 1), ('portal', 'Portal Users and Employees'))
return selection
# return [('public', 'All Users'),
# ('portal', 'Portal Users and Employees'),
# ('employees', 'Employees Only'),
# ('followers', 'Followers Only')]
| agpl-3.0 | 8,331,608,704,605,739,000 | 44.973684 | 99 | 0.602175 | false |
google/trax | trax/models/research/terraformer_memory_test.py | 1 | 1081 | # coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test for memory usage in Terraformer models.
This test is designed to run on TPUv3 hardware, processing 1 million tokens at a
time while just barely fitting within the 16 GB memory budget.
"""
from absl.testing import absltest
class TerraformerMemoryTest(absltest.TestCase):
def test_terraformer_memory(self):
pass # TODO(jonni): Figure out an OSS-compatible memory test.
if __name__ == '__main__':
config.config_with_absl()
absltest.main()
| apache-2.0 | 5,308,801,968,719,479,000 | 28.216216 | 80 | 0.746531 | false |
gioman/QGIS | tests/src/python/test_qgsserver_accesscontrol.py | 1 | 67142 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Stephane Brunner'
__date__ = '28/08/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
print('CTEST_FULL_OUTPUT')
import qgis # NOQA
import os
from shutil import copyfile
from math import sqrt
from qgis.testing import unittest
from utilities import unitTestDataPath
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
from qgis.server import QgsServer, QgsAccessControlFilter, QgsServerRequest, QgsBufferServerRequest, QgsBufferServerResponse
from qgis.core import QgsRenderChecker, QgsApplication
from qgis.PyQt.QtCore import QSize
import tempfile
import urllib.request
import urllib.parse
import urllib.error
import base64
XML_NS = \
'service="WFS" version="1.0.0" ' \
'xmlns:wfs="http://www.opengis.net/wfs" ' \
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
'xmlns:ogc="http://www.opengis.net/ogc" ' \
'xmlns="http://www.opengis.net/wfs" updateSequence="0" ' \
'xmlns:xlink="http://www.w3.org/1999/xlink" ' \
'xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.0.0/WFS-capabilities.xsd" ' \
'xmlns:gml="http://www.opengis.net/gml" ' \
'xmlns:ows="http://www.opengis.net/ows" '
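# Common WFS/OGC namespace declarations, interpolated into the XML payloads
# below through the {xml_ns} placeholder.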
WFS_TRANSACTION_INSERT = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:Transaction {xml_ns}>
<wfs:Insert idgen="GenerateNew">
<qgs:db_point>
<qgs:geometry>
<gml:Point srsDimension="2" srsName="http://www.opengis.net/def/crs/EPSG/0/4326">
<gml:coordinates decimal="." cs="," ts=" ">{x},{y}</gml:coordinates>
</gml:Point>
</qgs:geometry>
<qgs:name>{name}</qgs:name>
<qgs:color>{color}</qgs:color>
</qgs:db_point>
</wfs:Insert>
</wfs:Transaction>""".format(x=1000, y=2000, name="test", color="{color}", xml_ns=XML_NS)
WFS_TRANSACTION_UPDATE = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:Transaction {xml_ns}>
<wfs:Update typeName="db_point">
<wfs:Property>
<wfs:Name>color</wfs:Name>
<wfs:Value>{color}</wfs:Value>
</wfs:Property>
<ogc:Filter>
<ogc:FeatureId fid="{id}"/>
</ogc:Filter>
</wfs:Update>
</wfs:Transaction>"""
WFS_TRANSACTION_DELETE = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:Transaction {xml_ns}>
<wfs:Delete typeName="db_point">
<ogc:Filter>
<ogc:FeatureId fid="{id}"/>
</ogc:Filter>
</wfs:Delete>
</wfs:Transaction>"""
class RestrictedAccessControl(QgsAccessControlFilter):
""" Used to have restriction access """
# Be able to deactivate the access control to have a reference point
_active = False
def __init__(self, server_iface):
super(QgsAccessControlFilter, self).__init__(server_iface)
def layerFilterExpression(self, layer):
""" Return an additional expression filter """
if not self._active:
return super(RestrictedAccessControl, self).layerFilterExpression(layer)
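        # With the restriction active, only the feature with $id = 1 is visible in
        # the "Hello" layer; the other layers get no extra filter expression.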
return "$id = 1" if layer.name() == "Hello" else None
def layerFilterSubsetString(self, layer):
""" Return an additional subset string (typically SQL) filter """
if not self._active:
return super(RestrictedAccessControl, self).layerFilterSubsetString(layer)
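        # With the restriction active, each subset-string test layer is limited to a
        # fixed set of primary keys through an SQL-level subset string.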
if layer.name() == "Hello_SubsetString":
return "pk = 1"
elif layer.name() == "Hello_Project_SubsetString":
return "pkuid = 6 or pkuid = 7"
elif layer.name() == "Hello_Filter_SubsetString":
return "pkuid = 6 or pkuid = 7"
else:
return None
def layerPermissions(self, layer):
""" Return the layer rights """
if not self._active:
return super(RestrictedAccessControl, self).layerPermissions(layer)
rh = self.serverInterface().requestHandler()
rights = QgsAccessControlFilter.LayerPermissions()
# Used to test WFS transactions
if rh.parameter("LAYER_PERM") == "no" and rh.parameterMap()["LAYER_PERM"] == "no":
return rights
# Used to test the WCS
if rh.parameter("TEST") == "dem" and rh.parameterMap()["TEST"] == "dem":
rights.canRead = layer.name() != "dem"
else:
rights.canRead = layer.name() != "Country"
if layer.name() == "db_point":
rights.canRead = rights.canInsert = rights.canUpdate = rights.canDelete = True
return rights
def authorizedLayerAttributes(self, layer, attributes):
""" Return the authorised layer attributes """
if not self._active:
return super(RestrictedAccessControl, self).authorizedLayerAttributes(layer, attributes)
if "colour" in attributes: # spellok
attributes.remove("colour") # spellok
return attributes
def allowToEdit(self, layer, feature):
""" Are we authorise to modify the following geometry """
if not self._active:
return super(RestrictedAccessControl, self).allowToEdit(layer, feature)
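        # Only features coloured "red" or "yellow" may be inserted, updated or deleted.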
return feature.attribute("color") in ["red", "yellow"]
def cacheKey(self):
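        # Distinct cache keys keep restricted and full-access responses from
        # sharing the same server cache entry.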
return "r" if self._active else "f"
class TestQgsServerAccessControl(unittest.TestCase):
@classmethod
def _execute_request(cls, qs, requestMethod=QgsServerRequest.GetMethod, data=None):
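        # Run the query string through the embedded QgsServer using in-memory
        # request/response objects and return (raw headers, body) as bytes.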
if data is not None:
data = data.encode('utf-8')
request = QgsBufferServerRequest(qs, requestMethod, {}, data)
response = QgsBufferServerResponse()
cls._server.handleRequest(request, response)
headers = []
rh = response.headers()
rk = list(rh.keys())
rk.sort()
for k in rk:
headers.append(("%s: %s" % (k, rh[k])).encode('utf-8'))
return b"\n".join(headers) + b"\n\n", bytes(response.body())
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls._app = QgsApplication([], False)
cls._server = QgsServer()
cls._execute_request("")
cls._server_iface = cls._server.serverInterface()
cls._accesscontrol = RestrictedAccessControl(cls._server_iface)
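        # Register the restricted access control filter (priority 100); its rules
        # only apply while RestrictedAccessControl._active is True.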
cls._server_iface.registerAccessControl(cls._accesscontrol, 100)
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
del cls._server
cls._app.exitQgis()
def setUp(self):
self.testdata_path = unitTestDataPath("qgis_server_accesscontrol")
dataFile = os.path.join(self.testdata_path, "helloworld.db")
self.assertTrue(os.path.isfile(dataFile), 'Could not find data file "{}"'.format(dataFile))
copyfile(dataFile, os.path.join(self.testdata_path, "_helloworld.db"))
for k in ["QUERY_STRING", "QGIS_PROJECT_FILE"]:
if k in os.environ:
del os.environ[k]
self.projectPath = os.path.join(self.testdata_path, "project.qgs")
self.assertTrue(os.path.isfile(self.projectPath), 'Could not find project file "{}"'.format(self.projectPath))
def tearDown(self):
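        # Restore the original SpatiaLite database so the WFS transaction tests do
        # not leak data changes into the other tests.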
copyfile(os.path.join(self.testdata_path, "_helloworld.db"), os.path.join(self.testdata_path, "helloworld.db"))
# # WMS # # WMS # # WMS # #
def test_wms_getcapabilities(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetCapabilities"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Country</Name>") != -1,
"No Country layer in GetCapabilities\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in GetCapabilities\n%s" % response)
self.assertFalse(
str(response).find("<Name>Country</Name>") != -1,
"Country layer in GetCapabilities\n%s" % response)
def test_wms_getprojectsettings(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetProjectSettings"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<TreeName>Hello</TreeName>") != -1,
"No Hello layer in GetProjectSettings\n%s" % response)
self.assertTrue(
str(response).find("<TreeName>Country</TreeName>") != -1,
"No Country layer in GetProjectSettings\n%s" % response)
self.assertTrue(
str(response).find("<LayerDrawingOrder>Country_Labels,Country,dem,Hello_Filter_SubsetString,Hello_Project_SubsetString,Hello_SubsetString,Hello,db_point</LayerDrawingOrder>") != -1,
"LayerDrawingOrder in GetProjectSettings\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<TreeName>Hello</TreeName>") != -1,
"No Hello layer in GetProjectSettings\n%s" % response)
self.assertFalse(
str(response).find("<TreeName>Country</TreeName>") != -1,
"Country layer in GetProjectSettings\n%s" % response)
self.assertTrue(
str(response).find("<LayerDrawingOrder>Country_Labels,dem,Hello_Filter_SubsetString,Hello_Project_SubsetString,Hello_SubsetString,Hello,db_point</LayerDrawingOrder>") != -1,
"LayerDrawingOrder in GetProjectSettings\n%s" % response)
    def test_wms_getcontext(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetContext"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("name=\"Hello\"") != -1,
"No Hello layer in GetContext\n%s" % response)
self.assertTrue(
str(response).find("name=\"Country\"") != -1,
"No Country layer in GetProjectSettings\n%s" % response)
self.assertTrue(
str(response).find("name=\"Country\"")
< str(response).find("name=\"Hello\""),
"Hello layer not after Country layer\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("name=\"Hello\"") != -1,
"No Hello layer in GetContext\n%s" % response)
self.assertFalse(
str(response).find("name=\"Country\"") != -1,
"No Country layer in GetProjectSettings\n%s" % response)
def test_wms_describelayer_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "DescribeLayer",
"LAYERS": "Hello",
"SLD_VERSION": "1.1.0"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<se:FeatureTypeName>Hello</se:FeatureTypeName>") != -1,
"No Hello layer in DescribeLayer\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<se:FeatureTypeName>Hello</se:FeatureTypeName>") != -1,
"No Hello layer in DescribeLayer\n%s" % response)
def test_wms_describelayer_country(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "DescribeLayer",
"LAYERS": "Country",
"SLD_VERSION": "1.1.0"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<se:FeatureTypeName>Country</se:FeatureTypeName>") != -1,
"No Country layer in DescribeLayer\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<se:FeatureTypeName>Country</se:FeatureTypeName>") != -1,
"Country layer in DescribeLayer\n%s" % response)
def test_wms_getlegendgraphic_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetLegendGraphic",
"LAYERS": "Hello",
"FORMAT": "image/png"
}.items())])
response, headers = self._get_fullaccess(query_string)
self._img_diff_error(response, headers, "WMS_GetLegendGraphic_Hello", 250, QSize(10, 10))
response, headers = self._get_restricted(query_string)
self._img_diff_error(response, headers, "WMS_GetLegendGraphic_Hello", 250, QSize(10, 10))
def test_wms_getlegendgraphic_country(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetLegendGraphic",
"LAYERS": "Country",
"FORMAT": "image/png"
}.items())])
response, headers = self._get_fullaccess(query_string)
self._img_diff_error(response, headers, "WMS_GetLegendGraphic_Country", 250, QSize(10, 10))
response, headers = self._get_restricted(query_string)
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find('<ServiceException code="Security">') != -1,
"Not allowed GetLegendGraphic"
)
def test_wms_getmap(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Country,Hello",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_fullaccess(query_string)
self._img_diff_error(response, headers, "WMS_GetMap")
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Hello",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_restricted(query_string)
self._img_diff_error(response, headers, "Restricted_WMS_GetMap")
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Country",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_restricted(query_string)
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find('<ServiceException code="Security">') != -1,
"Not allowed do a GetMap on Country"
)
def test_wms_getfeatureinfo_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Country,Hello",
"QUERY_LAYERS": "Hello",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "56",
"Y": "144"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeatureInfo\n%s" % response)
self.assertTrue(
str(response).find("<qgs:colour>red</qgs:colour>") != -1, # spellok
"No color in result of GetFeatureInfo\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeatureInfo\n%s" % response)
self.assertFalse(
str(response).find("<qgs:colour>red</qgs:colour>") != -1, # spellok
"Unexpected color in result of GetFeatureInfo\n%s" % response)
self.assertFalse(
str(response).find("<qgs:colour>NULL</qgs:colour>") != -1, # spellok
"Unexpected color NULL in result of GetFeatureInfo\n%s" % response)
def test_wms_getfeatureinfo_hello2(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Country,Hello",
"QUERY_LAYERS": "Hello",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "146",
"Y": "160"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No result in GetFeatureInfo\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"Unexpected result in GetFeatureInfo\n%s" % response)
def test_wms_getfeatureinfo_country(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Country,Hello",
"QUERY_LAYERS": "Country",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "56",
"Y": "144"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeatureInfo\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeatureInfo\n%s" % response)
# # WFS # # WFS # # WFS # #
def test_wfs_getcapabilities(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Country</Name>") != -1,
"No Country layer in WFS/GetCapabilities\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertFalse(
str(response).find("<Name>Country</Name>") != -1,
"Unexpected Country layer in WFS/GetCapabilities\n%s" % response)
def test_wfs_describefeaturetype_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Hello"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
def test_wfs_describefeaturetype_country(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Country"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Country"') != -1,
"No Country layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find('name="Country"') != -1,
"Unexpected Country layer in DescribeFeatureType\n%s" % response)
def test_wfs_getfeature_hello(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:colour>red</qgs:colour>") != -1, # spellok
"No color in result of GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:colour>red</qgs:colour>") != -1, # spellok
"Unexpected color in result of GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:colour>NULL</qgs:colour>") != -1, # spellok
"Unexpected color NULL in result of GetFeature\n%s" % response)
def test_wfs_getfeature_hello2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_country(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Country" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pk</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeatureInfo\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpeced result in GetFeatureInfo\n%s" % response) # spellok
# # WCS # # WCS # # WCS # #
def test_wcs_getcapabilities(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WCS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities",
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<name>dem</name>") != -1,
"No dem layer in WCS/GetCapabilities\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<name>dem</name>") != -1,
"No dem layer in WCS/GetCapabilities\n%s" % response)
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WCS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities",
"TEST": "dem",
}.items())])
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<name>dem</name>") != -1,
"Unexpected dem layer in WCS/GetCapabilities\n%s" % response)
def test_wcs_describecoverage(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WCS",
"VERSION": "1.0.0",
"REQUEST": "DescribeCoverage",
"COVERAGE": "dem",
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<name>dem</name>") != -1,
"No dem layer in DescribeCoverage\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<name>dem</name>") != -1,
"No dem layer in DescribeCoverage\n%s" % response)
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WCS",
"VERSION": "1.0.0",
"REQUEST": "DescribeCoverage",
"COVERAGE": "dem",
"TEST": "dem",
}.items())])
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<name>dem</name>") != -1,
"Unexpected dem layer in DescribeCoverage\n%s" % response)
def test_wcs_getcoverage(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WCS",
"VERSION": "1.0.0",
"REQUEST": "GetCoverage",
"COVERAGE": "dem",
"CRS": "EPSG:3857",
"BBOX": "-1387454,4252256,431091,5458375",
"HEIGHT": "100",
"WIDTH": "100",
"FORMAT": "GTiff",
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertEqual(
headers.get("Content-Type"), "image/tiff",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
self._geo_img_diff(response, "WCS_GetCoverage.geotiff") == 0,
"Image for GetCoverage is wrong")
response, headers = self._get_restricted(query_string)
self.assertEqual(
headers.get("Content-Type"), "image/tiff",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
self._geo_img_diff(response, "WCS_GetCoverage.geotiff") == 0,
"Image for GetCoverage is wrong")
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WCS",
"VERSION": "1.0.0",
"REQUEST": "GetCoverage",
"COVERAGE": "dem",
"CRS": "EPSG:3857",
"BBOX": "-1387454,4252256,431091,5458375",
"HEIGHT": "100",
"WIDTH": "100",
"FORMAT": "GTiff",
"TEST": "dem",
}.items())])
response, headers = self._get_restricted(query_string)
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find('<ServiceException code="RequestNotWellFormed">') != -1,
"The layer for the COVERAGE 'dem' is not found")
# # WFS/Transactions # #
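    # These tests modify the "db_point" layer; the _test_colors helper is used to
    # assert the expected colour of each feature id after every transaction.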
def test_wfstransaction_insert(self):
data = WFS_TRANSACTION_INSERT.format(x=1000, y=2000, name="test", color="{color}", xml_ns=XML_NS)
self._test_colors({1: "blue"})
response, headers = self._post_fullaccess(data.format(color="red"))
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for Insert is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") != -1,
"WFS/Transactions Insert don't succeed\n%s" % response)
self._test_colors({2: "red"})
response, headers = self._post_restricted(data.format(color="blue"))
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for Insert is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") == -1,
"WFS/Transactions Insert succeed\n%s" % response)
response, headers = self._post_restricted(data.format(color="red"), "LAYER_PERM=no")
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for Insert is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find(
'<ServiceException code="Security">No permissions to do WFS changes on layer \\\'db_point\\\'</ServiceException>') != -1,
"WFS/Transactions Insert succeed\n%s" % response)
response, headers = self._post_restricted(data.format(color="yellow"), "LAYER_PERM=yes")
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for Insert is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") != -1,
"WFS/Transactions Insert don't succeed\n%s" % response)
self._test_colors({3: "yellow"})
def test_wfstransaction_update(self):
data = WFS_TRANSACTION_UPDATE.format(id="1", color="{color}", xml_ns=XML_NS)
self._test_colors({1: "blue"})
response, headers = self._post_restricted(data.format(color="yellow"))
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") == -1,
"WFS/Transactions Update succeed\n%s" % response)
self._test_colors({1: "blue"})
response, headers = self._post_fullaccess(data.format(color="red"))
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for Update is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") != -1,
"WFS/Transactions Update don't succeed\n%s" % response)
self._test_colors({1: "red"})
response, headers = self._post_restricted(data.format(color="blue"))
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for Update is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") == -1,
"WFS/Transactions Update succeed\n%s" % response)
self._test_colors({1: "red"})
response, headers = self._post_restricted(data.format(color="yellow"), "LAYER_PERM=no")
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for Update is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find(
'<ServiceException code="Security">No permissions to do WFS changes on layer \\\'db_point\\\'</ServiceException>') != -1,
"WFS/Transactions Update succeed\n%s" % response)
self._test_colors({1: "red"})
response, headers = self._post_restricted(data.format(color="yellow"), "LAYER_PERM=yes")
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for Update is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") != -1,
"WFS/Transactions Update don't succeed\n%s" % response)
self._test_colors({1: "yellow"})
def test_wfstransaction_delete_fullaccess(self):
data = WFS_TRANSACTION_DELETE.format(id="1", xml_ns=XML_NS)
self._test_colors({1: "blue"})
response, headers = self._post_fullaccess(data)
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") != -1,
"WFS/Transactions Delete don't succeed\n%s" % response)
def test_wfstransaction_delete_restricted(self):
data = WFS_TRANSACTION_DELETE.format(id="1", xml_ns=XML_NS)
self._test_colors({1: "blue"})
response, headers = self._post_restricted(data)
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") == -1,
"WFS/Transactions Delete succeed\n%s" % response)
data_update = WFS_TRANSACTION_UPDATE.format(id="1", color="red", xml_ns=XML_NS)
response, headers = self._post_fullaccess(data_update)
self._test_colors({1: "red"})
response, headers = self._post_restricted(data, "LAYER_PERM=no")
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find(
'<ServiceException code="Security">No permissions to do WFS changes on layer \\\'db_point\\\'</ServiceException>') != -1,
"WFS/Transactions Delete succeed\n%s" % response)
response, headers = self._post_restricted(data, "LAYER_PERM=yes")
self.assertEqual(
headers.get("Content-Type"), "text/xml; charset=utf-8",
"Content type for GetMap is wrong: %s" % headers.get("Content-Type"))
self.assertTrue(
str(response).find("<SUCCESS/>") != -1,
"WFS/Transactions Delete don't succeed\n%s" % response)
# # Subset String # #
# # WMS # # WMS # # WMS # #
def test_wms_getmap_subsetstring(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Country,Hello_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_fullaccess(query_string)
self._img_diff_error(response, headers, "WMS_GetMap")
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Hello_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_restricted(query_string)
self._img_diff_error(response, headers, "Restricted_WMS_GetMap")
def test_wms_getmap_subsetstring_with_filter(self):
""" test that request filter and access control subsetStrings are correctly combined. Note that for this
test we reuse the projectsubsetstring reference images as we are using filter requests to set the same
filter " pkuid in (7,8) " as the project subsetstring uses for its test.
"""
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Hello_Filter_SubsetString",
"FILTER": "Hello_Filter_SubsetString:\"pkuid\" IN ( 7 , 8 )",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_fullaccess(query_string)
self._img_diff_error(response, headers, "WMS_GetMap_projectsubstring")
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Hello_Filter_SubsetString",
"FILTER": "Hello_Filter_SubsetString:\"pkuid\" IN ( 7 , 8 )",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_restricted(query_string)
self._img_diff_error(response, headers, "Restricted_WMS_GetMap_projectsubstring")
def test_wms_getmap_projectsubsetstring(self):
""" test that project set layer subsetStrings are honored"""
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Hello_Project_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_fullaccess(query_string)
self._img_diff_error(response, headers, "WMS_GetMap_projectsubstring")
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"LAYERS": "Hello_Project_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857"
}.items())])
response, headers = self._get_restricted(query_string)
self._img_diff_error(response, headers, "Restricted_WMS_GetMap_projectsubstring")
def test_wms_getfeatureinfo_subsetstring(self):
query_string = "&".join(["%s=%s" % i for i in list({
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Country,Hello_SubsetString",
"QUERY_LAYERS": "Hello_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "56",
"Y": "144",
"MAP": urllib.parse.quote(self.projectPath)
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeatureInfo Hello/1\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeatureInfo Hello/1\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeatureInfo Hello/1\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeatureInfo Hello/1\n%s" % response)
def test_wms_getfeatureinfo_subsetstring2(self):
query_string = "&".join(["%s=%s" % i for i in list({
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Country,Hello_SubsetString",
"QUERY_LAYERS": "Hello_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "146",
"Y": "160",
"MAP": urllib.parse.quote(self.projectPath)
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result result in GetFeatureInfo Hello/2\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No good result result in GetFeatureInfo Hello/2\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Unexpected result result in GetFeatureInfo Hello/2\n%s" % response)
def test_wms_getfeatureinfo_projectsubsetstring(self):
"""test that layer subsetStrings set in projects are honored. This test checks for a feature which should be filtered
out by the project set layer subsetString
"""
query_string = "&".join(["%s=%s" % i for i in list({
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Hello_Project_SubsetString",
"QUERY_LAYERS": "Hello_Project_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "56",
"Y": "144",
"MAP": urllib.parse.quote(self.projectPath)
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Project set layer subsetString not honored in WMS GetFeatureInfo/1\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Project set layer subsetString not honored in WMS GetFeatureInfo when access control applied/1\n%s" % response)
def test_wms_getfeatureinfo_projectsubsetstring5(self):
"""test that layer subsetStrings set in projects are honored. This test checks for a feature which should pass
both project set layer subsetString and access control filters
"""
query_string = "&".join(["%s=%s" % i for i in list({
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Hello_Project_SubsetString",
"QUERY_LAYERS": "Hello_Project_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-1623412,3146330,-1603412,3166330",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "146",
"Y": "160",
"MAP": urllib.parse.quote(self.projectPath)
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result result in GetFeatureInfo Hello/2\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"No good result result in GetFeatureInfo Hello/2\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result result in GetFeatureInfo Hello/2\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"No good result result in GetFeatureInfo Hello/2\n%s" % response)
def test_wms_getfeatureinfo_projectsubsetstring3(self):
"""test that layer subsetStrings set in projects are honored. This test checks for a feature which should pass
the project set layer subsetString but fail the access control checks
"""
query_string = "&".join(["%s=%s" % i for i in list({
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Hello_Project_SubsetString",
"QUERY_LAYERS": "Hello_Project_SubsetString",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "3415650,2018968,3415750,2019968",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "146",
"Y": "160",
"MAP": urllib.parse.quote(self.projectPath)
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result result in GetFeatureInfo Hello/2\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>8</qgs:pk>") != -1,
"No good result result in GetFeatureInfo Hello/2\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Unexpected result from GetFeatureInfo Hello/2\n%s" % response)
def test_wms_getfeatureinfo_subsetstring_with_filter(self):
"""test that request filters are honored. This test checks for a feature which should be filtered
out by the request filter
"""
query_string = "&".join(["%s=%s" % i for i in list({
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Hello_Filter_SubsetString",
"QUERY_LAYERS": "Hello_Filter_SubsetString",
"FILTER": "Hello_Filter_SubsetString:\"pkuid\" IN ( 7 , 8 )",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-16817707,-6318936.5,5696513,16195283.5",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "56",
"Y": "144",
"MAP": urllib.parse.quote(self.projectPath)
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Request filter not honored in WMS GetFeatureInfo/1\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Request filter not honored in WMS GetFeatureInfo when access control applied/1\n%s" % response)
def test_wms_getfeatureinfo_projectsubsetstring4(self):
"""test that request filters are honored. This test checks for a feature which should pass
both request filter and access control filters
"""
query_string = "&".join(["%s=%s" % i for i in list({
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Hello_Filter_SubsetString",
"QUERY_LAYERS": "Hello_Filter_SubsetString",
"FILTER": "Hello_Filter_SubsetString:\"pkuid\" IN ( 7 , 8 )",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "-1623412,3146330,-1603412,3166330",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "146",
"Y": "160",
"MAP": urllib.parse.quote(self.projectPath)
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result result in GetFeatureInfo Hello/2\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"No good result result in GetFeatureInfo Hello/2\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result result in GetFeatureInfo Hello/2\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"No good result result in GetFeatureInfo Hello/2\n%s" % response)
def test_wms_getfeatureinfo_projectsubsetstring2(self):
"""test that request filters are honored. This test checks for a feature which should pass
the request filter but fail the access control checks
"""
query_string = "&".join(["%s=%s" % i for i in list({
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetFeatureInfo",
"LAYERS": "Hello_Filter_SubsetString",
"QUERY_LAYERS": "Hello_Filter_SubsetString",
"FILTER": "Hello_Filter_SubsetString:\"pkuid\" IN ( 7 , 8 )",
"STYLES": "",
"FORMAT": "image/png",
"BBOX": "3415650,2018968,3415750,2019968",
"HEIGHT": "500",
"WIDTH": "500",
"SRS": "EPSG:3857",
"FEATURE_COUNT": "10",
"INFO_FORMAT": "application/vnd.ogc.gml",
"X": "146",
"Y": "160",
"MAP": urllib.parse.quote(self.projectPath)
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result result in GetFeatureInfo Hello/2\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>8</qgs:pk>") != -1,
"No good result result in GetFeatureInfo Hello/2\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Unexpected result from GetFeatureInfo Hello/2\n%s" % response)
# # WFS # # WFS # # WFS # #
def test_wfs_getfeature_subsetstring(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
def test_wfs_getfeature_subsetstring2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_project_subsetstring(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for retrieving a feature which should be available both with and without access control
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>7</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature, has been incorrectly filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring2(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for a feature which should be filtered out by access controls
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>8</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>8</qgs:pk>") != -1,
"Feature with pkuid=8 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Feature with pkuid=8 was found in GetFeature, but should have been filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring3(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for a features which should be filtered out by project subsetStrings.
For example, pkuid 6 passes the access control checks, but should not be shown because of project layer subsetString
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>6</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be no results, since pkuid 6 passes access control but is filtered out by the project subsetString
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") == -1,
"Project based layer subsetString not respected in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Project based layer subsetString not respected in GetFeature with restricted access\n%s" % response)
def _handle_request(self, restricted, query_string, **kwargs):
self._accesscontrol._active = restricted
qs = "?" + query_string if query_string is not None else ''
result = self._result(self._execute_request(qs, **kwargs))
return result
def _result(self, data):
headers = {}
for line in data[0].decode('UTF-8').split("\n"):
if line != "":
header = line.split(":")
self.assertEqual(len(header), 2, line)
headers[str(header[0])] = str(header[1]).strip()
return data[1], headers
def _get_fullaccess(self, query_string):
result = self._handle_request(False, query_string)
return result
def _get_restricted(self, query_string):
result = self._handle_request(True, query_string)
return result
def _post_fullaccess(self, data, query_string=None):
self._server.putenv("QGIS_PROJECT_FILE", self.projectPath)
result = self._handle_request(False, query_string, requestMethod=QgsServerRequest.PostMethod, data=data)
self._server.putenv("QGIS_PROJECT_FILE", '')
return result
def _post_restricted(self, data, query_string=None):
self._server.putenv("QGIS_PROJECT_FILE", self.projectPath)
result = self._handle_request(True, query_string, requestMethod=QgsServerRequest.PostMethod, data=data)
self._server.putenv("QGIS_PROJECT_FILE", '')
return result
def _img_diff(self, image, control_image, max_diff, max_size_diff=QSize()):
temp_image = os.path.join(tempfile.gettempdir(), "%s_result.png" % control_image)
with open(temp_image, "wb") as f:
f.write(image)
control = QgsRenderChecker()
control.setControlPathPrefix("qgis_server_accesscontrol")
control.setControlName(control_image)
control.setRenderedImage(temp_image)
if max_size_diff.isValid():
control.setSizeTolerance(max_size_diff.width(), max_size_diff.height())
return control.compareImages(control_image), control.report()
def _img_diff_error(self, response, headers, image, max_diff=10, max_size_diff=QSize()):
self.assertEqual(
headers.get("Content-Type"), "image/png",
"Content type is wrong: %s" % headers.get("Content-Type"))
test, report = self._img_diff(response, image, max_diff, max_size_diff)
with open(os.path.join(tempfile.gettempdir(), image + "_result.png"), "rb") as rendered_file:
encoded_rendered_file = base64.b64encode(rendered_file.read())
message = "Image is wrong\n%s\nImage:\necho '%s' | base64 -d >%s/%s_result.png" % (
report, encoded_rendered_file.strip().decode('utf8'), tempfile.gettempdir(), image
)
# If the failure is in image sizes the diff file will not exist.
if os.path.exists(os.path.join(tempfile.gettempdir(), image + "_result_diff.png")):
with open(os.path.join(tempfile.gettempdir(), image + "_result_diff.png"), "rb") as diff_file:
encoded_diff_file = base64.b64encode(diff_file.read())
message += "\nDiff:\necho '%s' | base64 -d > %s/%s_result_diff.png" % (
encoded_diff_file.strip().decode('utf8'), tempfile.gettempdir(), image
)
self.assertTrue(test, message)
def _geo_img_diff(self, image_1, image_2):
if os.name == 'nt':
# Not supported on Windows due to #13061
return 0
with open(os.path.join(tempfile.gettempdir(), image_2), "wb") as f:
f.write(image_1)
image_1 = gdal.Open(os.path.join(tempfile.gettempdir(), image_2), GA_ReadOnly)
assert image_1, "No output image written: " + image_2
expected_path = os.path.join(self.testdata_path, "results", image_2)
image_2 = gdal.Open(expected_path, GA_ReadOnly)
assert image_2, "No expected image found: " + expected_path
if image_1.RasterXSize != image_2.RasterXSize or image_1.RasterYSize != image_2.RasterYSize:
image_1 = None
image_2 = None
return 1000 # wrong size
square_sum = 0
for x in range(image_1.RasterXSize):
for y in range(image_1.RasterYSize):
square_sum += (image_1.ReadAsArray()[x][y] - image_2.ReadAsArray()[x][y]) ** 2
# Explicitly close GDAL datasets
image_1 = None
image_2 = None
return sqrt(square_sum)
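# Note (illustrative): _geo_img_diff returns the root of the summed squared
# per-pixel differences, so two identical rasters give 0.0, while rasters of
# different dimensions short-circuit to 1000.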
def _test_colors(self, colors):
for id, color in list(colors.items()):
response, headers = self._post_fullaccess(
"""<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="db_point" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>gid</ogc:PropertyName>
<ogc:Literal>{id}</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(id=id, xml_ns=XML_NS)
)
self.assertTrue(
str(response).find("<qgs:color>{color}</qgs:color>".format(color=color)) != -1,
"Wrong color in result\n%s" % response)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -1,303,083,176,948,137,200 | 41.87484 | 193 | 0.565667 | false |
PMBio/gptwosample | gptwosample/twosample/twosample_base.py | 1 | 20377 | '''
Classes to apply GPTwoSample to data
====================================
All classes handling TwoSampleBase tasks should extend this class.
Created on Mar 18, 2011
@author: Max Zwiessele, Oliver Stegle
'''
from gptwosample.data.data_base import input_id, output_id, individual_id, \
common_id, has_model_structure, get_model_structure, DataStructureError
import scipy
from pygp.gp.gp_base import GP
from pygp.gp.composite import GroupGP
from pygp.optimize.optimize_base import opt_hyper
import numpy
from pygp.plot.gpr_plot import plot_sausage, plot_training_data
from copy import deepcopy
from matplotlib import cm
from pygp.likelihood.likelihood_base import GaussLikISO
class TwoSampleBase(object):
"""
TwoSampleBase object with the given covariance function covar.
"""
def __init__(self, learn_hyperparameters=True,
priors=None,
initial_hyperparameters=None, **kwargs):
"""
Perform TwoSampleBase with the given covariance function covar.
**Parameters**:
covar : :py:class:`pygp.covar.CovarianceFunction`
The covariance function this TwoSampleBase class works with.
learn_hyperparameters : bool
Specifies whether or not to optimize the hyperparameters for the given data
priors : {'covar': priors for covar, ...}
Default: None; The prior beliefs you provide for the hyperparameters of the covariance function.
"""
self._learn_hyperparameters = learn_hyperparameters
if has_model_structure(priors):
self._priors = priors
else:
self._priors = get_model_structure(priors, priors)
self._models = dict()
if initial_hyperparameters is None and priors is not None:
self._initial_hyperparameters = get_model_structure({}, {});
for name, prior in self._priors.iteritems():
if prior.has_key('covar'):
# logarithmize the right hyperparameters for the covariance
logtheta = scipy.array([p[1][0] * p[1][1] for p in prior['covar']], dtype='float')
# out since version 1.0.0
# logtheta[covar.get_Iexp(logtheta)] = SP.log(logtheta[covar.get_Iexp(logtheta)])
self._initial_hyperparameters[name]['covar'] = logtheta
elif has_model_structure(initial_hyperparameters):
self._initial_hyperparameters = initial_hyperparameters
elif initial_hyperparameters is not None:
self._initial_hyperparameters = get_model_structure(initial_hyperparameters, initial_hyperparameters)
else:
self._initial_hyperparameters = get_model_structure({})
for name, hyper in self._initial_hyperparameters.iteritems():
hyper['lik'] = numpy.log([0.1])
self._invalidate_cache()
def set_data_by_xy_data(self, x1, x2, y1, y2):
#not_missing = (numpy.isfinite(x1) * numpy.isfinite(x2) * numpy.isfinite(y1) * numpy.isfinite(y2)).flatten()
#x1, x2 = x1[not_missing], x2[not_missing]
#y1, y2 = y1[not_missing], y2[not_missing]
X = numpy.array([x1, x2]); Y = numpy.array([y1, y2])
# set individual model's data
self._models[individual_id].setData(X, Y)
# set common model's data
self._models[common_id].setData(scipy.concatenate(X), scipy.concatenate(Y))
def set_data(self, training_data):
"""
Set the data for prediction.
**Parameters:**
training_data : dict
The training data to learn from. Inputs are time-values and
outputs are expression-values of e.g. a timeseries.
The training data must have the following structure::
{'input' : {'group_1':[double] ... 'group_n':[double]},
'output' : {'group_1':[double] ... 'group_n':[double]}}
"""
try:
self.set_data_by_xy_data(training_data[input_id]['group_1'],
training_data[input_id]['group_2'],
training_data[output_id]['group_1'],
training_data[output_id]['group_2'])
except KeyError:
# print """Please validate training data given. \n
# training_data must have following structure: \n
# {'input' : {'group 1':[double] ... 'group n':[double]},
# 'output' : {'group 1':[double] ... 'group n':[double]}}"""
raise DataStructureError("Please use gptwosample.data.data_base.get_training_data_structure for data passing!")
def predict_model_likelihoods(self, training_data=None, interval_indices=get_model_structure(), *args, **kwargs):
"""
Predict the probabilities of the models (individual and common) to describe the data.
It will optimize hyperparameters respectively.
**Parameters**:
training_data : dict
The training data to learn from. Input are time-values and
output are expression-values of e.g. a timeseries.
If not given, training data must be given previously by
:py:class:`gptwosample.twosample.basic.set_data`.
interval_indices: :py:class:`gptwosample.data.data_base.get_model_structure()`
interval indices, which assign data to individual or common model,
respectively.
args : [..]
see :py:class:`pygp.gpr.gp_base.GP`
kwargs : {..}
see :py:class:`pygp.gpr.gp_base.GP`
"""
if(training_data is not None):
self.set_data(training_data)
for name, model in self._models.iteritems():
model.set_active_set_indices(interval_indices[name])
try:
if(self._learn_hyperparameters):
opt_hyperparameters = opt_hyper(model,
self._initial_hyperparameters[name],
priors=self._priors[name],
*args, **kwargs)[0]
self._learned_hyperparameters[name] = opt_hyperparameters
else:
self._learned_hyperparameters[name] = self._initial_hyperparameters[name]
except ValueError as r:
print "caught error:", r.message, "\r",
self._learned_hyperparameters[name] = self._initial_hyperparameters[name]
self._model_likelihoods[name] = model.LML(self._learned_hyperparameters[name],
priors=self._priors)
return self._model_likelihoods
def predict_mean_variance(self, interpolation_interval,
hyperparams=None,
interval_indices=get_model_structure(),
*args, **kwargs):
"""
Predicts the mean and variance of both models.
Returns::
{'individual':{'mean':[pointwise mean], 'var':[pointwise variance]},
'common':{'mean':[pointwise mean], 'var':[pointwise variance]}}
**Parameters:**
interpolation_interval : [double]
The interval of inputs, which shall be predicted
hyperparams : {'covar':logtheta, ...}
Default: learned hyperparameters. Hyperparams for the covariance function's prediction.
interval_indices : {'common':[boolean],'individual':[boolean]}
Indices in which to predict, for each group, respectively.
"""
if interpolation_interval.ndim < 2:
interpolation_interval = interpolation_interval[:, None]
if(hyperparams is None):
hyperparams = self._learned_hyperparameters
self._predicted_mean_variance = get_model_structure()
if(not has_model_structure(interpolation_interval)):
interpolation_interval = get_model_structure(interpolation_interval, interpolation_interval)
for name, model in self._models.iteritems():
model.set_active_set_indices(interval_indices[name])
prediction = model.predict(hyperparams[name], interpolation_interval[name], var=True, *args, **kwargs)
self._predicted_mean_variance[name] = {'mean':prediction[0], 'var':prediction[1]}
self._interpolation_interval_cache = interpolation_interval
return self._predicted_mean_variance
def bayes_factor(self, model_likelihoods=None):
"""
Return the Bayes Factor for the given log marginal likelihoods model_likelihoods
**Parameters:**
model_likelihoods : {'individual': *the individual likelihoods*, 'common': *the common likelihoods*}
The likelihoods calculated by
predict_model_likelihoods(training_data)
for given training data training_data.
"""
if model_likelihoods is numpy.NaN:
return numpy.NaN
if(model_likelihoods is None):
model_likelihoods = self._model_likelihoods
return model_likelihoods[common_id] - model_likelihoods[individual_id]
def get_covars(self):
models = self._models
return {individual_id: models[individual_id].covar, common_id: models[common_id].covar}
def get_model_likelihoods(self):
"""
Returns all calculated likelihoods in model structure. If not calculated returns None in model structure.
"""
return self._model_likelihoods
def get_learned_hyperparameters(self):
"""
Returns learned hyperparameters in model structure, if already learned.
"""
return self._learned_hyperparameters
def get_predicted_mean_variance(self):
"""
Get the predicted mean and variance as::
{'individual':{'mean':[pointwise mean], 'var':[pointwise variance]},
'common':{'mean':[pointwise mean], 'var':[pointwise variance]}}
If not yet predicted it will return 'individual' and 'common' empty.
"""
return self._predicted_mean_variance
def get_data(self, model=common_id, index=None): # , interval_indices=get_model_structure()):
"""
get inputs of model `model` with group index `index`.
If index is None, the whole model group will be returned.
"""
if(index is None):
return self._models[model].getData() # [:, interval_indices[model]].squeeze()
else:
return self._models[model].getData()[index] # [:, interval_indices[model]].squeeze()
def plot(self,
xlabel="input", ylabel="ouput", title=None,
interval_indices=None, alpha=None, legend=True,
replicate_indices=None, shift=None, *args, **kwargs):
"""
Plot the results given by last prediction.
Two Instance Plots of comparing two groups to each other:
**Parameters:**
twosample_object : :py:class:`gptwosample.twosample`
GPTwoSample object, on which already 'predict' was called.
**Differential Groups:**
.. image:: ../images/plotGPTwoSampleDifferential.pdf
:height: 8cm
**Non-Differential Groups:**
.. image:: ../images/plotGPTwoSampleSame.pdf
:height: 8cm
Returns:
Proper rectangles for use in pylab.legend().
"""
if self._predicted_mean_variance is None:
print "Not yet predicted, or not predictable"
return
if interval_indices is None:
interval_indices = get_model_structure(
common=numpy.array(numpy.zeros_like(self.get_data(common_id)[0]), dtype='bool'),
individual=numpy.array(numpy.ones_like(self.get_data(individual_id, 0)[0]), dtype='bool'))
import pylab
if title is None:
title = r'Prediction result: $\log(p(\mathcal{H}_I)/p(\mathcal{H}_S)) = %.2f $' % (self.bayes_factor())
# plparams = {'axes.labelsize': 20,
# 'text.fontsize': 20,
# 'legend.fontsize': 18,
# 'title.fontsize': 22,
# 'xtick.labelsize': 20,
# 'ytick.labelsize': 20,
# 'usetex': True }
legend_plots = []
legend_names = []
calc_replicate_indices = replicate_indices is None
alpha_groups = alpha
if alpha is not None:
alpha_groups = 1 - alpha
for name, value in self._predicted_mean_variance.iteritems():
mean = value['mean']
var = numpy.sqrt(value['var'])
if len(mean.shape) > 1:
number_of_groups = mean.shape[0]
first = True
for i in range(number_of_groups):
col_num = (i / (2. * number_of_groups))
col = cm.jet(col_num) # (i/number_of_groups,i/number_of_groups,.8) @UndefinedVariable
x, y = self.get_data(name, i)
x, y = x.squeeze(), y.squeeze()
replicate_length = len(numpy.unique(x))
number_of_replicates = len(x) / replicate_length
if calc_replicate_indices:
# Assume replicates are appended one after another
replicate_indices = []
curr = x[0] - 1
rep = 0
replicate_length = 0
for xi in x:
if xi < curr:
replicate_indices.append(numpy.repeat(rep, replicate_length))
rep += 1
replicate_length = 0
replicate_length += 1
curr = xi
replicate_indices.append(numpy.repeat(rep, replicate_length))
replicate_indices = numpy.concatenate(replicate_indices)
shifti = deepcopy(shift)
if shifti is not None:
shifti = shift[i * number_of_replicates:(i + 1) * number_of_replicates]
# import pdb;pdb.set_trace()
plot_sausage(self._interpolation_interval_cache[name] - numpy.mean(shifti), mean[i], var[i], format_fill={'alpha':0.2, 'facecolor':col}, format_line={'alpha':1, 'color':col, 'lw':3, 'ls':'--'}, alpha=alpha_groups)[0]
else:
plot_sausage(self._interpolation_interval_cache[name],
mean[i], var[i],
format_fill={'alpha':0.2, 'facecolor':col},
format_line={'alpha':1, 'color':col, 'lw':3, 'ls':'--'}, alpha=alpha_groups)[0]
plot_training_data(
numpy.array(x), numpy.array(y),
format_data={'alpha':.8,
'marker':'.',
'linestyle':'--',
'lw':1,
'markersize':6,
'color':col},
replicate_indices=replicate_indices,
shift=shifti, *args, **kwargs)
if(first):
legend_plots.append(pylab.Rectangle((0, 0), 1, 1, alpha=.2, fill=True, facecolor=col))
legend_names.append("%s %i" % (name, i + 1))
# first=False
else:
col = cm.jet(1.) # @UndefinedVariable
# data = self.get_data(name, interval_indices=interval_indices)
# PLOT.plot_training_data(
# x, y,
# format_data={'alpha':.2,
# 'marker':'.',
# 'linestyle':'',
# 'markersize':10,
# 'color':col})
legend_names.append("%s" % (name))
plot_sausage(
self._interpolation_interval_cache[name], mean, var,
format_fill={'alpha':0.2, 'facecolor':col},
format_line={'alpha':1, 'color':col, 'lw':3, 'ls':'--'}, alpha=alpha)[0]
legend_plots.append(pylab.Rectangle((0, 0), 1, 1, alpha=.2, fc=col, fill=True))
if legend:
pylab.legend(legend_plots, legend_names,
bbox_to_anchor=(0., 0., 1., 0.), loc=3,
ncol=2,
mode="expand",
borderaxespad=0.,
fancybox=False, frameon=False)
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
pylab.subplots_adjust(top=.88)
pylab.title(title, fontsize=22)
return legend_plots
######### PRIVATE ##############
# def _init_twosample_model(self, covar):
# """
# The initialization of the twosample model with
# the given covariance function covar
# """
# print("please implement twosample model")
# pass
def _invalidate_cache(self):
# self._learned_hyperparameters = dict([name,None for name in self._models.keys()])
self._model_likelihoods = get_model_structure()
self._learned_hyperparameters = get_model_structure()
self._interpolation_interval_cache = None
self._predicted_mean_variance = None
class TwoSampleShare(TwoSampleBase):
"""
This class provides comparison of two Timeline Groups to each other.
see :py:class:`gptwosample.twosample.twosample_base.TwoSampleBase` for detailed description of provided methods.
"""
def __init__(self, covar, *args, **kwargs):
"""
see :py:class:`gptwosample.twosample.twosample_base.TwoSampleBase`
"""
if not kwargs.has_key('initial_hyperparameters'):
kwargs['initial_hyperparameters'] = \
get_model_structure(individual={'covar':numpy.zeros(covar.get_number_of_parameters())},
common={'covar':numpy.zeros(covar.get_number_of_parameters())})
super(TwoSampleShare, self).__init__(*args, **kwargs)
gpr1 = GP(deepcopy(covar), likelihood=GaussLikISO())
gpr2 = GP(deepcopy(covar), likelihood=GaussLikISO())
# individual = GroupGP([gpr1,gpr2])
# common = GP(covar)
# self.covar = covar
# set models for this TwoSampleBase Test
self._models = {individual_id:GroupGP([gpr1, gpr2]),
common_id:GP(deepcopy(covar), likelihood=GaussLikISO())}
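# Illustrative usage sketch (hypothetical names; covar comes from pygp.covar and
# training_data from gptwosample.data.data_base.get_training_data_structure):
#   twosample = TwoSampleShare(covar)
#   twosample.predict_model_likelihoods(training_data)
#   score = twosample.bayes_factor()  # log marginal likelihood of common minus individual model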
class TwoSampleSeparate(TwoSampleBase):
"""
This class provides comparison of two Timeline Groups to one another, including timeshifts in replicates, respectively.
see :py:class:`gptwosample.twosample.twosample_base.TwoSampleBase` for detailed description of provided methods.
Note that this model will need one covariance function for each model, respectively!
"""
def __init__(self, covar_individual_1, covar_individual_2, covar_common, **kwargs):
"""
see :py:class:`gptwosample.twosample.twosample_base.TwoSampleBase`
"""
if not kwargs.has_key('initial_hyperparameters'):
kwargs['initial_hyperparameters'] = \
get_model_structure(individual={'covar':numpy.zeros(covar_individual_1.get_number_of_parameters())},
common={'covar':numpy.zeros(covar_common.get_number_of_parameters())})
super(TwoSampleSeparate, self).__init__(**kwargs)
gpr1 = GP(deepcopy(covar_individual_1), likelihood=GaussLikISO())
gpr2 = GP(deepcopy(covar_individual_2), likelihood=GaussLikISO())
# self.covar_individual_1 = covar_individual_1
# self.covar_individual_2 = covar_individual_2
# self.covar_common = covar_common
# set models for this TwoSampleBase Test
self._models = {individual_id:GroupGP([gpr1, gpr2]), common_id:GP(deepcopy(covar_common), likelihood=GaussLikISO())}
| apache-2.0 | -1,284,699,057,646,585,900 | 43.394336 | 240 | 0.559356 | false |
sgammon/libcloud | libcloud/test/compute/test_ktucloud.py | 7 | 4609 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qsl
try:
import simplejson as json
except ImportError:
import json
from libcloud.compute.drivers.ktucloud import KTUCloudNodeDriver
from libcloud.test import MockHttpTestCase
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
class KTUCloudNodeDriverTest(unittest.TestCase, TestCaseMixin):
def setUp(self):
KTUCloudNodeDriver.connectionCls.conn_classes = \
(None, KTUCloudStackMockHttp)
self.driver = KTUCloudNodeDriver('apikey', 'secret',
path='/test/path',
host='api.dummy.com')
self.driver.path = '/test/path'
self.driver.type = -1
KTUCloudStackMockHttp.fixture_tag = 'default'
self.driver.connection.poll_interval = 0.0
def test_create_node_immediate_failure(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
KTUCloudStackMockHttp.fixture_tag = 'deployfail'
try:
self.driver.create_node(name='node-name', image=image, size=size)
except:
return
self.assertTrue(False)
def test_create_node_delayed_failure(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
KTUCloudStackMockHttp.fixture_tag = 'deployfail2'
try:
self.driver.create_node(name='node-name', image=image, size=size)
except:
return
self.assertTrue(False)
def test_list_images_no_images_available(self):
KTUCloudStackMockHttp.fixture_tag = 'notemplates'
images = self.driver.list_images()
self.assertEqual(0, len(images))
def test_list_images_available(self):
images = self.driver.list_images()
self.assertEqual(112, len(images))
def test_list_sizes_available(self):
sizes = self.driver.list_sizes()
self.assertEqual(112, len(sizes))
def test_list_sizes_nodisk(self):
KTUCloudStackMockHttp.fixture_tag = 'nodisk'
sizes = self.driver.list_sizes()
self.assertEqual(2, len(sizes))
check = False
size = sizes[1]
if size.id == KTUCloudNodeDriver.EMPTY_DISKOFFERINGID:
check = True
self.assertTrue(check)
class KTUCloudStackMockHttp(MockHttpTestCase):
fixtures = ComputeFileFixtures('ktucloud')
fixture_tag = 'default'
def _load_fixture(self, fixture):
body = self.fixtures.load(fixture)
return body, json.loads(body)
def _test_path(self, method, url, body, headers):
url = urlparse.urlparse(url)
query = dict(parse_qsl(url.query))
self.assertTrue('apiKey' in query)
self.assertTrue('command' in query)
self.assertTrue('response' in query)
self.assertTrue('signature' in query)
self.assertTrue(query['response'] == 'json')
del query['apiKey']
del query['response']
del query['signature']
command = query.pop('command')
if hasattr(self, '_cmd_' + command):
return getattr(self, '_cmd_' + command)(**query)
else:
fixture = command + '_' + self.fixture_tag + '.json'
body, obj = self._load_fixture(fixture)
return (httplib.OK, body, obj, httplib.responses[httplib.OK])
def _cmd_queryAsyncJobResult(self, jobid):
fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json'
body, obj = self._load_fixture(fixture)
return (httplib.OK, body, obj, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | -7,609,938,295,606,295,000 | 33.654135 | 77 | 0.653938 | false |
krafczyk/spack | var/spack/repos/builtin/packages/krb5/package.py | 4 | 1806 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Krb5(AutotoolsPackage):
"""Network authentication protocol"""
homepage = "https://kerberos.org"
url = "https://kerberos.org/dist/krb5/1.16/krb5-1.16.1.tar.gz"
version('1.16.1', '848e9b80d6aaaa798e3f3df24b83c407')
depends_on('openssl')
configure_directory = 'src'
build_directory = 'src'
def configure_args(self):
args = ['--disable-debug',
'--disable-dependency-tracking',
'--disable-silent-rules',
'--without-system-verto']
return args
| lgpl-2.1 | -4,382,937,387,186,588,000 | 38.26087 | 78 | 0.647841 | false |
nafitzgerald/allennlp | tests/commands/main_test.py | 1 | 2414 | from unittest import TestCase
import logging
import sys
from allennlp.commands import main
from allennlp.commands.subcommand import Subcommand
class TestMain(TestCase):
def test_fails_on_unknown_command(self):
sys.argv = ["bogus", # command
"unknown_model", # model_name
"bogus file", # input_file
"--output-file", "bogus out file",
"--silent"]
with self.assertRaises(SystemExit) as cm: # pylint: disable=invalid-name
main()
assert cm.exception.code == 2 # argparse code for incorrect usage
def test_warn_on_deprecated_flags(self):
sys.argv = ["[executable]",
"evaluate",
"--archive_file", "tests/fixtures/bidaf/serialization/model.tar.gz",
"--evaluation_data_file", "tests/fixtures/data/squad.json",
"--cuda_device", "-1"]
with self.assertLogs(level=logging.WARNING) as context:
main()
assert set(context.output) == {
'WARNING:allennlp.commands:Argument name --archive_file is deprecated '
'(and will likely go away at some point), please use --archive-file instead',
'WARNING:allennlp.commands:Argument name --evaluation_data_file is deprecated '
'(and will likely go away at some point), please use --evaluation-data-file instead',
'WARNING:allennlp.commands:Argument name --cuda_device is deprecated '
'(and will likely go away at some point), please use --cuda-device instead',
}
def test_subcommand_overrides(self):
def do_nothing(_):
pass
class FakeEvaluate(Subcommand):
add_subparser_called = False
def add_subparser(self, name, parser):
subparser = parser.add_parser(name,
description="fake",
help="fake help")
subparser.set_defaults(func=do_nothing)
self.add_subparser_called = True
return subparser
fake_evaluate = FakeEvaluate()
sys.argv = ["evaluate"]
main(subcommand_overrides={"evaluate": fake_evaluate})
assert fake_evaluate.add_subparser_called
| apache-2.0 | -6,127,869,298,480,993,000 | 35.575758 | 105 | 0.55551 | false |
deklungel/iRulez | src/output_status/domain.py | 1 | 4151 | from enum import IntEnum
import src.irulez.util as util
from abc import ABC
import src.irulez.log as log
import src.irulez.constants as constants
from typing import List, Optional, Dict
logger = log.get_logger('domain')
class ArduinoPinType(IntEnum):
"""Represents the purpose of a pin on an arduino"""
BUTTON = 1
OUTPUT = 2
DIMMER = 3
class Pin(ABC):
"""Represents a pin on an arduino"""
def __init__(self, number: int, pin_type: ArduinoPinType):
self.__number = number
self.__pin_type = pin_type
self.__state = 0
self.__direction = constants.dim_direction_up
@property
def state(self) -> bool:
if self.__state > 0:
return True
return False
@state.setter
def state(self, state: int) -> None:
self.__state = state
@property
def number(self) -> int:
return self.__number
@property
def dim_state(self) -> int:
return self.__state
@property
def direction(self) -> Optional[str]:
return self.__direction
@direction.setter
def direction(self, direction: str) -> None:
self.__direction = direction
class OutputPin(Pin):
"""Represents a single pin on an arduino"""
def __init__(self, number: int, parent: str):
super(OutputPin, self).__init__(number, ArduinoPinType.OUTPUT)
self.parent = parent
class DimmerLightValue:
"""Class for keeping the last_light_value of a dimmer_id"""
def __init__(self, id: int, last_light_value: int):
self.__last_light_value = last_light_value
self.__id = id
@property
def id(self) -> int:
return self.__id
@property
def last_light_value(self) -> int:
return self.__last_light_value
@last_light_value.setter
def last_light_value(self, last_light_value: int) -> None:
self.__last_light_value = last_light_value
class Arduino:
"""Represents an actual arduino"""
def __init__(self, name: str, number_of_outputs_pins: int):
self.name = name
self.number_of_output_pins = number_of_outputs_pins
self.__output_pins = dict()
@property
def output_pins(self) -> Dict[int, OutputPin]:
return self.__output_pins
def set_output_pin(self, output_pin: OutputPin) -> None:
self.output_pins[output_pin.number] = output_pin
def set_output_pins(self, output_pins: List[OutputPin]) -> None:
for pin in output_pins:
self.output_pins[pin.number] = pin
def get_output_pin_status(self) -> str:
"""Gets the status array of the output_pins of this arduino"""
# Initialize empty state array
pin_states = [0] * self.number_of_output_pins
# Loop over all output_pins and set their state in the array
for pin in self.output_pins.values():
pin_states[pin.number] = 1 if pin.state else 0
# convert array to hex string
return util.convert_array_to_hex(pin_states)
def get_output_dim_pin_status(self) -> str:
to_return = ''
for pin in self.output_pins.values():
to_return += str(pin.dim_state) + ' '
return to_return
def get_output_pin(self, pin_number: int) -> OutputPin:
return self.output_pins[pin_number]
def set_output_pin_status(self, payload: str) -> None:
status = util.convert_hex_to_array(payload, self.number_of_output_pins)
for pin in self.output_pins.values():
if int(status[pin.number]) == 1:
pin.state = 100
else:
pin.state = 0
def set_dimmer_pin_status(self, payload: int, pin_number: int) -> None:
pin = self.output_pins[pin_number]
if pin.dim_state - payload > 0:
pin.direction = constants.dim_direction_down
elif pin.dim_state - payload < 0:
pin.direction = constants.dim_direction_up
self.output_pins[pin_number].state = payload
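# Illustrative sketch (hypothetical values): for an Arduino with 8 output pins
# where only pin 0 is switched on, get_output_pin_status() fills the state array
# [1, 0, 0, 0, 0, 0, 0, 0] and returns its hex encoding via util.convert_array_to_hex;
# set_output_pin_status() performs the reverse mapping from such a payload.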
class ArduinoConfig:
"""Represents the configuration of all known arduinos"""
def __init__(self, arduinos: List[Arduino]):
self.arduinos = arduinos
| mit | -7,455,802,873,022,522,000 | 28.027972 | 79 | 0.613105 | false |
pipermerriam/flex | tests/validation/schema/test_enum_validation.py | 1 | 1596 | import six
import pytest
from flex.exceptions import ValidationError
from flex.constants import EMPTY
from flex.error_messages import MESSAGES
from tests.utils import (
generate_validator_from_schema,
assert_error_message_equal,
)
#
# enum validation tests
#
@pytest.mark.parametrize(
'letters',
('a', 'b', True, 1, 2),
)
def test_enum_with_valid_array(letters):
schema = {
'enum': [2, 1, 'a', 'b', 'c', True, False],
}
validator = generate_validator_from_schema(schema)
validator(letters)
@pytest.mark.parametrize(
'letters',
(None, 1, 0, 2, 'a'),
)
def test_enum_with_invalid_items(letters):
schema = {
'enum': [True, False, 1.0, 2.0, 'A'],
}
validator = generate_validator_from_schema(schema)
with pytest.raises(ValidationError) as err:
validator(letters)
assert_error_message_equal(
err.value.messages[0]['enum'][0],
MESSAGES['enum']['invalid'],
)
def test_enum_noop_when_not_required_and_field_not_present():
schema = {
'enum': [True, False, 1.0, 2.0, 'A'],
}
validator = generate_validator_from_schema(schema)
validator(EMPTY)
@pytest.mark.parametrize(
'enum_value,value',
(
(six.text_type('test'), six.text_type('test')),
(six.text_type('test'), b'test'),
(b'test', six.text_type('test')),
(b'test', b'test'),
)
)
def test_enum_disparate_text_types(enum_value, value):
schema = {
'enum': [enum_value],
}
validator = generate_validator_from_schema(schema)
validator(value)
| mit | -4,946,300,484,205,600,000 | 20.567568 | 61 | 0.614035 | false |
kuipertan/vitess | py/checkers/checker.py | 22 | 24151 | #!/usr/bin/env python
import collections
import datetime
import difflib
import heapq
import itertools
import json
import logging
import optparse
import os
import cPickle as pickle
import pprint
import Queue
import re
import sys
import tempfile
import threading
import time
import urlparse
import MySQLdb
import MySQLdb.cursors
ws = re.compile(r'\s+')
def clean(s):
return ws.sub(' ', s).strip()
def merge_sorted(seqs, key=None):
if key is None:
return heapq.merge(*seqs)
else:
return (i[1] for i in heapq.merge(*(((key(item), item) for item in seq) for seq in seqs)))
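# Illustrative example (not part of the original module): merging already sorted
# sequences by a key function, e.g.
#   list(merge_sorted([[(1, 'a'), (3, 'c')], [(2, 'b')]], key=lambda row: row[0]))
# yields [(1, 'a'), (2, 'b'), (3, 'c')].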
def parse_database_url(url, password_map):
if not url.startswith('mysql'):
url = 'mysql://' + url
url = 'http' + url[len('mysql'):]
parsed = urlparse.urlparse(url)
params = {'user': parsed.username,
'host': parsed.hostname,
'db': parsed.path[1:]}
if parsed.username:
params['user'] = parsed.username
if parsed.password:
params['passwd'] = parsed.password
elif parsed.username and parsed.username in password_map:
params['passwd'] = password_map[parsed.username][0]
if parsed.port:
params['port'] = parsed.port
params.update(dict(urlparse.parse_qsl(parsed.query)))
return params
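# Illustrative example (hypothetical URL): with an empty password map,
#   parse_database_url('checker:pw@dbhost:3306/test_db?charset=utf8', {})
# yields {'user': 'checker', 'passwd': 'pw', 'host': 'dbhost', 'port': 3306,
#         'db': 'test_db', 'charset': 'utf8'}.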
class AtomicWriter(object):
"""AtomicWriter is a file-like object that allows you to do an
atomic write (on close), using os.rename.
"""
def __init__(self, filename, directory):
self.filename = filename
self.tempfile = tempfile.NamedTemporaryFile(delete=False, dir=directory)
def write(self, s):
return self.tempfile.write(s)
def close(self):
self.tempfile.close()
os.rename(self.tempfile.name, self.filename)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
return False
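# Illustrative usage (hypothetical paths): data is written to a temp file in the
# given directory and only renamed over the target on close, so readers never
# see a half-written checkpoint:
#   with AtomicWriter('/tmp/table.pickle', '/tmp') as fi:
#       pickle.dump(state, fi)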
def sql_tuple_comparison(tablename, columns, column_name_prefix=''):
"""Tuple comparison has the semantics I need, but it confuses
MySQL's optimizer. This code returns SQL equivalent to tuple
comparison.
"""
head = columns[0]
if len(columns) == 1:
return '%(tablename)s.%(column)s > %%(%(column_name_prefix)s%(column)s)s' % {
'column': head,
'tablename': tablename,
'column_name_prefix': column_name_prefix}
return """%(tablename)s.%(column)s > %%(%(column_name_prefix)s%(column)s)s or
(%(tablename)s.%(column)s = %%(%(column_name_prefix)s%(column)s)s and (%(rec)s))""" % {
'column': head,
'rec': sql_tuple_comparison(tablename, columns[1:], column_name_prefix),
'tablename': tablename,
'column_name_prefix': column_name_prefix}
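# For example (illustrative, assuming a two-column primary key ('a', 'b')):
#   sql_tuple_comparison('t', ['a', 'b'])
# expands to roughly
#   t.a > %(a)s or (t.a = %(a)s and (t.b > %(b)s))
# which is equivalent to the tuple comparison (t.a, t.b) > (%(a)s, %(b)s) but
# lets MySQL's optimizer use the primary key index.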
def sorted_row_list_difference(expected, actual, key_length):
"""Finds elements in only one or the other of two, sorted input lists.
Returns a three-element tuple of lists. The first list contains
those elements in the "expected" list but not in the "actual"
list, the second contains those elements in the "actual" list but
not in the "expected" list, the third contains rows that are
different despite having the same primary key.
"""
# adapted from unittest.
missing = []
unexpected = []
different = []
expected, actual = iter(expected), iter(actual)
enext = expected.next
anext = actual.next
try:
e, a = enext(), anext()
while True:
if a == e:
e, a = enext(), anext()
continue
ekey, akey = e[:key_length], a[:key_length]
if ekey < akey:
missing.append(e)
e = enext()
elif ekey > akey:
unexpected.append(a)
a = anext()
else:
different.append((a, e))
e, a = enext(), anext()
except StopIteration:
missing.extend(expected)
unexpected.extend(actual)
return missing, unexpected, different
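# Illustrative example (hypothetical rows, primary key length 1):
#   sorted_row_list_difference([(1, 'a'), (2, 'b')], [(2, 'x'), (3, 'c')], 1)
# returns missing=[(1, 'a')], unexpected=[(3, 'c')] and
# different=[((2, 'x'), (2, 'b'))] (actual row first, expected row second).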
class Datastore(object):
"""Datastore is database which expects that all queries sent to it
will use the same primary key.
"""
def __init__(self, connection_params, stats=None):
self.stats = stats
self.connection_params = dict((str(key), value) for key, value in connection_params.items())
self.dbname = connection_params['db']
self._cursor = None
@property
def cursor(self):
# NOTE(szopa): This is a property so that connection object is
# created in the first thread that tries to access it.
if self._cursor is None:
self._cursor = MySQLdb.connect(**self.connection_params).cursor()
return self._cursor
def query(self, sql, params, retries=3):
start = time.time()
self.cursor.execute(sql, params)
if self.stats:
self.stats.update(self.dbname, start)
return self.cursor.fetchall()
class DatastoreThread(threading.Thread):
def __init__(self, datastore, retries=3):
super(DatastoreThread, self).__init__()
self.datastore = datastore
self.in_queue = Queue.Queue(maxsize=1)
self.out_queue = Queue.Queue(maxsize=1)
self.retries = retries
self.daemon = True
self.start()
def run(self):
while True:
sql, params = self.in_queue.get()
i = 0
while True:
try:
self.out_queue.put(self.datastore.query(sql, params))
break
except MySQLdb.OperationalError as e:
i += 1
logging.exception("Error reading from %s", self.datastore.dbname)
if i >= self.retries:
self.out_queue.put(e)
break
time.sleep(1)
except Exception as e:
logging.exception("Unexpected exception while reading from %s", self.datastore.dbname)
self.out_queue.put(e)
break
def query(self, sql, params):
self.in_queue.put((sql, params))
def get(self):
d = self.out_queue.get()
if isinstance(d, Exception):
raise d
return d
class MultiDatastore(object):
"""MultiDatastore gathers results from a list of Datastores. Each
datastore is queried in a separate thread.
"""
def __init__(self, connection_params_list, nickname, stats=None):
self.nickname = nickname
self.stats = stats
self.threads = [DatastoreThread(Datastore(params)) for params in connection_params_list]
def query(self, sql, params):
"""Query all the child datastores in parallel.
"""
start = time.time()
for thread in self.threads:
thread.query(sql, params)
data = [thread.get() for thread in self.threads]
return data
class Mismatch(Exception):
def __init__(self, missing, unexpected, different):
self.missing, self.unexpected, self.different = missing, unexpected, different
def dict_diff(self, left, right):
return '\n'.join(difflib.ndiff(
pprint.pformat(left).splitlines(),
pprint.pformat(right).splitlines()))
def __str__(self):
data = []
if self.missing:
data.append("Missing in destination:\n%s" % pprint.pformat(self.missing))
if self.unexpected:
data.append("Unexpected in destination:\n%s" % pprint.pformat(self.unexpected))
if self.different:
data.append("Different:\n%s" % '\n'.join(self.dict_diff(*d) for d in self.different))
return '\n'.join(data) + '\n'
class Stats(object):
def __init__(self, interval=0, name=""):
self.lock = threading.Lock()
self.interval = interval
self.name = name
self.clear()
def clear(self):
self.times = collections.defaultdict(float)
self.items = 0
self.clear_local()
def clear_local(self):
self.local_times = collections.defaultdict(float)
self.local_items = 0
self.last_flush = time.time()
def update(self, key, from_time, items=0):
with self.lock:
logging.debug("update: key: %s, from_time: %s, items: %s", key, from_time, items)
# Items are incremented only by 'total'
t = time.time() - from_time
self.local_times[key] += t
self.times[key] += t
self.items += items
self.local_items += items
def maybe_print_local(self, force=False):
if self.interval == 0:
return
if force or (time.time() - self.last_flush) >= self.interval:
try:
total = self.local_times.pop('total')
except KeyError:
pass
else:
data = [self.name, "total speed: %0.2f items/s" % (self.local_items / total)]
data.extend("\t%s: %0.2f%%" % (k, (v * 100) / total) for k, v in self.local_times.items())
logging.info('\t'.join(data))
self.clear_local()
def print_total(self):
try:
total = self.times.pop('total')
except KeyError:
logging.info('No stats: no work was necessary.')
else:
data = [self.name, "(FINAL) total speed: %0.2f items/s" % (self.items / total)]
data.extend("\t%s: %0.2f%%" % (k, (v * 100) / total) for k, v in self.times.items())
logging.info('\t'.join(data))
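# Illustrative usage sketch (mirrors how the workers below use it): a phase is
# timed with
#   start = time.time(); ...run the query...; stats.update('destination', start)
# while the main loop adds processed row counts via the 'total' key and calls
# stats.maybe_print_local() to log per-phase time percentages.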
class Checker(object):
def __init__(self, destination_url, sources_urls, table, directory='.',
source_column_map=None, source_table_name=None, source_force_index_pk=True,
destination_force_index_pk=True,
keyrange={}, batch_count=0, blocks=1, ratio=1.0, block_size=16384,
logging_level=logging.INFO, stats_interval=1, temp_directory=None, password_map_file=None):
self.table_name = table
if source_table_name is None:
self.source_table_name = self.table_name
else:
self.source_table_name = source_table_name
if password_map_file:
with open(password_map_file, "r") as f:
password_map = json.load(f)
else:
password_map = {}
self.table_data = self.get_table_data(table, parse_database_url(destination_url, password_map))
self.primary_key = self.table_data['pk']
if source_column_map:
self.source_column_map = source_column_map
else:
self.source_column_map = {}
columns = self.table_data['columns']
for k in self.primary_key:
columns.remove(k)
self.columns = self.primary_key + columns
self.source_columns = [self.source_column_map.get(c, c) for c in self.columns]
self.source_primary_key = [self.source_column_map.get(c, c) for c in self.primary_key]
self.pk_length = len(self.primary_key)
(self.batch_count, self.block_size,
self.ratio, self.blocks) = batch_count, block_size, ratio, blocks
self.calculate_batch_size()
self.current_pk = dict((k, 0) for k in self.primary_key)
self.iterations = 0
self.temp_directory = temp_directory
self.checkpoint_file = os.path.join(directory, table + '.pickle')
self.mismatches_file = os.path.join(directory, table + '_mismatches.txt')
self.done = False
try:
self.restore_checkpoint()
except IOError:
pass
if source_force_index_pk:
source_use_index = 'use index (primary)'
else:
source_use_index = ''
if destination_force_index_pk:
destination_use_index = 'use index (primary)'
else:
destination_use_index = ''
keyspace_sql_parts = []
if keyrange.get('start') or keyrange.get('end'):
if keyrange.get('start'):
keyspace_sql_parts.append("keyspace_id >= %s and" % keyrange.get('start'))
if keyrange.get('end'):
keyspace_sql_parts.append("keyspace_id < %s and" % keyrange.get('end'))
self.destination_sql = """
select
%(columns)s
from %(table_name)s %(use_index)s
where
%(range_sql)s
order by %(pk_columns)s limit %%(limit)s""" % {
'table_name': self.table_name,
'use_index': destination_use_index,
'columns': ', '.join(self.columns),
'pk_columns': ', '.join(self.primary_key),
'range_sql': sql_tuple_comparison(self.table_name, self.primary_key)}
# Almost like destination SQL except for the keyspace_sql clause.
self.last_source_sql = """
select
%(columns)s
from %(table_name)s %(use_index)s
where %(keyspace_sql)s
(%(range_sql)s)
order by %(pk_columns)s limit %%(limit)s""" % {
'table_name': self.source_table_name,
'use_index': source_use_index,
'keyspace_sql': ' '.join(keyspace_sql_parts),
'columns': ', '.join(self.source_columns),
'pk_columns': ', '.join(self.source_primary_key),
'range_sql': sql_tuple_comparison(self.source_table_name, self.source_primary_key)}
self.source_sql = """
select
%(columns)s
from %(table_name)s %(use_index)s
where %(keyspace_sql)s
((%(min_range_sql)s) and not (%(max_range_sql)s))
order by %(pk_columns)s""" % {
'table_name': self.source_table_name,
'use_index': source_use_index,
'keyspace_sql': ' '.join(keyspace_sql_parts),
'columns': ', '.join(self.source_columns),
'pk_columns': ', '.join(self.source_primary_key),
'min_range_sql': sql_tuple_comparison(self.source_table_name, self.source_primary_key),
'max_range_sql': sql_tuple_comparison(self.source_table_name, self.source_primary_key, column_name_prefix='max_')}
self.stats = Stats(interval=stats_interval, name=self.table_name)
self.destination = Datastore(parse_database_url(destination_url, password_map), stats=self.stats)
self.sources = MultiDatastore([parse_database_url(s, password_map) for s in sources_urls], 'all-sources', stats=self.stats)
logging.basicConfig(level=logging_level)
logging.debug("destination sql template: %s", clean(self.destination_sql))
logging.debug("source sql template: %s", clean(self.source_sql))
def get_table_data(self, table_name, params):
table = {'columns': [], 'pk': []}
conn = MySQLdb.connect(**params)
cursor = conn.cursor()
cursor.execute("SELECT avg_row_length FROM information_schema.tables WHERE table_schema = %s AND table_name = %s", (params['db'], table_name))
table['avg_row_length'] = cursor.fetchone()[0]
cursor.execute("SELECT column_name FROM information_schema.columns WHERE table_schema = %s AND table_name = %s ORDER BY table_name, ordinal_position", (params['db'], table_name))
for row in cursor.fetchall():
table['columns'].append(row[0])
cursor.execute("select column_name FROM information_schema.key_column_usage WHERE table_schema=%s AND constraint_name='PRIMARY' AND table_name = %s ORDER BY table_name, ordinal_position", (params['db'], table_name))
for row in cursor.fetchall():
table['pk'].append(row[0])
return table
def calculate_batch_size(self):
if self.batch_count != 0:
self.batch_size = self.batch_count
else:
try:
rows_per_block = float(self.block_size) / self.table_data.get('avg_row_length', 0)
except ZeroDivisionError:
rows_per_block = 20
self.batch_size = int(rows_per_block * self.ratio * self.blocks)
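# Illustrative numbers (assumed option-parser defaults, hypothetical table): with
# block_size=2097152, an avg_row_length of 100 bytes, ratio=1.0 and blocks=3,
# batch_size = int((2097152 / 100.0) * 1.0 * 3) ~= 62914 rows per query.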
def get_pk(self, row):
return dict((k, v) for k, v in zip(self.primary_key, row))
def _run(self):
# initialize destination_in_queue, sources_in_queue, merger_comparer_in_queue, merger_comparer_out_queue
self.destination_in_queue = Queue.Queue(maxsize=3)
self.sources_in_queue = Queue.Queue(maxsize=3)
self.merger_comparer_in_queue = Queue.Queue(maxsize=3)
self.merger_comparer_out_queue = Queue.Queue(maxsize=3)
# start destination, sources, merger, comparer
threads = []
for worker_name in ['destination_worker', 'sources_worker', 'merger_comparer_worker']:
worker = getattr(self, worker_name)
t = threading.Thread(target=worker, name=worker_name)
t.daemon = True
threads.append(t)
t.start()
self.destination_in_queue.put((self.current_pk, None))
start = time.time()
while True:
# get error from the comparer out-queue, raise if it isn't None.
error_or_done, processed_rows = self.merger_comparer_out_queue.get()
self.stats.update('total', start, processed_rows)
start = time.time()
self.stats.maybe_print_local()
if isinstance(error_or_done, Mismatch):
self.handle_mismatch(error_or_done)
continue
if error_or_done:
self.destination_in_queue.put((None, True))
if error_or_done is True:
self.stats.print_total()
return
elif error_or_done is not None:
raise error_or_done
def handle_mismatch(self, mismatch):
with open(self.mismatches_file, 'a') as fi:
fi.write(str(mismatch))
def destination_worker(self):
while True:
start_pk, done = self.destination_in_queue.get()
start = time.time()
if done:
self.sources_in_queue.put((None, None, None, True))
return
params = {'limit': self.batch_size}
params.update(start_pk)
# query the destination -> data
try:
destination_data = self.destination.query(self.destination_sql, params)
except MySQLdb.ProgrammingError as e:
self.sources_in_queue.put((None, None, None, e))
return
try:
end_pk = self.get_pk(destination_data[-1])
except IndexError:
# There's no more data in the destination. The next object we
# get from the in-queue is going to be put there by _run or is
# going to be an error.
self.sources_in_queue.put((start_pk, None, [], None))
else:
# put the (range-pk, data) on the sources in-queue
self.sources_in_queue.put((start_pk, end_pk, destination_data, None))
self.destination_in_queue.put((end_pk, None))
self.stats.update('destination', start)
def sources_worker(self):
while True:
# get (range-pk, data) from the sources in-queue
(start_pk, end_pk, destination_data, error_or_done) = self.sources_in_queue.get()
start = time.time()
if error_or_done:
self.merger_comparer_in_queue.put((None, None, error_or_done))
return
# query the sources -> sources_data
params = dict((self.source_column_map.get(k, k), v) for k, v in start_pk.items())
if destination_data:
for k, v in end_pk.items():
params['max_' + self.source_column_map.get(k, k)] = v
sources_data = self.sources.query(self.source_sql, params)
else:
params['limit'] = self.batch_size
sources_data = self.sources.query(self.last_source_sql, params)
# put (sources_data, data, done) on the merger in-queue
done = not (sources_data or destination_data)
self.stats.update('sources', start)
self.merger_comparer_in_queue.put((destination_data, sources_data, done))
def merger_comparer_worker(self):
while True:
destination_data, sources_data, error_or_done = self.merger_comparer_in_queue.get()
start = time.time()
if error_or_done:
self.merger_comparer_out_queue.put((error_or_done, 0))
return
            # No more data in either the sources or the destination; we are
            # done.
if destination_data == [] and not any(len(s) for s in sources_data):
self.merger_comparer_out_queue.put((True, 0))
return
merged_data = heapq.merge(*sources_data)
try:
last_pk = self.get_pk(destination_data[-1])
except IndexError:
# Only sources data: short-circuit
merged_data = list(merged_data)
last_pk = self.get_pk(merged_data[-1])
missing, unexpected, different = merged_data, [], []
else:
# compare the data
missing, unexpected, different = sorted_row_list_difference(merged_data, destination_data, self.pk_length)
self.stats.update('comparer', start)
# put the mismatch or None on comparer out-queue.
if any([missing, unexpected, different]):
self.merger_comparer_out_queue.put((Mismatch(missing, unexpected, different), len(destination_data)))
else:
self.merger_comparer_out_queue.put((None, len(destination_data)))
# checkpoint
self.checkpoint(last_pk, done=False)
def restore_checkpoint(self):
with open(self.checkpoint_file) as fi:
checkpoint = pickle.load(fi)
self.current_pk, self.done = checkpoint['current_pk'], checkpoint['done']
def checkpoint(self, pk, done=False):
start = time.time()
data = {'current_pk': pk,
'done': done,
'timestamp': str(datetime.datetime.now())}
with AtomicWriter(self.checkpoint_file, self.temp_directory) as fi:
pickle.dump(data, fi)
self.stats.update('checkpoint', start)
def run(self):
try:
self._run()
except Mismatch as e:
print e
def get_range(start, end):
ret = {}
if start != "":
ret['start'] = int(start, 16)
if end != "":
ret['end'] = int(end, 16)
return ret
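# For example (illustrative values): get_range('8000', 'c000') returns
# {'start': 32768, 'end': 49152}, while get_range('', '') returns {} so the
# whole keyspace is checked.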
def main():
parser = optparse.OptionParser()
parser.add_option('--batch-count',
type='int', default=0, dest='batch_count',
help='process this many rows in one batch')
parser.add_option('--stats', type='int',
default=0, dest='stats',
help='Print stats every n seconds.')
parser.add_option('-c', '--checkpoint-directory',
dest='checkpoint_directory', type='string',
help='Directory to store checkpoints.',
default='.')
parser.add_option('-r', '--ratio', dest='ratio',
type='float', default=1.0,
help='Assumed block fill ratio.')
parser.add_option('--source-column-map',
dest='source_column_map', type='string',
help='column_in_destination:column_in_source,column_in_destination2:column_in_source2,...',
default='')
parser.add_option('--source-table-name',
dest='source_table_name', type='string',
help='name of the table in sources (if different than in destination)',
default=None)
parser.add_option('--no-source-force-index', dest='source_force_index', action='store_false',
default=True,
help='Do not add a "use index (primary)" to the SQL statements issued to the sources.')
parser.add_option('--no-destination-force-index', dest='destination_force_index', action='store_false',
default=True,
help='Do not add a "use index (primary)" to the SQL statements issued to the destination.')
parser.add_option('-b', '--blocks', dest='blocks',
type='float', default=3,
help='Try to send this many blocks in one commit.')
parser.add_option('--block-size',
type='int', default=2097152, dest='block_size',
help='Assumed size of a block')
parser.add_option('--start', type='string', dest='start', default='',
help="keyrange start (hexadecimal)")
parser.add_option('--end', type='string', dest='end', default='',
help="keyrange end (hexadecimal)")
parser.add_option('--password-map-file', type='string', default=None,
help="password map file")
(options, args) = parser.parse_args()
table, destination, sources = args[0], args[1], args[2:]
source_column_map = {}
if options.source_column_map:
for pair in options.source_column_map.split(','):
k, v = pair.split(':')
source_column_map[k] = v
checker = Checker(destination, sources, table, options.checkpoint_directory,
source_column_map=source_column_map,
source_force_index_pk=options.source_force_index,
destination_force_index_pk=options.destination_force_index,
source_table_name=options.source_table_name,
keyrange=get_range(options.start, options.end),
stats_interval=options.stats, batch_count=options.batch_count,
block_size=options.block_size, ratio=options.ratio,
temp_directory=options.checkpoint_directory,
password_map_file=options.password_map_file)
checker.run()
if __name__ == '__main__':
main()
| bsd-3-clause | 7,338,149,038,301,776,000 | 34.726331 | 219 | 0.620388 | false |
maggrey/cherrymusic | cherrymusicserver/api/v1/__init__.py | 7 | 4107 | #!/usr/bin/env python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" CherryMusic REST API version 1
(! under construction !)
"""
# __________
# || .------.|
# ||/ [|
# ||| /||
# |||\ | [|
# _ ________ _ |||.'___| |'---...__
# /o)===|________(o\ ||========| ``-..
# / / _.----'\ \ |'=.====.=' ________ \
# / | .-' ----. / | | |____| .'.-------.\ |
# \ \ .'_.----._ \ | _\_|____|.'.'_.----._ \\__|
# /\ \ .'.' __ `.\ |-_| |____| /.' __ '.\ |
# // \ \' / / \ \\|-_|_|____|// / \ \`--'
# // \ / .| | | | |____| | | | |
# // \ .'.' | \ __ / | | \ __ / |
# // /'.' '. .' '. .'
# //_____.'-' `-.__.-' `-.__.-' LGB
# http://www.ascii-art.de/ascii/pqr/roadworks.txt (brought to you by the 90s)
#python 2.6+ backward compatibility
from __future__ import unicode_literals
import sys
import cherrypy
from cherrymusicserver.api.v1 import jsontools
from cherrymusicserver.api.v1 import users
from cherrymusicserver.api.v1.resources import Resource
debug = True
def get_resource():
""" Assembles and return the API root resource """
root = ResourceRoot()
root.users = users.get_resource()
return root
def get_config():
""" Return the CherryPy config dict for the API mount point """
return {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'error_page.default': jsontools.json_error_handler,
'tools.json_in.on': True,
'tools.json_out.on': True,
'tools.json_out.handler': jsontools.json_handler,
'tools.sessions.on': False,
},
}
def mount(mountpath):
""" Mount and configure API root resource to cherrypy.tree """
cherrypy.tree.mount(get_resource(), mountpath, config=get_config())
if sys.version_info < (3,): # pragma: no cover
# Disable a check that crashes the server in python2.
# Our config keys are unicode, and this check exposes them to an
# incompatible .translate() call in _cpdispatch.find_handler.
# (This setting must happen globally through config.update().)
cherrypy.config.update({
'checker.check_static_paths': False,
})
class ResourceRoot(Resource):
""" Defines the behavior of the API root resource;
subresources can define their own behavior and should be attached
dynamically.
"""
def GET(self):
""" Returns a list of available subresources """
resources = []
for name, member in self.__dict__.items():
if getattr(member, 'exposed', False):
resources.append(name)
return sorted(resources)
| gpl-3.0 | 5,374,324,591,684,315,000 | 34.713043 | 80 | 0.513514 | false |
fusionbox/django_polymorphic | setup.py | 1 | 1727 | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
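# Usage sketch (file layout assumed): if polymorphic/__version__.py contains a
# line such as __version__ = '1.0', then find_version('polymorphic',
# '__version__.py') returns '1.0'.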
setup(
name = 'django_polymorphic',
version = find_version('polymorphic', '__version__.py'),
license = 'BSD',
description = 'Seamless Polymorphic Inheritance for Django Models',
long_description = read('README.rst'),
url = 'https://github.com/chrisglass/django_polymorphic',
author = 'Bert Constantin',
author_email = '[email protected]',
maintainer = 'Christopher Glass',
maintainer_email = '[email protected]',
packages = find_packages(),
package_data = {
'polymorphic': [
'templates/admin/polymorphic/*.html',
],
},
install_requires=['setuptools'],
test_suite='runtests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| bsd-3-clause | 4,490,042,891,271,522,300 | 27.783333 | 88 | 0.62073 | false |
Uname-a/pythonstuff | test.py | 2 | 1713 | from tkinter import *
import os  # needed by the custom-sound branch in timer()
import pygame
import time
fields = ('hours', 'mins', 'sec')
def timer(ti):
ch = ""
if (ch == ""):#default sound
wtime(ti,"/home/laptop/git/pythonstuff/sounds/bird.ogg")
    else:#sets the sound to what the user wants
os.chdir("/home/laptop/git/pythonstuff/sounds/")
wtime(ti,ch)
def wtime(ti,choice):#counts down the time and plays the song at the end
    pygame.mixer.init(0)
    while ti > 0:
        time.sleep(1)
        ti = ti - 1
        print("time left in seconds", ti)
    # countdown finished: loop the chosen sound until the program exits
    pygame.mixer.music.load(choice)
    pygame.mixer.music.play(-1)
    return 0
def cal(entries):
    # convert the hours/minutes/seconds fields to a total number of seconds
hi = (int(entries['hours'].get()) )
mi = (int(entries['mins'].get()) )
si = (int(entries['sec'].get()) )
ti = hi * 3600 + mi * 60 + si
timer(ti)
def makeform(root, fields):
entries = {}
for field in fields:
row = Frame(root)
lab = Label(row, width=22, text=field+": ", anchor='w')
ent = Entry(row)
ent.insert(0,"0")
row.pack(side=TOP, fill=X, padx=5, pady=5)
lab.pack(side=LEFT)
ent.pack(side=RIGHT, expand=YES, fill=X)
entries[field] = ent
return entries
def restart():
print("")
def main():
if __name__ == '__main__':
root = Tk()
ents = makeform(root, fields)
        root.bind('<Return>', (lambda event, e=ents: cal(e)))  # Enter starts the countdown
#b1 = Button(root, text='restart',
# command=(lambda e=ents: restart(e)))
#b1.pack(side=LEFT, padx=5, pady=5)
b2 = Button(root, text='start',
command=(lambda e=ents: cal(e)))
b2.pack(side=LEFT, padx=5, pady=5)
b3 = Button(root, text='Quit', command=root.quit)
b3.pack(side=LEFT, padx=5, pady=5)
root.mainloop()
main() | mit | -6,086,804,060,909,318,000 | 24.58209 | 72 | 0.597198 | false |
kalaidin/luigi | test/contrib/hive_test.py | 14 | 14233 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import os
import sys
import tempfile
from helpers import unittest
import luigi.contrib.hive
import mock
from luigi import LocalTarget
class HiveTest(unittest.TestCase):
count = 0
def mock_hive_cmd(self, args, check_return=True):
self.last_hive_cmd = args
self.count += 1
return "statement{0}".format(self.count)
def setUp(self):
self.run_hive_cmd_saved = luigi.contrib.hive.run_hive
luigi.contrib.hive.run_hive = self.mock_hive_cmd
def tearDown(self):
luigi.contrib.hive.run_hive = self.run_hive_cmd_saved
def test_run_hive_command(self):
pre_count = self.count
res = luigi.contrib.hive.run_hive_cmd("foo")
self.assertEqual(["-e", "foo"], self.last_hive_cmd)
self.assertEqual("statement{0}".format(pre_count + 1), res)
def test_run_hive_script_not_exists(self):
def test():
luigi.contrib.hive.run_hive_script("/tmp/some-non-existant-file______")
self.assertRaises(RuntimeError, test)
def test_run_hive_script_exists(self):
with tempfile.NamedTemporaryFile(delete=True) as f:
pre_count = self.count
res = luigi.contrib.hive.run_hive_script(f.name)
self.assertEqual(["-f", f.name], self.last_hive_cmd)
self.assertEqual("statement{0}".format(pre_count + 1), res)
def test_create_parent_dirs(self):
dirname = "/tmp/hive_task_test_dir"
class FooHiveTask(object):
def output(self):
return LocalTarget(os.path.join(dirname, "foo"))
runner = luigi.contrib.hive.HiveQueryRunner()
runner.prepare_outputs(FooHiveTask())
self.assertTrue(os.path.exists(dirname))
class HiveCommandClientTest(unittest.TestCase):
"""Note that some of these tests are really for the CDH releases of Hive, to which I do not currently have access.
Hopefully there are no significant differences in the expected output"""
def setUp(self):
self.client = luigi.contrib.hive.HiveCommandClient()
self.apacheclient = luigi.contrib.hive.ApacheHiveCommandClient()
self.metastoreclient = luigi.contrib.hive.MetastoreClient()
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_default_table_location(self, run_command):
run_command.return_value = "Protect Mode: None \n" \
"Retention: 0 \n" \
"Location: hdfs://localhost:9000/user/hive/warehouse/mytable \n" \
"Table Type: MANAGED_TABLE \n"
returned = self.client.table_location("mytable")
self.assertEqual('hdfs://localhost:9000/user/hive/warehouse/mytable', returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_table_exists(self, run_command):
run_command.return_value = "OK"
returned = self.client.table_exists("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"mytable"
returned = self.client.table_exists("mytable")
self.assertTrue(returned)
# Issue #896 test case insensitivity
returned = self.client.table_exists("MyTable")
self.assertTrue(returned)
run_command.return_value = "day=2013-06-28/hour=3\n" \
"day=2013-06-28/hour=4\n" \
"day=2013-07-07/hour=2\n"
self.client.partition_spec = mock.Mock(name="partition_spec")
self.client.partition_spec.return_value = "somepart"
returned = self.client.table_exists("mytable", partition={'a': 'b'})
self.assertTrue(returned)
run_command.return_value = ""
returned = self.client.table_exists("mytable", partition={'a': 'b'})
self.assertFalse(returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_table_schema(self, run_command):
run_command.return_value = "FAILED: SemanticException [Error 10001]: blah does not exist\nSome other stuff"
returned = self.client.table_schema("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"col1 string None \n" \
"col2 string None \n" \
"col3 string None \n" \
"day string None \n" \
"hour smallint None \n\n" \
"# Partition Information \n" \
"# col_name data_type comment \n\n" \
"day string None \n" \
"hour smallint None \n" \
"Time taken: 2.08 seconds, Fetched: 34 row(s)\n"
expected = [('OK',),
('col1', 'string', 'None'),
('col2', 'string', 'None'),
('col3', 'string', 'None'),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('',),
('# Partition Information',),
('# col_name', 'data_type', 'comment'),
('',),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('Time taken: 2.08 seconds, Fetched: 34 row(s)',)]
returned = self.client.table_schema("mytable")
self.assertEqual(expected, returned)
def test_partition_spec(self):
returned = self.client.partition_spec({'a': 'b', 'c': 'd'})
self.assertEqual("a='b',c='d'", returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_apacheclient_table_exists(self, run_command):
run_command.return_value = "OK"
returned = self.apacheclient.table_exists("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"mytable"
returned = self.apacheclient.table_exists("mytable")
self.assertTrue(returned)
# Issue #896 test case insensitivity
returned = self.apacheclient.table_exists("MyTable")
self.assertTrue(returned)
run_command.return_value = "day=2013-06-28/hour=3\n" \
"day=2013-06-28/hour=4\n" \
"day=2013-07-07/hour=2\n"
self.apacheclient.partition_spec = mock.Mock(name="partition_spec")
self.apacheclient.partition_spec.return_value = "somepart"
returned = self.apacheclient.table_exists("mytable", partition={'a': 'b'})
self.assertTrue(returned)
run_command.return_value = ""
returned = self.apacheclient.table_exists("mytable", partition={'a': 'b'})
self.assertFalse(returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_apacheclient_table_schema(self, run_command):
run_command.return_value = "FAILED: SemanticException [Error 10001]: Table not found mytable\nSome other stuff"
returned = self.apacheclient.table_schema("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"col1 string None \n" \
"col2 string None \n" \
"col3 string None \n" \
"day string None \n" \
"hour smallint None \n\n" \
"# Partition Information \n" \
"# col_name data_type comment \n\n" \
"day string None \n" \
"hour smallint None \n" \
"Time taken: 2.08 seconds, Fetched: 34 row(s)\n"
expected = [('OK',),
('col1', 'string', 'None'),
('col2', 'string', 'None'),
('col3', 'string', 'None'),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('',),
('# Partition Information',),
('# col_name', 'data_type', 'comment'),
('',),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('Time taken: 2.08 seconds, Fetched: 34 row(s)',)]
returned = self.apacheclient.table_schema("mytable")
self.assertEqual(expected, returned)
@mock.patch("luigi.contrib.hive.HiveThriftContext")
def test_metastoreclient_partition_existence_regardless_of_order(self, thrift_context):
thrift_context.return_value = thrift_context
client_mock = mock.Mock(name="clientmock")
client_mock.return_value = client_mock
thrift_context.__enter__ = client_mock
client_mock.get_partition_names = mock.Mock(return_value=["p1=x/p2=y", "p1=a/p2=b"])
partition_spec = OrderedDict([("p1", "a"), ("p2", "b")])
self.assertTrue(self.metastoreclient.table_exists("table", "default", partition_spec))
partition_spec = OrderedDict([("p2", "b"), ("p1", "a")])
self.assertTrue(self.metastoreclient.table_exists("table", "default", partition_spec))
def test_metastore_partition_spec_has_the_same_order(self):
partition_spec = OrderedDict([("p1", "a"), ("p2", "b")])
spec_string = luigi.contrib.hive.MetastoreClient().partition_spec(partition_spec)
self.assertEqual(spec_string, "p1=a/p2=b")
partition_spec = OrderedDict([("p2", "b"), ("p1", "a")])
spec_string = luigi.contrib.hive.MetastoreClient().partition_spec(partition_spec)
self.assertEqual(spec_string, "p1=a/p2=b")
@mock.patch("luigi.configuration")
def test_client_def(self, hive_syntax):
hive_syntax.get_config.return_value.get.return_value = "cdh4"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.HiveCommandClient, type(client))
hive_syntax.get_config.return_value.get.return_value = "cdh3"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.HiveCommandClient, type(client))
hive_syntax.get_config.return_value.get.return_value = "apache"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.ApacheHiveCommandClient, type(client))
@mock.patch('subprocess.Popen')
def test_run_hive_command(self, popen):
# I'm testing this again to check the return codes
# I didn't want to tear up all the existing tests to change how run_hive is mocked
comm = mock.Mock(name='communicate_mock')
comm.return_value = "some return stuff", ""
preturn = mock.Mock(name='open_mock')
preturn.returncode = 0
preturn.communicate = comm
popen.return_value = preturn
returned = luigi.contrib.hive.run_hive(["blah", "blah"])
self.assertEqual("some return stuff", returned)
preturn.returncode = 17
self.assertRaises(luigi.contrib.hive.HiveCommandError, luigi.contrib.hive.run_hive, ["blah", "blah"])
comm.return_value = "", "some stderr stuff"
returned = luigi.contrib.hive.run_hive(["blah", "blah"], False)
self.assertEqual("", returned)
class TestHiveMisc(unittest.TestCase):
def test_import_old(self):
import luigi.hive
self.assertEqual(luigi.hive.HiveQueryTask, luigi.contrib.hive.HiveQueryTask)
class MyHiveTask(luigi.contrib.hive.HiveQueryTask):
param = luigi.Parameter()
def query(self):
return 'banana banana %s' % self.param
class TestHiveTask(unittest.TestCase):
@mock.patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
def test_run(self, run_and_track_hadoop_job):
success = luigi.run(['MyHiveTask', '--param', 'foo', '--local-scheduler', '--no-lock'])
self.assertTrue(success)
self.assertEqual('hive', run_and_track_hadoop_job.call_args[0][0][0])
class TestHiveTarget(unittest.TestCase):
def test_hive_table_target(self):
client = mock.Mock()
target = luigi.contrib.hive.HiveTableTarget(database='db', table='foo', client=client)
target.exists()
client.table_exists.assert_called_with('foo', 'db')
def test_hive_partition_target(self):
client = mock.Mock()
target = luigi.contrib.hive.HivePartitionTarget(database='db', table='foo', partition='bar', client=client)
target.exists()
client.table_exists.assert_called_with('foo', 'db', 'bar')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,052,190,599,578,711,000 | 43.339564 | 119 | 0.550762 | false |
fabsx00/chucky-old | mlutils/factorization/MatrixFactorizer.py | 1 | 3163 | import scipy
import sparsesvd
import nimfa
import pymf
import pickle
class MatrixFactorizer():
def __init__(self):
self._registerAlgorithms()
def factorize(self, X, nBases=None, algo = 'SVD'):
self.algorithm = algo
"""
Returns the factorization of the sparse matrix X into
the matrices (W,H). Watchout, W and H are dense
scipy matrices.
If number of bases is not specified, it will try to
determine this parameter automatically.
"""
if nBases == None:
return self.factorizeWithoutK(X, algo)
return self.algorithms[algo](X, nBases)
def save(self, projectRoot):
WFilename = projectRoot + 'W_%s_%d.pickl' % (self.algorithm, self.W.shape[1])
HFilename = projectRoot + 'H_%s_%d.pickl' % (self.algorithm, self.W.shape[1])
pickle.dump(self.W, file(WFilename, 'w'))
pickle.dump(self.H, file(HFilename, 'w'))
    def factorizeWithoutK(self, X, algo):
        nBases = xrange(100, X.shape[1], 10)
        for k in nBases:
            print k
            try:
                (nextW, nextH) = self.factorize(X, k, algo)
                # return the first factorization that succeeds
                return (nextW, nextH)
            except KeyboardInterrupt:
                import sys
                sys.exit()
            except:
                import traceback
                traceback.print_exc()
                print 'Factorization failure for %d bases, incrementing' % (k)
                continue
        return (None, None)
def _registerAlgorithms(self):
self.algorithms = {}
self.algorithms['SVD'] = self._SPARSESVD
self.algorithms['PYMF_PCA'] = self._PYMF_PCA
self.algorithms['NIMFA_NMF'] = self._NIMFA_NMF
def _SPARSESVD(self, X, nBases):
        k = int(nBases)  # number of singular triplets to keep
(U, S, V) = sparsesvd.sparsesvd(X.tocsc(), k)
self.W = scipy.matrix(U.T*S)
self.H = scipy.matrix(V)
return (self.W, self.H)
def _NIMFA_NMF(self, X, nBases):
model = nimfa.mf(X, seed = 'nndsvd', rank = nBases,
method = "nmf", initialize_only = True)
fit = nimfa.mf_run(model)
W = fit.basis()
H = fit.coef()
self.W = W.todense()
self.H = H.todense()
return (self.W, self.H)
def _PYMF_PCA(self, X, nBases):
X = X.todense()
self.mdl = pymf.PCA(X, num_bases=nBases)
self.mdl.factorize()
self.W = self.mdl.W
self.H = self.mdl.H
return (self.mdl.W, self.mdl.H)
""" For Debugging Purposes """
def getBasisVectors(self, termDocMatrix, thresh=0.8):
basisVectors = []
for w in self.W.T:
vList = []
termIndex = 0
for wi in w.T:
if wi > thresh:
vList.append((wi[0,0], termDocMatrix.index2Term[termIndex]))
termIndex += 1
vList.sort(reverse = True)
basisVectors.append(vList)
return basisVectors
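# Usage sketch (X is assumed to be a scipy.sparse matrix; the path is
# hypothetical):
#   factorizer = MatrixFactorizer()
#   W, H = factorizer.factorize(X, nBases=100, algo='SVD')
#   factorizer.save('/tmp/project/')  # writes W_SVD_100.pickl and H_SVD_100.pickl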
| gpl-3.0 | 2,138,477,852,618,782,200 | 28.296296 | 85 | 0.50901 | false |
dmsovetov/pygling | Pygling/Platform/Platform.py | 1 | 6185 | #################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
import os, glob
from collections import namedtuple
from ..Makefile import Makefile
from ..Location import Path
# class Platform
class Platform:
ExternalLibrary = namedtuple('ExternalLibrary', 'type, name, locations')
Location = namedtuple('Location', 'filename, path')
FindLibrary = namedtuple('FindLibrary', 'name, headers, libraries')
# ctor
def __init__( self ):
self._headerSearchPaths = []
self._librarySearchPaths = []
self._libraries = {}
self.register_library('vorbis', headers=['vorbis/codec.h', 'vorbis/vorbisfile.h'], libraries=['vorbis', 'vorbisfile', 'ogg'])
self.register_library('fbx', headers=['fbxsdk.h'], libraries=['fbxsdk'])
self.register_library('yaml', headers=['yaml/yaml.h'], libraries=['yaml'])
self.register_library('embree2', headers=['embree2/rtcore.h', 'embree2/rtcore_ray.h'], libraries=['embree', 'sys', 'simd', 'embree_sse41', 'embree_sse42'])
self.register_library('jsoncpp', headers=['json/json.h'], libraries=['jsoncpp'])
self.register_library('gtest', headers=['gtest/gtest.h'], libraries=['gtest'])
self.register_library('pthread', headers=['pthread.h'], libraries=['pthread'])
self.register_library('mongoc', headers=['mongoc.h'], libraries=['mongoc'])
self.register_library('bson', headers=['bcon.h'], libraries=['bson'])
# userpaths
@property
def userpaths(self):
return []
# headers
@property
def headers(self):
return Makefile.project.headerSearchPaths + self._headerSearchPaths
# libraries
@property
def libraries(self):
return Makefile.project.librarySearchPaths + self._librarySearchPaths
# find_library
def find_library(self, name, required):
if name in self._libraries.keys():
library = self._find_library_by_items(self._libraries[name])
return library if library or not required else self._find_library_by_name(name)
if not required:
return None
return self._find_library_by_name(name)
# library_file_names
def library_file_names(self, name):
return [name]
# header_file_names
def header_file_names(self, name, filename):
return [filename]
# add_header_search_paths
def add_header_search_paths(self, *paths):
for path in paths:
if not os.path.exists(path):
print 'Warning: header search path doesnt exist', path
continue
self._headerSearchPaths.append(path)
# add_library_search_paths
def add_library_search_paths(self, *paths):
for path in paths:
if not os.path.exists(path):
print 'Warning: library search path doesnt exist', path
continue
self._librarySearchPaths.append(path)
# register_library
def register_library(self, identifier, name = None, headers = [], libraries = []):
self._libraries[identifier] = Platform.FindLibrary(name=name if name else identifier, headers=headers, libraries=libraries)
# exists
@staticmethod
def exists(filename, paths, recursive):
nested = []
for path in paths:
# print 'Searching', filename, 'at', path
nested = nested + Platform.dirs(path)
if os.path.exists(os.path.join(path, filename)):
return path
if len(nested) != 0 and recursive:
return Platform.exists(filename, nested, recursive)
return None
# dirs
@staticmethod
def dirs(path):
return [fullPath for fullPath in glob.glob(os.path.join(path, '*')) if os.path.isdir(fullPath)]
# _find_headers
def _find_headers(self, name, headers):
locations = []
for header in headers:
for filename in self.header_file_names(name, header):
path = Platform.exists(filename, self.headers, True)
if path:
locations.append(Platform.Location(filename=filename, path=Path(Path.Headers, path)))
return locations
# _find_libraries
def _find_libraries(self, name, libraries):
locations = []
for library in libraries:
for filename in self.library_file_names(library):
path = Platform.exists(filename, self.libraries, False)
if path: locations.append(Platform.Location(filename=filename, path=Path(Path.Libraries, path)))
return locations
# _find_library_by_items
def _find_library_by_items(self, library):
# Locate library
librarySearchPath = self._find_libraries(library.name, library.libraries)
if not librarySearchPath:
print 'Warning: no libraries found for ' + library.name
return None
# Locate headers
headerSearchPath = self._find_headers(library.name, library.headers)
if not headerSearchPath:
print 'Warning: no headers found for ' + library.name
return None
return Platform.ExternalLibrary(type='external', name=library.name, locations=headerSearchPath + librarySearchPath)
# _find_library_by_name
def _find_library_by_name(self, library):
return None | mit | -1,484,484,770,988,454,100 | 34.348571 | 158 | 0.681487 | false |
neilhan/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_ops.py | 10 | 18611 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian mixture models Operations."""
# TODO(xavigonzalvo): Factor out covariance matrix operations to make
# code reusable for different types (e.g. diag).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops.embedding_ops import embedding_lookup
# Machine epsilon.
MEPS = np.finfo(float).eps
FULL_COVARIANCE = 'full'
DIAG_COVARIANCE = 'diag'
def _covariance(x, diag):
"""Defines the covariance operation of a matrix.
Args:
x: a matrix Tensor. Dimension 0 should contain the number of examples.
diag: if True, it computes the diagonal covariance.
Returns:
A Tensor representing the covariance of x. In the case of
diagonal matrix just the diagonal is returned.
"""
num_points = tf.to_float(tf.shape(x)[0])
x -= tf.reduce_mean(x, 0, keep_dims=True)
if diag:
cov = tf.reduce_sum(
tf.square(x), 0, keep_dims=True) / (num_points - 1)
else:
cov = tf.matmul(x, x, transpose_a=True) / (num_points - 1)
return cov
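# A small worked example (values assumed): for x = [[1., 2.], [3., 4.]] the
# mean-centred rows are [[-1., -1.], [1., 1.]], so _covariance(x, False)
# evaluates to [[2., 2.], [2., 2.]] and _covariance(x, True) to [[2., 2.]]
# (the per-dimension variances in a single row).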
def _init_clusters_random(data, num_clusters, random_seed):
"""Does random initialization of clusters.
Args:
data: a list of Tensors with a matrix of data, each row is an example.
num_clusters: an integer with the number of clusters.
random_seed: Seed for PRNG used to initialize seeds.
Returns:
A Tensor with num_clusters random rows of data.
"""
assert isinstance(data, list)
num_data = tf.add_n([tf.shape(inp)[0] for inp in data])
with tf.control_dependencies([tf.assert_less_equal(num_clusters, num_data)]):
indices = tf.random_uniform([num_clusters],
minval=0,
maxval=tf.cast(num_data, tf.int64),
seed=random_seed,
dtype=tf.int64)
indices = tf.cast(indices, tf.int32) % num_data
clusters_init = embedding_lookup(data, indices, partition_strategy='div')
return clusters_init
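# For instance (shapes assumed): with data = [shard_a, shard_b] of shapes
# (60, d) and (40, d) and num_clusters=5, the result is a (5, d) tensor whose
# rows are drawn from the two shards.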
class GmmAlgorithm(object):
"""Tensorflow Gaussian mixture model clustering class."""
CLUSTERS_VARIABLE = 'clusters'
CLUSTERS_COVS_VARIABLE = 'clusters_covs'
def __init__(self, data, num_classes, initial_means=None, params='wmc',
covariance_type=FULL_COVARIANCE, random_seed=0):
"""Constructor.
Args:
data: a list of Tensors with data, each row is a new example.
num_classes: number of clusters.
initial_means: a Tensor with a matrix of means. If None, means are
computed by sampling randomly.
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covariances.
covariance_type: one of "full", "diag".
random_seed: Seed for PRNG used to initialize seeds.
Raises:
Exception if covariance type is unknown.
"""
self._params = params
self._random_seed = random_seed
self._covariance_type = covariance_type
if self._covariance_type not in [DIAG_COVARIANCE, FULL_COVARIANCE]:
raise Exception( # pylint: disable=g-doc-exception
'programmer error: Invalid covariance type: %s' %
self._covariance_type)
# Create sharded variables for multiple shards. The following
# lists are indexed by shard.
# Probability per example in a class.
num_shards = len(data)
self._probs = [None] * num_shards
# Prior probability.
self._prior_probs = [None] * num_shards
# Membership weights w_{ik} where "i" is the i-th example and "k"
# is the k-th mixture.
self._w = [None] * num_shards
# Number of examples in a class.
self._points_in_k = [None] * num_shards
first_shard = data[0]
self._dimensions = tf.shape(first_shard)[1]
self._num_classes = num_classes
# Small value to guarantee that covariances are invertible.
self._min_var = tf.diag(tf.ones(tf.pack([self._dimensions]))) * 1e-3
self._create_variables(data, initial_means)
# Operations of partial statistics for the computation of the means.
self._w_mul_x = []
# Operations of partial statistics for the computation of the covariances.
self._w_mul_x2 = []
self._define_graph(data)
def _create_variables(self, data, initial_means=None):
"""Initializes GMM algorithm.
Args:
data: a list of Tensors with data, each row is a new example.
initial_means: a Tensor with a matrix of means.
"""
first_shard = data[0]
# Initialize means: num_classes X 1 X dimensions.
if initial_means is not None:
self._means = tf.Variable(tf.expand_dims(initial_means, 1),
name=self.CLUSTERS_VARIABLE,
validate_shape=False, dtype=tf.float32)
else:
# Sample data randomly
self._means = tf.Variable(tf.expand_dims(
_init_clusters_random(data, self._num_classes, self._random_seed), 1),
name=self.CLUSTERS_VARIABLE,
validate_shape=False)
# Initialize covariances.
if self._covariance_type == FULL_COVARIANCE:
cov = _covariance(first_shard, False) + self._min_var
# A matrix per class, num_classes X dimensions X dimensions
covs = tf.tile(
tf.expand_dims(cov, 0), [self._num_classes, 1, 1])
elif self._covariance_type == DIAG_COVARIANCE:
cov = _covariance(first_shard, True) + self._min_var
# A diagonal per row, num_classes X dimensions.
covs = tf.tile(tf.expand_dims(tf.diag_part(cov), 0),
[self._num_classes, 1])
self._covs = tf.Variable(covs, name='clusters_covs', validate_shape=False)
# Mixture weights, representing the probability that a randomly
    # selected unobservable data point (in EM terms) was generated by component k.
self._alpha = tf.Variable(tf.tile([1.0 / self._num_classes],
[self._num_classes]))
def training_ops(self):
"""Returns the training operation."""
return self._train_ops
def alphas(self):
return self._alpha
def clusters(self):
"""Returns the clusters with dimensions num_classes X 1 X num_dimensions."""
return self._means
def covariances(self):
"""Returns the covariances matrices."""
return self._covs
def assignments(self):
"""Returns a list of Tensors with the matrix of assignments per shard."""
ret = []
for w in self._w:
ret.append(tf.argmax(w, 1))
return ret
def scores(self):
"""Returns the distances to each class.
Returns:
A tuple with two Tensors. The first contains the distance to
each class. The second contains the distance to the assigned
class.
"""
return (self._all_scores, self._scores)
def _define_graph(self, data):
"""Define graph for a single iteration.
Args:
data: a list of Tensors defining the training data.
"""
for shard_id, shard in enumerate(data):
self._num_examples = tf.shape(shard)[0]
shard = tf.expand_dims(shard, 0)
self._define_log_prob_operation(shard_id, shard)
self._define_prior_log_prob_operation(shard_id)
self._define_expectation_operation(shard_id)
self._define_partial_maximization_operation(shard_id, shard)
self._define_maximization_operation(len(data))
self._define_distance_to_clusters(data)
def _define_full_covariance_probs(self, shard_id, shard):
"""Defines the full covariance probabilties per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
diff = shard - self._means
cholesky = tf.cholesky(self._covs + self._min_var)
log_det_covs = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(cholesky)), 1)
x_mu_cov = tf.square(
tf.matrix_triangular_solve(
cholesky, tf.transpose(
diff, perm=[0, 2, 1]), lower=True))
diag_m = tf.transpose(tf.reduce_sum(x_mu_cov, 1))
self._probs[shard_id] = -0.5 * (
diag_m + tf.to_float(self._dimensions) * tf.log(2 * np.pi) +
log_det_covs)
def _define_diag_covariance_probs(self, shard_id, shard):
"""Defines the diagonal covariance probabilities per example in a class.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
Returns a matrix num_examples * num_classes.
"""
# num_classes X 1
# TODO(xavigonzalvo): look into alternatives to log for
# reparametrization of variance parameters.
det_expanded = tf.reduce_sum(tf.log(self._covs + 1e-3),
1, keep_dims=True)
diff = shard - self._means
x2 = tf.square(diff)
cov_expanded = tf.expand_dims(1.0 / (self._covs + 1e-3), 2)
# num_classes X num_examples
x2_cov = tf.batch_matmul(x2, cov_expanded)
x2_cov = tf.transpose(tf.squeeze(x2_cov, [2]))
self._probs[shard_id] = -0.5 * (
tf.to_float(self._dimensions) * tf.log(2.0 * np.pi) +
tf.transpose(det_expanded) + x2_cov)
def _define_log_prob_operation(self, shard_id, shard):
"""Probability per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
# TODO(xavigonzalvo): Use the pdf defined in
# third_party/tensorflow/contrib/distributions/python/ops/gaussian.py
if self._covariance_type == FULL_COVARIANCE:
self._define_full_covariance_probs(shard_id, shard)
elif self._covariance_type == DIAG_COVARIANCE:
self._define_diag_covariance_probs(shard_id, shard)
self._probs[shard_id] += tf.log(self._alpha)
def _define_prior_log_prob_operation(self, shard_id):
"""Computes the prior probability of all samples.
    Updates a vector where each item is the prior probability of an
input example.
Args:
shard_id: id of current shard_id.
"""
self._prior_probs[shard_id] = tf.log(
tf.reduce_sum(tf.exp(self._probs[shard_id]), 1, keep_dims=True))
def _define_expectation_operation(self, shard_id):
# Shape broadcasting.
probs = tf.expand_dims(self._probs[shard_id], 0)
# Membership weights are computed as:
# w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)}
# {\sum_{m=1}^{K}\alpha_mf(\mathbf{y_i}|\mathbf{\theta}_m)}
# where "i" is the i-th example, "k" is the k-th mixture, theta are
# the model parameters and y_i the observations.
# These are defined for each shard.
self._w[shard_id] = tf.reshape(
tf.exp(probs - self._prior_probs[shard_id]),
tf.pack([self._num_examples, self._num_classes]))
def _define_partial_maximization_operation(self, shard_id, shard):
"""Computes the partial statistics of the means and covariances.
Args:
shard_id: current shard id.
shard: current data shard, 1 X num_examples X dimensions.
"""
# Soft assignment of each data point to each of the two clusters.
self._points_in_k[shard_id] = tf.reduce_sum(self._w[shard_id], 0,
keep_dims=True)
# Partial means.
w_mul_x = tf.expand_dims(
tf.matmul(self._w[shard_id],
tf.squeeze(shard, [0]), transpose_a=True), 1)
self._w_mul_x.append(w_mul_x)
# Partial covariances.
x = tf.concat(0, [shard for _ in range(self._num_classes)])
x_trans = tf.transpose(x, perm=[0, 2, 1])
x_mul_w = tf.concat(0, [
tf.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
for k in range(self._num_classes)])
self._w_mul_x2.append(tf.batch_matmul(x_mul_w, x))
def _define_maximization_operation(self, num_batches):
"""Maximization operations."""
# TODO(xavigonzalvo): some of these operations could be moved to C++.
# Compute the effective number of data points assigned to component k.
with tf.control_dependencies(self._w):
points_in_k = tf.squeeze(tf.add_n(self._points_in_k), squeeze_dims=[0])
# Update alpha.
if 'w' in self._params:
final_points_in_k = points_in_k / num_batches
num_examples = tf.to_float(tf.reduce_sum(final_points_in_k))
self._alpha_op = self._alpha.assign(
final_points_in_k / (num_examples + MEPS))
else:
self._alpha_op = tf.no_op()
self._train_ops = [self._alpha_op]
# Update means.
points_in_k_expanded = tf.reshape(points_in_k,
[self._num_classes, 1, 1])
if 'm' in self._params:
self._means_op = self._means.assign(
tf.div(tf.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
else:
self._means_op = tf.no_op()
# means are (num_classes x 1 x dims)
# Update covariances.
with tf.control_dependencies([self._means_op]):
b = tf.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
new_covs = []
for k in range(self._num_classes):
mean = self._means.ref()[k, :, :]
square_mean = tf.matmul(mean, mean, transpose_a=True)
new_cov = b[k, :, :] - square_mean + self._min_var
if self._covariance_type == FULL_COVARIANCE:
new_covs.append(tf.expand_dims(new_cov, 0))
elif self._covariance_type == DIAG_COVARIANCE:
new_covs.append(tf.expand_dims(tf.diag_part(new_cov), 0))
new_covs = tf.concat(0, new_covs)
if 'c' in self._params:
# Train operations don't need to take care of the means
# because covariances already depend on it.
with tf.control_dependencies([self._means_op, new_covs]):
self._train_ops.append(
tf.assign(self._covs, new_covs, validate_shape=False))
def _define_distance_to_clusters(self, data):
"""Defines the Mahalanobis distance to the assigned Gaussian."""
# TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
# mean) from log probability function.
self._all_scores = []
for shard in data:
all_scores = []
shard = tf.expand_dims(shard, 0)
for c in xrange(self._num_classes):
if self._covariance_type == FULL_COVARIANCE:
cov = self._covs[c, :, :]
elif self._covariance_type == DIAG_COVARIANCE:
cov = tf.diag(self._covs[c, :])
inverse = tf.matrix_inverse(cov + self._min_var)
inv_cov = tf.tile(
tf.expand_dims(inverse, 0),
tf.pack([self._num_examples, 1, 1]))
diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
m_left = tf.batch_matmul(diff, inv_cov)
all_scores.append(tf.sqrt(tf.batch_matmul(
m_left, tf.transpose(diff, perm=[0, 2, 1])
)))
self._all_scores.append(tf.reshape(
tf.concat(1, all_scores),
tf.pack([self._num_examples, self._num_classes])))
# Distance to the associated class.
self._all_scores = tf.concat(0, self._all_scores)
assignments = tf.concat(0, self.assignments())
rows = tf.to_int64(tf.range(0, self._num_examples))
indices = tf.concat(1, [tf.expand_dims(rows, 1),
tf.expand_dims(assignments, 1)])
self._scores = tf.gather_nd(self._all_scores, indices)
def _define_loglikelihood_operation(self):
"""Defines the total log-likelihood of current iteration."""
self._ll_op = []
for prior_probs in self._prior_probs:
self._ll_op.append(tf.reduce_sum(tf.log(prior_probs)))
tf.scalar_summary('ll', tf.reduce_sum(self._ll_op))
def gmm(inp, initial_clusters, num_clusters, random_seed,
covariance_type=FULL_COVARIANCE, params='wmc'):
"""Creates the graph for Gaussian mixture model (GMM) clustering.
Args:
inp: An input tensor or list of input tensors
initial_clusters: Specifies the clusters used during
initialization. Can be a tensor or numpy array, or a function
that generates the clusters. Can also be "random" to specify
that clusters should be chosen randomly from input data. Note: type
is diverse to be consistent with skflow.
num_clusters: number of clusters.
random_seed: Python integer. Seed for PRNG used to initialize centers.
covariance_type: one of "diag", "full".
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covars.
Returns:
Note: tuple of lists returned to be consistent with skflow
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
assignments: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to assignments but specifies the distance to the
assigned cluster instead.
training_op: an op that runs an iteration of training.
"""
initial_means = None
if initial_clusters != 'random' and not isinstance(
initial_clusters, tf.Tensor):
initial_means = tf.constant(initial_clusters, dtype=tf.float32)
# Implementation of GMM.
inp = inp if isinstance(inp, list) else [inp]
gmm_tool = GmmAlgorithm(inp, num_clusters, initial_means, params,
covariance_type, random_seed)
training_ops = gmm_tool.training_ops()
assignments = gmm_tool.assignments()
all_scores, scores = gmm_tool.scores()
return [all_scores], [assignments], [scores], tf.group(*training_ops)
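# Usage sketch (names and shapes assumed; graph-mode API of this TF version):
#   x = tf.placeholder(tf.float32, [None, 2])
#   all_scores, assignments, scores, train_op = gmm(
#       x, 'random', num_clusters=3, random_seed=42)
#   # Each of the first three results is a single-element list; run train_op
#   # repeatedly in a session, then fetch assignments[0].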
| apache-2.0 | 2,467,029,705,887,266,300 | 39.370933 | 80 | 0.637741 | false |
knoguchi/kenix-scm | server/lib/boto/manage/cmdshell.py | 17 | 8585 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.mashups.interactive import interactive_shell
import boto
import os
import time
import shutil
import StringIO
import paramiko
import socket
import subprocess
class SSHClient(object):
def __init__(self, server,
host_key_file='~/.ssh/known_hosts',
uname='root', timeout=None, ssh_pwd=None):
self.server = server
self.host_key_file = host_key_file
self.uname = uname
self._timeout = timeout
self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file,
password=ssh_pwd)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect()
def connect(self, num_retries=5):
retry = 0
while retry < num_retries:
try:
self._ssh_client.connect(self.server.hostname,
username=self.uname,
pkey=self._pkey,
timeout=self._timeout)
return
except socket.error, (value, message):
if value in (51, 61, 111):
print 'SSH Connection refused, will retry in 5 seconds'
time.sleep(5)
retry += 1
else:
raise
except paramiko.BadHostKeyException:
print "%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname
print 'Edit that file to remove the entry and then hit return to try again'
raw_input('Hit Enter when ready')
retry += 1
except EOFError:
print 'Unexpected Error from SSH Connection, retry in 5 seconds'
time.sleep(5)
retry += 1
print 'Could not establish SSH connection'
def open_sftp(self):
return self._ssh_client.open_sftp()
def get_file(self, src, dst):
sftp_client = self.open_sftp()
sftp_client.get(src, dst)
def put_file(self, src, dst):
sftp_client = self.open_sftp()
sftp_client.put(src, dst)
def open(self, filename, mode='r', bufsize=-1):
"""
Open a file on the remote system and return a file-like object.
"""
sftp_client = self.open_sftp()
return sftp_client.open(filename, mode, bufsize)
def listdir(self, path):
sftp_client = self.open_sftp()
return sftp_client.listdir(path)
def isdir(self, path):
status = self.run('[ -d %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def exists(self, path):
status = self.run('[ -a %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def shell(self):
"""
Start an interactive shell session on the remote host.
"""
channel = self._ssh_client.invoke_shell()
interactive_shell(channel)
def run(self, command):
"""
Execute a command on the remote host. Return a tuple containing
an integer status and two strings, the first containing stdout
and the second containing stderr from the command.
"""
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
status = 0
try:
t = self._ssh_client.exec_command(command)
except paramiko.SSHException:
            status = 1
            return (status, '', '')
std_out = t[1].read()
std_err = t[2].read()
t[0].close()
t[1].close()
t[2].close()
boto.log.debug('stdout: %s' % std_out)
boto.log.debug('stderr: %s' % std_err)
return (status, std_out, std_err)
def run_pty(self, command):
"""
Execute a command on the remote host with a pseudo-terminal.
Returns a string containing the output of the command.
"""
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
return channel
def close(self):
transport = self._ssh_client.get_transport()
transport.close()
self.server.reset_cmdshell()
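# Usage sketch (assumes a server object exposing hostname, instance_id and
# ssh_key_file, e.g. a boto.manage.server.Server):
#   client = SSHClient(server, uname='ec2-user')
#   status, out, err = client.run('uname -a')
#   client.put_file('/tmp/local.txt', '/tmp/remote.txt')
#   client.close()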
class LocalClient(object):
def __init__(self, server, host_key_file=None, uname='root'):
self.server = server
self.host_key_file = host_key_file
self.uname = uname
def get_file(self, src, dst):
shutil.copyfile(src, dst)
def put_file(self, src, dst):
shutil.copyfile(src, dst)
def listdir(self, path):
return os.listdir(path)
def isdir(self, path):
return os.path.isdir(path)
def exists(self, path):
return os.path.exists(path)
def shell(self):
raise NotImplementedError('shell not supported with LocalClient')
    def run(self, command):
        """
        Execute a command locally.  Return a tuple containing an integer
        status and a string with the combined stdout and stderr output.
        """
        boto.log.info('running:%s' % command)
        log_fp = StringIO.StringIO()
        process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        while process.poll() is None:
            time.sleep(1)
        t = process.communicate()
        log_fp.write(t[0])
        log_fp.write(t[1])
        boto.log.info('output: %s' % log_fp.getvalue())
        return (process.returncode, log_fp.getvalue())
def close(self):
pass
class FakeServer(object):
"""
    A little class to fake out SSHClient (which is expecting a
    :class:`boto.manage.server.Server` instance).  This allows us
    to build an SSHClient directly from an EC2 instance object.
"""
def __init__(self, instance, ssh_key_file):
self.instance = instance
self.ssh_key_file = ssh_key_file
self.hostname = instance.dns_name
self.instance_id = self.instance.id
def start(server):
instance_id = boto.config.get('Instance', 'instance-id', None)
if instance_id == server.instance_id:
return LocalClient(server)
else:
return SSHClient(server)
def sshclient_from_instance(instance, ssh_key_file,
host_key_file='~/.ssh/known_hosts',
user_name='root', ssh_pwd=None):
"""
Create and return an SSHClient object given an
instance object.
:type instance: :class`boto.ec2.instance.Instance` object
:param instance: The instance object.
:type ssh_key_file: str
:param ssh_key_file: A path to the private key file used
to log into instance.
:type host_key_file: str
:param host_key_file: A path to the known_hosts file used
by the SSH client.
Defaults to ~/.ssh/known_hosts
:type user_name: str
:param user_name: The username to use when logging into
the instance. Defaults to root.
:type ssh_pwd: str
:param ssh_pwd: The passphrase, if any, associated with
private key.
"""
s = FakeServer(instance, ssh_key_file)
return SSHClient(s, host_key_file, user_name, ssh_pwd)
| apache-2.0 | 3,240,464,399,704,323,000 | 34.329218 | 105 | 0.592895 | false |
synhershko/swift-middleware-grok | setup.py | 1 | 1607 | # Copyright (c) 2010-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from grok import name, version
setup(
name=name,
version=version,
author="Itamar Syn-Hershko",
author_email="[email protected]",
description="Grok support middleware for OpenStack Swift",
keywords="openstack swift middleware grok",
url="https://github.com/synhershko/swift-middleware-grok",
packages=find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: OpenStack',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
install_requires=['pygrok'],
entry_points={
'paste.filter_factory': [
'grok=grok.middleware:filter_factory',
],
},
) | apache-2.0 | 2,412,861,579,786,681,300 | 33.212766 | 69 | 0.679527 | false |
elliotthill/django-oscar | oscar/core/loading.py | 1 | 5946 | import sys
import traceback
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_model
class AppNotFoundError(Exception):
pass
class ClassNotFoundError(Exception):
pass
def get_class(module_label, classname):
"""
Dynamically import a single class from the given module.
This is a simple wrapper around `get_classes` for the case of loading a
single class.
Args:
module_label (str): Module label comprising the app label and the
module name, separated by a dot. For example, 'catalogue.forms'.
classname (str): Name of the class to be imported.
Returns:
The requested class object or `None` if it can't be found
"""
return get_classes(module_label, [classname])[0]
def get_classes(module_label, classnames):
"""
Dynamically import a list of classes from the given module.
This works by looping over ``INSTALLED_APPS`` and looking for a match
against the passed module label. If the requested class can't be found in
the matching module, then we attempt to import it from the corresponding
core Oscar app (assuming the matched module isn't in Oscar).
This is very similar to ``django.db.models.get_model`` function for
dynamically loading models. This function is more general though as it can
load any class from the matching app, not just a model.
Args:
module_label (str): Module label comprising the app label and the
module name, separated by a dot. For example, 'catalogue.forms'.
classname (str): Name of the class to be imported.
Returns:
The requested class object or ``None`` if it can't be found
Examples:
Load a single class:
>>> get_class('basket.forms', 'BasketLineForm')
oscar.apps.basket.forms.BasketLineForm
Load a list of classes:
>>> get_classes('basket.forms', ['BasketLineForm', 'AddToBasketForm'])
[oscar.apps.basket.forms.BasketLineForm,
oscar.apps.basket.forms.AddToBasketForm]
Raises:
AppNotFoundError: If no app is found in ``INSTALLED_APPS`` that matches
the passed module label.
ImportError: If the attempted import of a class raises an
``ImportError``, it is re-raised
"""
app_module_path = _get_app_module_path(module_label)
if not app_module_path:
raise AppNotFoundError("No app found matching '%s'" % module_label)
# Check if app is in oscar
if app_module_path.split('.')[0] == 'oscar':
# Using core oscar class
module_path = 'oscar.apps.%s' % module_label
imported_module = __import__(module_path, fromlist=classnames)
return _pluck_classes([imported_module], classnames)
# App must be local - check if module is in local app (it could be in
# oscar's)
app_label = module_label.split('.')[0]
if '.' in app_module_path:
base_package = app_module_path.rsplit('.' + app_label, 1)[0]
local_app = "%s.%s" % (base_package, module_label)
else:
local_app = module_label
try:
imported_local_module = __import__(local_app, fromlist=classnames)
except ImportError:
# There are 2 reasons why there is ImportError:
# 1. local_app does not exist
# 2. local_app exists but is corrupted (ImportError inside of the app)
#
# Obviously, for the reason #1 we want to fall back to use Oscar app.
# For the reason #2 we want to propagate error (the dev obviously wants
# to override app and not use Oscar app)
#
# ImportError does not provide easy way to distinguish those two cases.
# Fortunately, the traceback of the ImportError starts at __import__
# statement. If the traceback has more than one frame, it means that
# application was found and ImportError originates within the local app
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1:
raise
# Module not in local app
imported_local_module = {}
oscar_app = "oscar.apps.%s" % module_label
try:
imported_oscar_module = __import__(oscar_app, fromlist=classnames)
except ImportError:
# Oscar does not have this application, can't fallback to it
imported_oscar_module = None
return _pluck_classes([imported_local_module, imported_oscar_module],
classnames)
def _pluck_classes(modules, classnames):
klasses = []
for classname in classnames:
klass = None
for module in modules:
if hasattr(module, classname):
klass = getattr(module, classname)
break
if not klass:
packages = [m.__name__ for m in modules if m is not None]
raise ClassNotFoundError("No class '%s' found in %s" % (
classname, ", ".join(packages)))
klasses.append(klass)
return klasses
def _get_app_module_path(module_label):
app_name = module_label.rsplit(".", 1)[0]
for installed_app in settings.INSTALLED_APPS:
if installed_app.endswith(app_name):
return installed_app
return None
def get_profile_class():
"""
Return the profile model class
"""
setting = getattr(settings, 'AUTH_PROFILE_MODULE', None)
if setting is None:
return None
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
profile_class = get_model(app_label, model_name)
if not profile_class:
raise ImproperlyConfigured("Can't import profile model")
return profile_class
def feature_hidden(feature_name):
"""
Test if a certain Oscar feature is disabled.
"""
return (feature_name is not None and
feature_name in settings.OSCAR_HIDDEN_FEATURES)
| bsd-3-clause | -6,922,422,192,606,424,000 | 33.569767 | 79 | 0.645644 | false |
jswope00/griffinx | lms/djangoapps/courseware/features/lti.py | 6 | 13981 | # pylint: disable=missing-docstring
import datetime
import os
import pytz
from django.conf import settings
from mock import patch
from pytz import UTC
from splinter.exceptions import ElementDoesNotExist
from selenium.common.exceptions import NoAlertPresentException
from nose.tools import assert_true, assert_equal, assert_in, assert_is_none
from lettuce import world, step
from courseware.tests.factories import InstructorFactory, BetaTesterFactory
from courseware.access import has_access
from student.tests.factories import UserFactory
from common import visit_scenario_item
@step('I view the LTI and error is shown$')
def lti_is_not_rendered(_step):
# error is shown
assert world.is_css_present('.error_message', wait_time=0)
# iframe is not presented
assert not world.is_css_present('iframe', wait_time=0)
# link is not presented
assert not world.is_css_present('.link_lti_new_window', wait_time=0)
def check_lti_iframe_content(text):
    # the test content is presented inside the iframe
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_present_by_css('.result', wait_time=0)
assert (text == world.retry_on_exception(
lambda: iframe.find_by_css('.result')[0].text,
max_attempts=5
))
@step('I view the LTI and it is rendered in (.*)$')
def lti_is_rendered(_step, rendered_in):
if rendered_in.strip() == 'iframe':
assert world.is_css_present('iframe', wait_time=2)
assert not world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
# iframe is visible
assert world.css_visible('iframe')
check_lti_iframe_content("This is LTI tool. Success.")
elif rendered_in.strip() == 'new page':
assert not world.is_css_present('iframe', wait_time=2)
assert world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
click_and_check_lti_popup()
    else: # incorrect rendered_in parameter
assert False
@step('I view the permission alert$')
def view_lti_permission_alert(_step):
assert not world.is_css_present('iframe', wait_time=2)
assert world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
world.css_find('.link_lti_new_window').first.click()
alert = world.browser.get_alert()
assert alert is not None
assert len(world.browser.windows) == 1
def check_no_alert():
"""
Make sure the alert has gone away.
Note that the splinter documentation indicates that
get_alert should return None if no alert is present,
however that is not the case. Instead a
NoAlertPresentException is raised.
"""
try:
assert_is_none(world.browser.get_alert())
except NoAlertPresentException:
pass
@step('I accept the permission alert and view the LTI$')
def accept_lti_permission_alert(_step):
parent_window = world.browser.current_window # Save the parent window
# To start with you should only have one window/tab
assert len(world.browser.windows) == 1
alert = world.browser.get_alert()
alert.accept()
check_no_alert()
# Give it a few seconds for the LTI window to appear
world.wait_for(
lambda _: len(world.browser.windows) == 2,
timeout=5,
timeout_msg="Timed out waiting for the LTI window to appear."
)
# Verify the LTI window
check_lti_popup(parent_window)
@step('I reject the permission alert and do not view the LTI$')
def reject_lti_permission_alert(_step):
alert = world.browser.get_alert()
alert.dismiss()
check_no_alert()
assert len(world.browser.windows) == 1
@step('I view the LTI but incorrect_signature warning is rendered$')
def incorrect_lti_is_rendered(_step):
assert world.is_css_present('iframe', wait_time=2)
assert not world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
    # the test content is presented inside the iframe
check_lti_iframe_content("Wrong LTI signature")
@step('the course has correct LTI credentials with registered (.*)$')
def set_correct_lti_passport(_step, user='Instructor'):
coursenum = 'test_course'
metadata = {
'lti_passports': ["correct_lti_id:test_client_key:test_client_secret"]
}
i_am_registered_for_the_course(coursenum, metadata, user)
@step('the course has incorrect LTI credentials$')
def set_incorrect_lti_passport(_step):
coursenum = 'test_course'
metadata = {
'lti_passports': ["test_lti_id:test_client_key:incorrect_lti_secret_key"]
}
i_am_registered_for_the_course(coursenum, metadata)
@step('the course has an LTI component with (.*) fields(?:\:)?$') # , new_page is(.*), graded is(.*)
def add_correct_lti_to_course(_step, fields):
category = 'lti'
metadata = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://127.0.0.1:{}/correct_lti_endpoint'.format(settings.LTI_PORT),
}
if fields.strip() == 'incorrect_lti_id': # incorrect fields
metadata.update({
'lti_id': 'incorrect_lti_id'
})
elif fields.strip() == 'correct': # correct fields
pass
elif fields.strip() == 'no_launch_url':
metadata.update({
'launch_url': u''
})
else: # incorrect parameter
assert False
if _step.hashes:
metadata.update(_step.hashes[0])
world.scenario_dict['LTI'] = world.ItemFactory.create(
parent_location=world.scenario_dict['SECTION'].location,
category=category,
display_name='LTI',
metadata=metadata,
)
setattr(world.scenario_dict['LTI'], 'TEST_BASE_PATH', '{host}:{port}'.format(
host=world.browser.host,
port=world.browser.port,
))
visit_scenario_item('LTI')
def create_course_for_lti(course, metadata):
# First clear the modulestore so we don't try to recreate
# the same course twice
# This also ensures that the necessary templates are loaded
world.clear_courses()
weight = 0.1
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": weight
},
]
}
# Create the course
# We always use the same org and display name,
# but vary the course identifier (e.g. 600x or 191x)
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org='edx',
number=course,
display_name='Test Course',
metadata=metadata,
grading_policy=grading_policy,
)
# Add a section to the course to contain problems
world.scenario_dict['CHAPTER'] = world.ItemFactory.create(
parent_location=world.scenario_dict['COURSE'].location,
category='chapter',
display_name='Test Chapter',
)
world.scenario_dict['SECTION'] = world.ItemFactory.create(
parent_location=world.scenario_dict['CHAPTER'].location,
category='sequential',
display_name='Test Section',
metadata={'graded': True, 'format': 'Homework'})
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def i_am_registered_for_the_course(coursenum, metadata, user='Instructor'):
# Create user
if user == 'BetaTester':
# Create the course
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=5)
metadata.update({'days_early_for_beta': 5, 'start': tomorrow})
create_course_for_lti(coursenum, metadata)
course_descriptor = world.scenario_dict['COURSE']
# create beta tester
user = BetaTesterFactory(course_key=course_descriptor.id)
normal_student = UserFactory()
instructor = InstructorFactory(course_key=course_descriptor.id)
assert not has_access(normal_student, 'load', course_descriptor)
assert has_access(user, 'load', course_descriptor)
assert has_access(instructor, 'load', course_descriptor)
else:
metadata.update({'start': datetime.datetime(1970, 1, 1, tzinfo=UTC)})
create_course_for_lti(coursenum, metadata)
course_descriptor = world.scenario_dict['COURSE']
user = InstructorFactory(course_key=course_descriptor.id)
# Enroll the user in the course and log them in
if has_access(user, 'load', course_descriptor):
world.enroll_user(user, course_descriptor.id)
world.log_in(username=user.username, password='test')
def check_lti_popup(parent_window):
# You should now have 2 browser windows open, the original courseware and the LTI
windows = world.browser.windows
assert_equal(len(windows), 2)
# For verification, iterate through the window titles and make sure that
# both are there.
tabs = []
for window in windows:
world.browser.switch_to_window(window)
tabs.append(world.browser.title)
assert_equal(tabs, [u'LTI | Test Section | test_course Courseware | edX', u'TEST TITLE'])
# Now verify the contents of the LTI window (which is the 2nd window/tab)
# Note: The LTI opens in a new browser window, but Selenium sticks with the
# current window until you explicitly switch to the context of the new one.
world.browser.switch_to_window(windows[1])
url = world.browser.url
basename = os.path.basename(url)
pathname = os.path.splitext(basename)[0]
assert_equal(pathname, u'correct_lti_endpoint')
result = world.css_find('.result').first.text
assert_equal(result, u'This is LTI tool. Success.')
world.browser.driver.close() # Close the pop-up window
world.browser.switch_to_window(parent_window) # Switch to the main window again
def click_and_check_lti_popup():
parent_window = world.browser.current_window # Save the parent window
world.css_find('.link_lti_new_window').first.click()
check_lti_popup(parent_window)
@step('visit the LTI component')
def visit_lti_component(_step):
visit_scenario_item('LTI')
@step('I see LTI component (.*) with text "([^"]*)"$')
def see_elem_text(_step, elem, text):
selector_map = {
'progress': '.problem-progress',
'feedback': '.problem-feedback',
'module title': '.problem-header',
'button': '.link_lti_new_window',
'description': '.lti-description'
}
assert_in(elem, selector_map)
assert_true(world.css_has_text(selector_map[elem], text))
@step('I see text "([^"]*)"$')
def check_progress(_step, text):
assert world.browser.is_text_present(text)
@step('I see graph with total progress "([^"]*)"$')
def see_graph(_step, progress):
selector = 'grade-detail-graph'
xpath = '//div[@id="{parent}"]//div[text()="{progress}"]'.format(
parent=selector,
progress=progress,
)
node = world.browser.find_by_xpath(xpath)
assert node
@step('I see in the gradebook table that "([^"]*)" is "([^"]*)"$')
def see_value_in_the_gradebook(_step, label, text):
table_selector = '.grade-table'
index = 0
table_headers = world.css_find('{0} thead th'.format(table_selector))
for i, element in enumerate(table_headers):
if element.text.strip() == label:
index = i
break
assert_true(world.css_has_text('{0} tbody td'.format(table_selector), text, index=index))
@step('I submit answer to LTI (.*) question$')
def click_grade(_step, version):
version_map = {
'1': {'selector': 'submit-button', 'expected_text': 'LTI consumer (edX) responded with XML content'},
'2': {'selector': 'submit-lti2-button', 'expected_text': 'LTI consumer (edX) responded with HTTP 200'},
}
assert_in(version, version_map)
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
iframe.find_by_name(version_map[version]['selector']).first.click()
assert iframe.is_text_present(version_map[version]['expected_text'])
@step('LTI provider deletes my grade and feedback$')
def click_delete_button(_step):
with world.browser.get_iframe(get_lti_frame_name()) as iframe:
iframe.find_by_name('submit-lti2-delete-button').first.click()
def get_lti_frame_name():
location = world.scenario_dict['LTI'].location.html_id()
return 'ltiFrame-' + location
@step('I see in iframe that LTI role is (.*)$')
def check_role(_step, role):
world.is_css_present('iframe')
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
expected_role = 'Role: ' + role
role = world.retry_on_exception(
lambda: iframe.find_by_tag('h5').first.value,
max_attempts=5,
ignored_exceptions=ElementDoesNotExist
)
assert_equal(expected_role, role)
@step('I switch to (.*)$')
def switch_view(_step, view):
staff_status = world.css_find('#action-preview-select').first.value
if staff_status != view:
world.browser.select("select", view)
world.wait_for_ajax_complete()
@step("in the LTI component I do not see (.*)$")
def check_lti_component_no_elem(_step, text):
selector_map = {
'a launch button': '.link_lti_new_window',
'an provider iframe': '.ltiLaunchFrame',
'feedback': '.problem-feedback',
'progress': '.problem-progress',
}
assert_in(text, selector_map)
assert_true(world.is_css_not_present(selector_map[text]))
| agpl-3.0 | 8,610,674,534,692,384,000 | 33.520988 | 111 | 0.653959 | false |
pegasus-isi/pegasus | packages/pegasus-python/src/Pegasus/monitoring/job.py | 1 | 39726 | """
This file implements the Job class for pegasus-monitord.
"""
##
# Copyright 2007-2012 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import collections
import json
import logging
import os
import re
from io import StringIO
from Pegasus.tools import utils
logger = logging.getLogger(__name__)
# Global variables
good_rsl = {"maxcputime": 1, "maxtime": 1, "maxwalltime": 1}
MAX_OUTPUT_LENGTH = 2 ** 16 - 1 # Only keep stdout to 64K
# some constants
NOOP_JOB_PREFIX = (
"noop_" # prefix for noop jobs for which .out and err files are not created
)
MONITORING_EVENT_START_MARKER = "@@@MONITORING_PAYLOAD - START@@@"
MONITORING_EVENT_END_MARKER = "@@@MONITORING_PAYLOAD - END@@@"
# Used in parse_sub_file
re_rsl_string = re.compile(r"^\s*globusrsl\W", re.IGNORECASE)
re_rsl_clean = re.compile(r"([-_])")
re_site_parse_gvds = re.compile(
r"^\s*\+(pegasus|wf)_(site|resource)\s*=\s*([\'\"])?(\S+)\3"
)
re_parse_transformation = re.compile(r"^\s*\+pegasus_wf_xformation\s*=\s*(\S+)")
re_parse_derivation = re.compile(r"^\s*\+pegasus_wf_dax_job_id\s*=\s*(\S+)")
re_parse_multiplier_factor = re.compile(r"^\s*\+pegasus_cores\s=\s(\S+)")
re_parse_executable = re.compile(r"^\s*executable\s*=\s*(\S+)")
re_parse_arguments = re.compile(r'^\s*arguments\s*=\s*"([^"\r\n]*)"')
re_parse_environment = re.compile(r"^\s*environment\s*=\s*(.*)")
re_site_parse_euryale = re.compile(r"^\#!\s+site=(\S+)")
re_parse_property = re.compile(r"([^:= \t]+)\s*[:=]?\s*(.*)")
re_parse_input = re.compile(r"^\s*input\s*=\s*(\S+)")
re_parse_output = re.compile(r"^\s*output\s*=\s*(\S+)")
re_parse_error = re.compile(r"^\s*error\s*=\s*(\S+)")
re_parse_job_class = re.compile(r"^\s*\+pegasus_job_class\s*=\s*(\S+)")
re_parse_pegasuslite_hostname = re.compile(
r"^.*Executing on host\s*(\S+)(?:\s*IP=(\S+))*$", re.MULTILINE
)
TaskOutput = collections.namedtuple("TaskOutput", ["user_data", "events"])
class IntegrityMetric:
"""
Class for storing integrity metrics and combining them based solely on type and file type combination
"""
def __init__(self, type, file_type, count=0, succeeded=0, failed=0, duration=0.0):
self.type = type
self.file_type = file_type
self.count = count
self.succeeded = succeeded
self.failed = failed
self.duration = duration
def __eq__(self, other):
return self.type == other.type and self.file_type == other.file_type
def __hash__(self):
return hash(self.key())
def __str__(self):
return "({},{},{}, {}, {} , {})".format(
self.type,
self.file_type,
self.count,
self.succeeded,
self.failed,
self.duration,
)
def key(self):
return self.type + ":" + self.file_type
def merge(self, other):
if self == other:
self.count += other.count
self.succeeded += other.succeeded
self.failed += other.failed
self.duration += other.duration
return
raise KeyError("Objects not compatible {} {}".format(self, other))
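# Illustrative merge behaviour (a hypothetical example, not from the original source):
# two metrics with the same (type, file_type) key are combined by merge(), e.g.
#
#     a = IntegrityMetric("check", "input", count=2, succeeded=2, duration=0.5)
#     b = IntegrityMetric("check", "input", count=1, failed=1, duration=0.2)
#     a.merge(b)  # a now holds count=3, succeeded=2, failed=1, duration=0.7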
class Job:
"""
Class used to keep information needed to track a particular job
"""
JOBTYPE_TO_DESC = {
"0": "unassigned",
"1": "compute",
"2": "stage-in-tx",
"3": "stage-out-tx",
"4": "registration",
"5": "inter-site-tx",
"6": "create-dir",
"7": "stage_worker_tx",
"8": "cleanup",
"9": "chmod",
"10": "dax",
"11": "dag",
}
# Variables that describe a job, as per the Stampede schema
# Some will be initialized in the init method, others will
# get their values from the kickstart output file when a job
# finished
def __init__(self, wf_uuid, name, job_submit_dir, job_submit_seq):
"""
This function initializes the job parameters with the
information available when a job is detected in the
"PRE_SCRIPT_STARTED" or the "SUBMIT" state. Other parameters
will remain None until a job finishes and a kickstart output
file can be parsed.
"""
self._wf_uuid = wf_uuid
self._exec_job_id = name
self._job_submit_seq = job_submit_seq
self._job_submit_dir = (
job_submit_dir # the submit directory all the job related files exist
)
self._sched_id = None
self._site_name = None
self._host_id = None
self._host_ip = None
self._remote_user = None
self._remote_working_dir = None
self._cluster_start_time = None
self._cluster_duration = None
self._job_type = None
self._job_state = None
self._job_state_seq = 0
self._job_state_timestamp = None
self._job_output_counter = 0
self._pre_script_start = None
self._pre_script_done = None
self._pre_script_exitcode = None
self._main_job_start = None
self._main_job_done = None
self._main_job_transformation = None
self._main_job_derivation = None
self._main_job_executable = None
self._main_job_arguments = None
self._main_job_exitcode = None
self._main_job_multiplier_factor = None
self._post_script_start = None
self._post_script_done = None
self._post_script_exitcode = None
self._input_file = None
self._output_file = None
self._error_file = None
self._stdout_text = None
self._stderr_text = None
self._additional_monitoring_events = []
self._multipart_events = []
self._cpu_attribs = None
self._job_dagman_out = None # _CONDOR_DAGMAN_LOG from environment
# line for pegasus-plan and subdax_ jobs
self._kickstart_parsed = False # Flag indicating if the kickstart
# output for this job was parsed or not
self._has_rotated_stdout_err_files = (
False # Flag indicating whether we detected that job stdout|stderr
)
# was rotated or not, as is the default case.
self._deferred_job_end_kwargs = None
self._integrity_metrics = set()
def _get_jobtype_desc(self):
"""
Returns a textual description of the job type
:return:
"""
desc = "unknown"
if self._job_type is None:
return desc
if self._job_type in Job.JOBTYPE_TO_DESC:
desc = Job.JOBTYPE_TO_DESC[self._job_type]
else:
logger.error(
"Unknown job type %s encountered for job %s"
% (self._job_type, self._exec_job_id)
)
return desc
def _add_multipart_events(self, events):
"""
add multipart events to the job, separating any integrity metrics,
since Integrity metrics are stored internally not as addditonal monitoring event
:param events:
:return:
"""
for event in events:
if not "multipart" in event:
# Not this one... skip to the next
logger.error(
" Mismatched multipart record %s in job %s"
% (event, self._exec_job_id)
)
continue
if "integrity_summary" in event:
# PM-1390 multipart events
m = event["integrity_summary"]
metric = IntegrityMetric(
type="check", # For time being they always refer to verification
file_type="input", # should be specified in multipart
succeeded=m["succeeded"] if "succeeded" in m else 0,
failed=m["failed"] if "failed" in m else 0,
duration=m["duration"] if "duration" in m else 0.0,
)
self.add_integrity_metric(metric)
else: # catch all
self._multipart_events.append(event)
def _add_additional_monitoring_events(self, events):
"""
add monitoring events to the job, separating any integrity metrics,
since Integrity metrics are stored internally not as addditonal monitoring event
:param events:
:return:
"""
for event in events:
if "monitoring_event" in event:
# this is how integrity metrics were reported in 4.9.x series
# as monitoring events; NOT multipart records in the job.out files
name = event["monitoring_event"]
if name == "int.metric":
# split elements in payload to IntegrityMetric
# add it internally for aggregation
for m in event["payload"]:
metric = IntegrityMetric(
type=m.get("event"),
file_type=m.get("file_type"),
count=m["count"] if "count" in m else 0,
succeeded=m["succeeded"] if "succeeded" in m else 0,
failed=m["failed"] if "failed" in m else 0,
duration=m["duration"] if "duration" in m else 0.0,
)
self.add_integrity_metric(metric)
else:
self._additional_monitoring_events.append(event)
else: # catch all
self._additional_monitoring_events.append(event)
def add_integrity_metric(self, metric):
"""
adds an integrity metric, if a metric with the same key already exists we retrive
existing value and add the contents of metric passed
:param metric:
:return:
"""
if metric is None:
return
for m in self._integrity_metrics:
if metric == m:
# add to existing metric
m.merge(metric)
break
else:
self._integrity_metrics.add(metric)
def set_job_state(self, job_state, sched_id, timestamp, status):
"""
This function sets the job state for this job. It also updates
the times the main job and PRE/POST scripts start and finish.
"""
self._job_state = job_state
self._job_state_timestamp = int(timestamp)
# Increment job state sequence
self._job_state_seq = self._job_state_seq + 1
# Set sched_id if we don't already have it...
if self._sched_id is None:
self._sched_id = sched_id
# Record timestamp for certain job states
if job_state == "PRE_SCRIPT_STARTED":
self._pre_script_start = int(timestamp)
elif job_state == "PRE_SCRIPT_SUCCESS" or job_state == "PRE_SCRIPT_FAILURE":
self._pre_script_done = int(timestamp)
self._pre_script_exitcode = utils.regular_to_raw(status)
elif job_state == "POST_SCRIPT_STARTED":
self._post_script_start = int(timestamp)
elif job_state == "POST_SCRIPT_TERMINATED":
self._post_script_done = int(timestamp)
elif job_state == "EXECUTE":
self._main_job_start = int(timestamp)
elif job_state == "JOB_TERMINATED":
self._main_job_done = int(timestamp)
elif (
job_state == "JOB_ABORTED"
or job_state == "SUBMIT_FAILED"
or job_state == "GLOBUS_SUBMIT_FAILED"
or job_state == "GRID_SUBMIT_FAILED"
):
self._main_job_done = int(
timestamp
) # PM-805, PM-877 job was aborted or submit failed, good chance job terminated event did not happen.
elif job_state == "JOB_SUCCESS" or job_state == "JOB_FAILURE":
self._main_job_exitcode = utils.regular_to_raw(status)
elif job_state == "POST_SCRIPT_SUCCESS" or job_state == "POST_SCRIPT_FAILURE":
self._post_script_exitcode = utils.regular_to_raw(status)
if self._main_job_done is None:
# PM-1016 Missing JOB_TERMINATED event.
self._main_job_done = int(timestamp)
def parse_sub_file(self, stamp, submit_file):
"""
This function parses a job's submit file and returns job
planning information. In addition, we try to populate the job
type from information in the submit file.
# paramtr: stamp(IN): timestamp associated with the log line
# paramtr: submit_file(IN): submit file name
# globals: good_rsl(IN): which RSL keys constitute time requirements
# returns: (largest job time requirement in minutes, destination site)
# returns: (None, None) if sub file not found
"""
parse_environment = False
my_result = None
my_site = None
# Update stat record for submit file
try:
my_stats = os.stat(submit_file)
except OSError:
# Could not stat file
logger.error("stat %s" % (submit_file))
return my_result, my_site
# Check submit file timestamp
if stamp < my_stats[8]: # mtime
logger.info(
"%s: sub file modified: job timestamp=%d, file mtime=%d, diff=%d"
% (submit_file, stamp, my_stats[8], my_stats[8] - stamp)
)
# Check if we need to parse the environment line
if self._exec_job_id.startswith("pegasus-plan") or self._exec_job_id.startswith(
"subdax_"
):
parse_environment = True
try:
SUB = open(submit_file)
except OSError:
logger.error("unable to parse %s" % (submit_file))
return my_result, my_site
# Parse submit file
for my_line in SUB:
if re_rsl_string.search(my_line):
# Found RSL string, do parse now
for my_match in re.findall(r"\(([^)]+)\)", my_line):
# Split into key and value
my_k, my_v = my_match.split("=", 1)
# Remove _- characters from string
my_k = re_rsl_clean.sub("", my_k)
if my_k.lower() in good_rsl and my_v > my_result:
try:
my_result = int(my_v)
except ValueError:
my_result = None
elif re_site_parse_gvds.search(my_line):
# GVDS agreement
my_site = re_site_parse_gvds.search(my_line).group(4)
self._site_name = my_site
elif re_site_parse_euryale.search(my_line):
# Euryale specific comment
my_site = re_site_parse_euryale.search(my_line).group(1)
self._site_name = my_site
elif re_parse_transformation.search(my_line):
# Found line with job transformation
my_transformation = re_parse_transformation.search(my_line).group(1)
# Remove quotes, if any
my_transformation = my_transformation.strip('"')
self._main_job_transformation = my_transformation
elif re_parse_derivation.search(my_line):
# Found line with job derivation
my_derivation = re_parse_derivation.search(my_line).group(1)
# Remove quotes, if any
my_derivation = my_derivation.strip('"')
if my_derivation == "null":
# If derivation is the "null" string, we don't want to take it
self._main_job_derivation = None
else:
self._main_job_derivation = my_derivation
elif re_parse_executable.search(my_line):
# Found line with executable
my_executable = re_parse_executable.search(my_line).group(1)
# Remove quotes, if any
my_executable = my_executable.strip('"')
self._main_job_executable = my_executable
elif re_parse_arguments.search(my_line):
# Found line with arguments
my_arguments = re_parse_arguments.search(my_line).group(1)
# Remove quotes, if any
my_arguments = my_arguments.strip('"')
self._main_job_arguments = my_arguments
elif re_parse_multiplier_factor.search(my_line):
# Found line with multiplier_factor
my_multiplier_factor = re_parse_multiplier_factor.search(my_line).group(
1
)
try:
self._main_job_multiplier_factor = int(my_multiplier_factor)
except ValueError:
logger.warning(
"%s: cannot convert multiplier factor: %s"
% (os.path.basename(submit_file), my_multiplier_factor)
)
self._main_job_multiplier_factor = None
elif re_parse_input.search(my_line):
# Found line with input file
my_input = re_parse_input.search(my_line).group(1)
# Remove quotes, if any
my_input = my_input.strip('"')
self._input_file = os.path.normpath(my_input)
elif re_parse_output.search(my_line):
# Found line with output file
my_output = re_parse_output.search(my_line).group(1)
# Remove quotes, if any
my_output = my_output.strip('"')
self._output_file = os.path.normpath(my_output)
elif re_parse_error.search(my_line):
# Found line with error file
my_error = re_parse_error.search(my_line).group(1)
# Remove quotes, if any
my_error = my_error.strip('"')
self._error_file = os.path.normpath(my_error)
elif parse_environment and re_parse_environment.search(my_line):
self._job_dagman_out = self.extract_dagman_out_from_condor_env(my_line)
if self._job_dagman_out is None:
logger.error(
"Unable to parse dagman out file from environment key %s in submit file for job %s"
% (my_line, self._exec_job_id)
)
elif re_parse_job_class.search(my_line):
self._job_type = re_parse_job_class.search(my_line).group(1)
SUB.close()
# All done!
return my_result, my_site
def extract_dagman_out_from_condor_env(self, condor_env):
"""
This function extracts the dagman out file from the condor environment
if one is specified
:param condor_env: the environment line from the condor submit file
:return: the dagman out file if detected else None
"""
# Found line with environment
env_value = re_parse_environment.search(condor_env).group(1)
# strip any enclosing quotes if any
stripped_env_value = re.sub(r'^"|"$', "", env_value)
if len(env_value) == len(stripped_env_value):
# we have old style condor environment with environment NOT ENCLOSED in double quotes
# and split by ;
sub_props = stripped_env_value.split(";")
else:
# we have new style condor environment with environment enclosed in double quotes
# and split by whitespace
sub_props = stripped_env_value.split(" ")
dagman_out = None
for sub_prop_line in sub_props:
sub_prop_line = sub_prop_line.strip() # Remove any spaces
if len(sub_prop_line) == 0:
continue
sub_prop = re_parse_property.search(sub_prop_line)
if sub_prop:
if sub_prop.group(1) == "_CONDOR_DAGMAN_LOG":
dagman_out = sub_prop.group(2)
break
return dagman_out
def extract_job_info(self, kickstart_output):
"""
This function reads the output from the kickstart parser and
extracts the job information for the Stampede schema. It first
looks for an invocation record, and then for a clustered
record.
Returns None if an error occurs, True if an invocation record
was found, and False if it wasn't.
"""
# Check if we have anything
if len(kickstart_output) == 0:
return None
# Kickstart was parsed
self._kickstart_parsed = True
        # PM-1157 the run dir is derived from the job submit dir (self._job_submit_dir)
# Let's try to find an invocation record...
my_invocation_found = False
my_task_number = 0
self._stdout_text = "" # Initialize stdout
stdout_text_list = []
stdout_size = 0
for my_record in kickstart_output:
if "multipart" in my_record:
# PM-1390 convert to integrity metrics
logger.debug("Multipart record %s", my_record)
self._add_multipart_events([my_record])
elif not "invocation" in my_record:
# Not this one... skip to the next
logger.trace("Skipping %s", my_record)
continue
# Ok, we have an invocation record, extract the information we
# need. Note that this may overwrite information obtained from
# the submit file (e.g. the site_name).
# Increment task_number
my_task_number = my_task_number + 1
if not my_invocation_found:
# Things we only need to do once
if "resource" in my_record:
self._site_name = my_record["resource"]
if "user" in my_record:
self._remote_user = my_record["user"]
if "cwd" in my_record:
self._remote_working_dir = my_record["cwd"]
# We are done with this part
my_invocation_found = True
# PM-1488 for containers case we have to remap hostname for all invocation
# records in a clustered job, not just the first one. otherwise the sqlite db
# will have dangling host records in the host table that has docker container generated
# names but no jobs associated with them.
if "hostname" in my_record:
ks_hostname = my_record["hostname"]
if self._host_id is None:
# PM-1488 only set the hostname to kickstart reported one only if
# it is not determined already (PegasusLite case) by parsing the job err file
self._host_id = ks_hostname
elif self._host_id != ks_hostname:
ks_hostaddr = my_record["hostaddr"]
if self._host_ip is not None:
                        # for 4.9 backward compatibility where PegasusLite does not record IP
# we keep the kickstart reported ip address to allow for database
# population as host table requires an ip to be not null
my_record["hostaddr"] = self._host_ip
my_record["hostname"] = self._host_id
logger.trace(
"For job %s preferring %s %s over kickstart reported hostname %s %s"
% (
self._exec_job_id,
my_record["hostname"],
my_record["hostaddr"],
ks_hostname,
ks_hostaddr,
)
)
# PM-1109 encode signal information if it exists
signal_message = " "
if "signalled" in my_record:
# construct our own error message
attrs = my_record["signalled"]
signal_message = "Job was "
if "action" in attrs:
signal_message += attrs["action"]
if "signal" in attrs:
signal_message += " with signal " + attrs["signal"]
# PM-641 optimization Modified string concatenation to a list join
if "stdout" in my_record:
task_output = self.split_task_output(my_record["stdout"])
self._add_additional_monitoring_events(task_output.events)
                # PM-1152 we always attempt to store up to MAX_OUTPUT_LENGTH
stdout = self.get_snippet_to_populate(
task_output.user_data, my_task_number, stdout_size, "stdout"
)
if stdout is not None:
try:
stdout_text_list.append(
utils.quote("#@ %d stdout\n" % (my_task_number))
)
stdout_text_list.append(utils.quote(stdout))
stdout_text_list.append(utils.quote("\n"))
stdout_size += len(stdout) + 20
except KeyError:
logger.exception(
"Unable to parse stdout section from kickstart record for task %s from file %s "
% (my_task_number, self.get_rotated_out_filename())
)
if "stderr" in my_record:
task_error = self.split_task_output(my_record["stderr"])
# add the events to those retrieved from the application stderr
self._add_additional_monitoring_events(task_error.events)
# Note: we are populating task stderr from kickstart record to job stdout only
stderr = self.get_snippet_to_populate(
signal_message + task_error.user_data,
my_task_number,
stdout_size,
"stderr",
)
if stderr is not None:
try:
stdout_text_list.append(
utils.quote("#@ %d stderr\n" % (my_task_number))
)
stdout_text_list.append(utils.quote(stderr))
stdout_text_list.append(utils.quote("\n"))
stdout_size += len(stderr) + 20
except KeyError:
logger.exception(
"Unable to parse stderr section from kickstart record for task %s from file %s "
% (my_task_number, self.get_rotated_out_filename())
)
# PM-1398 pass cpu info
if "cpu" in my_record:
self._cpu_attribs = my_record["cpu"]
if len(stdout_text_list) > 0:
self._stdout_text = "".join(stdout_text_list)
# PM-641 optimization merged encoding above
# Now, we encode it!
# if self._stdout_text != "":
# self._stdout_text = utils.quote(self._stdout_text)
if not my_invocation_found:
logger.debug("cannot find invocation record in output")
# Look for clustered record...
my_cluster_found = False
for my_record in kickstart_output:
if not "clustered" in my_record:
# Not this one... skip to the next
continue
# Ok found it, fill in cluster parameters
if "duration" in my_record:
self._cluster_duration = my_record["duration"]
if "start" in my_record:
# Convert timestamp to EPOCH
my_start = utils.epochdate(my_record["start"])
if my_start is not None:
self._cluster_start_time = my_start
# No need to look further...
my_cluster_found = True
break
if not my_cluster_found:
logger.debug("cannot find cluster record in output")
# Done populating Job class with information from the output file
return my_invocation_found
def get_job_state(self):
"""
Returns the current job state
:return:
"""
return self._job_state
def get_rotated_out_filename(self):
"""
Returns the name of the rotated .out file for the job on the basis
of the current counter
"""
basename = self._output_file
if self._has_rotated_stdout_err_files:
basename += ".%03d" % (self._job_output_counter)
return basename
def get_rotated_err_filename(self):
"""
Returns the name of the rotated .err file for the job on the basis
of the current counter
"""
basename = self._exec_job_id + ".err"
if self._has_rotated_stdout_err_files:
basename += ".%03d" % (self._job_output_counter)
return basename
def read_job_error_file(self, store_monitoring_events=True):
"""
        Reads the job error file and updates the job structures to store the stderr
        of the condor job; also attempts to parse the hostname from the stderr of the job.
:param store_monitoring_events: whether to store any parsed monitoring events in the job
:return:
"""
my_max_encoded_length = MAX_OUTPUT_LENGTH - 2000
if self._error_file is None:
# This is the case for SUBDAG jobs
self._stderr_text = None
return
# Finally, read error file only
run_dir = self._job_submit_dir
basename = self.get_rotated_err_filename()
my_err_file = os.path.join(run_dir, basename)
try:
ERR = open(my_err_file)
# PM-1274 parse any monitoring events such as integrity related
# from PegasusLite .err file
job_stderr = self.split_task_output(ERR.read())
buf = job_stderr.user_data
if len(buf) > my_max_encoded_length:
buf = buf[:my_max_encoded_length]
self._stderr_text = utils.quote(buf)
if store_monitoring_events:
self._add_additional_monitoring_events(job_stderr.events)
# PM-1355 attempt to determine the hostname from the pegasus lite job
hostname_match = re_parse_pegasuslite_hostname.search(job_stderr.user_data)
if hostname_match:
                # a match means this is a PegasusLite job; glean the hostname
self._host_id = hostname_match.group(1)
self._host_ip = hostname_match.group(2)
except OSError:
self._stderr_text = None
if not self.is_noop_job():
logger.warning(
"unable to read error file: %s, continuing..." % (my_err_file)
)
else:
ERR.close()
def read_job_out_file(self, out_file=None, store_monitoring_events=True):
"""
This function reads both stdout and stderr files and populates
these fields in the Job class.
"""
my_max_encoded_length = MAX_OUTPUT_LENGTH - 2000
if self._output_file is None:
# This is the case for SUBDAG jobs
self._stdout_text = None
if out_file is None:
# PM-1297 only construct relative path if out_file is not explicitly specified
run_dir = self._job_submit_dir
# PM-1157 output file has absolute path from submit file
# interferes with replay mode on another directory
# basename = self._output_file
basename = self._exec_job_id + ".out"
if self._has_rotated_stdout_err_files:
basename += ".%03d" % (self._job_output_counter)
out_file = os.path.join(run_dir, basename)
try:
OUT = open(out_file)
job_stdout = self.split_task_output(OUT.read())
buf = job_stdout.user_data
if len(buf) > my_max_encoded_length:
buf = buf[:my_max_encoded_length]
self._stdout_text = utils.quote("#@ 1 stdout\n" + buf)
if store_monitoring_events:
self._add_additional_monitoring_events(job_stdout.events)
except OSError:
self._stdout_text = None
if not self.is_noop_job():
logger.warning(
"unable to read output file: %s, continuing..." % (out_file)
)
else:
OUT.close()
def is_noop_job(self):
"""
A convenience method to indicate whether a job is a NOOP job or not
:return: True if a noop job else False
"""
if self._exec_job_id is not None and self._exec_job_id.startswith(
NOOP_JOB_PREFIX
):
return True
return False
def get_snippet_to_populate(
self, task_output, task_number, current_buffer_size, type
):
"""
:param task_output:
        :param task_number: the task number
:param current_buffer_size: the current size of the buffer that is storing job stdout for all tasks
:param type: whether stdout or stderr for logging
:return:
"""
        # PM-1152 we always attempt to store up to MAX_OUTPUT_LENGTH
# 20 is the rough estimate of number of extra characters added by URL encoding
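        # Worked example (hypothetical numbers): with MAX_OUTPUT_LENGTH = 65535 and a
        # buffer already holding 65000 characters, remaining = 65535 - 65000 - 20 = 515,
        # so only the first 515 characters of this task's output are kept.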
remaining = MAX_OUTPUT_LENGTH - current_buffer_size - 20
task_output_size = len(task_output)
stdout = None
if task_output_size <= remaining:
# we can store the whole stdout
stdout = task_output
else:
logger.debug(
"Only grabbing %s of %s for task %s from file %s "
% (remaining, type, task_number, self.get_rotated_out_filename())
)
if remaining > 0:
# we store only the first remaining chars
stdout = task_output[: -(task_output_size - remaining)]
return stdout
def split_task_output(self, task_output):
"""
Splits task output in to user app data and monitoring events for pegasus use
:param task_output:
:param type: whether stdout or stderr for logging
:return:
"""
events = []
start = 0
end = 0
# print task_output
start = task_output.find(MONITORING_EVENT_START_MARKER, start)
if start == -1:
# no monitoring marker found
return TaskOutput(task_output, events)
task_data = StringIO()
try:
while start != -1:
task_data.write(task_output[end:start])
end = task_output.find(MONITORING_EVENT_END_MARKER, start)
payload = task_output[start + len(MONITORING_EVENT_START_MARKER) : end]
try:
events.append(json.loads(payload))
except Exception:
logger.error("Unable to convert payload %s to JSON" % payload)
start = task_output.find(MONITORING_EVENT_START_MARKER, end)
task_data.write(task_output[end + len(MONITORING_EVENT_END_MARKER) :])
except Exception as e:
logger.error(
"Unable to parse monitoring events from job stdout for job %s"
% self._exec_job_id
)
logger.exception(e)
# return the whole task output as is
return TaskOutput(task_data.getvalue(), events)
return TaskOutput(task_data.getvalue(), events)
def create_composite_job_event(self, job_inst_kwargs):
"""
this creates a composite job event that also includes all information included in a job_inst.end event
:param my_job:
:param job_inst_kwargs:
:return:
"""
kwargs = {}
        # add the keyword arguments used to populate the job_instance table
kwargs.update(job_inst_kwargs)
# count any integrity errors
error_count = 0
for metric in self._integrity_metrics:
error_count += metric.failed
kwargs["int_error_count"] = error_count
if self._host_id:
kwargs["hostname"] = self._host_id
job_type = self._get_jobtype_desc()
kwargs["jobtype"] = job_type
if self._cpu_attribs:
for key in self._cpu_attribs:
kwargs[key] = self._cpu_attribs[key]
# PM-1398 for DIBBS we want task monitoring event that has metadata
# to be included in the composite event also
if self._additional_monitoring_events:
for event in self._additional_monitoring_events:
event_name = (
event["monitoring_event"]
if "monitoring_event" in event
else "monitoring.additional"
)
if event_name == "metadata":
# flatten out metadata key values into event
"""
# sample event we want to be able to parse
{
"ts": 1437688574,
"monitoring_event": "metadata",
"payload": [
{
"name": "num_template_banks",
"value" : 3
},
{
"name": "event_name",
"value" : "binary start merger"
}
]
}
"""
payload = event["payload"] if "payload" in event else None
if payload is None:
logger.error("No payload retrieved from event %s" % event)
for m in event["payload"]:
if "name" in m and "value" in m:
kwargs["metadata__" + m["name"]] = m["value"]
else:
logger.error(
"Additional monitoring event of type metadata can only have name value pairs in payload %s"
% event
)
# sanity check
if job_type == "unknown" or job_type == "unassigned":
logger.warning(
"Job {} has unknown type {}".format(self._exec_job_id, job_type)
)
# if error_count > 0:
# print kwargs
for event in self._multipart_events:
# if len(event) != 1:
# # should be a dictionary with exactly one key and value
# logger.error("Malformed multipart event %s for job %s" %(event,self._exec_job_id))
# continue
for key in event.keys():
if key == "multipart":
continue
kwargs[key] = event[key]
return kwargs
| apache-2.0 | 6,193,976,388,861,813,000 | 39.086781 | 123 | 0.53801 | false |
mwallraf/pingthis.net | app/public/decorators.py | 1 | 1294 | from flask import current_app, request, Response
from functools import wraps
"""
Decorator to validate the streaming endpoints by checking whether a cookie exists.
This is not really secure and should be replaced with a real authentication method.
"""
def validate_cookie(f):
cookie = "pingthis"
@wraps(f)
def decorated(*args, **kwargs):
current_app.logger.debug("validate cookie '{}'".format(cookie))
if not request.cookies.get(cookie):
return Response(), 403
return f(*args, **kwargs)
return decorated
"""
Decorator to validate the streaming endpoints to provide basic security:
Parameters:
ipv4=<ip> => validate if ipv4 ip address is provided
tcpport=<port> => validate if TCP port is provided
cookie=<cookie name> => validate if cookie is present
"""
def validate_stream(f):
@wraps(f)
def decorated(*args, **kwargs):
if 'ipaddress' in kwargs:
current_app.logger.debug("Validate IPv4 ip address")
if not kwargs['ipaddress']:
return Response(), 400
if 'port' in kwargs:
current_app.logger.debug("Validate TCP port")
if not kwargs['port']:
return Response(), 400
return f(*args, **kwargs)
return decorated
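# Illustrative sketch (not part of the original module): how these decorators might be
# combined on a Flask streaming route. The route, app object and stream_ping() helper
# below are hypothetical placeholders.
#
#     @app.route("/stream/ping/<ipaddress>")
#     @validate_cookie
#     @validate_stream
#     def ping_stream(ipaddress=None):
#         return Response(stream_ping(ipaddress), mimetype="text/event-stream")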
| mit | 5,942,527,575,446,030,000 | 29.809524 | 80 | 0.633694 | false |
fujunwei/chromium-crosswalk | tools/profile_chrome/main.py | 4 | 10498 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import sys
import webbrowser
from profile_chrome import chrome_controller
from profile_chrome import flags
from profile_chrome import perf_controller
from profile_chrome import profiler
from profile_chrome import systrace_controller
from profile_chrome import ui
from pylib import android_commands
from pylib.device import device_utils
_DEFAULT_CHROME_CATEGORIES = '_DEFAULT_CHROME_CATEGORIES'
def _ComputeChromeCategories(options):
categories = []
if options.trace_frame_viewer:
categories.append('disabled-by-default-cc.debug')
if options.trace_ubercompositor:
categories.append('disabled-by-default-cc.debug*')
if options.trace_gpu:
categories.append('disabled-by-default-gpu.debug*')
if options.trace_flow:
categories.append('disabled-by-default-toplevel.flow')
if options.trace_memory:
categories.append('disabled-by-default-memory')
if options.trace_scheduler:
categories.append('disabled-by-default-blink.scheduler')
categories.append('disabled-by-default-cc.debug.scheduler')
categories.append('disabled-by-default-renderer.scheduler')
if options.chrome_categories:
categories += options.chrome_categories.split(',')
return categories
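# Illustrative result (hypothetical flags): with --trace-gpu and --categories=cc,gpu
# this helper returns
#   ['disabled-by-default-gpu.debug*', 'cc', 'gpu']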
def _ComputeSystraceCategories(options):
if not options.systrace_categories:
return []
return options.systrace_categories.split(',')
def _ComputePerfCategories(options):
if not perf_controller.PerfProfilerController.IsSupported():
return []
if not options.perf_categories:
return []
return options.perf_categories.split(',')
def _OptionalValueCallback(default_value):
def callback(option, _, __, parser):
value = default_value
if parser.rargs and not parser.rargs[0].startswith('-'):
value = parser.rargs.pop(0)
setattr(parser.values, option.dest, value)
return callback
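# Illustrative behaviour of the optional-value callback above, as used for -p/--perf
# (the category names are hypothetical examples):
#   --perf               -> options.perf_categories == 'cycles' (the callback default)
#   --perf cache-misses  -> options.perf_categories == 'cache-misses'
#   option omitted       -> options.perf_categories == '' (the option's default)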
def _CreateOptionParser():
parser = optparse.OptionParser(description='Record about://tracing profiles '
'from Android browsers. See http://dev.'
'chromium.org/developers/how-tos/trace-event-'
'profiling-tool for detailed instructions for '
'profiling.')
timed_options = optparse.OptionGroup(parser, 'Timed tracing')
timed_options.add_option('-t', '--time', help='Profile for N seconds and '
'download the resulting trace.', metavar='N',
type='float')
parser.add_option_group(timed_options)
cont_options = optparse.OptionGroup(parser, 'Continuous tracing')
cont_options.add_option('--continuous', help='Profile continuously until '
'stopped.', action='store_true')
cont_options.add_option('--ring-buffer', help='Use the trace buffer as a '
'ring buffer and save its contents when stopping '
'instead of appending events into one long trace.',
action='store_true')
parser.add_option_group(cont_options)
chrome_opts = optparse.OptionGroup(parser, 'Chrome tracing options')
chrome_opts.add_option('-c', '--categories', help='Select Chrome tracing '
'categories with comma-delimited wildcards, '
'e.g., "*", "cat1*,-cat1a". Omit this option to trace '
'Chrome\'s default categories. Chrome tracing can be '
'disabled with "--categories=\'\'". Use "list" to '
'see the available categories.',
metavar='CHROME_CATEGORIES', dest='chrome_categories',
default=_DEFAULT_CHROME_CATEGORIES)
chrome_opts.add_option('--trace-cc',
help='Deprecated, use --trace-frame-viewer.',
action='store_true')
chrome_opts.add_option('--trace-frame-viewer',
help='Enable enough trace categories for '
'compositor frame viewing.', action='store_true')
chrome_opts.add_option('--trace-ubercompositor',
help='Enable enough trace categories for '
'ubercompositor frame data.', action='store_true')
chrome_opts.add_option('--trace-gpu', help='Enable extra trace categories '
'for GPU data.', action='store_true')
chrome_opts.add_option('--trace-flow', help='Enable extra trace categories '
'for IPC message flows.', action='store_true')
chrome_opts.add_option('--trace-memory', help='Enable extra trace categories '
'for memory profile. (tcmalloc required)',
action='store_true')
chrome_opts.add_option('--trace-scheduler', help='Enable extra trace '
'categories for scheduler state',
action='store_true')
parser.add_option_group(chrome_opts)
parser.add_option_group(flags.SystraceOptions(parser))
if perf_controller.PerfProfilerController.IsSupported():
perf_opts = optparse.OptionGroup(parser, 'Perf profiling options')
perf_opts.add_option('-p', '--perf', help='Capture a perf profile with '
'the chosen comma-delimited event categories. '
'Samples CPU cycles by default. Use "list" to see '
'the available sample types.', action='callback',
default='', callback=_OptionalValueCallback('cycles'),
metavar='PERF_CATEGORIES', dest='perf_categories')
parser.add_option_group(perf_opts)
parser.add_option_group(flags.OutputOptions(parser))
browsers = sorted(profiler.GetSupportedBrowsers().keys())
parser.add_option('-b', '--browser', help='Select among installed browsers. '
'One of ' + ', '.join(browsers) + ', "stable" is used by '
'default.', type='choice', choices=browsers,
default='stable')
parser.add_option('-v', '--verbose', help='Verbose logging.',
action='store_true')
parser.add_option('-z', '--compress', help='Compress the resulting trace '
'with gzip. ', action='store_true')
parser.add_option('-d', '--device', help='The Android device ID to use.'
'If not specified, only 0 or 1 connected devices are '
'supported.', default=None)
return parser
def main():
parser = _CreateOptionParser()
options, _args = parser.parse_args()
if options.trace_cc:
    parser.error("""--trace-cc is deprecated.
For basic jank busting uses, use --trace-frame-viewer
For detailed study of ubercompositor, pass --trace-ubercompositor.
When in doubt, just try out --trace-frame-viewer.
""")
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
devices = android_commands.GetAttachedDevices()
device = None
if options.device in devices:
device = options.device
elif not options.device and len(devices) == 1:
device = devices[0]
if not device:
parser.error('Use -d/--device to select a device:\n' + '\n'.join(devices))
device = device_utils.DeviceUtils(device)
package_info = profiler.GetSupportedBrowsers()[options.browser]
if options.chrome_categories in ['list', 'help']:
ui.PrintMessage('Collecting record categories list...', eol='')
record_categories = []
disabled_by_default_categories = []
record_categories, disabled_by_default_categories = \
chrome_controller.ChromeTracingController.GetCategories(
device, package_info)
ui.PrintMessage('done')
ui.PrintMessage('Record Categories:')
ui.PrintMessage('\n'.join('\t%s' % item \
for item in sorted(record_categories)))
ui.PrintMessage('\nDisabled by Default Categories:')
ui.PrintMessage('\n'.join('\t%s' % item \
for item in sorted(disabled_by_default_categories)))
return 0
if options.systrace_categories in ['list', 'help']:
ui.PrintMessage('\n'.join(
systrace_controller.SystraceController.GetCategories(device)))
return 0
if (perf_controller.PerfProfilerController.IsSupported() and
options.perf_categories in ['list', 'help']):
ui.PrintMessage('\n'.join(
perf_controller.PerfProfilerController.GetCategories(device)))
return 0
if not options.time and not options.continuous:
ui.PrintMessage('Time interval or continuous tracing should be specified.')
return 1
chrome_categories = _ComputeChromeCategories(options)
systrace_categories = _ComputeSystraceCategories(options)
perf_categories = _ComputePerfCategories(options)
if chrome_categories and 'webview' in systrace_categories:
logging.warning('Using the "webview" category in systrace together with '
'Chrome tracing results in duplicate trace events.')
enabled_controllers = []
if chrome_categories:
enabled_controllers.append(
chrome_controller.ChromeTracingController(device,
package_info,
chrome_categories,
options.ring_buffer,
options.trace_memory))
if systrace_categories:
enabled_controllers.append(
systrace_controller.SystraceController(device,
systrace_categories,
options.ring_buffer))
if perf_categories:
enabled_controllers.append(
perf_controller.PerfProfilerController(device,
perf_categories))
if not enabled_controllers:
ui.PrintMessage('No trace categories enabled.')
return 1
if options.output:
options.output = os.path.expanduser(options.output)
result = profiler.CaptureProfile(
enabled_controllers,
options.time if not options.continuous else 0,
output=options.output,
compress=options.compress,
write_json=options.json)
if options.view:
if sys.platform == 'darwin':
os.system('/usr/bin/open %s' % os.path.abspath(result))
else:
webbrowser.open(result)
| bsd-3-clause | -5,159,660,991,637,396,000 | 40.168627 | 80 | 0.632882 | false |
srware/upm | examples/python/bma220.py | 9 | 2002 | #!/usr/bin/python
# Author: Jon Trulson <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, sys, signal, atexit
import pyupm_bma220 as sensorObj
# Instantiate a BMA220 using default parameters (bus 0, addr 0x0a)
sensor = sensorObj.BMA220()
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit
def exitHandler():
print "Exiting"
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
x = sensorObj.new_floatp()
y = sensorObj.new_floatp()
z = sensorObj.new_floatp()
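# x, y and z are SWIG-generated C float pointer wrappers; getAccelerometer()
# fills them in and floatp_value() reads them back as Python floats.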
while (1):
sensor.update()
sensor.getAccelerometer(x, y, z)
print "Accelerometer: AX:", sensorObj.floatp_value(x),
print " AY:", sensorObj.floatp_value(y),
print " AZ:", sensorObj.floatp_value(z)
time.sleep(.5)
| mit | 2,388,247,993,003,881,500 | 35.4 | 78 | 0.748252 | false |
alokotosh/mm-master | test/unit/project/project_create_tests.py | 3 | 10406 | import os
import unittest
import shutil
import mock
import test.lib.test_helper as test_helper
import test.lib.mock_helper as mock_helper
from test.lib.test_helper import MavensMateTest
from test.lib.fixtures.retrieve import MockRetrieveResponse
from mm.sfdc_client import MavensMateClient
base_test_directory = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
project_name = 'unit test project'
class ProjectCreateTest(MavensMateTest):
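    # Tests for the new_project / new_project_from_existing_directory commands.
    # The Salesforce-facing calls (login, describe, retrieve) are mocked out via
    # mock_helper and MockRetrieveResponse, so no network access is required.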
def test_should_notify_user_of_duplicate_project_name_in_workspace(self):
os.mkdir(os.path.join(base_test_directory, 'test_workspace', 'test_should_notify_user_of_duplicate_project_name_in_workspace'))
package = { "ApexClass" : "*" }
mock_helper.mock_login_and_describe()
mm_response = test_helper.create_project(self, 'test_should_notify_user_of_duplicate_project_name_in_workspace', package)
self.assertEquals(False, mm_response['success'])
self.assertTrue('A project with this name already exists in your workspace' in mm_response['body'])
shutil.rmtree(os.path.join(base_test_directory,"test_workspace",'test_should_notify_user_of_duplicate_project_name_in_workspace'))
def test_should_prompt_for_username(self):
stdin = {
"project_name" : 'foo'
}
mm_response = self.runCommand('new_project', stdin)
self.assertEquals(False, mm_response['success'])
self.assertTrue('Please specify a username' in mm_response['body'])
def test_should_prompt_for_password(self):
stdin = {
"project_name" : 'foo',
"username" : 'foo'
}
mm_response = self.runCommand('new_project', stdin)
self.assertEquals(False, mm_response['success'])
self.assertTrue('Please specify a password' in mm_response['body'])
def test_should_prompt_for_org_type(self):
stdin = {
"project_name" : project_name,
"username" : 'foo',
"password" : 'foo'
}
mm_response = self.runCommand('new_project', stdin)
self.assertEquals(False, mm_response['success'])
self.assertTrue('Please specify org_type' in mm_response['body'])
def test_should_except_for_empty_package_dict(self):
package = {}
mock_helper.mock_login_and_describe()
mm_response = test_helper.create_project(self, "test_should_except_for_empty_package_dict", package=package)
self.assertTrue(mm_response['success'] == False)
self.assertTrue(mm_response['body'] == 'Invalid package')
def test_should_retrieve_empty_project(self):
package = os.path.join(os.path.join(base_test_directory, 'unit', 'project', 'empty_package.xml'))
mock_helper.mock_login_and_describe()
mm_response = test_helper.create_project(self, "test_should_retrieve_empty_project", package=package)
self.assertTrue(mm_response['success'] == True)
def test_should_create_project_with_default_metadata(self):
stdin = {
"project_name" : project_name,
"username" : '[email protected]',
"password" : 'force',
"org_type" : 'developer'
}
mock_helper.mock_login_and_describe()
# mock retrieve call
MavensMateClient.retrieve = mock.Mock(return_value=MockRetrieveResponse())
MavensMateClient.get_metadata_container_id = mock.Mock(return_value='12345')
mm_response = self.runCommand('new_project', stdin)
self.assertEquals(True, mm_response['success'])
self.assertEquals('Project Retrieved and Created Successfully', mm_response['body'])
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name)))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'src')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'src', 'components')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.settings')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.session')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.describe')))
shutil.rmtree(os.path.join(base_test_directory,"test_workspace",project_name))
def test_should_create_new_project(self):
package = {
"ApexClass" : "*",
"ApexPage" : "*",
}
mock_helper.mock_login_and_describe()
# mock retrieve call
MavensMateClient.retrieve = mock.Mock(return_value=MockRetrieveResponse())
MavensMateClient.get_metadata_container_id = mock.Mock(return_value='12345')
mm_response = test_helper.create_project(self, project_name, package)
self.assertEquals(True, mm_response['success'])
self.assertEquals('Project Retrieved and Created Successfully', mm_response['body'])
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name)))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'src')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'src', 'components')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.settings')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.session')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.describe')))
shutil.rmtree(os.path.join(base_test_directory,"test_workspace",project_name))
def test_should_create_new_project_from_existing_directory(self):
package = {
"ApexClass" : "*",
"ApexPage" : "*",
}
mock_helper.mock_login_and_describe()
# mock retrieve call
MavensMateClient.retrieve = mock.Mock(return_value=MockRetrieveResponse())
MavensMateClient.get_metadata_container_id = mock.Mock(return_value='12345')
mm_response = test_helper.create_project(self, project_name, package)
self.assertEquals(True, mm_response['success'])
self.assertEquals('Project Retrieved and Created Successfully', mm_response['body'])
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name)))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'src')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'src', 'components')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.settings')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.session')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', project_name, 'config', '.describe')))
shutil.rmtree(os.path.join(base_test_directory,"test_workspace",project_name))
def test_should_create_project_from_existing_directory(self):
if os.path.exists(os.path.join(base_test_directory,"unit","project","existing-project-copy")):
shutil.rmtree(os.path.join(base_test_directory,"unit","project","existing-project-copy"))
if not os.path.exists(os.path.join(base_test_directory, 'unit', 'project', 'existing-project-copy')):
shutil.copytree(os.path.join(base_test_directory, 'unit', 'project', 'existing-project'), os.path.join(base_test_directory, 'unit', 'project', 'existing-project-copy'))
stdin = {
"project_name" : "existing-project-copy",
"username" : test_helper.get_creds()['username'],
"password" : test_helper.get_creds()['password'],
"org_type" : test_helper.get_creds()['org_type'],
"directory" : os.path.join(base_test_directory, 'unit', 'project', 'existing-project-copy'),
"workspace" : os.path.join(base_test_directory, 'test_workspace'),
"action" : "existing"
}
mock_helper.mock_login_and_describe()
MavensMateClient.retrieve = mock.Mock(return_value=MockRetrieveResponse())
MavensMateClient.get_metadata_container_id = mock.Mock(return_value='12345')
pn = 'existing-project-copy'
mm_response = self.runCommand('new_project_from_existing_directory', stdin)
self.assertTrue(mm_response['success'] == True)
self.assertTrue(mm_response['body'] == 'Project Created Successfully')
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', pn)))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', pn, 'src')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', pn, 'src', 'classes')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', pn, 'src', 'objects')))
self.assertTrue(os.path.exists(os.path.join(base_test_directory, 'test_workspace', pn, 'config')))
self.assertTrue(os.path.isfile(os.path.join(base_test_directory, 'test_workspace', pn, 'config', '.session')))
self.assertTrue(os.path.isfile(os.path.join(base_test_directory, 'test_workspace', pn, 'config', '.settings')))
self.assertTrue(os.path.isfile(os.path.join(base_test_directory, 'test_workspace', pn, 'src', 'package.xml')))
shutil.rmtree(os.path.join(base_test_directory,"test_workspace","existing-project-copy"))
@classmethod
def tearDownClass(self):
if os.path.exists(os.path.join(base_test_directory,"test_workspace",project_name)):
shutil.rmtree(os.path.join(base_test_directory,"test_workspace",project_name))
if __name__ == '__main__':
if os.path.exists(os.path.join(base_test_directory,"test_workspace",project_name)):
shutil.rmtree(os.path.join(base_test_directory,"test_workspace",project_name))
    unittest.main()
| gpl-2.0 | 4,593,049,106,448,929,300 | 54.951613 | 180 | 0.665385 | false |
ChimeraCoder/lobbyistsfromlastnight | server.py | 1 | 15761 | #! /usr/bin/env python
from __future__ import division, print_function
from flask import Flask
from flask import request, redirect, url_for
from flask import Response
from flask import render_template
from flask import flash
from flask import jsonify
from flask.ext.login import (LoginManager, login_required, login_user,
                             logout_user, UserMixin, current_user)
from functools import wraps
import os
import sunlight
import json
import time
import urllib2
import csv
import re
import pylibmc
import parsedatetime as pdt
import phonenumbers
import bcrypt
app = Flask(__name__)
app.config.from_envvar('APP_SETTINGS')
app.secret_key = app.config["SESSION_SECRET"]
from werkzeug.contrib.cache import MemcachedCache
cache = MemcachedCache(['127.0.0.1:11211'])
from flask.ext.wtf import Form, TextField, PasswordField, validators, BooleanField
###HACK THAT FIXES PYMONGO BUG
#http://stackoverflow.com/questions/10401499/mongokit-importerror-no-module-named-objectid-error
#TODO remove this once the upstream bug is fixed
import sys
import pymongo
import bson.objectid
pymongo.objectid = bson.objectid
sys.modules["pymongo.objectid"] = bson.objectid
pymongo.binary = bson.binary
sys.modules["pymongo.binary"] = bson.binary
#### END HACK THAT WILL BE REMOVED
cache = pylibmc.Client(servers=[app.config['MEMCACHE_SERVERS']], binary=True)
cache = MemcachedCache(cache)
from flaskext.mongoalchemy import MongoAlchemy, BaseQuery
db = MongoAlchemy(app)
MEMCACHED_TIMEOUT = 10 * 60
MEMCACHED_TIMEOUT_SUNLIGHT = 3 * 60 * 60
MAX_SEARCH_RESULTS = 20
ROMNEY_CID = 'N00000286'
OBAMA_CID = 'N00009638'
sunlight.config.API_KEY = app.config['SUNLIGHT_API_KEY']
login_manager = LoginManager()
login_manager.setup_app(app)
from twilio.rest import TwilioRestClient
client = TwilioRestClient()
import logging
from logging import Formatter, FileHandler
file_handler = FileHandler('runlogs.log', mode='a')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
))
app.logger.addHandler(file_handler)
@app.route('/')
def welcome():
return render_template("home.html")
@app.route('/twitter')
def twitter():
return render_template("twitter.html")
@app.route('/about/')
def about():
return render_template("about.html")
@app.route('/contact/')
def contact():
return render_template("contact.html")
#This is called *after* the user's password is verified
#TODO remove the redundant second query and combine it with the first
@login_manager.user_loader
def load_user(userid):
#TODO check passwords!
print("loading user", userid)
#check the memcached cache first
rv = cache.get(userid)
if rv is None:
rv = MongoUser.query.filter(MongoUser.mongo_id == userid).first()
if rv is not None:
cache.set(userid, rv, MEMCACHED_TIMEOUT)
return rv
def load_user_by_username(username):
user_result = MongoUser.query.filter(MongoUser.username == username).first()
if user_result is not None:
cache.set(str(user_result.mongo_id), user_result, MEMCACHED_TIMEOUT)
return user_result
@app.route('/login/', methods = ["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
#login and validate user
login_user(form.user)
flash("Logged in successfully")
return redirect(request.args.get("next") or url_for("legislators_search"))
else:
return render_template("login.html", form=form)
@app.route('/signup/', methods = ["GET" , "POST"])
def signup():
form = RegistrationForm()
if form.validate_on_submit():
print("registering user")
username = request.form['username']
password = request.form['password']
email = request.form['email']
confirm = request.form['confirm']
zipcode = request.form['zipcode']
accept_tos = request.form['accept_tos']
new_user = MongoUser(username = username, password = bcrypt.hashpw(password, bcrypt.gensalt()), email = email, zipcode=zipcode, created_at = int(time.time()))
new_user.save()
return redirect(url_for('welcome'))
else:
return render_template("signup.html", form=form)
@app.route("/logout/")
@login_required
def logout():
logout_user()
return redirect(url_for('welcome'))
@login_manager.unauthorized_handler
def unauthorized():
return render_template("index.html", flash="unauthorized", intro_text="You need to log in to view this page")
@app.context_processor
def inject_user_authenticated():
return dict(user_authenticated = current_user.is_authenticated())
class SMSSubscription(db.Document):
phone_number = db.StringField()
legislator_id = db.StringField() #The same one that is used for the /events/<cid> route
class MongoUser(db.Document, UserMixin):
username = db.StringField()
password = db.StringField()
zipcode = db.StringField()
email = db.StringField()
created_at = db.IntField(required = True)
def get_id(self):
return str(self.mongo_id)
class RegistrationForm(Form):
username = TextField('Username', [validators.Length(min=4, max=25)])
email = TextField('Email Address', [validators.Length(min=6, max=35)])
zipcode = TextField('Zipcode', [validators.Length(min=5, max=35)])
password = PasswordField('New Password', [
validators.Required(),
validators.EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Repeat Password')
accept_tos = BooleanField('I accept the TOS', [validators.Required()])
class LoginForm(Form):
username = TextField('Username', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
#TODO check password
user = MongoUser.query.filter(MongoUser.username == self.username.data).first()
if user is None:
return False
else:
#self.username = user.username
entered_password = self.password.data
if bcrypt.hashpw(entered_password, user.password) == user.password:
#User entered the correct password
cache.set(user.get_id(), user, MEMCACHED_TIMEOUT)
self.user = user
return True
else:
return False
@app.route('/events/<cid>/')
@app.route('/events/<cid>/<eid>/')
def events_for_legislator(cid, eid=None):
person = person_by_cid(cid)
events = events_by_cid(cid)
event_count = len(events)
for event in events:
event['suggested_tweets'] = suggested_tweets(person, event)
events = json.dumps(events, default=lambda o: o.__dict__)
title = person['title'] + ' ' + person['lastname'] + ' | Events'
return render_template('events.html', events=events, person=person, event_count=event_count, event_id=eid, title=title, cid=cid)
def events_by_cid(cid):
#check the memcached cache first
cache_key = "events_" + cid
events = cache.get(cache_key)
breakcache = request.args.get("breakcache", None)
if events is None or breakcache is not None:
try:
events = json.loads(urllib2.urlopen("http://politicalpartytime.org/json/" + cid).read())
if events is not None:
cache.set(cache_key, events, MEMCACHED_TIMEOUT_SUNLIGHT)
except urllib2.URLError:
events = []
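    # politicalpartytime.org returns Django-serializer style JSON: each item
    # looks like {"pk": <id>, "model": "...", "fields": {...}}, so copy the pk
    # into the fields dict and keep only the fields dicts from here on.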
for e in events:
e['fields']['id'] = e['pk']
events = map(lambda e: e['fields'], events)
# print(events)
# for e in events:
# if e['start_date']:
# e['start_date'] = time.strptime(e['start_date'], "%Y-%m-%d")
# print(e['start_date'])
# print(e['start_date'].tm_year)
# events.sort(key=lambda e: e['start_date'].tm_year)
for e in events:
if e['start_date']:
e['start_date'] = time.strptime(e['start_date'], "%Y-%m-%d")
e['start_date'] = time.strftime("%b %d, %Y", e['start_date'])
# events.reverse()
return events
def parse_tweet(tweet, event, person):
'''Will return None if an error occurs'''
#person will likely be a legislator
    if not tweet:
return None
try:
tweet = tweet.replace("@lawmaker", "@"+person['twitter_id'])
except TypeError:
return None
    contribution_regex = re.compile("\$[\d,]+")
    if event['contributions_info']:
        contribution_matches = contribution_regex.match(event['contributions_info'])
        if contribution_matches:
            contribution_amount = contribution_matches.group()
            tweet = tweet.replace("[Contributions Info]", contribution_amount)
    # Fill the remaining placeholders from the event record itself; placeholders
    # we cannot fill are left in place so the "[" check below drops the tweet.
    if event.get('venue'):
        tweet = tweet.replace("[venue name]", event['venue'])
    if event.get('start_time'):
        tweet = tweet.replace("[start time]", event['start_time'])
    if event.get('end_time'):
        tweet = tweet.replace("[end time]", event['end_time'])
    if event.get('start_date'):
        tweet = tweet.replace("[event date]", event['start_date'])
tweet = tweet + " #lfln"
if "[" in tweet:
return None
return tweet
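# Hedged illustration (not part of the original app): how parse_tweet()
# expands a template. The person/event dicts and template text below are
# made-up assumptions for this sketch, not real Party Time data.
def _example_parse_tweet():
    sample_person = {'twitter_id': 'example_lawmaker'}
    sample_event = {
        'contributions_info': '$5,000 suggested contribution',
        'venue': 'Some Restaurant',
        'start_time': '6:30 PM',
        'end_time': '8:30 PM',
        'start_date': 'Sep 01, 2012',
    }
    template = "Did @lawmaker have fun at [venue name] on [event date]?"
    # Expected result (roughly): "Did @example_lawmaker have fun at
    # Some Restaurant on Sep 01, 2012? #lfln"
    return parse_tweet(template, sample_event, sample_person)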
def suggested_tweets(legislator, event):
#If a null legislator is provided, return an empty list
if legislator is None:
return []
suggested_tweets = []
tweets_csv = csv.reader(open('tweets.tsv', 'rb'), delimiter='\t')
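    # Assumed tweets.tsv layout (inferred from the loop below, not checked
    # against the real file): each tab-separated row is
    #   <keyword>\t<tweet template>\t<tweet template>...
    # with the keywords 'obama', 'romney' and 'general' given special handling.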
for row in tweets_csv:
keyword = row[0].lower()
if event['entertainment'] == None:
continue
if keyword in event['entertainment'].lower():
for tweet in row[1:]:
suggested_tweets.append(parse_tweet(tweet, event, legislator))
elif keyword == 'obama' and legislator['lastname'] == 'Obama':
for tweet in row[1:]:
suggested_tweets.append(parse_tweet(tweet, event, legislator))
elif keyword == 'romney' and legislator['lastname'] == 'Romney':
for tweet in row[1:]:
suggested_tweets.append(parse_tweet(tweet, event, legislator))
elif keyword == 'general':
for tweet in row[1:]:
suggested_tweets.append(parse_tweet(tweet, event, legislator))
#Filter out all None (null) tweets
suggested_tweets = filter(lambda x: x is not None, suggested_tweets)
return suggested_tweets
def telephone_by_cid(cid):
person = person_by_cid(cid);
return person.get('phone', None)
def person_by_cid(cid):
#check the memcached cache first
cache_key = "person_" + cid
person = cache.get(cache_key)
breakcache = request.args.get("breakcache", None)
if person is None or breakcache is not None:
people = sunlight.congress.legislators(crp_id=cid)
if people and len(people) > 0:
person = people[0]
else:
if cid == OBAMA_CID:
person = {
'title' : 'President',
'firstname' : 'Barack',
'middlename' : 'Hussein',
'lastname' : 'Obama',
'party' : 'D',
'twitter_id' : 'BarackObama',
'phone' : '202-456-1111'
}
elif cid == ROMNEY_CID:
person = {
'title' : 'Governor',
'firstname' : 'Willard',
'middlename' : 'Mitt',
'lastname' : 'Romney',
'party' : 'R',
'twitter_id' : 'MittRomney',
'phone' : '857-288-3500'
}
else:
person = None
if person is not None:
cache.set(cache_key, person, MEMCACHED_TIMEOUT)
return person
@app.route('/legislators/search')
def legislators_search():
zipcode = request.args.get("zipcode", None)
if zipcode:
legislators = load_legislators(zipcode)
title = "Legislators for " + zipcode
return render_template('legislators.html', zipcode=zipcode, legislators=legislators, title=title)
else:
app.logger.warning("Could not load zipcode; retrying. Zipcode: " + str(request.args.get("zipcode", None)))
title = "Legislators"
return render_template('legislators_form.html', title=title)
@app.route('/legislators/')
def legislators():
return redirect(url_for('legislators_search'))
def load_legislators(zipcode):
#check the memcached cache first
cache_key = "zipcode_" + zipcode
legislators = cache.get(cache_key)
breakcache = request.args.get("breakcache", None)
if legislators is None or breakcache is not None:
legislators = sunlight.congress.legislators_for_zip(zipcode=zipcode)
senators = []
representatives = []
for person in legislators:
if person['chamber'] == 'senate':
senators.append(person)
elif person['chamber'] == 'house':
representatives.append(person)
if len(senators) == 0 and len(representatives) > 0:
senators.append({
"district": "Senior Seat",
"title": "Sen",
"in_office": True,
"state": "DC",
"crp_id": "0",
"chamber": "senate",
"party": "I",
"firstname": "Casper",
"middlename": "The Friendly",
"lastname": "Ghost",
"facebook_id": "pages/Casper-the-Friendly-Ghost/92386373162",
"gender": "M",
"twitter_id": "ThFriendlyGhost",
})
senators.append({
"district": "Junior Seat",
"title": "Sen",
"in_office": True,
"state": "DC",
"crp_id": "0",
"chamber": "senate",
"party": "I",
"firstname": "Baratunde",
"middlename": "",
"lastname": "Thurston",
"facebook_id": "baratunde",
"gender": "M",
"twitter_id": "baratunde",
})
legislators = {'Senate' : senators, 'House' : representatives}
if legislators is not None:
cache.set(cache_key, legislators, MEMCACHED_TIMEOUT)
# else:
# print("LEGS FROM CACHE")
return legislators
@app.route('/subscribe_sms', methods=['POST'])
def subscribe_sms():
phone_number = request.form.get("subscribe_number", None)
legislator_id = request.form.get("legislator_id", None)
number = phonenumbers.parse(phone_number, 'US')
number = phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
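    # The call above yields INTERNATIONAL format (digits separated by spaces).
    # Twilio also accepts E.164; passing phonenumbers.PhoneNumberFormat.E164
    # instead of INTERNATIONAL above would give e.g. '+12025551234' (a sketch
    # only -- not what the original app does).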
app.logger.debug("Sending message to %s" % number)
if phone_number and legislator_id:
# SAVE PHONE NUMBER SOMEWHERES
#TODO make sure the phone number format is correct; Twilio is picky about this.
subscription = SMSSubscription(phone_number = number, legislator_id = legislator_id)
subscription.save()
#Send a confirmation message to that number
call = client.sms.messages.create(to=phone_number,
from_= app.config['TWILIO_OUTGOING'],
body="Thank you for subscribing to LFLN alerts! If you don't want to receive these anymore, reply STOP")
        #TODO implement 'reply STOP' to stop
return render_template('subscribe_result.html', success=True)
else:
return render_template('subscribe_result.html', success=False)
def search(search_query, max_results=MAX_SEARCH_RESULTS):
pass
if __name__ == '__main__':
print("port: ", app.config['PORT'])
app.run(host='0.0.0.0', port = app.config['PORT'], debug=app.config['APP_DEBUG'])
| gpl-3.0 | 2,965,796,678,822,659,600 | 33.114719 | 166 | 0.613603 | false |
DXCanas/kolibri | kolibri/core/exams/migrations/0001_initial.py | 6 | 2967 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-16 22:34
from __future__ import unicode_literals
import django.db.models.deletion
import jsonfield.fields
import morango.utils.uuids
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('kolibriauth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Exam',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('title', models.CharField(max_length=200)),
('channel_id', models.CharField(max_length=32)),
('question_count', models.IntegerField()),
('question_sources', jsonfield.fields.JSONField(blank=True, default=[])),
('seed', models.IntegerField(default=1)),
('active', models.BooleanField(default=False)),
('archive', models.BooleanField(default=False)),
('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exams', to='kolibriauth.Collection')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exams', to='kolibriauth.FacilityUser')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExamAssignment',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('assigned_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assigned_exams', to='kolibriauth.FacilityUser')),
('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assigned_exams', to='kolibriauth.Collection')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('exam', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assignments', to='exams.Exam')),
],
options={
'abstract': False,
},
),
]
| mit | 4,497,115,348,556,410,400 | 50.155172 | 158 | 0.60937 | false |
shaftoe/home-assistant | tests/components/test_plant.py | 2 | 2499 | """Unit tests for platform/plant.py."""
import unittest
from tests.common import get_test_home_assistant
import homeassistant.components.plant as plant
class TestPlant(unittest.TestCase):
"""test the processing of data."""
GOOD_DATA = {
'moisture': 50,
'battery': 90,
'temperature': 23.4,
'conductivity': 777,
'brightness': 987,
}
GOOD_CONFIG = {
'sensors': {
'moisture': 'sensor.mqtt_plant_moisture',
'battery': 'sensor.mqtt_plant_battery',
'temperature': 'sensor.mqtt_plant_temperature',
'conductivity': 'sensor.mqtt_plant_conductivity',
'brightness': 'sensor.mqtt_plant_brightness',
},
'min_moisture': 20,
'max_moisture': 60,
'min_battery': 17,
'min_conductivity': 500,
'min_temperature': 15,
}
class _MockState(object):
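        # Minimal stand-in for a Home Assistant State object; Plant only reads
        # the .state attribute in state_changed().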
def __init__(self, state=None):
self.state = state
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_valid_data(self):
"""Test processing valid data."""
self.sensor = plant.Plant('my plant', self.GOOD_CONFIG)
self.sensor.hass = self.hass
for reading, value in self.GOOD_DATA.items():
self.sensor.state_changed(
self.GOOD_CONFIG['sensors'][reading], None,
TestPlant._MockState(value))
self.assertEqual(self.sensor.state, 'ok')
attrib = self.sensor.state_attributes
for reading, value in self.GOOD_DATA.items():
# battery level has a different name in
# the JSON format than in hass
self.assertEqual(attrib[reading], value)
def test_low_battery(self):
"""Test processing with low battery data and limit set."""
        self.sensor = plant.Plant('my plant', self.GOOD_CONFIG)
self.sensor.hass = self.hass
self.assertEqual(self.sensor.state_attributes['problem'], 'none')
self.sensor.state_changed('sensor.mqtt_plant_battery',
TestPlant._MockState(45),
TestPlant._MockState(10))
self.assertEqual(self.sensor.state, 'problem')
self.assertEqual(self.sensor.state_attributes['problem'],
'battery low')
| apache-2.0 | 8,137,944,999,598,630,000 | 33.232877 | 73 | 0.579832 | false |
devdattakulkarni/test-solum | solum/tests/api/handlers/test_extension.py | 1 | 2858 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.api.handlers import extension_handler as extension
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
@mock.patch('solum.objects.registry')
class TestExtensionHandler(base.BaseTestCase):
def setUp(self):
super(TestExtensionHandler, self).setUp()
self.ctx = utils.dummy_context()
def test_extension_get(self, mock_registry):
mock_registry.Extension.get_by_uuid.return_value = {}
handler = extension.ExtensionHandler(self.ctx)
res = handler.get('test_id')
self.assertIsNotNone(res)
mock_registry.Extension.get_by_uuid.assert_called_once_with(self.ctx,
'test_id')
def test_extension_get_all(self, mock_registry):
mock_registry.ExtensionList.get_all.return_value = {}
handler = extension.ExtensionHandler(self.ctx)
res = handler.get_all()
self.assertIsNotNone(res)
mock_registry.ExtensionList.get_all.assert_called_once_with(self.ctx)
def test_extension_update(self, mock_registry):
data = {'name': 'new_name'}
handler = extension.ExtensionHandler(self.ctx)
handler.update('test_id', data)
mock_registry.Extension.update_and_save.assert_called_once_with(
self.ctx, 'test_id', data)
def test_extension_create(self, mock_registry):
data = {'name': 'new_name',
'uuid': 'input_uuid'}
db_obj = fakes.FakeExtension()
mock_registry.Extension.return_value = db_obj
handler = extension.ExtensionHandler(self.ctx)
res = handler.create(data)
db_obj.update.assert_called_once_with(data)
db_obj.create.assert_called_once_with(self.ctx)
self.assertEqual(db_obj, res)
def test_extension_delete(self, mock_registry):
db_obj = fakes.FakeExtension()
mock_registry.Extension.get_by_uuid.return_value = db_obj
handler = extension.ExtensionHandler(self.ctx)
handler.delete('test_id')
db_obj.destroy.assert_called_once_with(self.ctx)
mock_registry.Extension.get_by_uuid.assert_called_once_with(self.ctx,
'test_id')
| apache-2.0 | 5,688,577,803,210,274,000 | 40.42029 | 78 | 0.654654 | false |
kevin-coder/tensorflow-fork | tensorflow/tools/pip_package/setup.py | 1 | 10540 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow is an open source machine learning framework for everyone.
TensorFlow is an open source software library for high performance numerical
computation. Its flexible architecture allows easy deployment of computation
across a variety of platforms (CPUs, GPUs, TPUs), and from desktops to clusters
of servers to mobile and edge devices.
Originally developed by researchers and engineers from the Google Brain team
within Google's AI organization, it comes with strong support for machine
learning and deep learning and the flexible numerical computation core is used
across many other scientific domains.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
DOCLINES = __doc__.split('\n')
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
# Also update tensorflow/tensorflow.bzl and
# tensorflow/core/public/version.h
_VERSION = '1.13.1'
REQUIRED_PACKAGES = [
'absl-py >= 0.7.0',
'astor >= 0.6.0',
'gast >= 0.2.0',
'google_pasta >= 0.1.2',
'keras_applications >= 1.0.6',
'keras_preprocessing >= 1.0.5',
'numpy >= 1.14.5, < 2.0',
'six >= 1.10.0',
'protobuf >= 3.6.1',
'tensorboard >= 1.13.0, < 1.14.0',
'tensorflow_estimator >= 1.13.0rc0, < 1.14.0rc0',
'termcolor >= 1.1.0',
]
if sys.byteorder == 'little':
# grpcio does not build correctly on big-endian machines due to lack of
# BoringSSL support.
# See https://github.com/tensorflow/tensorflow/issues/17882.
REQUIRED_PACKAGES.append('grpcio >= 1.8.6')
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
# mock comes with unittest.mock for python3, need to install for python2
REQUIRED_PACKAGES.append('mock >= 2.0.0')
# tf-nightly should depend on tb-nightly
if 'tf_nightly' in project_name:
for i, pkg in enumerate(REQUIRED_PACKAGES):
if 'tensorboard' in pkg:
REQUIRED_PACKAGES[i] = 'tb-nightly >= 1.14.0a0, < 1.15.0a0'
elif 'tensorflow_estimator' in pkg and '2.0' in project_name:
REQUIRED_PACKAGES[i] = 'tensorflow-estimator-2.0-preview'
elif 'tensorflow_estimator' in pkg:
REQUIRED_PACKAGES[i] = 'tf-estimator-nightly'
# weakref.finalize and enum were introduced in Python 3.4
if sys.version_info < (3, 4):
REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1')
REQUIRED_PACKAGES.append('enum34 >= 1.1.6')
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'freeze_graph = tensorflow.python.tools.freeze_graph:run_main',
'toco_from_protos = tensorflow.lite.toco.python.toco_from_protos:main',
'tflite_convert = tensorflow.lite.python.tflite_convert:main',
'toco = tensorflow.lite.python.tflite_convert:main',
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:run_main',
'tf_upgrade_v2 = tensorflow.tools.compatibility.tf_upgrade_v2_main:main',
]
# pylint: enable=line-too-long
# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib,
'tensorflow', 'include')
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
# Copy external code headers into tensorflow/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
external_header_locations = [
'tensorflow/include/external/eigen_archive/',
'tensorflow/include/external/com_google_absl/',
]
for location in external_header_locations:
if location in install_dir:
extra_dir = install_dir.replace(location, '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for dirpath, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(dirpath, filename)
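# Bundle any Bazel-generated shared-library directories (_solib_*) so the
# native libraries ship inside the wheel as package data.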
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
matches = []
for path in so_lib_paths:
matches.extend(
['../' + x for x in find_files('*', path) if '.py' not in x]
)
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (
list(find_files('*.h', 'tensorflow/core')) + list(
find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf_archive/src')) + list(
find_files('*', 'third_party/eigen3')) + list(
find_files('*.h', 'tensorflow/include/external/com_google_absl')) +
list(find_files('*.inc', 'tensorflow/include/external/com_google_absl')) +
list(find_files('*', 'tensorflow/include/external/eigen_archive')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/',
download_url='https://github.com/tensorflow/tensorflow/tags',
author='Google Inc.',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',
)
| apache-2.0 | 5,861,947,977,951,402,000 | 34.250836 | 80 | 0.669355 | false |
StackPointCloud/libcloud | libcloud/test/file_fixtures.py | 7 | 3514 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Helper class for loading large fixture data
from __future__ import with_statement
import os
import codecs
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import u
FIXTURES_ROOT = {
'common': 'common/fixtures',
'compute': 'compute/fixtures',
'storage': 'storage/fixtures',
'loadbalancer': 'loadbalancer/fixtures',
'dns': 'dns/fixtures',
'backup': 'backup/fixtures',
'openstack': 'compute/fixtures/openstack',
'container': 'container/fixtures'
}
class FileFixtures(object):
def __init__(self, fixtures_type, sub_dir=''):
script_dir = os.path.abspath(os.path.split(__file__)[0])
self.root = os.path.join(script_dir, FIXTURES_ROOT[fixtures_type],
sub_dir)
def load(self, file):
path = os.path.join(self.root, file)
if os.path.exists(path):
if PY3:
with open(path, 'r', encoding='utf-8') as fh:
content = fh.read()
return u(content)
else:
with codecs.open(path, 'r', 'utf-8') as fh:
content = fh.read()
return content
else:
raise IOError(path)
class ComputeFileFixtures(FileFixtures):
def __init__(self, sub_dir=''):
super(ComputeFileFixtures, self).__init__(fixtures_type='compute',
sub_dir=sub_dir)
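# Hedged usage sketch (not part of the library): a compute driver test would
# typically load a canned API response like this; the sub_dir and file name
# below are assumptions for illustration only.
#
#     fixtures = ComputeFileFixtures('openstack')
#     body = fixtures.load('servers_detail.json')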
class StorageFileFixtures(FileFixtures):
def __init__(self, sub_dir=''):
super(StorageFileFixtures, self).__init__(fixtures_type='storage',
sub_dir=sub_dir)
class LoadBalancerFileFixtures(FileFixtures):
def __init__(self, sub_dir=''):
super(LoadBalancerFileFixtures, self).__init__(
fixtures_type='loadbalancer',
sub_dir=sub_dir)
class DNSFileFixtures(FileFixtures):
def __init__(self, sub_dir=''):
super(DNSFileFixtures, self).__init__(fixtures_type='dns',
sub_dir=sub_dir)
class OpenStackFixtures(FileFixtures):
def __init__(self, sub_dir=''):
super(OpenStackFixtures, self).__init__(fixtures_type='openstack',
sub_dir=sub_dir)
class ContainerFileFixtures(FileFixtures):
def __init__(self, sub_dir=''):
super(ContainerFileFixtures, self).__init__(fixtures_type='container',
sub_dir=sub_dir)
class BackupFileFixtures(FileFixtures):
def __init__(self, sub_dir=''):
super(BackupFileFixtures, self).__init__(fixtures_type='backup',
sub_dir=sub_dir)
| apache-2.0 | 8,873,299,008,145,987,000 | 34.857143 | 78 | 0.600455 | false |
baylee-d/osf.io | api/requests/permissions.py | 11 | 4589 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import permissions as drf_permissions
from api.base.utils import get_user_auth
from osf.models.action import NodeRequestAction, PreprintRequestAction
from osf.models.mixins import NodeRequestableMixin, PreprintRequestableMixin
from osf.models.node import Node
from osf.models.preprint import Preprint
from osf.utils.workflows import DefaultTriggers
from osf.utils import permissions as osf_permissions
class NodeRequestPermission(drf_permissions.BasePermission):
def has_object_permission(self, request, view, obj):
auth = get_user_auth(request)
if auth.user is None:
return False
target = None
if isinstance(obj, NodeRequestAction):
target = obj.target
node = obj.target.target
trigger = request.data.get('trigger', None)
elif isinstance(obj, NodeRequestableMixin):
target = obj
node = obj.target
# Creating a Request is "submitting"
trigger = request.data.get('trigger', DefaultTriggers.SUBMIT.value if request.method not in drf_permissions.SAFE_METHODS else None)
elif isinstance(obj, Node):
node = obj
trigger = DefaultTriggers.SUBMIT.value if request.method not in drf_permissions.SAFE_METHODS else None
else:
raise ValueError('Not a request-related model: {}'.format(obj))
if not node.access_requests_enabled:
return False
is_requester = target is not None and target.creator == auth.user or trigger == DefaultTriggers.SUBMIT.value
is_node_admin = node.has_permission(auth.user, osf_permissions.ADMIN)
has_view_permission = is_requester or is_node_admin
if request.method in drf_permissions.SAFE_METHODS:
# Requesters and node admins can view actions
return has_view_permission
else:
if not has_view_permission:
return False
if trigger in [DefaultTriggers.ACCEPT.value, DefaultTriggers.REJECT.value]:
# Node admins can only approve or reject requests
return is_node_admin
if trigger in [DefaultTriggers.EDIT_COMMENT.value, DefaultTriggers.SUBMIT.value]:
# Requesters may not be contributors
# Requesters may edit their comment or submit their request
return is_requester and auth.user not in node.contributors
return False
class PreprintRequestPermission(drf_permissions.BasePermission):
def has_object_permission(self, request, view, obj):
auth = get_user_auth(request)
if auth.user is None:
return False
target = None
if isinstance(obj, PreprintRequestAction):
target = obj.target
preprint = obj.target.target
trigger = request.data.get('trigger', None)
elif isinstance(obj, PreprintRequestableMixin):
target = obj
preprint = obj.target
# Creating a Request is "submitting"
trigger = request.data.get('trigger', DefaultTriggers.SUBMIT.value if request.method not in drf_permissions.SAFE_METHODS else None)
elif isinstance(obj, Preprint):
preprint = obj
trigger = DefaultTriggers.SUBMIT.value if request.method not in drf_permissions.SAFE_METHODS else None
else:
raise ValueError('Not a request-related model: {}'.format(obj))
is_requester = target is not None and target.creator == auth.user or trigger == DefaultTriggers.SUBMIT.value
is_preprint_admin = preprint.has_permission(auth.user, osf_permissions.ADMIN)
is_moderator = auth.user.has_perm('withdraw_submissions', preprint.provider)
has_view_permission = is_requester or is_preprint_admin or is_moderator
if request.method in drf_permissions.SAFE_METHODS:
# Requesters, moderators, and preprint admins can view actions
return has_view_permission
else:
if not has_view_permission:
return False
if trigger in [DefaultTriggers.ACCEPT.value, DefaultTriggers.REJECT.value]:
# Only moderators can approve or reject requests
return is_moderator
if trigger in [DefaultTriggers.EDIT_COMMENT.value, DefaultTriggers.SUBMIT.value]:
# Requesters may edit their comment or submit their request
return is_requester
return False
| apache-2.0 | 1,315,313,926,301,481,200 | 44.435644 | 143 | 0.659403 | false |
cyli/volatility | volatility/plugins/linux/netfilter.py | 1 | 3055 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.lsmod as linux_lsmod
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class linux_netfilter(linux_common.AbstractLinuxCommand):
"""Lists Netfilter hooks"""
def calculate(self):
linux_common.set_plugin_members(self)
hook_names = ["PRE_ROUTING", "LOCAL_IN", "FORWARD", "LOCAL_OUT", "POST_ROUTING"]
proto_names = ["", "", "IPV4", "", "", "", "", "", "", "", "" , "", "", ""]
        # struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]
        # NFPROTO_NUMPROTO = 13 and NF_MAX_HOOKS = 8 in the kernels this plugin
        # targets, which is why the offset arithmetic below strides 8 list_head
        # entries per protocol row.
nf_hooks_addr = self.addr_space.profile.get_symbol("nf_hooks")
if nf_hooks_addr == None:
debug.error("Unable to analyze NetFilter. It is either disabled or compiled as a module.")
modules = linux_lsmod.linux_lsmod(self._config).get_modules()
list_head_size = self.addr_space.profile.get_obj_size("list_head")
for outer in range(13):
arr = nf_hooks_addr + (outer * (list_head_size * 8))
for inner in range(7):
list_head = obj.Object("list_head", offset = arr + (inner * list_head_size), vm = self.addr_space)
for hook_ops in list_head.list_of_type("nf_hook_ops", "list"):
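                    # a hook handler whose address does not map back to the
                    # kernel or a loaded module is flagged as hooked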
if self.is_known_address(hook_ops.hook.v(), modules):
hooked = "False"
else:
hooked = "True"
yield proto_names[outer], hook_names[inner], hook_ops.hook.v(), hooked
def unified_output(self, data):
return TreeGrid([("Proto", str),
("Hook", str),
("Handler", Address),
("IsHooked", str)],
self.generator(data))
def generator(self, data):
for outer, inner, hook_addr, hooked in data:
yield (0, [str(outer), str(inner), Address(hook_addr), str(hooked)])
| gpl-2.0 | 8,085,362,096,401,136,000 | 36.256098 | 114 | 0.613421 | false |
vicky2135/lucious | oscar/lib/python2.7/site-packages/phonenumbers/phonemetadata.py | 1 | 34018 | """PhoneMetadata object definitions"""
# Based on original Java code and protocol buffer:
# resources/phonemetadata.proto
# java/src/com/google/i18n/phonenumbers/Phonemetadata.java
# Copyright (C) 2010-2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .util import UnicodeMixin, ImmutableMixin, mutating_method
from .util import u, unicod, rpr, force_unicode
REGION_CODE_FOR_NON_GEO_ENTITY = u("001")
class NumberFormat(UnicodeMixin, ImmutableMixin):
"""Representation of way that a phone number can be formatted for output"""
@mutating_method
def __init__(self,
pattern=None,
format=None,
leading_digits_pattern=None,
national_prefix_formatting_rule=None,
national_prefix_optional_when_formatting=None,
domestic_carrier_code_formatting_rule=None):
# pattern is a regex that is used to match the national (significant)
# number. For example, the pattern "(20)(\d{4})(\d{4})" will match
# number "2070313000", which is the national (significant) number for
# Google London. Note the presence of the parentheses, which are
        # capturing groups that specify the grouping of numbers.
self.pattern = force_unicode(pattern) # Unicode string holding regexp
# format specifies how the national (significant) number matched by
# pattern should be formatted. Using the same example as above, format
# could contain "$1 $2 $3", meaning that the number should be
# formatted as "20 7031 3000". Each $x is replaced by the numbers
# captured by group x in the regex specified by pattern.
self.format = force_unicode(format) # None or Unicode string
# This field is a regex that is used to match a certain number of
# digits at the beginning of the national (significant) number. When
# the match is successful, the accompanying pattern and format should
# be used to format this number. For example, if
# leading_digits="[1-3]|44", then all the national numbers starting
# with 1, 2, 3 or 44 should be formatted using the accompanying
# pattern and format.
#
# The first leading_digits_pattern matches up to the first three digits
# of the national (significant) number; the next one matches the first
# four digits, then the first five and so on, until the
# leading_digits_pattern can uniquely identify one pattern and format
# to be used to format the number.
#
# In the case when only one formatting pattern exists, no
# leading_digits_pattern is needed.
self.leading_digits_pattern = [] # list of Unicode strings holding regexps
if leading_digits_pattern is not None:
self.leading_digits_pattern = [force_unicode(p) for p in leading_digits_pattern]
# This field specifies how the national prefix ($NP) together with the
# first group ($FG) in the national significant number should be
# formatted in the NATIONAL format when a national prefix exists for a
# certain country. For example, when this field contains "($NP$FG)", a
# number from Beijing, China (whose $NP = 0), which would by default
# be formatted without national prefix as 10 1234 5678 in NATIONAL
# format, will instead be formatted as (010) 1234 5678; to format it
# as (0)10 1234 5678, the field would contain "($NP)$FG". Note $FG
# should always be present in this field, but $NP can be omitted. For
# example, having "$FG" could indicate the number should be formatted
# in NATIONAL format without the national prefix. This is commonly
# used to override the rule specified for the territory in the XML
# file.
#
# When this field is missing, a number will be formatted without
# national prefix in NATIONAL format. This field does not affect how a
# number is formatted in other formats, such as INTERNATIONAL.
self.national_prefix_formatting_rule = force_unicode(national_prefix_formatting_rule) # None or Unicode string
# This field specifies whether the $NP can be omitted when formatting
# a number in national format, even though it usually wouldn't be. For
# example, a UK number would be formatted by our library as 020 XXXX
# XXXX. If we have commonly seen this number written by people without
# the leading 0, for example as (20) XXXX XXXX, this field would be
# set to true. This will be inherited from the value set for the
# territory in the XML file, unless a national_prefix_formatting_rule
# is defined specifically for this NumberFormat.
if national_prefix_optional_when_formatting is not None:
self.national_prefix_optional_when_formatting = bool(national_prefix_optional_when_formatting)
else:
self.national_prefix_optional_when_formatting = None
# This field specifies how any carrier code ($CC) together with the
# first group ($FG) in the national significant number should be
# formatted when format_with_carrier_code is called, if carrier codes
# are used for a certain country.
self.domestic_carrier_code_formatting_rule = force_unicode(domestic_carrier_code_formatting_rule) # None or Unicode string
def merge_from(self, other):
"""Merge information from another NumberFormat object into this one."""
if other.pattern is not None:
self.pattern = other.pattern
if other.format is not None:
self.format = other.format
self.leading_digits_pattern.extend(other.leading_digits_pattern)
if other.national_prefix_formatting_rule is not None:
self.national_prefix_formatting_rule = other.national_prefix_formatting_rule
if other.national_prefix_optional_when_formatting is not None:
self.national_prefix_optional_when_formatting = other.national_prefix_optional_when_formatting
if other.domestic_carrier_code_formatting_rule is not None:
self.domestic_carrier_code_formatting_rule = other.domestic_carrier_code_formatting_rule
def __eq__(self, other):
if not isinstance(other, NumberFormat):
return False
return (repr(self) == repr(other))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self)
def __unicode__(self):
# Generate a string that is valid Python input for the constructor.
# Note that we use rpr (variant of repr), which generates its own quotes.
result = unicod("NumberFormat(pattern=%s, format=%s") % (rpr(self.pattern), rpr(self.format))
if self.leading_digits_pattern:
result += (unicod(", leading_digits_pattern=[%s]") %
unicod(", ").join([rpr(ld) for ld in self.leading_digits_pattern]))
if self.national_prefix_formatting_rule is not None:
result += unicod(", national_prefix_formatting_rule=%s") % rpr(self.national_prefix_formatting_rule)
if self.national_prefix_optional_when_formatting is not None:
result += unicod(", national_prefix_optional_when_formatting=%s") % str(self.national_prefix_optional_when_formatting)
if self.domestic_carrier_code_formatting_rule is not None:
result += unicod(", domestic_carrier_code_formatting_rule=%s") % rpr(self.domestic_carrier_code_formatting_rule)
result += unicod(")")
return result
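# Hedged illustration (not part of the original library): the __init__
# comments above describe how `pattern` and `format` cooperate. The sample
# pattern, format and number below are assumptions based on those comments;
# note that the Python port uses backslash group references (\1) where the
# proto comments talk about $1-style groups.
def _example_number_format_usage():
    import re
    fmt = NumberFormat(pattern=u("(\\d{2})(\\d{4})(\\d{4})"),
                       format=u("\\1 \\2 \\3"))
    # Formatting the national number "2070313000" with this rule gives
    # "20 7031 3000".
    return re.sub(fmt.pattern, fmt.format, u("2070313000"))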
class PhoneNumberDesc(UnicodeMixin, ImmutableMixin):
"""Class representing the description of a set of phone numbers."""
@mutating_method
def __init__(self,
national_number_pattern=None,
possible_number_pattern=None,
example_number=None,
possible_length=None,
possible_length_local_only=None):
# The national_number_pattern is the pattern that a valid national
# significant number would match. This specifies information such as
# its total length and leading digits.
self.national_number_pattern = force_unicode(national_number_pattern) # None or Unicode string holding regexp
# The possible_number_pattern represents what a potentially valid
# phone number for this region may be written as. This is a superset
# of the national_number_pattern above and includes numbers that have
# the area code omitted. Typically the only restrictions here are in
# the number of digits. This could be used to highlight tokens in a
# text that may be a phone number, or to quickly prune numbers that
# could not possibly be a phone number for this locale.
self.possible_number_pattern = force_unicode(possible_number_pattern) # None or Unicode string holding regexp
# An example national significant number for the specific type. It
# should not contain any formatting information.
self.example_number = force_unicode(example_number) # None or Unicode string
# These represent the lengths a phone number from this region can be. They
# will be sorted from smallest to biggest. Note that these lengths are for
# the full number, without country calling code or national prefix. For
# example, for the Swiss number +41789270000, in local format 0789270000,
# this would be 9.
# This could be used to highlight tokens in a text that may be a phone
# number, or to quickly prune numbers that could not possibly be a phone
# number for this locale.
if possible_length is None:
possible_length = ()
self.possible_length = possible_length # sequence of int
# These represent the lengths that only local phone numbers (without an area
# code) from this region can be. They will be sorted from smallest to
# biggest. For example, since the American number 456-1234 may be locally
# diallable, although not diallable from outside the area, 7 could be a
# possible value.
# This could be used to highlight tokens in a text that may be a phone
# number.
# To our knowledge, area codes are usually only relevant for some fixed-line
# and mobile numbers, so this field should only be set for those types of
# numbers (and the general description) - however there are exceptions for
# NANPA countries.
if possible_length_local_only is None:
possible_length_local_only = ()
self.possible_length_local_only = possible_length_local_only # sequence of int
def merge_from(self, other):
"""Merge information from another PhoneNumberDesc object into this one."""
if other.national_number_pattern is not None:
self.national_number_pattern = other.national_number_pattern
if other.possible_number_pattern is not None:
self.possible_number_pattern = other.possible_number_pattern
if other.example_number is not None:
self.example_number = other.example_number
def __eq__(self, other):
if not isinstance(other, PhoneNumberDesc):
return False
return (repr(self) == repr(other))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self)
def __unicode__(self):
# Generate a string that is valid Python input for constructor
result = unicod("PhoneNumberDesc(")
sep = unicod("")
if self.national_number_pattern is not None:
result += unicod("%snational_number_pattern=%s") % (sep, rpr(self.national_number_pattern))
sep = unicod(", ")
if self.possible_number_pattern is not None:
result += unicod("%spossible_number_pattern=%s") % (sep, rpr(self.possible_number_pattern))
sep = unicod(", ")
if self.example_number is not None:
result += unicod("%sexample_number=%s") % (sep, rpr(self.example_number))
sep = unicod(", ")
if self.possible_length:
result += unicod("%spossible_length=%s") % (sep, tuple(self.possible_length))
sep = unicod(", ")
if self.possible_length_local_only:
result += unicod("%spossible_length_local_only=%s") % (sep, tuple(self.possible_length_local_only))
sep = unicod(", ")
result += unicod(")")
return result
class PhoneMetadata(UnicodeMixin, ImmutableMixin):
"""Class representing metadata for international telephone numbers for a region.
This class is hand created based on phonemetadata.proto. Please refer to that file
for detailed descriptions of the meaning of each field.
WARNING: This API isn't stable. It is considered libphonenumber-internal
and can change at any time. We only declare it as public for easy
inclusion in our build tools not in this package. Clients should not
refer to this file, we do not commit to support backwards-compatibility or
to warn about breaking changes.
"""
# If a region code is a key in this dict, metadata for that region is available.
# The corresponding value of the map is either:
# - a function which loads the region's metadata
# - None, to indicate that the metadata is already loaded
_region_available = {} # ISO 3166-1 alpha 2 => function or None
# Likewise for short number metadata.
_short_region_available = {} # ISO 3166-1 alpha 2 => function or None
# Likewise for non-geo country calling codes.
_country_code_available = {} # country calling code (as int) => function or None
_region_metadata = {} # ISO 3166-1 alpha 2 => PhoneMetadata
_short_region_metadata = {} # ISO 3166-1 alpha 2 => PhoneMetadata
# A mapping from a country calling code for a non-geographical entity to
# the PhoneMetadata for that country calling code. Examples of the country
# calling codes include 800 (International Toll Free Service) and 808
# (International Shared Cost Service).
_country_code_metadata = {} # country calling code (as int) => PhoneMetadata
@classmethod
def metadata_for_region(kls, region_code, default=None):
loader = kls._region_available.get(region_code, None)
if loader is not None:
# Region metadata is available but has not yet been loaded. Do so now.
loader(region_code)
kls._region_available[region_code] = None
return kls._region_metadata.get(region_code, default)
@classmethod
def short_metadata_for_region(kls, region_code, default=None):
loader = kls._short_region_available.get(region_code, None)
if loader is not None:
# Region short number metadata is available but has not yet been loaded. Do so now.
loader(region_code)
kls._short_region_available[region_code] = None
return kls._short_region_metadata.get(region_code, default)
@classmethod
def metadata_for_nongeo_region(kls, country_code, default=None):
loader = kls._country_code_available.get(country_code, None)
if loader is not None:
# Region metadata is available but has not yet been loaded. Do so now.
loader(country_code)
kls._country_code_available[country_code] = None
return kls._country_code_metadata.get(country_code, default)
@classmethod
def metadata_for_region_or_calling_code(kls, country_calling_code, region_code):
if region_code == REGION_CODE_FOR_NON_GEO_ENTITY:
return kls.metadata_for_nongeo_region(country_calling_code, None)
else:
return kls.metadata_for_region(region_code, None)
@classmethod
def register_region_loader(kls, region_code, loader):
kls._region_available[region_code] = loader
@classmethod
def register_short_region_loader(kls, region_code, loader):
kls._short_region_available[region_code] = loader
@classmethod
def register_nongeo_region_loader(kls, country_code, loader):
kls._country_code_available[country_code] = loader
@classmethod
def load_all(kls):
"""Force immediate load of all metadata"""
# Force expansion of contents to lists because we invalidate the iterator
for region_code, loader in list(kls._region_available.items()):
if loader is not None: # pragma no cover
loader(region_code)
kls._region_available[region_code] = None
for country_code, loader in list(kls._country_code_available.items()):
if loader is not None:
loader(country_code)
                kls._country_code_available[country_code] = None
@mutating_method
def __init__(self,
id,
general_desc=None,
fixed_line=None,
mobile=None,
toll_free=None,
premium_rate=None,
shared_cost=None,
personal_number=None,
voip=None,
pager=None,
uan=None,
emergency=None,
voicemail=None,
short_code=None,
standard_rate=None,
carrier_specific=None,
no_international_dialling=None,
country_code=None,
international_prefix=None,
preferred_international_prefix=None,
national_prefix=None,
preferred_extn_prefix=None,
national_prefix_for_parsing=None,
national_prefix_transform_rule=None,
number_format=None,
intl_number_format=None,
main_country_for_code=False,
leading_digits=None,
leading_zero_possible=False,
mobile_number_portable_region=False,
short_data=False,
register=True):
# The general_desc contains information which is a superset of
# descriptions for all types of phone numbers. If any element is
# missing in the description of a specific type of number, the element
# will inherit from its counterpart in the general_desc. Every locale
# is assumed to have fixed line and mobile numbers - if these types
# are missing altogether, they will inherit all fields from the
# general_desc. For all other types, if the whole type is missing and
# it is relevant for the metadata, it will be given a
# national_number_pattern of "NA" and a possible_number_pattern of
# "NA".
self.general_desc = general_desc # None or PhoneNumberDesc
self.fixed_line = fixed_line # None or PhoneNumberDesc
self.mobile = mobile # None or PhoneNumberDesc
self.toll_free = toll_free # None or PhoneNumberDesc
self.premium_rate = premium_rate # None or PhoneNumberDesc
self.shared_cost = shared_cost # None or PhoneNumberDesc
self.personal_number = personal_number # None or PhoneNumberDesc
self.voip = voip # None or PhoneNumberDesc
self.pager = pager # None or PhoneNumberDesc
self.uan = uan # None or PhoneNumberDesc
self.emergency = emergency # None or PhoneNumberDesc
self.voicemail = voicemail # None or PhoneNumberDesc
self.short_code = short_code # None or PhoneNumberDesc
self.standard_rate = standard_rate # None or PhoneNumberDesc
self.carrier_specific = carrier_specific # None or PhoneNumberDesc
# The rules here distinguish the numbers that are only able to be
# dialled nationally.
self.no_international_dialling = no_international_dialling # None or PhoneNumberDesc
# The ISO 3166-1 alpha-2 representation of a country/region, with the
# exception of "country calling codes" used for non-geographical
# entities, such as Universal International Toll Free Number
# (+800). These are all given the ID "001", since this is the numeric
# region code for the world according to UN M.49:
# http://en.wikipedia.org/wiki/UN_M.49
self.id = force_unicode(id) # None or Unicode string
# The country calling code that one would dial from overseas when
# trying to dial a phone number in this country. For example, this
# would be "64" for New Zealand.
self.country_code = country_code # None or int
# The international_prefix of country A is the number that needs to be
# dialled from country A to another country (country B). This is
# followed by the country code for country B. Note that some countries
# may have more than one international prefix, and for those cases, a
# regular expression matching the international prefixes will be
# stored in this field.
self.international_prefix = force_unicode(international_prefix) # None or Unicode string
# If more than one international prefix is present, a preferred prefix
# can be specified here for out-of-country formatting purposes. If
# this field is not present, and multiple international prefixes are
# present, then "+" will be used instead.
self.preferred_international_prefix = force_unicode(preferred_international_prefix) # None or Unicode string
# The national prefix of country A is the number that needs to be
# dialled before the national significant number when dialling
# internally. This would not be dialled when dialling
# internationally. For example, in New Zealand, the number that would
# be locally dialled as 09 345 3456 would be dialled from overseas as
# +64 9 345 3456. In this case, 0 is the national prefix.
self.national_prefix = force_unicode(national_prefix) # None or Unicode string
# The preferred prefix when specifying an extension in this
# country. This is used for formatting only, and if this is not
# specified, a suitable default should be used instead. For example,
# if you wanted extensions to be formatted in the following way: 1
# (365) 345 445 ext. 2345 " ext. " should be the preferred extension
# prefix.
self.preferred_extn_prefix = force_unicode(preferred_extn_prefix) # None or Unicode string
# This field is used for cases where the national prefix of a country
# contains a carrier selection code, and is written in the form of a
# regular expression. For example, to dial the number 2222-2222 in
# Fortaleza, Brazil (area code 85) using the long distance carrier Oi
# (selection code 31), one would dial 0 31 85 2222 2222. Assuming the
# only other possible carrier selection code is 32, the field will
# contain "03[12]".
#
# When it is missing, this field inherits the value of national_prefix,
# if that is present.
self.national_prefix_for_parsing = force_unicode(national_prefix_for_parsing) # None or Unicode string holding regexp
# This field is only populated and used under very rare situations.
# For example, mobile numbers in Argentina are written in two
# completely different ways when dialed in-country and out-of-country
# (e.g. 0343 15 555 1212 is exactly the same number as +54 9 343 555
# 1212). This field is used together with national_prefix_for_parsing
# to transform the number into a particular representation for storing
# in the PhoneNumber class in those rare cases.
self.national_prefix_transform_rule = force_unicode(national_prefix_transform_rule) # None or Unicode string
# Specifies whether the mobile and fixed-line patterns are the same or
# not. This is used to speed up determining phone number type in
# countries where these two types of phone numbers can never be
# distinguished.
self.same_mobile_and_fixed_line_pattern = (self.mobile == self.fixed_line)
# Note that the number format here is used for formatting only, not
# parsing. Hence all the varied ways a user *may* write a number need
# not be recorded - just the ideal way we would like to format it for
# them. When this element is absent, the national significant number
# will be formatted as a whole without any formatting applied.
self.number_format = [] # List of NumberFormat objects
if number_format is not None:
self.number_format = number_format
# This field is populated only when the national significant number is
# formatted differently when it forms part of the INTERNATIONAL format
# and NATIONAL format. A case in point is mobile numbers in Argentina:
# The number, which would be written in INTERNATIONAL format as
# +54 9 343 555 1212, will be written as 0343 15 555 1212 for NATIONAL
# format. In this case, the prefix 9 is inserted when dialling from
# overseas, but otherwise the prefix 0 and the carrier selection code
# 15 (inserted after the area code of 343) is used.
# Note: this field is populated by setting a value for <intlFormat>
# inside the <numberFormat> tag in the XML file. If <intlFormat> is
# not set then it defaults to the same value as the <format> tag.
#
# Examples:
# To set the <intlFormat> to a different value than the <format>:
# <numberFormat pattern=....>
# <format>$1 $2 $3</format>
# <intlFormat>$1-$2-$3</intlFormat>
# </numberFormat>
#
# To have a format only used for national formatting, set <intlFormat> to
# "NA":
# <numberFormat pattern=....>
# <format>$1 $2 $3</format>
# <intlFormat>NA</intlFormat>
# </numberFormat>
self.intl_number_format = [] # List of NumberFormat objects
if intl_number_format is not None:
self.intl_number_format = intl_number_format
# This field is set when this country is considered to be the main
# country for a calling code. It may not be set by more than one
# country with the same calling code, and it should not be set by
# countries with a unique calling code. This can be used to indicate
# that "GB" is the main country for the calling code "44" for example,
# rather than Jersey or the Isle of Man.
self.main_country_for_code = bool(main_country_for_code)
# This field is populated only for countries or regions that share a
# country calling code. If a number matches this pattern, it could
# belong to this region. This is not intended as a replacement for
# is_valid_for_region, and does not mean the number must come from this
# region (for example, 800 numbers are valid for all NANPA countries.)
# This field should be a regular expression of the expected prefix
# match.
self.leading_digits = force_unicode(leading_digits) # None or Unicode string holding regexp
# The leading zero in a phone number is meaningful in some countries
# (e.g. Italy). This means they cannot be dropped from the national
# number when converting into international format. If leading zeros
# are possible for valid international numbers for this region/country
# then set this to true. This only needs to be set for the region
# that is the main_country_for_code and all regions associated with
# that calling code will use the same setting.
self.leading_zero_possible = bool(leading_zero_possible)
# This field is set when this country has implemented mobile number
# portability. This means that transferring mobile numbers between
# carriers is allowed. A consequence of this is that phone prefix to
# carrier mapping is less reliable.
self.mobile_number_portable_region = mobile_number_portable_region # bool
# Record whether this metadata is for short numbers or normal numbers.
self.short_data = short_data # bool
if register:
# Register this instance with the relevant class-wide map
if self.id == REGION_CODE_FOR_NON_GEO_ENTITY:
kls_map = PhoneMetadata._country_code_metadata
id = self.country_code
elif self.short_data:
kls_map = PhoneMetadata._short_region_metadata
id = self.id
else:
kls_map = PhoneMetadata._region_metadata
id = self.id
if id in kls_map:
other = kls_map[id]
if self != other:
raise Exception("Duplicate PhoneMetadata for %s (from %s:%s)" % (id, self.id, self.country_code))
else:
kls_map[id] = self
def __eq__(self, other):
if not isinstance(other, PhoneMetadata):
return False
return (repr(self) == repr(other))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self)
def __unicode__(self):
# Generate a string that is valid Python input for the constructor
result = (unicod("PhoneMetadata(id='%s', country_code=%r, international_prefix=%s") %
(self.id, self.country_code, rpr(self.international_prefix)))
result += unicod(",\n general_desc=%s") % self.general_desc
if self.fixed_line is not None:
result += unicod(",\n fixed_line=%s") % self.fixed_line
if self.mobile is not None:
result += unicod(",\n mobile=%s") % self.mobile
if self.toll_free is not None:
result += unicod(",\n toll_free=%s") % self.toll_free
if self.premium_rate is not None:
result += unicod(",\n premium_rate=%s") % self.premium_rate
if self.shared_cost is not None:
result += unicod(",\n shared_cost=%s") % self.shared_cost
if self.personal_number is not None:
result += unicod(",\n personal_number=%s") % self.personal_number
if self.voip is not None:
result += unicod(",\n voip=%s") % self.voip
if self.pager is not None:
result += unicod(",\n pager=%s") % self.pager
if self.uan is not None:
result += unicod(",\n uan=%s") % self.uan
if self.emergency is not None:
result += unicod(",\n emergency=%s") % self.emergency
if self.voicemail is not None:
result += unicod(",\n voicemail=%s") % self.voicemail
if self.short_code is not None:
result += unicod(",\n short_code=%s") % self.short_code
if self.standard_rate is not None:
result += unicod(",\n standard_rate=%s") % self.standard_rate
if self.carrier_specific is not None:
result += unicod(",\n carrier_specific=%s") % self.carrier_specific
if self.no_international_dialling is not None:
result += unicod(",\n no_international_dialling=%s") % self.no_international_dialling
if self.preferred_international_prefix is not None:
result += unicod(",\n preferred_international_prefix=%s") % rpr(self.preferred_international_prefix)
if self.national_prefix is not None:
result += unicod(",\n national_prefix=%s") % rpr(self.national_prefix)
if self.preferred_extn_prefix is not None:
result += unicod(",\n preferred_extn_prefix=%s") % rpr(self.preferred_extn_prefix)
if self.national_prefix_for_parsing is not None:
result += unicod(",\n national_prefix_for_parsing=%s") % rpr(self.national_prefix_for_parsing)
if self.national_prefix_transform_rule is not None:
# Note that we use rpr() on self.national_prefix_transform_rule, which generates its own quotes
result += unicod(",\n national_prefix_transform_rule=%s") % rpr(self.national_prefix_transform_rule)
if self.number_format:
result += unicod(",\n number_format=[%s]") % unicod(',\n ').join(map(u, self.number_format))
if self.intl_number_format:
result += unicod(",\n intl_number_format=[%s]") % unicod(',\n ').join(map(u, self.intl_number_format))
if self.main_country_for_code:
result += unicod(",\n main_country_for_code=True")
if self.leading_digits is not None:
result += unicod(",\n leading_digits='%s'") % self.leading_digits
if self.leading_zero_possible:
result += unicod(",\n leading_zero_possible=True")
if self.mobile_number_portable_region:
result += unicod(",\n mobile_number_portable_region=True")
if self.short_data:
result += unicod(",\n short_data=True")
result += unicod(")")
return result
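# --- Illustrative sketch, not part of the original module ---
# The class-level registries above are filled lazily: generated per-region
# modules register a loader callable, and metadata_for_region() invokes it on
# first access.  The region code "XX" and every value below are invented.
if __name__ == '__main__':  # pragma: no cover
    def _load_xx(region_code):
        PhoneMetadata(id='XX', country_code=999, international_prefix='00',
                      general_desc=PhoneNumberDesc(national_number_pattern='\\d{7,10}',
                                                   possible_length=(7, 8, 9, 10)),
                      register=True)
    PhoneMetadata.register_region_loader('XX', _load_xx)
    print(PhoneMetadata.metadata_for_region('XX').country_code)  # 999, loaded on demand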
| bsd-3-clause | -115,852,506,862,116,930 | 52.656151 | 131 | 0.646834 | false |
marinkaz/orange3 | Orange/canvas/help/manager.py | 4 | 11423 | """
"""
import sys
import os
import string
import itertools
import logging
import email
from distutils.version import StrictVersion
from operator import itemgetter
import pkg_resources
from . import provider
from PyQt4.QtCore import QObject, QUrl, QDir
log = logging.getLogger(__name__)
class HelpManager(QObject):
def __init__(self, parent=None):
QObject.__init__(self, parent)
self._registry = None
self._initialized = False
self._providers = {}
def set_registry(self, registry):
"""
Set the widget registry for which the manager should
provide help.
"""
if self._registry is not registry:
self._registry = registry
self._initialized = False
self.initialize()
def registry(self):
"""
        Return the registry previously set with set_registry.
"""
return self._registry
def initialize(self):
if self._initialized:
return
reg = self._registry
all_projects = set(desc.project_name for desc in reg.widgets())
providers = []
for project in set(all_projects) - set(self._providers.keys()):
provider = None
try:
dist = pkg_resources.get_distribution(project)
provider = get_help_provider_for_distribution(dist)
except Exception:
log.exception("Error while initializing help "
"provider for %r", desc.project_name)
if provider:
providers.append((project, provider))
provider.setParent(self)
self._providers.update(dict(providers))
self._initialized = True
def get_help(self, url):
"""
"""
self.initialize()
if url.scheme() == "help" and url.authority() == "search":
return self.search(qurl_query_items(url))
else:
return url
def description_by_id(self, desc_id):
reg = self._registry
return get_by_id(reg, desc_id)
def search(self, query):
self.initialize()
if isinstance(query, QUrl):
query = qurl_query_items(query)
query = dict(query)
desc_id = query["id"]
desc = self.description_by_id(desc_id)
provider = None
if desc.project_name:
provider = self._providers.get(desc.project_name)
# TODO: Ensure initialization of the provider
if provider:
return provider.search(desc)
else:
raise KeyError(desc_id)
def get_by_id(registry, descriptor_id):
for desc in registry.widgets():
if desc.id == descriptor_id:
return desc
raise KeyError(descriptor_id)
def qurl_query_items(url):
items = []
for key, value in url.queryItems():
items.append((str(key), str(value)))
return items
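# --- Illustrative sketch, not part of the original module ---
# Help requests reach the manager as QUrls with the custom "help" scheme;
# get_help()/search() above resolve help://search?id=<description-id> through
# the provider registered for the widget's project.  The id below is invented.
if __name__ == '__main__':  # pragma: no cover
    example_url = QUrl("help://search")
    example_url.addQueryItem("id", "orange.widgets.data.file")
    print(qurl_query_items(example_url))  # -> [('id', 'orange.widgets.data.file')]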
def get_help_provider_for_description(desc):
if desc.project_name:
dist = pkg_resources.get_distribution(desc.project_name)
return get_help_provider_for_distribution(dist)
def is_develop_egg(dist):
"""
Is the distribution installed in development mode (setup.py develop)
"""
meta_provider = dist._provider
egg_info_dir = os.path.dirname(meta_provider.egg_info)
egg_name = pkg_resources.to_filename(dist.project_name)
return meta_provider.egg_info.endswith(egg_name + ".egg-info") \
and os.path.exists(os.path.join(egg_info_dir, "setup.py"))
def left_trim_lines(lines):
"""
Remove all unnecessary leading space from lines.
"""
lines_striped = list(zip(lines[1:], list(map(str.lstrip, lines[1:]))))
lines_striped = list(filter(itemgetter(1), lines_striped))
indent = min([len(line) - len(striped) \
for line, striped in lines_striped] + [sys.maxsize])
if indent < sys.maxsize:
return [line[indent:] for line in lines]
else:
return list(lines)
def trim_trailing_lines(lines):
"""
Trim trailing blank lines.
"""
lines = list(lines)
while lines and not lines[-1]:
lines.pop(-1)
return lines
def trim_leading_lines(lines):
"""
Trim leading blank lines.
"""
lines = list(lines)
while lines and not lines[0]:
lines.pop(0)
return lines
def trim(string):
"""
    Trim a string in a PEP-257 compatible way
"""
lines = string.expandtabs().splitlines()
lines = list(map(str.lstrip, lines[:1])) + left_trim_lines(lines[1:])
return "\n".join(trim_leading_lines(trim_trailing_lines(lines)))
# Fields allowing multiple use (from PEP-0345)
MULTIPLE_KEYS = ["Platform", "Supported-Platform", "Classifier",
"Requires-Dist", "Provides-Dist", "Obsoletes-Dist",
"Project-URL"]
def parse_meta(contents):
message = email.message_from_string(contents)
meta = {}
for key in set(message.keys()):
if key in MULTIPLE_KEYS:
meta[key] = message.get_all(key)
else:
meta[key] = message.get(key)
version = StrictVersion(meta["Metadata-Version"])
if version >= StrictVersion("1.3") and "Description" not in meta:
desc = message.get_payload()
if desc:
meta["Description"] = desc
return meta
def get_meta_entry(dist, name):
"""
Get the contents of the named entry from the distributions PKG-INFO file
"""
meta = get_dist_meta(dist)
return meta.get(name)
def get_dist_url(dist):
"""
Return the 'url' of the distribution (as passed to setup function)
"""
return get_meta_entry(dist, "Home-page")
def get_dist_meta(dist):
if dist.has_metadata("PKG-INFO"):
# egg-info
contents = dist.get_metadata("PKG-INFO")
elif dist.has_metadata("METADATA"):
# dist-info
contents = dist.get_metadata("METADATA")
else:
contents = None
if contents is not None:
return parse_meta(contents)
else:
return {}
def _replacements_for_dist(dist):
replacements = {"PROJECT_NAME": dist.project_name,
"PROJECT_NAME_LOWER": dist.project_name.lower(),
"PROJECT_VERSION": dist.version}
try:
replacements["URL"] = get_dist_url(dist)
except KeyError:
pass
if is_develop_egg(dist):
replacements["DEVELOP_ROOT"] = dist.location
return replacements
def qurl_from_path(urlpath):
if QDir(urlpath).isAbsolute():
# deal with absolute paths including windows drive letters
return QUrl.fromLocalFile(urlpath)
return QUrl(urlpath, QUrl.TolerantMode)
def create_intersphinx_provider(entry_point):
locations = entry_point.load()
replacements = _replacements_for_dist(entry_point.dist)
formatter = string.Formatter()
for target, inventory in locations:
# Extract all format fields
format_iter = formatter.parse(target)
if inventory:
format_iter = itertools.chain(format_iter,
formatter.parse(inventory))
# Names used in both target and inventory
fields = {name for _, name, _, _ in format_iter if name}
if not set(fields) <= set(replacements.keys()):
log.warning("Invalid replacement fields %s",
set(fields) - set(replacements.keys()))
continue
target = formatter.format(target, **replacements)
if inventory:
inventory = formatter.format(inventory, **replacements)
targeturl = qurl_from_path(target)
if not targeturl.isValid():
continue
if targeturl.isLocalFile():
if os.path.exists(os.path.join(target, "objects.inv")):
inventory = QUrl.fromLocalFile(
os.path.join(target, "objects.inv"))
else:
log.info("Local doc root '%s' does not exist.", target)
continue
else:
if not inventory:
# Default inventory location
inventory = targeturl.resolved(QUrl("objects.inv"))
if inventory is not None:
return provider.IntersphinxHelpProvider(
inventory=inventory, target=target)
return None
def create_html_provider(entry_point):
locations = entry_point.load()
replacements = _replacements_for_dist(entry_point.dist)
formatter = string.Formatter()
for target in locations:
# Extract all format fields
format_iter = formatter.parse(target)
fields = {name for _, name, _, _ in format_iter if name}
if not set(fields) <= set(replacements.keys()):
log.warning("Invalid replacement fields %s",
set(fields) - set(replacements.keys()))
continue
target = formatter.format(target, **replacements)
targeturl = qurl_from_path(target)
if not targeturl.isValid():
continue
if targeturl.isLocalFile():
if not os.path.exists(target):
log.info("Local doc root '%s' does not exist.", target)
continue
if target:
return provider.SimpleHelpProvider(
baseurl=QUrl.fromLocalFile(target))
return None
def create_html_inventory_provider(entry_point):
locations = entry_point.load()
replacements = _replacements_for_dist(entry_point.dist)
formatter = string.Formatter()
for target, xpathquery in locations:
if isinstance(target, (tuple, list)):
pass
# Extract all format fields
format_iter = formatter.parse(target)
fields = {name for _, name, _, _ in format_iter if name}
if not set(fields) <= set(replacements.keys()):
log.warning("Invalid replacement fields %s",
set(fields) - set(replacements.keys()))
continue
target = formatter.format(target, **replacements)
targeturl = qurl_from_path(target)
if not targeturl.isValid():
continue
if targeturl.isLocalFile():
if not os.path.exists(target):
log.info("Local doc root '%s' does not exist", target)
continue
inventory = QUrl.fromLocalFile(target)
else:
inventory = QUrl(target)
return provider.HtmlIndexProvider(
inventory=inventory, xpathquery=xpathquery)
return None
_providers = {
"intersphinx": create_intersphinx_provider,
"html-simple": create_html_provider,
"html-index": create_html_inventory_provider,
}
def get_help_provider_for_distribution(dist):
entry_points = dist.get_entry_map().get("orange.canvas.help", {})
provider = None
for name, entry_point in list(entry_points.items()):
create = _providers.get(name, None)
if create:
try:
provider = create(entry_point)
except pkg_resources.DistributionNotFound as err:
log.warning("Unsatisfied dependencies (%r)", err)
continue
except Exception:
log.exception("Exception")
if provider:
log.info("Created %s provider for %s",
type(provider), dist)
break
return provider
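# --- Illustrative sketch, not part of the original module ---
# An add-on opts into this machinery by advertising an entry point in the
# "orange.canvas.help" group; the entry point name picks one of the factories
# in _providers above.  The project and module names below are invented.
if __name__ == '__main__':  # pragma: no cover
    example_entry_points = {
        "orange.canvas.help": [
            # must resolve to a list of (target, inventory) pairs, where the
            # target may use replacement fields such as {DEVELOP_ROOT} or {URL}
            "intersphinx = my_addon.config:intersphinx_locations",
        ]
    }
    print(example_entry_points)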
| bsd-2-clause | 6,094,168,143,269,389,000 | 27.204938 | 76 | 0.592401 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/encodings/__init__.py | 4 | 2512 | """ Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to standard lower-case
encoding names with hyphens mapped to underscores, e.g. 'utf-8' is
implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> (encoder, decoder, stream_reader, stream_writer)
The getregentry() API must return callable objects which adhere to
the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be standard encoding
names as defined above (lower-case, hyphens converted to
underscores).
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs,aliases
_cache = {}
_unknown = '--unknown--'
def search_function(encoding):
# Cache lookup
entry = _cache.get(encoding,_unknown)
if entry is not _unknown:
return entry
# Import the module
modname = encoding.replace('-', '_')
modname = aliases.aliases.get(modname,modname)
try:
mod = __import__(modname,globals(),locals(),'*')
except ImportError,why:
# cache misses
_cache[encoding] = None
return None
# Now ask the module for the registry entry
try:
entry = tuple(mod.getregentry())
except AttributeError:
entry = ()
if len(entry) != 4:
raise SystemError,\
'module "%s.%s" failed to register' % \
(__name__,modname)
for obj in entry:
if not callable(obj):
raise SystemError,\
'incompatible codecs in module "%s.%s"' % \
(__name__,modname)
# Cache the codec registry entry
_cache[encoding] = entry
# Register its aliases (without overwriting previously registered
# aliases)
try:
codecaliases = mod.getaliases()
except AttributeError:
pass
else:
for alias in codecaliases:
if not aliases.aliases.has_key(alias):
aliases.aliases[alias] = modname
# Return the registry entry
return entry
# Register the search_function in the Python codec registry
codecs.register(search_function)
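# --- Illustrative example, not part of the original module ---
# Once registered, codecs.lookup() consults search_function() above: it
# imports e.g. the utf_8 module for the name "utf-8" and caches the
# (encoder, decoder, stream_reader, stream_writer) entry it returns.
if __name__ == '__main__':
    encoder, decoder, stream_reader, stream_writer = codecs.lookup('utf-8')
    data, consumed = encoder(u'example text')
    print repr(data), consumed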
| mit | -7,532,348,157,153,434,000 | 28.209302 | 73 | 0.647293 | false |
johnttaylor/Outcast | bin/scm/none/sync.py | 1 | 1120 | """
Script that sync's the current workspace with the SCM Repository
===============================================================================
usage: evie [common-opts] sync [options]
evie [common-opts] sync [options] <label>
Arguments:
<label> SCM Label to use as the source of the Sync. The default
operation is to sync to the latest/newest files
Options:
-h, --help Display help for this command
Notes:
    o Returns zero if all files were successfully synch'd; else non-zero
      is returned.
"""
import os
import utils
from docopt.docopt import docopt
#---------------------------------------------------------------------------------------------------------
def display_summary():
print("{:<13}{}".format( 'sync', "Sync's the workspace with the SCM Repository" ))
#------------------------------------------------------------------------------
def run( common_args, cmd_argv ):
args = docopt(__doc__, argv=cmd_argv)
# Return 'error' since this is just a stub
exit(1)
| bsd-3-clause | 1,294,512,981,065,959,700 | 28.473684 | 106 | 0.464286 | false |
jseabold/scipy | benchmarks/benchmarks/interpolate.py | 37 | 6314 | from __future__ import division, absolute_import, print_function
import numpy as np
from .common import run_monitored, set_mem_rlimit, Benchmark
try:
from scipy.stats import spearmanr
except ImportError:
pass
try:
import scipy.interpolate as interpolate
except ImportError:
pass
class Leaks(Benchmark):
unit = "relative increase with repeats"
def track_leaks(self):
set_mem_rlimit()
# Setup temp file, make it fit in memory
repeats = [2, 5, 10, 50, 200]
peak_mems = []
for repeat in repeats:
code = """
import numpy as np
from scipy.interpolate import griddata
def func(x, y):
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
for t in range(%(repeat)d):
for method in ['nearest', 'linear', 'cubic']:
griddata(points, values, (grid_x, grid_y), method=method)
""" % dict(repeat=repeat)
_, peak_mem = run_monitored(code)
peak_mems.append(peak_mem)
corr, p = spearmanr(repeats, peak_mems)
if p < 0.05:
print("*"*79)
print("PROBABLE MEMORY LEAK")
print("*"*79)
else:
print("PROBABLY NO MEMORY LEAK")
return max(peak_mems) / min(peak_mems)
class BenchPPoly(Benchmark):
def setup(self):
np.random.seed(1234)
m, k = 55, 3
x = np.sort(np.random.random(m+1))
c = np.random.random((3, m))
self.pp = interpolate.PPoly(c, x)
npts = 100
self.xp = np.linspace(0, 1, npts)
def time_evaluation(self):
self.pp(self.xp)
class GridData(Benchmark):
param_names = ['n_grids', 'method']
params = [
[10j, 100j, 1000j],
['nearest', 'linear', 'cubic']
]
def setup(self, n_grids, method):
self.func = lambda x, y: x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
self.grid_x, self.grid_y = np.mgrid[0:1:n_grids, 0:1:n_grids]
self.points = np.random.rand(1000, 2)
self.values = self.func(self.points[:,0], self.points[:,1])
def time_evaluation(self, n_grids, method):
interpolate.griddata(self.points, self.values, (self.grid_x, self.grid_y), method=method)
class Interpolate1d(Benchmark):
param_names = ['n_samples', 'method']
params = [
[10, 50, 100],
['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'],
]
def setup(self, n_samples, method):
self.x = np.arange(n_samples)
self.y = np.exp(-self.x/3.0)
def time_interpolate(self, n_samples, method):
interpolate.interp1d(self.x, self.y, kind=method)
class Interpolate2d(Benchmark):
param_names = ['n_samples', 'method']
params = [
[10, 50, 100],
['linear', 'cubic', 'quintic'],
]
def setup(self, n_samples, method):
r_samples = n_samples / 2.
self.x = np.arange(-r_samples, r_samples, 0.25)
self.y = np.arange(-r_samples, r_samples, 0.25)
self.xx, self.yy = np.meshgrid(self.x, self.y)
self.z = np.sin(self.xx**2+self.yy**2)
def time_interpolate(self, n_samples, method):
interpolate.interp2d(self.x, self.y, self.z, kind=method)
class Rbf(Benchmark):
param_names = ['n_samples', 'function']
params = [
[10, 50, 100],
['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate']
]
def setup(self, n_samples, function):
self.x = np.arange(n_samples)
self.y = np.sin(self.x)
r_samples = n_samples / 2.
self.X = np.arange(-r_samples, r_samples, 0.25)
self.Y = np.arange(-r_samples, r_samples, 0.25)
self.z = np.exp(-self.X**2-self.Y**2)
def time_rbf_1d(self, n_samples, function):
interpolate.Rbf(self.x, self.y, function=function)
def time_rbf_2d(self, n_samples, function):
interpolate.Rbf(self.X, self.Y, self.z, function=function)
class UnivariateSpline(Benchmark):
param_names = ['n_samples', 'degree']
params = [
[10, 50, 100],
[3, 4, 5]
]
def setup(self, n_samples, degree):
r_samples = n_samples / 2.
self.x = np.arange(-r_samples, r_samples, 0.25)
self.y = np.exp(-self.x**2) + 0.1 * np.random.randn(*self.x.shape)
def time_univariate_spline(self, n_samples, degree):
interpolate.UnivariateSpline(self.x, self.y, k=degree)
class BivariateSpline(Benchmark):
"""
Author: josef-pktd and scipy mailinglist example
'http://scipy-user.10969.n7.nabble.com/BivariateSpline-examples\
-and-my-crashing-python-td14801.html'
"""
param_names = ['n_samples']
params = [
[10, 20, 30]
]
def setup(self, n_samples):
x = np.arange(0, n_samples, 0.5)
y = np.arange(0, n_samples, 0.5)
x, y = np.meshgrid(x, y)
x = x.ravel()
y = y.ravel()
xmin = x.min()-1
xmax = x.max()+1
ymin = y.min()-1
ymax = y.max()+1
s = 1.1
self.yknots = np.linspace(ymin+s,ymax-s,10)
self.xknots = np.linspace(xmin+s,xmax-s,10)
self.z = np.sin(x) + 0.1*np.random.normal(size=x.shape)
self.x = x
self.y = y
def time_smooth_bivariate_spline(self, n_samples):
interpolate.SmoothBivariateSpline(self.x, self.y, self.z)
def time_lsq_bivariate_spline(self, n_samples):
interpolate.LSQBivariateSpline(self.x, self.y, self.z, self.xknots.flat, self.yknots.flat)
class Interpolate(Benchmark):
"""
Linear Interpolate in scipy and numpy
"""
param_names = ['n_samples', 'module']
params = [
[10, 50, 100],
['numpy', 'scipy']
]
def setup(self, n_samples, module):
self.x = np.arange(n_samples)
self.y = np.exp(-self.x/3.0)
self.z = np.random.normal(size=self.x.shape)
def time_interpolate(self, n_samples, module):
if module == 'scipy':
interpolate.interp1d(self.x, self.y, kind="linear")
else:
np.interp(self.z, self.x, self.y)
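# --- Illustrative sketch, not part of the benchmark suite ---
# A standalone version of the call pattern the classes above time: build an
# interpolant from sample points and evaluate it elsewhere.  The sample size
# and the sine test function are arbitrary choices for this sketch.
if __name__ == '__main__':
    x = np.linspace(0, 10, 50)
    y = np.sin(x)
    f = interpolate.interp1d(x, y, kind='cubic')
    print(f([0.5, 2.5, 7.25]))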
| bsd-3-clause | -2,782,780,859,870,846,500 | 27.7 | 98 | 0.56446 | false |
chipaca/snapcraft | tests/unit/commands/__init__.py | 2 | 16188 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2021 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from pathlib import PosixPath
import subprocess
from textwrap import dedent
from unittest import mock
import fixtures
from click.testing import CliRunner
from snapcraft import storeapi
from snapcraft.cli._runner import run
from snapcraft.storeapi.v2.channel_map import ChannelMap
from snapcraft.storeapi.v2.releases import Releases
from tests import fixture_setup, unit
_sample_keys = [
{
"name": "default",
"sha3-384": "vdEeQvRxmZ26npJCFaGnl-VfGz0lU2jZZkWp_s7E-RxVCNtH2_mtjcxq2NkDKkIp",
},
{
"name": "another",
"sha3-384": "JsfToV5hO2eN9l89pYYCKXUioTERrZIIHUgQQd47jW8YNNBskupiIjWYd3KXLY_D",
},
]
def get_sample_key(name):
for key in _sample_keys:
if key["name"] == name:
return key
raise KeyError(name)
original_check_output = subprocess.check_output
def mock_check_output(command, *args, **kwargs):
if isinstance(command[0], PosixPath):
command[0] = str(command[0])
if command[0].endswith("unsquashfs") or command[0].endswith("xdelta3"):
return original_check_output(command, *args, **kwargs)
elif command[0].endswith("snap") and command[1:] == ["keys", "--json"]:
return json.dumps(_sample_keys)
elif command[0].endswith("snap") and command[1] == "export-key":
if not command[2].startswith("--account="):
raise AssertionError("Unhandled command: {}".format(command))
account_id = command[2][len("--account=") :]
name = command[3]
# This isn't a full account-key-request assertion, but it's enough
# for testing.
return dedent(
"""\
type: account-key-request
account-id: {account_id}
name: {name}
public-key-sha3-384: {sha3_384}
"""
).format(
account_id=account_id, name=name, sha3_384=get_sample_key(name)["sha3-384"]
)
elif command[0].endswith("snap") and command[1:] == [
"create-key",
"new-key",
]:
pass
else:
raise AssertionError("Unhandled command: {}".format(command))
class CommandBaseTestCase(unit.TestCase):
def setUp(self):
super().setUp()
self.runner = CliRunner()
def run_command(self, args, **kwargs):
# For click testing, runner will overwrite the descriptors for stdio -
# ensure TTY always appears connected.
self.useFixture(
fixtures.MockPatch("snapcraft.cli.echo.is_tty_connected", return_value=True)
)
with mock.patch("sys.argv", args):
return self.runner.invoke(run, args, catch_exceptions=False, **kwargs)
class LifecycleCommandsBaseTestCase(CommandBaseTestCase):
def setUp(self):
super().setUp()
self.useFixture(fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT"))
self.fake_lifecycle_clean = fixtures.MockPatch(
"snapcraft.internal.lifecycle.clean"
)
self.useFixture(self.fake_lifecycle_clean)
self.fake_lifecycle_execute = fixtures.MockPatch(
"snapcraft.internal.lifecycle.execute"
)
self.useFixture(self.fake_lifecycle_execute)
self.fake_pack = fixtures.MockPatch("snapcraft.cli.lifecycle._pack")
self.useFixture(self.fake_pack)
self.snapcraft_yaml = fixture_setup.SnapcraftYaml(
self.path,
parts={
"part0": {"plugin": "nil"},
"part1": {"plugin": "nil"},
"part2": {"plugin": "nil"},
},
)
self.useFixture(self.snapcraft_yaml)
self.provider_class_mock = mock.MagicMock()
self.provider_mock = mock.MagicMock()
self.provider_class_mock.return_value.__enter__.return_value = (
self.provider_mock
)
self.fake_get_provider_for = fixtures.MockPatch(
"snapcraft.internal.build_providers.get_provider_for",
return_value=self.provider_class_mock,
)
self.useFixture(self.fake_get_provider_for)
def assert_clean_not_called(self):
self.fake_lifecycle_clean.mock.assert_not_called()
self.provider_mock.clean.assert_not_called()
self.provider_mock.clean_project.assert_not_called()
class StoreCommandsBaseTestCase(CommandBaseTestCase):
def setUp(self):
super().setUp()
self.fake_store = fixture_setup.FakeStore()
self.useFixture(self.fake_store)
self.client = storeapi.StoreClient()
class FakeStoreCommandsBaseTestCase(CommandBaseTestCase):
def setUp(self):
super().setUp()
# Our experimental environment variable is sticky
self.useFixture(
fixtures.EnvironmentVariable(
"SNAPCRAFT_EXPERIMENTAL_PROGRESSIVE_RELEASES", None
)
)
self.fake_store_login = fixtures.MockPatchObject(storeapi.StoreClient, "login")
self.useFixture(self.fake_store_login)
self.fake_store_register = fixtures.MockPatchObject(
storeapi._dashboard_api.DashboardAPI, "register"
)
self.useFixture(self.fake_store_register)
self.fake_store_account_info = fixtures.MockPatchObject(
storeapi._dashboard_api.DashboardAPI,
"get_account_information",
return_value={
"account_id": "abcd",
"account_keys": list(),
"snaps": {
"16": {
"snap-test": {
"snap-id": "snap-test-snap-id",
"status": "Approved",
"private": False,
"since": "2016-12-12T01:01Z",
"price": "0",
},
"basic": {
"snap-id": "basic-snap-id",
"status": "Approved",
"private": False,
"since": "2016-12-12T01:01Z",
"price": "0",
},
}
},
},
)
self.useFixture(self.fake_store_account_info)
self.fake_store_status = fixtures.MockPatchObject(
storeapi._dashboard_api.DashboardAPI, "snap_status", return_value=dict()
)
self.useFixture(self.fake_store_status)
self.fake_store_release = fixtures.MockPatchObject(
storeapi.StoreClient, "release"
)
self.useFixture(self.fake_store_release)
self.fake_store_register_key = fixtures.MockPatchObject(
storeapi._dashboard_api.DashboardAPI, "register_key"
)
self.useFixture(self.fake_store_register_key)
# channel-map endpoint
self.channel_map = ChannelMap.unmarshal(
{
"channel-map": [
{
"architecture": "amd64",
"channel": "2.1/beta",
"expiration-date": None,
"revision": 19,
"progressive": {
"paused": None,
"percentage": None,
"current-percentage": None,
},
},
{
"architecture": "amd64",
"channel": "2.0/beta",
"expiration-date": None,
"revision": 18,
"progressive": {
"paused": None,
"percentage": None,
"current-percentage": None,
},
},
],
"revisions": [
{"architectures": ["amd64"], "revision": 19, "version": "10"},
{"architectures": ["amd64"], "revision": 18, "version": "10"},
],
"snap": {
"name": "snap-test",
"channels": [
{
"branch": None,
"fallback": None,
"name": "2.1/stable",
"risk": "stable",
"track": "2.1",
},
{
"branch": None,
"fallback": "2.1/stable",
"name": "2.1/candidate",
"risk": "candidate",
"track": "2.1",
},
{
"branch": None,
"fallback": "2.1/candidate",
"name": "2.1/beta",
"risk": "beta",
"track": "2.1",
},
{
"branch": None,
"fallback": "2.1/beta",
"name": "2.1/edge",
"risk": "edge",
"track": "2.1",
},
{
"branch": None,
"fallback": None,
"name": "2.0/stable",
"risk": "stable",
"track": "2.0",
},
{
"branch": None,
"fallback": "2.0/stable",
"name": "2.0/candidate",
"risk": "candidate",
"track": "2.0",
},
{
"branch": None,
"fallback": "2.0/candidate",
"name": "2.0/beta",
"risk": "beta",
"track": "2.0",
},
{
"branch": None,
"fallback": "2.0/beta",
"name": "2.0/edge",
"risk": "edge",
"track": "2.0",
},
],
"default-track": "2.1",
"tracks": [
{
"name": "2.0",
"status": "default",
"creation-date": "2019-10-17T14:11:59Z",
"version-pattern": "2\\.*",
},
{
"name": "latest",
"status": "active",
"creation-date": None,
"version-pattern": None,
},
],
},
}
)
self.fake_store_get_snap_channel_map = fixtures.MockPatchObject(
storeapi.StoreClient, "get_snap_channel_map", return_value=self.channel_map
)
self.useFixture(self.fake_store_get_snap_channel_map)
self.releases = Releases.unmarshal(
{
"revisions": [
{
"architectures": ["i386"],
"base": "core20",
"build_url": None,
"confinement": "strict",
"created_at": " 2016-09-27T19:23:40Z",
"grade": "stable",
"revision": 2,
"sha3-384": "a9060ef4872ccacbfa440617a76fcd84967896b28d0d1eb7571f00a1098d766e7e93353b084ba6ad841d7b14b95ede48",
"size": 20,
"status": "Published",
"version": "2.0.1",
},
{
"architectures": ["amd64"],
"base": "core20",
"build_url": None,
"confinement": "strict",
"created_at": "2016-09-27T18:38:43Z",
"grade": "stable",
"revision": 1,
"sha3-384": "a9060ef4872ccacbfa440617a76fcd84967896b28d0d1eb7571f00a1098d766e7e93353b084ba6ad841d7b14b95ede48",
"size": 20,
"status": "Published",
"version": "2.0.2",
},
],
"releases": [
{
"architecture": "amd64",
"branch": None,
"channel": "latest/stable",
"expiration-date": None,
"revision": 1,
"risk": "stable",
"track": "latest",
"when": "2020-02-12T17:51:40.891996Z",
},
{
"architecture": "i386",
"branch": None,
"channel": "latest/stable",
"expiration-date": None,
"revision": None,
"risk": "stable",
"track": "latest",
"when": "2020-02-11T17:51:40.891996Z",
},
{
"architecture": "amd64",
"branch": None,
"channel": "latest/edge",
"expiration-date": None,
"revision": 1,
"risk": "stable",
"track": "latest",
"when": "2020-01-12T17:51:40.891996Z",
},
],
}
)
self.fake_store_get_releases = fixtures.MockPatchObject(
storeapi.StoreClient, "get_snap_releases", return_value=self.releases
)
self.useFixture(self.fake_store_get_releases)
# Uploading
self.mock_tracker = mock.Mock(storeapi._status_tracker.StatusTracker)
self.mock_tracker.track.return_value = {
"code": "ready_to_release",
"processed": True,
"can_release": True,
"url": "/fake/url",
"revision": 19,
}
self.fake_store_upload_precheck = fixtures.MockPatchObject(
storeapi.StoreClient, "upload_precheck"
)
self.useFixture(self.fake_store_upload_precheck)
self.fake_store_upload = fixtures.MockPatchObject(
storeapi.StoreClient, "upload", return_value=self.mock_tracker
)
self.useFixture(self.fake_store_upload)
# Mock the snap command, pass through a select few.
self.fake_check_output = fixtures.MockPatch(
"subprocess.check_output", side_effect=mock_check_output
)
self.useFixture(self.fake_check_output)
# Pretend that the snap command is available
self.fake_package_installed = fixtures.MockPatch(
"snapcraft.internal.repo.Repo.is_package_installed", return_value=True
)
self.useFixture(self.fake_package_installed)
| gpl-3.0 | -7,026,892,609,570,802,000 | 36.472222 | 135 | 0.452866 | false |
TheGU/deep_trading_notebook | omstang_lib/gym2.py | 1 | 6392 | from random import random
import numpy as np
import math
import gym
from gym import spaces
class MarketEnv(gym.Env):
PENALTY = 1 # 0.999756079
def __init__(self, dir_path, target_codes, input_codes, start_date, end_date,
scope=60, sudden_death=-1., cumulative_reward=False):
self.startDate = start_date
self.endDate = end_date
self.scope = scope
self.sudden_death = sudden_death
self.cumulative_reward = cumulative_reward
self.inputCodes = []
self.targetCodes = []
self.dataMap = {}
for code in (target_codes + input_codes):
fn = dir_path + "./" + code + ".csv"
data = {}
lastClose = 0
lastVolume = 0
try:
f = open(fn, "r")
for line in f:
if line.strip() != "":
dt, openPrice, high, low, close, volume = line.strip().split(",")
try:
if dt >= start_date:
high = float(high) if high != "" else float(close)
low = float(low) if low != "" else float(close)
close = float(close)
volume = int(volume)
if lastClose > 0 and close > 0 and lastVolume > 0:
close_ = (close - lastClose) / lastClose
high_ = (high - close) / close
low_ = (low - close) / close
volume_ = (volume - lastVolume) / lastVolume
data[dt] = (high_, low_, close_, volume_)
lastClose = close
lastVolume = volume
                    except Exception, e:
                        print e, line.strip().split(",")
f.close()
        except Exception, e:
            print e
if len(data.keys()) > scope:
self.dataMap[code] = data
if code in target_codes:
self.targetCodes.append(code)
if code in input_codes:
self.inputCodes.append(code)
self.actions = [
"LONG",
"SHORT",
]
self.action_space = spaces.Discrete(len(self.actions))
self.observation_space = spaces.Box(np.ones(scope * (len(input_codes) + 1)) * -1,
np.ones(scope * (len(input_codes) + 1)))
self.reset()
self._seed()
def _step(self, action):
if self.done:
return self.state, self.reward, self.done, {}
self.reward = 0
if self.actions[action] == "LONG":
if sum(self.boughts) < 0:
for b in self.boughts:
self.reward += -(b + 1)
if self.cumulative_reward:
self.reward = self.reward / max(1, len(self.boughts))
if self.sudden_death * len(self.boughts) > self.reward:
self.done = True
self.boughts = []
self.boughts.append(1.0)
elif self.actions[action] == "SHORT":
if sum(self.boughts) > 0:
for b in self.boughts:
self.reward += b - 1
if self.cumulative_reward:
self.reward = self.reward / max(1, len(self.boughts))
if self.sudden_death * len(self.boughts) > self.reward:
self.done = True
self.boughts = []
self.boughts.append(-1.0)
else:
pass
vari = self.target[self.targetDates[self.currentTargetIndex]][2]
self.cum = self.cum * (1 + vari)
for i in xrange(len(self.boughts)):
self.boughts[i] = self.boughts[i] * MarketEnv.PENALTY * (1 + vari * (-1 if sum(self.boughts) < 0 else 1))
self.defineState()
self.currentTargetIndex += 1
if self.currentTargetIndex >= len(self.targetDates) or self.endDate <= self.targetDates[
self.currentTargetIndex]:
self.done = True
if self.done:
for b in self.boughts:
self.reward += (b * (1 if sum(self.boughts) > 0 else -1)) - 1
if self.cumulative_reward:
self.reward = self.reward / max(1, len(self.boughts))
self.boughts = []
return self.state, self.reward, self.done, {"dt": self.targetDates[self.currentTargetIndex], "cum": self.cum,
"code": self.targetCode}
def _reset(self):
self.targetCode = self.targetCodes[int(random() * len(self.targetCodes))]
self.target = self.dataMap[self.targetCode]
self.targetDates = sorted(self.target.keys())
self.currentTargetIndex = self.scope
self.boughts = []
self.cum = 1.
self.done = False
self.reward = 0
self.defineState()
return self.state
def _render(self, mode='human', close=False):
if close:
return
return self.state
'''
def _close(self):
pass
def _configure(self):
pass
'''
def _seed(self):
return int(random() * 100)
def defineState(self):
tmpState = []
budget = (sum(self.boughts) / len(self.boughts)) if len(self.boughts) > 0 else 1.
size = math.log(max(1., len(self.boughts)), 100)
position = 1. if sum(self.boughts) > 0 else 0.
tmpState.append([[budget, size, position]])
subject = []
subjectVolume = []
for i in xrange(self.scope):
try:
subject.append([self.target[self.targetDates[self.currentTargetIndex - 1 - i]][2]])
subjectVolume.append([self.target[self.targetDates[self.currentTargetIndex - 1 - i]][3]])
            except Exception, e:
                print self.targetCode, self.currentTargetIndex, i, len(self.targetDates)
self.done = True
tmpState.append([[subject, subjectVolume]])
tmpState = [np.array(i) for i in tmpState]
        self.state = tmpState
| mit | -8,782,043,516,984,602,000 | 33.187166 | 117 | 0.482478 | false |
hcrlab/access_teleop | limb_manipulation/src/limb_ar_demo.py | 1 | 1647 | #! /usr/bin/env python
from ar_track_alvar_msgs.msg import AlvarMarkers
from geometry_msgs.msg import PoseStamped
import fetch_api
import rospy
def wait_for_time():
"""Wait for simulated time to begin.
"""
while rospy.Time().now().to_sec() == 0:
pass
class ArTagReader(object):
def __init__(self):
self.markers = []
def callback(self, msg):
self.markers = msg.markers
def main():
rospy.init_node('limb_ar_demo')
wait_for_time()
start = PoseStamped()
start.header.frame_id = 'base_link'
start.pose.position.x = 0.5
start.pose.position.y = 0.5
start.pose.position.z = 0.75
arm = fetch_api.Arm()
arm.move_to_pose(start)
reader = ArTagReader()
sub = rospy.Subscriber(
'ar_pose_marker', AlvarMarkers, callback=reader.callback)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
while len(reader.markers) == 0:
rospy.sleep(0.1)
# for marker in reader.markers:
# pose = marker.pose
# pose.header.frame_id = marker.header.frame_id
# error = arm.move_to_pose(pose)
# if error is None:
# rospy.loginfo('Moved to marker {}'.format(marker.id))
# return
# else:
# rospy.logwarn('Failed to move to marker {}'.format(marker.id))
# rospy.logerr('Failed to move to any markers!')
for marker in reader.markers:
pose = marker.pose
pose.header.frame_id = marker.header.frame_id
print(pose)
rate.sleep()
if __name__ == '__main__':
main()
| mit | 2,230,747,371,148,255,000 | 24.338462 | 80 | 0.571949 | false |
alxgu/ansible | lib/ansible/executor/module_common.py | 3 | 49181 | # (c) 2013-2014, Michael DeHaan <[email protected]>
# (c) 2015 Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
import re
import pkgutil
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
from ansible.utils.display import Display
# HACK: keep Python 2.6 controller tests happy in CI until they're properly split
try:
from importlib import import_module
except ImportError:
import_module = __import__
# if we're on a Python that doesn't have FNFError, redefine it as IOError (since that's what we'll see)
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For test-module script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
%(rlimit)s
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import imp
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
bytes = str
MOD_DESC = ('.py', 'U', imp.PY_SOURCE)
PY3 = False
else:
unicode = str
MOD_DESC = ('.py', 'r', imp.PY_SOURCE)
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
# Note: Remove the following section when we switch to zipimport
# Write the module to disk for imp.load_module
module = os.path.join(temp_path, '__main__.py')
with open(module, 'wb') as f:
f.write(z.read('__main__.py'))
f.close()
# End pre-zipimport section
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
with open(module, 'rb') as mod:
imp.load_module('__main__', mod, module, MOD_DESC)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, '__main__.py')
if command == 'excommunicate':
print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
command = 'execute'
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
import imp
with open(script_path, 'r') as f:
importer = imp.load_module('__main__', f, script_path, ('.py', 'r', imp.PY_SOURCE))
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
# Access to the working directory is required by coverage.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
os.chdir('/')
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
try:
import coverage
except ImportError:
print('{"msg": "Could not import `coverage` module.", "failed": true}')
sys.exit(1)
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = '''
try:
imp.find_module('coverage')
except ImportError:
print('{"msg": "Could not find `coverage` module.", "failed": true}')
sys.exit(1)
'''
ANSIBALLZ_RLIMIT_TEMPLATE = '''
import resource
existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# adjust soft limit subject to existing hard limit
requested_soft = min(existing_hard, %(rlimit_nofile)d)
if requested_soft != existing_soft:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
except ValueError:
# some platforms (eg macOS) lie about their hard limit
pass
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
# * relative imports from py2.6+ from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
It's up to calling code to determine whether the final element of the
dotted strings are module names or something else (function, class, or
variable names)
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in node.names:
if alias.name.startswith('ansible.module_utils.'):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
py_mod = tuple(py_mod.split('.'))
self.submodules.add(py_mod)
elif alias.name.startswith('ansible_collections.'):
# keep 'ansible_collections.' as a sentinel prefix to trigger collection-loaded MU path
self.submodules.add(tuple(alias.name.split('.')))
self.generic_visit(node)
def visit_ImportFrom(self, node):
# Specialcase: six is a special case because of its
# import logic
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
elif node.module.startswith('ansible_collections.'):
# TODO: finish out the subpackage et al cases
if node.module.endswith('plugins.module_utils'):
# from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname]
py_mod = tuple(node.module.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname]
self.submodules.add(tuple(node.module.split('.')))
self.generic_visit(node)
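# Illustrative sketch (not part of the original module): how ModuleDepFinder maps import
# statements onto the tuples stored in self.submodules. The module source below is hypothetical.
#
#   import ast
#   _example_src = (
#       "import ansible.module_utils.urls\n"
#       "from ansible.module_utils.basic import AnsibleModule\n"
#       "from ansible.module_utils.database.postgres import quote\n"
#   )
#   _finder = ModuleDepFinder()
#   _finder.visit(ast.parse(_example_src))
#   # _finder.submodules now contains:
#   #   ('urls',), ('basic', 'AnsibleModule') and ('database', 'postgres', 'quote')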
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_name = os.path.basename(interpreter).strip()
# FUTURE: add logical equivalence for python3 in the case of py3-only modules
# check for first-class interpreter config
interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
if C.config.get_configuration_definitions().get(interpreter_config_key):
# a config def exists for this interpreter type; consult config for the value
interpreter_out = C.config.get_config_value(interpreter_config_key, variables=task_vars)
discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
interpreter_out = templar.template(interpreter_out.strip())
facts_from_task_vars = task_vars.get('ansible_facts', {})
# handle interpreter discovery if requested
if interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
if discovered_interpreter_config not in facts_from_task_vars:
# interpreter discovery is desired, but has not been run for this host
raise InterpreterDiscoveryRequiredError("interpreter discovery needed",
interpreter_name=interpreter_name,
discovery_mode=interpreter_out)
else:
interpreter_out = facts_from_task_vars[discovered_interpreter_config]
else:
# a config def does not exist for this interpreter type; consult vars for a possible direct override
interpreter_config = u'ansible_%s_interpreter' % interpreter_name
if interpreter_config not in task_vars:
return None, interpreter
interpreter_out = templar.template(task_vars[interpreter_config].strip())
shebang = u'#!' + interpreter_out
if args:
shebang = shebang + u' ' + u' '.join(args)
return shebang, interpreter_out
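# Illustrative sketch (an assumption, not from the original source): how _get_shebang resolves
# an interpreter that has no first-class config definition. The variable values are hypothetical.
#
#   task_vars = {u'ansible_perl_interpreter': u'/opt/bin/perl'}
#   shebang, interpreter = _get_shebang(u'/usr/bin/perl', task_vars, templar)
#   # shebang == u'#!/opt/bin/perl' and interpreter == u'/opt/bin/perl'
#   # With no matching config entry or task var the function returns (None, u'/usr/bin/perl'),
#   # letting the caller keep whatever shebang the module file already has.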
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module its module_utils files needs.
"""
# Parse the module and find the imports of ansible.module_utils
try:
tree = ast.parse(data)
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
finder = ModuleDepFinder()
finder.visit(tree)
#
    # Determine which of the imports we've found are modules (vs class, function,
    # or variable names) for packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0] == 'six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('six', module_utils_paths)
py_module_name = ('six',)
idx = 0
elif py_module_name[0] == '_six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('six', '_six')
idx = 0
elif py_module_name[0] == 'ansible_collections':
# FIXME: replicate module name resolution like below for granular imports
# this is a collection-hosted MU; look it up with get_data
package_name = '.'.join(py_module_name[:-1])
resource_name = py_module_name[-1] + '.py'
try:
# FIXME: need this in py2 for some reason TBD, but we shouldn't (get_data delegates to wrong loader without it)
pkg = import_module(package_name)
module_info = pkgutil.get_data(package_name, resource_name)
except FileNotFoundError:
# FIXME: implement package fallback code
raise AnsibleError('unable to load collection-hosted module_util {0}.{1}'.format(to_native(package_name),
to_native(resource_name)))
idx = 0
else:
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if isinstance(module_info, bytes): # collection-hosted, just the code
# HACK: maybe surface collection dirs in here and use existing find_module code?
normalized_name = py_module_name
normalized_data = module_info
normalized_path = os.path.join(*py_module_name)
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# HACK: walk back up the package hierarchy to pick up package inits; this won't do the right thing
# for actual packages yet...
accumulated_pkg_name = []
for pkg in py_module_name[:-1]:
accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
normalized_name = tuple(accumulated_pkg_name[:] + ['__init__']) # extra machinations to get a hashable type (list is not)
if normalized_name not in py_module_cache:
normalized_path = os.path.join(*accumulated_pkg_name)
# HACK: possibly preserve some of the actual package file contents; problematic for extend_paths and others though?
normalized_data = ''
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
else:
# Found a byte compiled file rather than source. We cannot send byte
# compiled over the wire as the python version might be different.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
if normalized_name not in py_module_names:
normalized_path = os.path.join(module_info[1], '__init__.py')
normalized_data = _slurp(normalized_path)
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
else:
normalized_name = py_module_name
if normalized_name not in py_module_names:
normalized_path = module_info[1]
normalized_data = module_info[0].read()
module_info[0].close()
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
pkg_dir_info = imp.find_module(py_pkg_name[-1],
[os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# FIXME: Currently the AnsiBallZ wrapper monkeypatches module args into a global
# variable in basic.py. If a module doesn't import basic.py, then the AnsiBallZ wrapper will
    # traceback when it tries to monkeypatch. So, for now, we have to unconditionally include
# basic.py.
#
# In the future we need to change the wrapper to monkeypatch the args into a global variable in
# their own, separate python module. That way we won't require basic.py. Modules which don't
# want basic.py can import that instead. AnsibleModule will need to change to import the vars
# from the separate python module and mirror the args into its global variable for backwards
# compatibility.
if ('basic',) not in py_module_names:
pkg_dir_info = imp.find_module('basic', module_utils_paths)
normalized_modules.add(('basic',))
py_module_cache[('basic',)] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# End of AnsiballZ hack
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
# HACK: this seems to work as a way to identify a collections-based import, but a stronger identifier would be better
if not py_module_cache[py_module_name][1].startswith('/'):
dir_prefix = ''
else:
dir_prefix = 'ansible/module_utils'
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join(dir_prefix,
py_module_file_name), py_module_cache[py_module_name][0])
display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
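# Illustrative examples (an assumption, not from the original source) of the heuristic above:
#   _is_binary(b'#!/usr/bin/python\nprint("hi")\n')     # -> False, only printable text remains
#   _is_binary(b'\x7fELF\x02\x01\x01' + b'\x00' * 9)    # -> True, NUL/DEL bytes survive the translate()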
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
become_method, become_user, become_password, become_flags, environment):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
        # Do REPLACER before from ansible.module_utils because we need to make sure
# we substitute "from ansible.module_utils basic" for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
# FUTURE: combined regex for this stuff, or a "looks like Python, let's inspect further" mechanism
elif b'from ansible.module_utils.' in b_module_data or b'from ansible_collections.' in b_module_data\
or b'import ansible_collections.' in b_module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
try:
python_repred_params = repr(json.dumps(params))
except TypeError as e:
raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
with open(cached_module_filename, 'rb') as module_data:
zipdata = module_data.read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# Note: If we need to import from release.py first,
# remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
zf.writestr('ansible/__init__.py',
b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
to_bytes(__version__) + b'"\n__author__="' +
to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('__main__.py', b_module_data)
py_module_cache = {('__init__',): (b'', '[builtin]')}
recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
with open(cached_module_filename, 'rb') as f:
zipdata = f.read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
if shebang is None:
shebang = u'#!/usr/bin/python'
# Enclose the parts of the interpreter in quotes because we're
# substituting it into the template as a Python string
interpreter_parts = interpreter.split(u' ')
interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
# FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
if not isinstance(rlimit_nofile, int):
rlimit_nofile = int(templar.template(rlimit_nofile))
if rlimit_nofile:
rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
rlimit_nofile=rlimit_nofile,
)
else:
rlimit = ''
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if coverage_config:
coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT']
if coverage_output:
# Enable code coverage analysis of the module.
# This feature is for internal testing and may change without notice.
coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
coverage_config=coverage_config,
coverage_output=coverage_output,
)
else:
# Verify coverage is available without importing it.
# This will detect when a module would fail with coverage enabled with minimal overhead.
coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE
else:
coverage = ''
now = datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
coverage=coverage,
rlimit=rlimit,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
b_module_data, module_args, environment, async_timeout, become,
become_method, become_user, become_password, become_flags,
module_substyle
)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
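# Illustrative mapping (an assumption summarising the detection above, not from the original
# source) from module source to (module_style, module_substyle):
#   b'from ansible.module_utils.basic import AnsibleModule'  -> ('new', 'python')
#   b'#Requires -Module Ansible.ModuleUtils.Legacy'           -> ('new', 'powershell')
#   a script containing the literal marker WANT_JSON          -> ('non_native_want_json', 'non_native_want_json')
#   an ELF or other binary payload                             -> ('binary', 'binary')
#   anything else, e.g. a plain shell script                   -> ('old', 'old')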
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
environment=environment)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# _get_shebang() takes text strings
args = [to_text(a, errors='surrogate_or_strict') for a in args]
interpreter = args[0]
b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
errors='surrogate_or_strict', nonstring='passthru')
if b_new_shebang:
b_lines[0] = b_shebang = b_new_shebang
if os.path.basename(interpreter).startswith(u'python'):
b_lines.insert(1, b_ENCODING_STRING)
shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(b_lines)
return (b_module_data, module_style, shebang)
| gpl-3.0 | -6,045,728,623,201,084,000 | 45.705603 | 157 | 0.619142 | false |
ianmiell/OLD-shutitdist | icu/icu.py | 1 | 1189 | """ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class icu(ShutItModule):
def is_installed(self, shutit):
return shutit.file_exists('/root/shutit_build/module_record/' + self.module_id + '/built')
def build(self, shutit):
shutit.send('mkdir -p /tmp/build/icu')
shutit.send('cd /tmp/build/icu')
shutit.send('curl -L http://download.icu-project.org/files/icu4c/53.1/icu4c-53_1-src.tgz | tar -zxf -')
shutit.send('cd icu')
shutit.send('cd source')
shutit.send('./configure --prefix=/usr')
shutit.send('make')
#shutit.send('make check') # fails - TODO check this
shutit.send('make install')
return True
#def get_config(self, shutit):
# shutit.get_config(self.module_id,'item','default')
# return True
#def check_ready(self, shutit):
# return True
#def start(self, shutit):
# return True
#def stop(self, shutit):
# return True
#def finalize(self, shutit):
# return True
#def remove(self, shutit):
# return True
#def test(self, shutit):
# return True
def module():
return icu(
'shutit.tk.sd.icu.icu', 158844782.0032,
description='',
maintainer='',
depends=['shutit.tk.sd.pkg_config.pkg_config']
)
| gpl-2.0 | 1,729,111,919,955,782,100 | 21.018519 | 105 | 0.675357 | false |
Trust-Code/trust-addons | payment_cielo/__init__.py | 1 | 1595 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# Danimar Ribeiro <[email protected]> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from . import models
from . import controllers
| agpl-3.0 | 1,203,072,121,613,529,600 | 65.458333 | 79 | 0.386207 | false |
mrphlip/lrrbot | common/pubsub.py | 2 | 5758 | import aiohttp
import asyncio
import blinker
import uuid
import logging
import sqlalchemy
import json
import random
from common import http
from common import utils
from common.config import config
__all__ = ["PubSub", "signals"]
signals = blinker.Namespace()
log = logging.getLogger('common.pubsub')
class Topic:
def __init__(self, as_user):
self.as_user = as_user
self.refcount = 1
class PubSub:
def __init__(self, engine, metadata, loop):
self.engine = engine
self.metadata = metadata
self.loop = loop
self.topics = {}
self.task = None
self.stream = None
self.ping_task = None
self.disconnect_task = None
def _token_for(self, user):
users = self.metadata.tables["users"]
with self.engine.begin() as conn:
row = conn.execute(sqlalchemy.select([users.c.twitch_oauth]).where(users.c.name == user)).first()
if row is not None:
return row[0]
raise Exception("User %r not found" % user)
async def _send(self, message):
log.debug("Sending: %r", message)
await self.stream.send_json(message)
async def _listen(self, topics, user):
message_id = uuid.uuid4().hex
log.debug("Listening for topics %r as %r, message %s", topics, user, message_id)
await self._send({
'type': "LISTEN",
'nonce': message_id,
'data': {
'topics': topics,
'auth_token': self._token_for(user),
}
})
async def _unlisten(self, topics):
message_id = uuid.uuid4().hex
log.debug("Unlistening topics %r, message %s", topics, message_id)
await self._send({
'type': 'UNLISTEN',
'nonce': message_id,
'data': {
'topics': topics,
}
})
def subscribe(self, topics, as_user=None):
if as_user is None:
as_user = config['username']
new_topics = []
for topic in topics:
if topic not in self.topics:
self.topics[topic] = Topic(as_user)
new_topics.append(topic)
else:
if self.topics[topic].as_user != as_user:
raise Exception("Already listening for %r as %r", topic, self.topics[topic].as_user)
self.topics[topic].refcount += 1
if len(new_topics) > 0:
if self.stream is not None:
self.loop.run_until_complete(self._listen(new_topics, as_user))
elif self.task is None:
self.task = asyncio.ensure_future(self.message_pump(), loop=self.loop)
def unsubscribe(self, topics):
orphan_topics = []
for topic in topics:
self.topics[topic].refcount -= 1
if self.topics[topic].refcount <= 0:
del self.topics[topic]
orphan_topics.append(topic)
if len(orphan_topics) > 0 and self.stream is not None:
self.loop.run_until_complete(self._unlisten(orphan_topics))
def close(self):
if self.task is not None:
self.task.cancel()
if self.ping_task is not None:
self.ping_task.cancel()
if self.disconnect_task is not None:
self.disconnect_task.cancel()
async def _ping(self):
timeout = 5 * 60
while True:
next_timeout = random.gauss(3 * timeout / 4, timeout / 8)
next_timeout = max(1, min(next_timeout, timeout))
log.debug("Sending a PING in %f seconds", next_timeout)
await asyncio.sleep(next_timeout)
log.debug("Sending a PING.")
await self._send({
'type': 'PING',
})
self.disconnect_task = asyncio.ensure_future(self._disconnect(), loop=self.loop)
self.disconnect_task.add_done_callback(utils.check_exception)
async def _disconnect(self):
try:
await asyncio.sleep(10)
log.debug("Disconnecting due to missed PONG.")
if self.stream is not None:
await self.stream.close()
self.disconnect_task = None
except asyncio.CancelledError:
return
async def message_pump(self):
next_timeout = 1
error = False
while True:
try:
log.debug("Connecting to wss://pubsub-edge.twitch.tv")
async with http.http_request_session.ws_connect("wss://pubsub-edge.twitch.tv") as pubsub:
log.debug("Connected to wss://pubsub-edge.twitch.tv")
self.stream = pubsub
self.ping_task = asyncio.ensure_future(self._ping(), loop=self.loop)
self.ping_task.add_done_callback(utils.check_exception)
# TODO: coalesce topics
for_user = {}
for topic, data in self.topics.items():
for_user.setdefault(data.as_user, []).append(topic)
for user, topics in for_user.items():
await self._listen(topics, user)
async for message in pubsub:
if message.type == aiohttp.WSMsgType.TEXT:
next_timeout = 1
msg = json.loads(message.data)
log.debug("New message: %r", msg)
if msg['type'] == 'RESPONSE':
if msg['error']:
log.error("Error in response to message %s: %s", msg['nonce'], msg['error'])
elif msg['type'] == 'MESSAGE':
signals.signal(msg['data']['topic']).send(self, message=json.loads(msg['data']['message']))
elif msg['type'] == 'RECONNECT':
await pubsub.close()
error = False
break
elif msg['type'] == 'PONG':
log.debug("Received a PONG")
self.disconnect_task.cancel()
self.disconnect_task = None
elif message.type == aiohttp.WSMsgType.CLOSED:
error = True
break
elif message.type == aiohttp.WSMsgType.ERROR:
raise Exception("Error reading message") from pubsub.exception()
except utils.PASSTHROUGH_EXCEPTIONS:
raise
except Exception:
log.exception("Exception in PubSub message task")
error = True
finally:
if self.ping_task is not None:
self.ping_task.cancel()
self.ping_task = None
if self.disconnect_task is not None:
self.disconnect_task.cancel()
self.disconnect_task = None
self.stream = None
jitter = random.gauss(0, next_timeout / 4)
jitter = max(-next_timeout, min(jitter, next_timeout))
await asyncio.sleep(max(1, next_timeout + jitter))
if error:
next_timeout = min(next_timeout * 2, 120)
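# Illustrative usage sketch (an assumption, not part of the original module); the topic,
# user name and handler below are hypothetical:
#
#   pubsub = PubSub(engine, metadata, loop)
#   def on_channel_points(sender, message):
#       log.info("Redemption payload: %r", message)
#   signals.signal("channel-points-channel-v1.12345").connect(on_channel_points)
#   pubsub.subscribe(["channel-points-channel-v1.12345"], as_user="some_channel")
#   # ... later, on shutdown:
#   pubsub.unsubscribe(["channel-points-channel-v1.12345"])
#   pubsub.close()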
| apache-2.0 | -4,721,509,091,613,444,000 | 28.377551 | 100 | 0.657867 | false |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/keyvault/security_domain/byte_shares.py | 3 | 2245 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import array
from azure.cli.command_modules.keyvault.security_domain.mod_math import ModMath
class Share:
def __init__(self, x, v):
self.x = x
self.v = v
@staticmethod
def from_uint16(w):
x = w >> 9
v = w & 0x1ff
return Share(x, v)
def to_uint16(self):
        return (self.x << 9) | self.v
class ByteShares:
def __init__(self, required, secret_byte):
self.coefficients = ByteShares.init_coefficients(required, secret_byte)
@staticmethod
def init_coefficients(required, secret_byte):
coefficients = array.array('H')
for _ in range(required - 1):
coefficients.append(ModMath.get_random())
coefficients.append(secret_byte)
return coefficients
def set_secret_byte(self, secret_byte):
self.coefficients[-1] = secret_byte
def make_share(self, x):
v = ModMath.multiply(self.coefficients[0], x)
v = ModMath.add(v, self.coefficients[1])
for i in range(2, len(self.coefficients)):
v = ModMath.multiply(v, x)
v = ModMath.add(v, self.coefficients[i])
return Share(x, v)
@staticmethod
def get_secret(shares, required):
secret = 0
for i in range(required):
numerator = denominator = 1
si = Share.from_uint16(shares[i])
for j in range(required):
if i == j:
continue
sj = Share.from_uint16(shares[j])
numerator = ModMath.multiply(numerator, sj.x)
diff = ModMath.subtract(sj.x, si.x)
denominator = ModMath.multiply(diff, denominator)
invert = ModMath.invert(denominator)
ci = ModMath.multiply(numerator, invert)
tmp = ModMath.multiply(ci, si.v)
secret = ModMath.add(secret, tmp)
return secret
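# Illustrative round-trip sketch (an assumption, not part of the original module): split one
# secret byte into two shares and recover it via the Lagrange interpolation in get_secret.
#
#   _bs = ByteShares(required=2, secret_byte=0x7f)
#   _shares = [_bs.make_share(x).to_uint16() for x in (1, 2)]
#   assert ByteShares.get_secret(_shares, required=2) == 0x7f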
| mit | 4,014,143,641,700,083,000 | 31.536232 | 94 | 0.53853 | false |
tersmitten/ansible | test/units/modules/network/f5/test_bigip_device_sshd.py | 16 | 3767 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_sshd import ApiParameters
from library.modules.bigip_device_sshd import ModuleParameters
from library.modules.bigip_device_sshd import ModuleManager
from library.modules.bigip_device_sshd import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_sshd import ApiParameters
from ansible.modules.network.f5.bigip_device_sshd import ModuleParameters
from ansible.modules.network.f5.bigip_device_sshd import ModuleManager
from ansible.modules.network.f5.bigip_device_sshd import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
allow=['all'],
banner='enabled',
banner_text='asdf',
inactivity_timeout='100',
log_level='debug',
login='enabled',
port=1010,
)
p = ModuleParameters(params=args)
assert p.allow == ['all']
assert p.banner == 'enabled'
assert p.banner_text == 'asdf'
assert p.inactivity_timeout == 100
assert p.log_level == 'debug'
assert p.login == 'enabled'
assert p.port == 1010
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_update_settings(self, *args):
set_module_args(dict(
allow=['all'],
banner='enabled',
banner_text='asdf',
inactivity_timeout='100',
log_level='debug',
login='enabled',
port=1010,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(
params=dict(
allow=['172.27.1.1']
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['allow'] == ['all']
| gpl-3.0 | 3,368,598,239,624,918,500 | 27.976923 | 91 | 0.62729 | false |
lukeiwanski/tensorflow-opencl | tensorflow/python/kernel_tests/scalar_strict_test.py | 23 | 4922 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for scalar strictness and scalar leniency."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import control_imports
from tensorflow.python.platform import test
class ScalarStrictTest(test.TestCase):
def check(self, op, args, error, correct=None):
# Within Google, the switch to scalar strict occurred at version 6.
if control_imports.USE_OSS:
lenient = []
strict = [5, 6]
else:
lenient = [5]
strict = [6]
# Use placeholders to bypass shape inference, since only the C++
# GraphDef level is ever scalar lenient.
def placeholders(args, feed):
if isinstance(args, tuple):
return [placeholders(x, feed) for x in args]
else:
x = ops.convert_to_tensor(args).eval()
fake = array_ops.placeholder(np.asarray(x).dtype)
feed[fake] = x
return fake
# Test various GraphDef versions
for version in strict + lenient:
with ops.Graph().as_default() as g:
g.graph_def_versions.producer = version
with self.test_session(graph=g) as sess:
feed = {}
xs = placeholders(args, feed)
x = op(*xs)
if version in strict:
with self.assertRaisesOpError(error):
sess.run(x, feed_dict=feed)
else:
r = sess.run(x, feed_dict=feed)
if correct is not None:
self.assertAllEqual(r, correct)
def testConcat(self):
self.check(array_ops.concat, (([2], [3], [7]), [0]),
'axis tensor should be a scalar integer', [2, 3, 7])
for data in (2, 3, 7), (2, [3], 7), (2, 3, [7]):
self.check(array_ops.concat, (data, 0),
r'Expected \w+ dimensions in the range \[0, 0\)', [2, 3, 7])
for data in ([2], 3, 7), ([2], [3], 7):
self.check(array_ops.concat, (data, 0),
r'Ranks of all input tensors should match', [2, 3, 7])
def testFill(self):
self.check(array_ops.fill, (2, 3), 'dims must be a vector', [3, 3])
self.check(array_ops.fill, ([2], [3]), 'value must be a scalar', [3, 3])
def testPad(self):
self.check(array_ops.pad, (7, [[1, 2]]),
'The first dimension of paddings must be the rank of inputs',
[0, 7, 0, 0])
def testRandom(self):
self.check(random_ops.random_uniform, (3,), 'shape must be a vector')
def testReshape(self):
self.check(array_ops.reshape, (7, 1), 'sizes input must be 1-D', [7])
def testShardedFilename(self):
self.check(gen_io_ops._sharded_filename, ('foo', 4, [100]),
'must be a scalar', b'foo-00004-of-00100')
def testShardedFilespec(self):
self.check(gen_io_ops._sharded_filespec, ('foo', [100]), 'must be a scalar',
b'foo-?????-of-00100')
def testUnsortedSegmentSum(self):
self.check(math_ops.unsorted_segment_sum, (7, 1, [4]),
'num_segments should be a scalar', [0, 7, 0, 0])
def testRange(self):
self.check(math_ops.range, ([0], 3, 2), 'start must be a scalar', [0, 2])
self.check(math_ops.range, (0, [3], 2), 'limit must be a scalar', [0, 2])
self.check(math_ops.range, (0, 3, [2]), 'delta must be a scalar', [0, 2])
def testSlice(self):
data = np.arange(10)
error = 'Expected begin and size arguments to be 1-D tensors'
self.check(array_ops.slice, (data, 2, 3), error, [2, 3, 4])
self.check(array_ops.slice, (data, [2], 3), error, [2, 3, 4])
self.check(array_ops.slice, (data, 2, [3]), error, [2, 3, 4])
def testSparseToDense(self):
self.check(sparse_ops.sparse_to_dense, (1, 4, 7),
'output_shape should be a vector', [0, 7, 0, 0])
def testTile(self):
self.check(array_ops.tile, ([7], 2), 'Expected multiples to be 1-D', [7, 7])
if __name__ == '__main__':
test.main()
| apache-2.0 | -5,330,028,995,674,322,000 | 36.861538 | 80 | 0.617838 | false |
drpeteb/scipy | scipy/io/tests/test_mmio.py | 29 | 14542 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from tempfile import mkdtemp, mktemp
import os
import shutil
from numpy import array,transpose, pi
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_array_almost_equal, rand)
import scipy.sparse
from scipy.io.mmio import mminfo, mmread, mmwrite
class TestMMIOArray(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def check(self, a, info):
mmwrite(self.fn, a)
assert_equal(mminfo(self.fn), info)
b = mmread(self.fn)
assert_array_almost_equal(a, b)
def test_simple_integer(self):
self.check([[1, 2], [3, 4]],
(2, 2, 4, 'array', 'integer', 'general'))
def test_simple_upper_triangle_integer(self):
self.check([[0, 1], [0, 0]],
(2, 2, 4, 'array', 'integer', 'general'))
def test_simple_lower_triangle_integer(self):
self.check([[0, 0], [1, 0]],
(2, 2, 4, 'array', 'integer', 'general'))
def test_simple_rectangular_integer(self):
self.check([[1, 2, 3], [4, 5, 6]],
(2, 3, 6, 'array', 'integer', 'general'))
def test_simple_rectangular_float(self):
self.check([[1, 2], [3.5, 4], [5, 6]],
(3, 2, 6, 'array', 'real', 'general'))
def test_simple_float(self):
self.check([[1, 2], [3, 4.0]],
(2, 2, 4, 'array', 'real', 'general'))
def test_simple_complex(self):
self.check([[1, 2], [3, 4j]],
(2, 2, 4, 'array', 'complex', 'general'))
def test_simple_symmetric_integer(self):
self.check([[1, 2], [2, 4]],
(2, 2, 4, 'array', 'integer', 'symmetric'))
def test_simple_skew_symmetric_integer(self):
self.check([[1, 2], [-2, 4]],
(2, 2, 4, 'array', 'integer', 'skew-symmetric'))
def test_simple_skew_symmetric_float(self):
self.check(array([[1, 2], [-2.0, 4]], 'f'),
(2, 2, 4, 'array', 'real', 'skew-symmetric'))
def test_simple_hermitian_complex(self):
self.check([[1, 2+3j], [2-3j, 4]],
(2, 2, 4, 'array', 'complex', 'hermitian'))
def test_random_symmetric_float(self):
sz = (20, 20)
a = rand(*sz)
a = a + transpose(a)
self.check(a, (20, 20, 400, 'array', 'real', 'symmetric'))
def test_random_rectangular_float(self):
sz = (20, 15)
a = rand(*sz)
self.check(a, (20, 15, 300, 'array', 'real', 'general'))
class TestMMIOSparseCSR(TestMMIOArray):
def setUp(self):
self.tmpdir = mkdtemp()
self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def check(self, a, info):
mmwrite(self.fn, a)
assert_equal(mminfo(self.fn), info)
b = mmread(self.fn)
assert_array_almost_equal(a.todense(), b.todense())
def test_simple_integer(self):
self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4]]),
(2, 2, 4, 'coordinate', 'integer', 'general'))
def test_simple_upper_triangle_integer(self):
self.check(scipy.sparse.csr_matrix([[0, 1], [0, 0]]),
(2, 2, 1, 'coordinate', 'integer', 'general'))
def test_simple_lower_triangle_integer(self):
self.check(scipy.sparse.csr_matrix([[0, 0], [1, 0]]),
(2, 2, 1, 'coordinate', 'integer', 'general'))
def test_simple_rectangular_integer(self):
self.check(scipy.sparse.csr_matrix([[1, 2, 3], [4, 5, 6]]),
(2, 3, 6, 'coordinate', 'integer', 'general'))
def test_simple_rectangular_float(self):
self.check(scipy.sparse.csr_matrix([[1, 2], [3.5, 4], [5, 6]]),
(3, 2, 6, 'coordinate', 'real', 'general'))
def test_simple_float(self):
self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4.0]]),
(2, 2, 4, 'coordinate', 'real', 'general'))
def test_simple_complex(self):
self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4j]]),
(2, 2, 4, 'coordinate', 'complex', 'general'))
def test_simple_symmetric_integer(self):
self.check(scipy.sparse.csr_matrix([[1, 2], [2, 4]]),
(2, 2, 3, 'coordinate', 'integer', 'symmetric'))
def test_simple_skew_symmetric_integer(self):
self.check(scipy.sparse.csr_matrix([[1, 2], [-2, 4]]),
(2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'))
def test_simple_skew_symmetric_float(self):
self.check(scipy.sparse.csr_matrix(array([[1, 2], [-2.0, 4]], 'f')),
(2, 2, 3, 'coordinate', 'real', 'skew-symmetric'))
def test_simple_hermitian_complex(self):
self.check(scipy.sparse.csr_matrix([[1, 2+3j], [2-3j, 4]]),
(2, 2, 3, 'coordinate', 'complex', 'hermitian'))
def test_random_symmetric_float(self):
sz = (20, 20)
a = rand(*sz)
a = a + transpose(a)
a = scipy.sparse.csr_matrix(a)
self.check(a, (20, 20, 210, 'coordinate', 'real', 'symmetric'))
def test_random_rectangular_float(self):
sz = (20, 15)
a = rand(*sz)
a = scipy.sparse.csr_matrix(a)
self.check(a, (20, 15, 300, 'coordinate', 'real', 'general'))
_general_example = '''\
%%MatrixMarket matrix coordinate real general
%=================================================================================
%
% This ASCII file represents a sparse MxN matrix with L
% nonzeros in the following Matrix Market format:
%
% +----------------------------------------------+
% |%%MatrixMarket matrix coordinate real general | <--- header line
% |% | <--+
% |% comments | |-- 0 or more comment lines
% |% | <--+
% | M N L | <--- rows, columns, entries
% | I1 J1 A(I1, J1) | <--+
% | I2 J2 A(I2, J2) | |
% | I3 J3 A(I3, J3) | |-- L lines
% | . . . | |
% | IL JL A(IL, JL) | <--+
% +----------------------------------------------+
%
% Indices are 1-based, i.e. A(1,1) is the first element.
%
%=================================================================================
5 5 8
1 1 1.000e+00
2 2 1.050e+01
3 3 1.500e-02
1 4 6.000e+00
4 2 2.505e+02
4 4 -2.800e+02
4 5 3.332e+01
5 5 1.200e+01
'''
_hermitian_example = '''\
%%MatrixMarket matrix coordinate complex hermitian
5 5 7
1 1 1.0 0
2 2 10.5 0
4 2 250.5 22.22
3 3 1.5e-2 0
4 4 -2.8e2 0
5 5 12. 0
5 4 0 33.32
'''
_skew_example = '''\
%%MatrixMarket matrix coordinate real skew-symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 0
'''
_symmetric_example = '''\
%%MatrixMarket matrix coordinate real symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 8
'''
_symmetric_pattern_example = '''\
%%MatrixMarket matrix coordinate pattern symmetric
5 5 7
1 1
2 2
4 2
3 3
4 4
5 5
5 4
'''
class TestMMIOCoordinate(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def check_read(self, example, a, info):
f = open(self.fn, 'w')
f.write(example)
f.close()
assert_equal(mminfo(self.fn), info)
b = mmread(self.fn).todense()
assert_array_almost_equal(a, b)
def test_read_general(self):
a = [[1, 0, 0, 6, 0],
[0, 10.5, 0, 0, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 33.32],
[0, 0, 0, 0, 12]]
self.check_read(_general_example, a,
(5, 5, 8, 'coordinate', 'real', 'general'))
def test_read_hermitian(self):
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, 250.5 - 22.22j, 0],
[0, 0, .015, 0, 0],
[0, 250.5 + 22.22j, 0, -280, -33.32j],
[0, 0, 0, 33.32j, 12]]
self.check_read(_hermitian_example, a,
(5, 5, 7, 'coordinate', 'complex', 'hermitian'))
def test_read_skew(self):
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, -250.5, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 0],
[0, 0, 0, 0, 12]]
self.check_read(_skew_example, a,
(5, 5, 7, 'coordinate', 'real', 'skew-symmetric'))
def test_read_symmetric(self):
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, 250.5, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 8],
[0, 0, 0, 8, 12]]
self.check_read(_symmetric_example, a,
(5, 5, 7, 'coordinate', 'real', 'symmetric'))
def test_read_symmetric_pattern(self):
a = [[1, 0, 0, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 1, 1],
[0, 0, 0, 1, 1]]
self.check_read(_symmetric_pattern_example, a,
(5, 5, 7, 'coordinate', 'pattern', 'symmetric'))
def test_empty_write_read(self):
# http://projects.scipy.org/scipy/ticket/883
b = scipy.sparse.coo_matrix((10, 10))
mmwrite(self.fn, b)
assert_equal(mminfo(self.fn),
(10, 10, 0, 'coordinate', 'real', 'symmetric'))
a = b.todense()
b = mmread(self.fn).todense()
assert_array_almost_equal(a, b)
def test_bzip2_py3(self):
# test if fix for #2152 works
try:
# bz2 module isn't always built when building Python.
import bz2
except:
return
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
mmwrite(self.fn, b)
fn_bzip2 = "%s.bz2" % self.fn
with open(self.fn, 'rb') as f_in:
f_out = bz2.BZ2File(fn_bzip2, 'wb')
f_out.write(f_in.read())
f_out.close()
a = mmread(fn_bzip2).todense()
assert_array_almost_equal(a, b.todense())
def test_gzip_py3(self):
# test if fix for #2152 works
try:
# gzip module can be missing from Python installation
import gzip
except:
return
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
mmwrite(self.fn, b)
fn_gzip = "%s.gz" % self.fn
with open(self.fn, 'rb') as f_in:
f_out = gzip.open(fn_gzip, 'wb')
f_out.write(f_in.read())
f_out.close()
a = mmread(fn_gzip).todense()
assert_array_almost_equal(a, b.todense())
def test_real_write_read(self):
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
mmwrite(self.fn, b)
assert_equal(mminfo(self.fn),
(5, 5, 8, 'coordinate', 'real', 'general'))
a = b.todense()
b = mmread(self.fn).todense()
assert_array_almost_equal(a, b)
def test_complex_write_read(self):
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
mmwrite(self.fn, b)
assert_equal(mminfo(self.fn),
(5, 5, 8, 'coordinate', 'complex', 'general'))
a = b.todense()
b = mmread(self.fn).todense()
assert_array_almost_equal(a, b)
def test_sparse_formats(self):
mats = []
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
for mat in mats:
expected = mat.todense()
for fmt in ['csr', 'csc', 'coo']:
fn = mktemp(dir=self.tmpdir) # safe, we own tmpdir
mmwrite(fn, mat.asformat(fmt))
result = mmread(fn).todense()
assert_array_almost_equal(result, expected)
def test_precision(self):
test_values = [pi] + [10**(i) for i in range(0, -10, -1)]
test_precisions = range(1, 10)
for value in test_values:
for precision in test_precisions:
# construct sparse matrix with test value at last main diagonal
n = 10**precision + 1
A = scipy.sparse.dok_matrix((n, n))
A[n-1, n-1] = value
# write matrix with test precision and read again
mmwrite(self.fn, A, precision=precision)
A = scipy.io.mmread(self.fn)
# check for right entries in matrix
assert_array_equal(A.row, [n-1])
assert_array_equal(A.col, [n-1])
assert_array_almost_equal(A.data,
[float('%%.%dg' % precision % value)])
if __name__ == "__main__":
run_module_suite()
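# --- Illustrative round-trip (not part of the original test module) ------
# A minimal sketch of the API exercised above; 'example.mtx' is a
# placeholder file name and the printed values assume the default dtypes.
#
#   >>> from scipy.io import mmwrite, mmread, mminfo
#   >>> mmwrite('example.mtx', [[1, 2], [3, 4]])
#   >>> mminfo('example.mtx')
#   (2, 2, 4, 'array', 'integer', 'general')
#   >>> mmread('example.mtx')            # dense array format -> ndarray
#   array([[1, 2],
#          [3, 4]])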
| bsd-3-clause | -1,825,421,499,409,047,800 | 32.353211 | 82 | 0.46933 | false |
saghul/erequests | erequests.py | 1 | 5386 | # -*- coding: utf-8 -*-
"""
erequests
~~~~~~~~~
This module contains an asynchronous replica of ``requests.api``, powered
by eventlet.
"""
__version__ = '0.4.1'
import eventlet
# Monkey-patch.
requests = eventlet.patcher.import_patched('requests.__init__')
__all__ = ['__version__', 'map', 'imap', 'get', 'options', 'head', 'post', 'put', 'patch', 'delete', 'request', 'AsyncRequest']
# Export same items as vanilla requests
__requests_imports__ = ['utils', 'session', 'Session', 'codes', 'RequestException', 'Timeout', 'URLRequired', 'TooManyRedirects', 'HTTPError', 'ConnectionError']
eventlet.patcher.slurp_properties(requests, globals(), srckeys=__requests_imports__)
__all__.extend(__requests_imports__)
del requests, __requests_imports__
class AsyncRequest(object):
""" Asynchronous request.
    Accepts the same parameters as ``Session.request`` plus one additional:
    :param session: the Session which will perform the request
"""
def __init__(self, method, url, session=None):
self.method = method
self.url = url
self.session = session or Session()
self._prepared_kwargs = None
def prepare(self, **kwargs):
assert self._prepared_kwargs is None, 'cannot call prepare multiple times'
self._prepared_kwargs = kwargs
def send(self, **kwargs):
kw = self._prepared_kwargs or {}
kw.update(kwargs)
return self.session.request(self.method, self.url, **kw)
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.method)
class AsyncRequestFactory(object):
""" Factory for AsyncRequests. Serious business yo!
"""
request_cls = AsyncRequest
@classmethod
def request(cls, method, url, **kwargs):
session = kwargs.pop('session', None)
r = cls.request_cls(method, url, session)
r.prepare(**kwargs)
return r
@classmethod
def get(cls, url, **kwargs):
kwargs.setdefault('allow_redirects', True)
return cls.request('GET', url, **kwargs)
@classmethod
def options(cls, url, **kwargs):
kwargs.setdefault('allow_redirects', True)
return cls.request('OPTIONS', url, **kwargs)
@classmethod
def head(cls, url, **kwargs):
kwargs.setdefault('allow_redirects', False)
return cls.request('HEAD', url, **kwargs)
@classmethod
def post(cls, url, data=None, **kwargs):
return cls.request('POST', url, data=data, **kwargs)
@classmethod
def put(cls, url, data=None, **kwargs):
return cls.request('PUT', url, data=data, **kwargs)
@classmethod
def patch(cls, url, data=None, **kwargs):
return cls.request('PATCH', url, data=data, **kwargs)
@classmethod
def delete(cls, url, **kwargs):
return cls.request('DELETE', url, **kwargs)
def request(method, url, **kwargs):
req = AsyncRequest(method, url)
return eventlet.spawn(req.send, **kwargs).wait()
def get(url, **kwargs):
kwargs.setdefault('allow_redirects', True)
return request('GET', url, **kwargs)
def options(url, **kwargs):
kwargs.setdefault('allow_redirects', True)
return request('OPTIONS', url, **kwargs)
def head(url, **kwargs):
kwargs.setdefault('allow_redirects', False)
return request('HEAD', url, **kwargs)
def post(url, data=None, **kwargs):
return request('POST', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
return request('PUT', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
return request('PATCH', url, data=data, **kwargs)
def delete(url, **kwargs):
return request('DELETE', url, **kwargs)
def request_async(*args, **kwargs): return AsyncRequestFactory.request(*args, **kwargs)
def get_async(*args, **kwargs): return AsyncRequestFactory.get(*args, **kwargs)
def options_async(*args, **kwargs): return AsyncRequestFactory.options(*args, **kwargs)
def head_async(*args, **kwargs): return AsyncRequestFactory.head(*args, **kwargs)
def post_async(*args, **kwargs): return AsyncRequestFactory.post(*args, **kwargs)
def put_async(*args, **kwargs): return AsyncRequestFactory.put(*args, **kwargs)
def patch_async(*args, **kwargs): return AsyncRequestFactory.patch(*args, **kwargs)
def delete_async(*args, **kwargs): return AsyncRequestFactory.delete(*args, **kwargs)
def map(requests, size=10):
"""Concurrently converts a sequence of AsyncRequest objects to Responses.
:param requests: a collection of Request objects.
:param size: Specifies the number of requests to make at a time, defaults to 10.
"""
def send(req):
try:
return req.send()
except Exception as e:
return e
pool = eventlet.GreenPool(size)
jobs = [pool.spawn(send, r) for r in requests]
for j in jobs:
yield j.wait()
def imap(requests, size=10):
"""Concurrently converts a sequence of AsyncRequest objects to Responses.
:param requests: a generator of Request objects.
:param size: Specifies the number of requests to make at a time. defaults to 10.
"""
pool = eventlet.GreenPool(size)
def send(r):
try:
return r.send()
except Exception as e:
return e
for r in pool.imap(send, requests):
yield r
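# --- Illustrative usage (not part of the original module) ----------------
# Minimal sketch of the API defined above; the URLs are placeholders and a
# working network connection is assumed.
if __name__ == '__main__':
    urls = ['http://example.com', 'http://example.org']
    async_requests = [get_async(url) for url in urls]
    for response in map(async_requests, size=2):
        # ``map`` yields Response objects, or the exception raised by send().
        print(response)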
| bsd-2-clause | -6,429,521,311,120,439,000 | 27.347368 | 161 | 0.644449 | false |
humangeo/preflyt | preflyt/checkers/environment.py | 1 | 1105 | """Checks for environment variables"""
import os
from preflyt.base import BaseChecker
class EnvironmentChecker(BaseChecker):
"""Verify that an environment variable is present and, if so, it has a specific value."""
checker_name = "env"
def __init__(self, name, value=None):
"""Initialize the checker
:param name: The name of the environment variable to check
:param value: The optional value of the variable.
"""
super().__init__()
self._name = name
self._value = value
def check(self):
val = os.getenv(self._name)
if val is None:
return False, "The environment variable '{}' is not defined".format(self._name)
elif self._value is not None:
if self._value == val:
return True, "The environment variable '{}' is defined with the correct value.".format(self._name)
return False, "The environment variable '{}' is defined with the incorrect value.".format(self._name)
return True, "The environment variable '{}' is defined.".format(self._name)
| mit | 498,212,279,542,649,500 | 34.645161 | 114 | 0.625339 | false |
ChyauAng/DNN-Composer | src/Preprocess/filesProcessor.py | 1 | 1170 | from Preprocess import dataPreprocess
class FilesProcess:
"""
Gather all ABC files to dataset.dat
Properties
----------
dir_path
The path of the directory.
data
The ABC files information.
"""
    def __init__(self, dir_path, data=None):
        # Avoid the shared-mutable-default pitfall: each instance gets its own list.
        self.dir_path = dir_path
        self.data = data if data is not None else []
    def main(self, file_name):
        # Delegate the actual gathering of ABC files to the preprocessor.
        preprocessor = dataPreprocess.ABCPreprocess(self.dir_path, self.data)
        preprocessor.processFolder(file_name)
    def plusEnding(self, file_name):
        """
        Preprocess the 24th dimension of pitch and duration.
        Inserts an '|#ending|' marker before every tune header ('X:') except
        the first one and appends a final '|%ending|' marker to the file.
        """
        with open(file_name, 'r') as f:
            lines = f.readlines()
        data = []
        for i, line in enumerate(lines):
            if line[0:2] == 'X:' and i != 0:
                data.append('|#ending|\n')
            data.append(line)
        data.append('|%ending|\n')
        with open(file_name, 'w') as file_object:
            file_object.writelines(data)
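# --- Illustrative usage (not part of the original module) ----------------
# The paths below are placeholders; processFolder() is assumed to gather
# every ABC file found under dir_path into the named dataset file.
if __name__ == '__main__':
    processor = FilesProcess('abc_files')
    processor.main('dataset.dat')
    processor.plusEnding('dataset.dat')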
| mit | 3,828,874,354,370,804,000 | 25.209302 | 69 | 0.450427 | false |
sylnsfar/qrcode | amzqr/terminal.py | 1 | 2551 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from amzqr.amzqr import run
import os
def main():
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument('Words', help = 'The words to produce you QR-code picture, like a URL or a sentence. Please read the README file for the supported characters.')
argparser.add_argument('-v', '--version', type = int, choices = range(1,41), default = 1, help = 'The version means the length of a side of the QR-Code picture. From little size to large is 1 to 40.')
argparser.add_argument('-l', '--level', choices = list('LMQH'), default = 'H', help = 'Use this argument to choose an Error-Correction-Level: L(Low), M(Medium) or Q(Quartile), H(High). Otherwise, just use the default one: H')
argparser.add_argument('-p', '--picture', help = 'the picture e.g. example.jpg')
argparser.add_argument('-c', '--colorized', action = 'store_true', help = "Produce a colorized QR-Code with your picture. Just works when there is a correct '-p' or '--picture'.")
argparser.add_argument('-con', '--contrast', type = float, default = 1.0, help = 'A floating point value controlling the enhancement of contrast. Factor 1.0 always returns a copy of the original image, lower factors mean less color (brightness, contrast, etc), and higher values more. There are no restrictions on this value. Default: 1.0')
argparser.add_argument('-bri', '--brightness', type = float, default = 1.0, help = 'A floating point value controlling the enhancement of brightness. Factor 1.0 always returns a copy of the original image, lower factors mean less color (brightness, contrast, etc), and higher values more. There are no restrictions on this value. Default: 1.0')
    argparser.add_argument('-n', '--name', help = "The filename of the output, ending with one of {'.jpg', '.png', '.bmp', '.gif'}, e.g. example.png")
argparser.add_argument('-d', '--directory', default = os.getcwd(), help = 'The directory of output.')
args = argparser.parse_args()
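    # Example invocations (illustrative; assumes the console entry point is
    # installed as ``amzqr``):
    #   amzqr "https://example.com"
    #   amzqr "https://example.com" -v 10 -l Q -p background.png -c -n my_qr.png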
if args.picture and args.picture[-4:]=='.gif':
        print('It may take a while, please wait a few minutes...')
try:
ver, ecl, qr_name = run(
args.Words,
args.version,
args.level,
args.picture,
args.colorized,
args.contrast,
args.brightness,
args.name,
args.directory
)
print('Succeed! \nCheck out your', str(ver) + '-' + str(ecl), 'QR-code:', qr_name)
except:
raise | gpl-3.0 | -5,886,211,228,467,910,000 | 66.157895 | 348 | 0.649549 | false |
frigg/frigg-hq | frigg/builds/migrations/0004_auto_20141114_1336.py | 1 | 2251 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import basis.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('builds', '0003_auto_20141029_2158'),
]
operations = [
migrations.AddField(
model_name='build',
name='created_at',
field=models.DateTimeField(editable=False, default=basis.models._now),
preserve_default=True,
),
migrations.AddField(
model_name='build',
name='updated_at',
field=models.DateTimeField(editable=False, default=basis.models._now),
preserve_default=True,
),
migrations.AddField(
model_name='buildresult',
name='created_at',
field=models.DateTimeField(editable=False, default=basis.models._now),
preserve_default=True,
),
migrations.AddField(
model_name='buildresult',
name='updated_at',
field=models.DateTimeField(editable=False, default=basis.models._now),
preserve_default=True,
),
migrations.AddField(
model_name='project',
name='created_at',
field=models.DateTimeField(editable=False, default=basis.models._now),
preserve_default=True,
),
migrations.AddField(
model_name='project',
name='updated_at',
field=models.DateTimeField(editable=False, default=basis.models._now),
preserve_default=True,
),
migrations.AlterField(
model_name='build',
name='branch',
field=models.CharField(max_length=100, default='master'),
preserve_default=True,
),
migrations.AlterField(
model_name='project',
name='user',
field=models.ForeignKey(
related_name='authx1_projects',
help_text='A user with access to the repository.',
null=True,
to=settings.AUTH_USER_MODEL,
blank=True
),
preserve_default=True,
),
]
| mit | -5,956,405,167,287,960,000 | 31.157143 | 82 | 0.552643 | false |
girving/tensorflow | tensorflow/contrib/checkpoint/python/split_dependency.py | 28 | 5773 | """Utility for creating multiple dependencies with synchronized save/restore."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.checkpointable import base as checkpointable
class _CallbackSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
"""Wraps save and restore callbacks as a `SaveableObject`."""
def __init__(self, name, dtype, save_callback, restore_callback):
self._restore_callback = restore_callback
spec = saver_lib.BaseSaverBuilder.SaveSpec(
tensor=save_callback,
slice_spec="",
name=name,
dtype=dtype)
super(_CallbackSaveable, self).__init__(
save_callback, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return self._restore_callback(tensor)
class _SplitDependency(checkpointable.CheckpointableBase):
"""Looks like a regular variable while synchronizing save/restores."""
def __init__(self, save_buffer, restore_buffer, name, dtype, num_components,
fill_save_buffer_fn, consume_restore_buffer_fn):
self._save_buffer = save_buffer
self._restore_buffer = restore_buffer
self._name = name
self._dtype = dtype
self._num_components = num_components
self._fill_save_buffer_fn = fill_save_buffer_fn
self._consume_restore_buffer_fn = consume_restore_buffer_fn
def _save(self):
"""Pull from the shared buffer, populating it if necessary."""
if self._name not in self._save_buffer:
if self._save_buffer:
raise AssertionError(
("Split dependency %s (%s) unsynchronized. Split dependencies must "
"be saved together.") % (self._name, self))
self._fill_save_buffer_fn(self._save_buffer)
return self._save_buffer.pop(self._name)
def _restore(self, tensor):
"""Push into the shared buffer, flushing it if necessary."""
if self._name in self._restore_buffer:
raise AssertionError(
("Split dependency %s (%s) unsynchronized. Split dependencies must "
"be restored together.") % (self._name, self))
self._restore_buffer[self._name] = tensor
if len(self._restore_buffer) == self._num_components:
op = self._consume_restore_buffer_fn(self._restore_buffer)
self._restore_buffer.clear()
return op
else:
return control_flow_ops.no_op()
def _gather_saveables_for_checkpoint(self):
"""Looks to Checkpointable like a regular variable."""
return {
checkpointable.VARIABLE_VALUE_KEY:
functools.partial(_CallbackSaveable,
dtype=self._dtype,
save_callback=self._save,
restore_callback=self._restore)
}
def split_dependency(component_names, component_dtypes,
fill_save_buffer_fn, consume_restore_buffer_fn):
"""Creates multiple dependencies with a synchronized save/restore.
Useful when a single op produces `Tensor`s which should each be saved under
different objects, or when `Tensor`s saved with many different objects need to
be restored together as inputs to a single op (i.e. an object which uses a
single fused op may be swapped out for a subgraph of objects, and these two
programs are checkpoint compatible).
Args:
component_names: A sequence of names for the split
dependencies. `fill_save_buffer_fn` must add these keys to the dictionary
it is passed, and `consume_restore_buffer_fn` will receive a dictionary
with these keys.
component_dtypes: Data types for the `Tensor`s being saved and restored, a
sequence corresponding to `component_names`.
fill_save_buffer_fn: A function which takes an empty dictionary as an
argument and adds `Tensor`s with `component_names` as keys. These
`Tensor`s will be saved as if they were individual variables.
consume_restore_buffer_fn: A function which takes a dictionary with
`component_names` as keys mapping to restored individual `Tensor`s and
returns a restore op (or if executing eagerly, runs the restoration and
may return `None`).
Returns:
A dictionary mapping from names to Checkpointable objects. If one is
reachable from an object as a dependency, the others should be too; adding
dependencies on some but not all of the objects will result in errors.
"""
save_buffer = {}
restore_buffer = {}
split_dependencies = {}
for name, dtype in zip(component_names, component_dtypes):
split_dependencies[name] = _SplitDependency(
save_buffer=save_buffer,
restore_buffer=restore_buffer,
name=name,
dtype=dtype,
num_components=len(component_names),
fill_save_buffer_fn=fill_save_buffer_fn,
consume_restore_buffer_fn=consume_restore_buffer_fn)
return split_dependencies
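# --- Illustrative usage (not part of the original module) ----------------
# Sketch of splitting one fused variable into two named checkpoint
# dependencies; ``fused_variable`` and the half/half split are assumptions
# made for the example, and ``array_ops`` would need to be imported.
#
#   def _fill(save_buffer):
#     save_buffer["first_half"], save_buffer["second_half"] = (
#         array_ops.split(fused_variable, 2))
#
#   def _consume(restore_buffer):
#     return fused_variable.assign(
#         array_ops.concat([restore_buffer["first_half"],
#                           restore_buffer["second_half"]], axis=0))
#
#   dependencies = split_dependency(
#       component_names=("first_half", "second_half"),
#       component_dtypes=(fused_variable.dtype,) * 2,
#       fill_save_buffer_fn=_fill,
#       consume_restore_buffer_fn=_consume)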
| apache-2.0 | -5,282,227,811,205,066,000 | 41.448529 | 80 | 0.690282 | false |
developerworks/horizon | horizon/forms/views.py | 2 | 2715 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from django.views import generic
class ModalFormView(generic.TemplateView):
form_class = None
initial = {}
context_form_name = "form"
context_object_name = "object"
def get_template_names(self):
if self.request.is_ajax():
if not hasattr(self, "ajax_template_name"):
# Transform standard template name to ajax name (leading "_")
bits = list(os.path.split(self.template_name))
bits[1] = "".join(("_", bits[1]))
self.ajax_template_name = os.path.join(*bits)
template = self.ajax_template_name
else:
template = self.template_name
return template
def get_object(self, *args, **kwargs):
return None
def get_initial(self):
return self.initial
def get_form_kwargs(self):
kwargs = {'initial': self.get_initial()}
return kwargs
def maybe_handle(self):
if not self.form_class:
raise AttributeError('You must specify a SelfHandlingForm class '
'for the "form_class" attribute on %s.'
% self.__class__.__name__)
if not hasattr(self, "form"):
form = self.form_class
kwargs = self.get_form_kwargs()
self.form, self.handled = form.maybe_handle(self.request, **kwargs)
return self.form, self.handled
def get(self, request, *args, **kwargs):
self.object = self.get_object(*args, **kwargs)
form, handled = self.maybe_handle()
if handled:
return handled
context = self.get_context_data(**kwargs)
context[self.context_form_name] = form
context[self.context_object_name] = self.object
if self.request.is_ajax():
context['hide'] = True
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
""" Placeholder to allow POST; handled the same as GET. """
        return self.get(request, *args, **kwargs)
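# --- Illustrative usage (not part of the original module) ----------------
# A minimal subclass sketch; ``UpdateInstanceForm`` is a placeholder for a
# SelfHandlingForm subclass and ``get_instance`` for a retrieval helper.
# get_template_names() above would look for "_update.html" on AJAX requests.
#
#   class UpdateInstanceView(ModalFormView):
#       form_class = UpdateInstanceForm
#       template_name = 'instances/update.html'
#
#       def get_object(self, *args, **kwargs):
#           return get_instance(self.request, kwargs['instance_id'])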
| apache-2.0 | 4,181,724,044,338,363,400 | 35.2 | 79 | 0.60884 | false |
paulmadore/Eric-IDE | 6-6.0.9/eric/Network/IRC/IrcIdentitiesEditDialog.py | 2 | 17535 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing the identities management dialog.
"""
from __future__ import unicode_literals
import copy
from PyQt5.QtCore import pyqtSlot, Qt, QEvent, QItemSelectionModel
from PyQt5.QtWidgets import QDialog, QInputDialog, QLineEdit
from E5Gui import E5MessageBox
from .Ui_IrcIdentitiesEditDialog import Ui_IrcIdentitiesEditDialog
from .IrcNetworkManager import IrcIdentity
import Utilities
import UI.PixmapCache
class IrcIdentitiesEditDialog(QDialog, Ui_IrcIdentitiesEditDialog):
"""
Class implementing the identities management dialog.
"""
def __init__(self, manager, identityName, parent=None):
"""
Constructor
@param manager reference to the IRC network manager object
(IrcNetworkManager)
@param identityName name of the identity to be selected (string)
@param parent reference to the parent widget (QWidget)
"""
super(IrcIdentitiesEditDialog, self).__init__(parent)
self.setupUi(self)
self.addButton.setIcon(UI.PixmapCache.getIcon("plus.png"))
self.copyButton.setIcon(UI.PixmapCache.getIcon("editCopy.png"))
self.renameButton.setIcon(UI.PixmapCache.getIcon("editRename.png"))
self.deleteButton.setIcon(UI.PixmapCache.getIcon("minus.png"))
self.nicknameAddButton.setIcon(UI.PixmapCache.getIcon("plus.png"))
self.nicknameDeleteButton.setIcon(UI.PixmapCache.getIcon("minus.png"))
self.nicknameUpButton.setIcon(UI.PixmapCache.getIcon("1uparrow.png"))
self.nicknameDownButton.setIcon(
UI.PixmapCache.getIcon("1downarrow.png"))
self.__manager = manager
self.__identities = self.__manager.getIdentities()
self.__currentIdentity = None
identities = list(sorted(self.__manager.getIdentityNames()))
identities[identities.index(IrcIdentity.DefaultIdentityName)] = \
IrcIdentity.DefaultIdentityDisplay
self.identitiesCombo.addItems(identities)
if identityName == IrcIdentity.DefaultIdentityName:
identityName = IrcIdentity.DefaultIdentityDisplay
index = self.identitiesCombo.findText(identityName)
if index == -1:
index = 0
identityName = self.identitiesCombo.itemText(0)
self.identitiesCombo.setCurrentIndex(index)
self.on_identitiesCombo_currentIndexChanged(identityName)
self.nicknameEdit.installEventFilter(self)
def eventFilter(self, obj, evt):
"""
Public method to handle events for other objects.
@param obj reference to the object (QObject)
@param evt reference to the event (QEvent)
@return flag indicating that the event should be filtered out (boolean)
"""
if obj == self.nicknameEdit and evt.type() == QEvent.KeyPress:
if evt.key() in [Qt.Key_Enter, Qt.Key_Return]:
self.on_nicknameAddButton_clicked()
return True
return super(IrcIdentitiesEditDialog, self).eventFilter(obj, evt)
def __updateIdentitiesButtons(self):
"""
Private slot to update the status of the identity related buttons.
"""
enable = (self.identitiesCombo.currentText() !=
IrcIdentity.DefaultIdentityDisplay)
self.renameButton.setEnabled(enable)
self.deleteButton.setEnabled(enable)
@pyqtSlot(str)
def on_identitiesCombo_currentIndexChanged(self, identity):
"""
Private slot to handle the selection of an identity.
@param identity selected identity (string)
"""
if identity == IrcIdentity.DefaultIdentityDisplay:
identity = IrcIdentity.DefaultIdentityName
self.__updateIdentitiesButtons()
if self.__currentIdentity and not self.__checkCurrentIdentity():
return
self.__refreshCurrentIdentity()
self.__currentIdentity = self.__identities[identity]
# General Tab
self.realnameEdit.setText(self.__currentIdentity.getRealName())
self.nicknamesList.clear()
self.nicknamesList.addItems(self.__currentIdentity.getNickNames())
self.serviceEdit.setText(self.__currentIdentity.getServiceName())
self.passwordEdit.setText(self.__currentIdentity.getPassword())
# Away Tab
self.rememberPosOnAwayCheckBox.setChecked(
self.__currentIdentity.rememberAwayPosition())
self.awayEdit.setText(self.__currentIdentity.getAwayMessage())
# Advanced Tab
self.identEdit.setText(self.__currentIdentity.getIdent())
self.quitEdit.setText(self.__currentIdentity.getQuitMessage())
self.partEdit.setText(self.__currentIdentity.getPartMessage())
self.__updateIdentitiesButtons()
self.__updateNicknameUpDownButtons()
self.__updateNicknameButtons()
self.identityTabWidget.setCurrentIndex(0)
def __refreshCurrentIdentity(self):
"""
Private method to read back the data for the current identity.
"""
if self.__currentIdentity is None:
return
# General Tab
self.__currentIdentity.setRealName(self.realnameEdit.text())
self.__currentIdentity.setNickNames(
[self.nicknamesList.item(row).text()
for row in range(self.nicknamesList.count())])
self.__currentIdentity.setServiceName(self.serviceEdit.text())
self.__currentIdentity.setPassword(self.passwordEdit.text())
# Away Tab
self.__currentIdentity.setRememberAwayPosition(
self.rememberPosOnAwayCheckBox.isChecked())
self.__currentIdentity.setAwayMessage(self.awayEdit.text())
# Advanced Tab
self.__currentIdentity.setIdent(self.identEdit.text())
self.__currentIdentity.setQuitMessage(self.quitEdit.text())
self.__currentIdentity.setPartMessage(self.partEdit.text())
def __checkCurrentIdentity(self):
"""
Private method to check the data for the current identity.
@return flag indicating a successful check (boolean)
"""
if self.nicknamesList.count() == 0:
E5MessageBox.critical(
self,
self.tr("Edit Identity"),
self.tr(
"""The identity must contain at least one nick name."""))
block = self.identitiesCombo.blockSignals(True)
identity = self.__currentIdentity.getName()
if identity == IrcIdentity.DefaultIdentityName:
identity = IrcIdentity.DefaultIdentityDisplay
self.identitiesCombo.setCurrentIndex(
self.identitiesCombo.findText(identity))
self.identitiesCombo.blockSignals(block)
self.identityTabWidget.setCurrentIndex(0)
self.nicknameEdit.setFocus()
return False
if not self.realnameEdit.text():
E5MessageBox.critical(
self,
self.tr("Edit Identity"),
self.tr("""The identity must have a real name."""))
block = self.identitiesCombo.blockSignals(True)
identity = self.__currentIdentity.getName()
if identity == IrcIdentity.DefaultIdentityName:
identity = IrcIdentity.DefaultIdentityDisplay
self.identitiesCombo.setCurrentIndex(
self.identitiesCombo.findText(identity))
self.identitiesCombo.blockSignals(block)
self.identityTabWidget.setCurrentIndex(0)
self.realnameEdit.setFocus()
return False
return True
@pyqtSlot()
def on_addButton_clicked(self):
"""
        Private slot to add a new identity.
"""
name, ok = QInputDialog.getText(
self,
self.tr("Add Identity"),
self.tr("Identity Name:"),
QLineEdit.Normal)
if ok:
if name:
if name in self.__identities:
E5MessageBox.critical(
self,
self.tr("Add Identity"),
self.tr(
"""An identity named <b>{0}</b> already exists."""
""" You must provide a different name.""").format(
name))
self.on_addButton_clicked()
else:
identity = IrcIdentity(name)
identity.setIdent(Utilities.getUserName())
identity.setRealName(Utilities.getRealName())
self.__identities[name] = identity
self.identitiesCombo.addItem(name)
self.identitiesCombo.setCurrentIndex(
self.identitiesCombo.count() - 1)
else:
E5MessageBox.critical(
self,
self.tr("Add Identity"),
self.tr("""The identity has to have a name."""))
self.on_addButton_clicked()
@pyqtSlot()
def on_copyButton_clicked(self):
"""
Private slot to copy the selected identity.
"""
currentIdentity = self.identitiesCombo.currentText()
name, ok = QInputDialog.getText(
self,
self.tr("Copy Identity"),
self.tr("Identity Name:"),
QLineEdit.Normal,
currentIdentity)
if ok:
if name:
if name in self.__identities:
E5MessageBox.critical(
self,
self.tr("Copy Identity"),
self.tr(
"""An identity named <b>{0}</b> already exists."""
""" You must provide a different name.""").format(
name))
self.on_copyButton_clicked()
else:
identity = copy.deepcopy(self.__currentIdentity)
identity.setName(name)
self.__identities[name] = identity
self.identitiesCombo.addItem(name)
self.identitiesCombo.setCurrentIndex(
self.identitiesCombo.count() - 1)
else:
E5MessageBox.critical(
self,
self.tr("Copy Identity"),
self.tr("""The identity has to have a name."""))
self.on_copyButton_clicked()
@pyqtSlot()
def on_renameButton_clicked(self):
"""
Private slot to rename the selected identity.
"""
currentIdentity = self.identitiesCombo.currentText()
name, ok = QInputDialog.getText(
self,
self.tr("Rename Identity"),
self.tr("Identity Name:"),
QLineEdit.Normal,
currentIdentity)
if ok and name != currentIdentity:
if name:
if name in self.__identities:
E5MessageBox.critical(
self,
self.tr("Rename Identity"),
self.tr(
"""An identity named <b>{0}</b> already exists."""
""" You must provide a different name.""").format(
name))
self.on_renameButton_clicked()
else:
del self.__identities[currentIdentity]
self.__currentIdentity.setName(name)
self.__identities[name] = self.__currentIdentity
self.identitiesCombo.setItemText(
self.identitiesCombo.currentIndex(), name)
else:
E5MessageBox.critical(
self,
self.tr("Copy Identity"),
self.tr("""The identity has to have a name."""))
self.on_renameButton_clicked()
@pyqtSlot()
def on_deleteButton_clicked(self):
"""
        Private slot to delete the selected identity.
"""
currentIdentity = self.identitiesCombo.currentText()
if currentIdentity == IrcIdentity.DefaultIdentityDisplay:
return
inUse = False
for networkName in self.__manager.getNetworkNames():
inUse = (
self.__manager.getNetwork(networkName).getIdentityName() ==
currentIdentity)
if inUse:
break
if inUse:
msg = self.tr(
"""This identity is in use. If you remove it, the network"""
""" settings using it will fall back to the default"""
""" identity. Should it be deleted anyway?""")
else:
msg = self.tr(
"""Do you really want to delete all information for"""
""" this identity?""")
res = E5MessageBox.yesNo(
self,
self.tr("Delete Identity"),
msg,
icon=E5MessageBox.Warning)
if res:
del self.__identities[currentIdentity]
self.identitiesCombo.removeItem(
self.identitiesCombo.findText(currentIdentity))
def __updateNicknameUpDownButtons(self):
"""
Private method to set the enabled state of the nick name up and
down buttons.
"""
if len(self.nicknamesList.selectedItems()) == 0:
self.nicknameUpButton.setEnabled(False)
self.nicknameDownButton.setEnabled(False)
else:
if self.nicknamesList.currentRow() == 0:
self.nicknameUpButton.setEnabled(False)
self.nicknameDownButton.setEnabled(True)
elif self.nicknamesList.currentRow() == \
self.nicknamesList.count() - 1:
self.nicknameUpButton.setEnabled(True)
self.nicknameDownButton.setEnabled(False)
else:
self.nicknameUpButton.setEnabled(True)
self.nicknameDownButton.setEnabled(True)
def __updateNicknameButtons(self):
"""
Private slot to update the nick name buttons except the up and
down buttons.
"""
self.nicknameDeleteButton.setEnabled(
len(self.nicknamesList.selectedItems()) != 0)
self.nicknameAddButton.setEnabled(self.nicknameEdit.text() != "")
@pyqtSlot(str)
def on_nicknameEdit_textEdited(self, nick):
"""
Private slot handling a change of the nick name.
@param nick new nick name (string)
"""
sel = self.nicknamesList.selectedItems()
if sel:
sel[0].setText(nick)
self.__updateNicknameButtons()
@pyqtSlot()
def on_nicknamesList_itemSelectionChanged(self):
"""
Private slot handling the selection of a nick name.
"""
items = self.nicknamesList.selectedItems()
if items:
self.nicknameEdit.setText(items[0].text())
self.__updateNicknameUpDownButtons()
self.__updateNicknameButtons()
self.nicknameEdit.setFocus()
@pyqtSlot()
def on_nicknameAddButton_clicked(self):
"""
Private slot to add a new nickname.
"""
nick = self.nicknameEdit.text()
if nick not in [self.nicknamesList.item(row).text()
for row in range(self.nicknamesList.count())]:
self.nicknamesList.insertItem(0, nick)
self.nicknamesList.setCurrentRow(0, QItemSelectionModel.Clear)
self.nicknameEdit.clear()
self.__updateNicknameButtons()
@pyqtSlot()
def on_nicknameDeleteButton_clicked(self):
"""
Private slot to delete a nick name.
"""
itm = self.nicknamesList.takeItem(self.nicknamesList.currentRow())
del itm
self.__updateNicknameButtons()
@pyqtSlot()
def on_nicknameUpButton_clicked(self):
"""
Private slot to move the selected entry up one row.
"""
row = self.nicknamesList.currentRow()
if row > 0:
itm = self.nicknamesList.takeItem(row)
row -= 1
self.nicknamesList.insertItem(row, itm)
self.nicknamesList.setCurrentItem(itm)
@pyqtSlot()
def on_nicknameDownButton_clicked(self):
"""
Private slot to move the selected entry down one row.
"""
row = self.nicknamesList.currentRow()
if row < self.nicknamesList.count() - 1:
itm = self.nicknamesList.takeItem(row)
row += 1
self.nicknamesList.insertItem(row, itm)
self.nicknamesList.setCurrentItem(itm)
def accept(self):
"""
Public slot handling the acceptance of the dialog.
"""
if not self.__checkCurrentIdentity():
return
self.__refreshCurrentIdentity()
self.__manager.setIdentities(self.__identities)
super(IrcIdentitiesEditDialog, self).accept()
| gpl-3.0 | -3,774,107,163,037,571,000 | 36.628755 | 79 | 0.570117 | false |
attente/snapcraft | snapcraft/tests/test_commands_push.py | 2 | 7654 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import glob
import logging
import os
import os.path
from unittest import mock
import docopt
import fixtures
from snapcraft import (
storeapi,
tests
)
from snapcraft.main import main
from snapcraft.tests import fixture_setup
class PushCommandTestCase(tests.TestCase):
def setUp(self):
super().setUp()
self.fake_logger = fixtures.FakeLogger(level=logging.INFO)
self.useFixture(self.fake_logger)
patcher = mock.patch('snapcraft.internal.lifecycle.ProgressBar')
patcher.start()
self.addCleanup(patcher.stop)
def test_push_without_snap_must_raise_exception(self):
with self.assertRaises(docopt.DocoptExit) as raised:
main(['push'])
self.assertTrue('Usage:' in str(raised.exception))
def test_push_a_snap(self):
self.useFixture(fixture_setup.FakeTerminal())
mock_tracker = mock.Mock(storeapi.StatusTracker)
mock_tracker.track.return_value = {
'code': 'ready_to_release',
'processed': True,
'can_release': True,
'url': '/fake/url',
'revision': 9,
}
patcher = mock.patch.object(storeapi.StoreClient, 'upload')
mock_upload = patcher.start()
self.addCleanup(patcher.stop)
mock_upload.return_value = mock_tracker
# Create a snap
main(['init'])
main(['snap'])
snap_file = glob.glob('*.snap')[0]
# Upload
with mock.patch('snapcraft.storeapi.StatusTracker') as mock_tracker:
main(['push', snap_file])
self.assertIn(
'Uploading my-snap_0_amd64.snap.\n'
'Revision 9 of \'my-snap\' created.',
self.fake_logger.output)
mock_upload.assert_called_once_with('my-snap', snap_file)
def test_push_without_login_must_raise_exception(self):
snap_path = os.path.join(
os.path.dirname(tests.__file__), 'data',
'test-snap.snap')
with self.assertRaises(SystemExit):
main(['push', snap_path])
self.assertIn(
'No valid credentials found. Have you run "snapcraft login"?\n',
self.fake_logger.output)
def test_push_nonexisting_snap_must_raise_exception(self):
with self.assertRaises(SystemExit):
main(['push', 'test-unexisting-snap'])
def test_upload_raises_deprecation_warning(self):
self.useFixture(fixture_setup.FakeTerminal())
mock_tracker = mock.Mock(storeapi.StatusTracker)
mock_tracker.track.return_value = {
'code': 'ready_to_release',
'processed': True,
'can_release': True,
'url': '/fake/url',
'revision': 9,
}
patcher = mock.patch.object(storeapi.StoreClient, 'upload')
mock_upload = patcher.start()
self.addCleanup(patcher.stop)
mock_upload.return_value = mock_tracker
# Create a snap
main(['init'])
main(['snap'])
snap_file = glob.glob('*.snap')[0]
# Upload
with mock.patch('snapcraft.storeapi.StatusTracker') as mock_tracker:
main(['upload', snap_file])
self.assertIn(
'Uploading my-snap_0_amd64.snap.\n'
'Revision 9 of \'my-snap\' created.',
self.fake_logger.output)
mock_upload.assert_called_once_with('my-snap', snap_file)
def test_push_and_release_a_snap(self):
self.useFixture(fixture_setup.FakeTerminal())
mock_tracker = mock.Mock(storeapi.StatusTracker)
mock_tracker.track.return_value = {
'code': 'ready_to_release',
'processed': True,
'can_release': True,
'url': '/fake/url',
'revision': 9,
}
patcher = mock.patch.object(storeapi.StoreClient, 'upload')
mock_upload = patcher.start()
self.addCleanup(patcher.stop)
mock_upload.return_value = mock_tracker
patcher = mock.patch.object(storeapi.StoreClient, 'release')
mock_release = patcher.start()
self.addCleanup(patcher.stop)
mock_release.return_value = {
'opened_channels': ['beta'],
'channel_map': [
{'channel': 'stable', 'info': 'none'},
{'channel': 'candidate', 'info': 'none'},
{'revision': 9, 'channel': 'beta', 'version': '0',
'info': 'specific'},
{'channel': 'edge', 'info': 'tracking'}
]
}
# Create a snap
main(['init'])
main(['snap'])
snap_file = glob.glob('*.snap')[0]
# Upload
with mock.patch('snapcraft.storeapi.StatusTracker') as mock_tracker:
main(['push', snap_file, '--release', 'beta'])
self.assertIn(
'Uploading my-snap_0_amd64.snap.\n'
'Revision 9 of \'my-snap\' created.',
self.fake_logger.output)
mock_upload.assert_called_once_with('my-snap', snap_file)
mock_release.assert_called_once_with('my-snap', 9, ['beta'])
def test_push_and_release_a_snap_to_N_channels(self):
self.useFixture(fixture_setup.FakeTerminal())
mock_tracker = mock.Mock(storeapi.StatusTracker)
mock_tracker.track.return_value = {
'code': 'ready_to_release',
'processed': True,
'can_release': True,
'url': '/fake/url',
'revision': 9,
}
patcher = mock.patch.object(storeapi.StoreClient, 'upload')
mock_upload = patcher.start()
self.addCleanup(patcher.stop)
mock_upload.return_value = mock_tracker
patcher = mock.patch.object(storeapi.StoreClient, 'release')
mock_release = patcher.start()
self.addCleanup(patcher.stop)
mock_release.return_value = {
'opened_channels': ['beta,edge,candidate'],
'channel_map': [
{'channel': 'stable', 'info': 'none'},
{'revision': 9, 'channel': 'candidate', 'version': '0',
'info': 'specific'},
{'revision': 9, 'channel': 'beta', 'version': '0',
'info': 'specific'},
{'revision': 9, 'channel': 'edge', 'version': '0',
'info': 'specific'},
]
}
# Create a snap
main(['init'])
main(['snap'])
snap_file = glob.glob('*.snap')[0]
# Upload
with mock.patch('snapcraft.storeapi.StatusTracker') as mock_tracker:
main(['push', snap_file, '--release', 'edge,beta,candidate'])
self.assertIn(
'Uploading my-snap_0_amd64.snap.\n'
'Revision 9 of \'my-snap\' created.',
self.fake_logger.output)
mock_upload.assert_called_once_with('my-snap', snap_file)
mock_release.assert_called_once_with('my-snap', 9,
['edge', 'beta', 'candidate'])
| gpl-3.0 | -626,307,768,006,419,300 | 33.169643 | 76 | 0.573687 | false |
gvilardo/parser | parser_v2.py | 1 | 2112 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
parser.py - Copyright (C) 2015 - Giorgio Vilardo
parser.py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
parser.py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with parser.py. If not, see http://www.gnu.org/licenses/.
"""
#imports
import os
import sys
import socket
import struct
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
#functions
def main(argv):
if len(sys.argv) == 1:
quit("supply a path to a directory containing .nessus files. quitting.")
listafilessporca = os.listdir(argv[1])
listafilespulita = cleanuplist(listafilessporca)
for file in listafilespulita:
parse(file, argv[1])
def parse(justfile, path):
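    # Extract every host IP from the .nessus report, sort numerically and
    # write the sorted list to '<filename>.txt' in the scanned directory.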
file = path + "\\" + justfile
tree = ET.parse(file)
root = tree.getroot()
IPs = root.findall(".//*[@name='host-ip']")
ip_long_list = []
ip_list = []
for x in xrange(0, len(IPs)):
ip_long_list.append(ip2long(IPs[x].text))
ip_long_list.sort()
for x in xrange(0, len(ip_long_list)):
ip_list.append(long2ip(ip_long_list[x]))
writetofile(ip_list, justfile)
def ip2long(ip_string):
return struct.unpack("!L", socket.inet_aton(ip_string))[0]
def long2ip(ip_long):
return socket.inet_ntoa(struct.pack('!L', ip_long))
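# Example round-trip (illustrative): ip2long("10.0.0.1") == 167772161 and
# long2ip(167772161) == "10.0.0.1"; sorting the long form orders addresses
# numerically rather than lexicographically.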
def cleanuplist(listafiles):
output = []
for file in listafiles:
if ".nessus" in file:
output.append(file)
return output
def writetofile(stufftowrite, filename):
    # Write one IP address per line; the context manager guarantees the
    # file is closed even if a write fails.
    with open(sys.argv[1] + "\\" + filename + ".txt", "w") as output:
        for entry in stufftowrite:
            output.write(entry + "\n")
if __name__ == '__main__':
main(sys.argv)
| gpl-2.0 | 5,031,054,356,693,498,000 | 28.333333 | 74 | 0.693182 | false |