repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
realsaiko/odoo | addons/base_import_module/controllers/main.py | 354 | 1518 | # -*- coding: utf-8 -*-
import functools
import openerp
from openerp.http import Controller, route, request, Response
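# Decorator used by the controller endpoints below: any exception raised by the
# wrapped handler is returned as a plain HTTP 500 response containing the error text.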
def webservice(f):
@functools.wraps(f)
def wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception as e:
return Response(response=str(e), status=500)
return wrap
class ImportModule(Controller):
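# Helper that raises AccessError unless the given (or current) user belongs to base.group_erp_manager.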
def check_user(self, uid=None):
if uid is None:
uid = request.uid
is_admin = request.registry['res.users'].has_group(request.cr, uid, 'base.group_erp_manager')
if not is_admin:
raise openerp.exceptions.AccessError("Only administrators can upload a module")
@route('/base_import_module/login', type='http', auth='none', methods=['POST'])
@webservice
def login(self, login, password, db=None):
if db and db != request.db:
raise Exception("Could not select database '%s'" % db)
uid = request.session.authenticate(request.db, login, password)
if not uid:
return Response(response="Wrong login/password", status=401)
self.check_user(uid)
return "ok"
@route('/base_import_module/upload', type='http', auth='user', methods=['POST'])
@webservice
def upload(self, mod_file=None, force='', **kw):
self.check_user()
force = (force == '1')
return request.registry['ir.module.module'].import_zipfile(request.cr, request.uid, mod_file, force=force, context=request.context)[0]
| agpl-3.0 |
t0mk/ansible | lib/ansible/modules/network/lenovo/cnos_backup.py | 3 | 11204 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Backup Config to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cnos_backup
author: "Dave Kasberg (@dkasberg)"
short_description: Back up the current running or startup configuration to a remote server on devices running Lenovo CNOS
description:
- This module allows you to work with switch configurations. It provides a
way to back up the running or startup configurations of a switch to a
remote server. This is achieved by periodically saving a copy of the
startup or running configuration of the network device to a remote server
using FTP, SFTP, TFTP, or SCP. The first step is to create a directory from
where the remote server can be reached. The next step is to provide the
full file path of the location where the configuration will be backed up.
Authentication details required by the remote server must be provided as
well. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in the local directory from which the playbook is run.
For more information about this module from Lenovo and about customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_backup.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
configType:
description:
- This specifies what type of configuration will be backed up. The
choices are the running or startup configurations. There is no
default value, so it will result in an error if the input is
incorrect.
required: Yes
default: Null
choices: [running-config, startup-config]
protocol:
description:
- This refers to the protocol used by the network device to
interact with the remote server to which the backup
configuration is uploaded. The choices are FTP, SFTP, TFTP, or SCP.
Any other protocol will result in an error. If this parameter is
not specified, there is no default value to be used.
required: Yes
default: Null
choices: [SFTP, SCP, FTP, TFTP]
serverip:
description:
- This specifies the IP address of the remote server to which the
configuration will be backed up.
required: Yes
default: Null
rcpath:
description:
- This specifies the full file path where the configuration file
will be copied on the remote server. If a relative path is
used as the variable value, the root folder for the server
user needs to be specified.
required: Yes
default: Null
serverusername:
description:
- Specify the username for the server relating to the protocol
used.
required: Yes
default: Null
serverpassword:
description:
- Specify the password for the server relating to the protocol
used.
required: Yes
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_backup. These are written in the main.yml file of the tasks directory.
---
- name: Test Running Config Backup
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Startup Config Backup
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Running Config Backup -TFTP
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Startup Config Backup - TFTP
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
'''
RETURN = '''
---
return value: |
On successful execution, the method returns a message in JSON format
[Config file transferred to server]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
configType=dict(required=True),
protocol=dict(required=True),
serverip=dict(required=True),
rcpath=dict(required=True),
serverusername=dict(required=False),
serverpassword=dict(required=False, no_log=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
outputfile = module.params['outputfile']
host = module.params['host']
deviceType = module.params['deviceType']
configType = module.params['configType']
protocol = module.params['protocol'].lower()
rcserverip = module.params['serverip']
rcpath = module.params['rcpath']
serveruser = module.params['serverusername']
serverpwd = module.params['serverpassword']
output = ""
timeout = 90
tftptimeout = 450
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in
# your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(host, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
#
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + \
cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + \
cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Invoke method for config transfer from server
if(configType == 'running-config'):
if(protocol == "tftp" or protocol == "ftp"):
transfer_status = cnos.doRunningConfigBackUp(
protocol, tftptimeout, rcserverip, rcpath, serveruser,
serverpwd, remote_conn)
elif(protocol == "sftp" or protocol == "scp"):
transfer_status = cnos.doSecureRunningConfigBackUp(
protocol, timeout, rcserverip, rcpath, serveruser,
serverpwd, remote_conn)
else:
transfer_status = "Invalid Protocol option"
elif(configType == 'startup-config'):
if(protocol == "tftp" or protocol == "ftp"):
transfer_status = cnos.doStartupConfigBackUp(
protocol, tftptimeout, rcserverip, rcpath, serveruser,
serverpwd, remote_conn)
elif(protocol == "sftp" or protocol == "scp"):
transfer_status = cnos.doSecureStartupConfigBackUp(
protocol, timeout, rcserverip, rcpath, serveruser, serverpwd,
remote_conn)
else:
transfer_status = "Invalid Protocol option"
else:
transfer_status = "Invalid configType Option"
output = output + "\n Config Back Up status \n" + transfer_status
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Config file transferred to server")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 |
iemejia/beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 5 | 4390 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 + .42193i
"""
# pytype: skip-file
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
"""Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0)
def get_julia_set_point_color(element, c, n, max_iterations):
"""Given an pixel, convert it into a point in our julia set."""
x, y = element
z = from_pixel(x, y, n)
for i in range(max_iterations):
if z.real * z.real + z.imag * z.imag > 2.0:
break
z = z * z + c
return x, y, i # pylint: disable=undefined-loop-variable
def generate_julia_set_colors(pipeline, c, n, max_iterations):
"""Compute julia set coordinates for each point in our set."""
def point_set(n):
for x in range(n):
for y in range(n):
yield (x, y)
julia_set_colors = (
pipeline
| 'add points' >> beam.Create(point_set(n))
| beam.Map(get_julia_set_point_color, c, n, max_iterations))
return julia_set_colors
def generate_julia_set_visualization(data, n, max_iterations):
"""Generate the pixel matrix for rendering the julia set as an image."""
import numpy as np # pylint: disable=wrong-import-order, wrong-import-position
colors = []
for r in range(0, 256, 16):
for g in range(0, 256, 16):
for b in range(0, 256, 16):
colors.append((r, g, b))
xy = np.zeros((n, n, 3), dtype=np.uint8)
for x, y, iteration in data:
xy[x, y] = colors[iteration * len(colors) // max_iterations]
return xy
def save_julia_set_visualization(out_file, image_array):
"""Save the fractal image of our julia set as a png."""
from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position
plt.imsave(out_file, image_array, format='png')
def run(argv=None): # pylint: disable=missing-docstring
parser = argparse.ArgumentParser()
parser.add_argument(
'--grid_size',
dest='grid_size',
default=1000,
help='Size of the NxN matrix')
parser.add_argument(
'--coordinate_output',
dest='coordinate_output',
required=True,
help='Output file to write the color coordinates of the image to.')
parser.add_argument(
'--image_output',
dest='image_output',
default=None,
help='Output file to write the resulting image to.')
known_args, pipeline_args = parser.parse_known_args(argv)
with beam.Pipeline(argv=pipeline_args) as p:
n = int(known_args.grid_size)
coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
def x_coord_key(x_y_i):
(x, y, i) = x_y_i
return (x, (x, y, i))
# Group each coordinate triplet by its x value, then write the coordinates
# to the output file with an x-coordinate grouping per line.
# pylint: disable=expression-not-assigned
(
coordinates
| 'x coord key' >> beam.Map(x_coord_key)
| 'x coord' >> beam.GroupByKey()
| 'format' >> beam.Map(
lambda k_coords: ' '.join('(%s, %s, %s)' % c for c in k_coords[1]))
| WriteToText(known_args.coordinate_output))
# Optionally render the image and save it to a file.
# TODO(silviuc): Add this functionality.
# if p.options.image_output is not None:
# julia_set_image = generate_julia_set_visualization(
# file_with_coordinates, n, 100)
# save_julia_set_visualization(p.options.image_output, julia_set_image)
| apache-2.0 |
jinlmsft/kubernetes | hack/verify-flags-underscore.py | 169 | 9059 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <[email protected]>
@author: Jorge Orpinel <[email protected]>"""
try:
with open(pathname, 'r') as f:
CHUNKSIZE = 1024
while 1:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
except:
return True
return False
def get_all_files(rootdir):
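# Walk rootdir and collect every non-binary file, skipping vendored, generated and VCS directories.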
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'vendor' in dirs:
dirs.remove('vendor')
if 'staging' in dirs:
dirs.remove('staging')
if '_output' in dirs:
dirs.remove('_output')
if '_gopath' in dirs:
dirs.remove('_gopath')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if '.make' in dirs:
dirs.remove('.make')
if 'BUILD' in files:
files.remove('BUILD')
if 'exceptions.txt' in files:
files.remove('exceptions.txt')
if 'known-flags.txt' in files:
files.remove('known-flags.txt')
for name in files:
pathname = os.path.join(root, name)
if is_binary(pathname):
continue
all_files.append(pathname)
return all_files
def normalize_files(rootdir, files):
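# Drop files under excluded paths or with documentation-style extensions, then make the remaining paths absolute.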
newfiles = []
a = ['Godeps', '_gopath', 'third_party', '.git', 'exceptions.txt', 'known-flags.txt']
for f in files:
if any(x in f for x in a):
continue
if f.endswith(".svg"):
continue
if f.endswith(".gliffy"):
continue
if f.endswith(".md"):
continue
if f.endswith(".yaml"):
continue
newfiles.append(f)
for i, f in enumerate(newfiles):
if not os.path.isabs(f):
newfiles[i] = os.path.join(rootdir, f)
return newfiles
def line_has_bad_flag(line, flagre):
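# A line is "bad" if it contains a declared flag spelled with an underscore, unless the
# match looks like a salt/jinja template variable (the patterns excluded below).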
results = flagre.findall(line)
for result in results:
if not "_" in result:
return False
# this should exclude many cases where jinja2 templates use kube flags
# as variables, except it uses _ for the variable name
if "{% set" + result + "= \"" in line:
return False
if "pillar[" + result + "]" in line:
return False
if "grains" + result in line:
return False
# something common in juju variables...
if "template_data[" + result + "]" in line:
return False
return True
return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
# preload the 'known' flags
pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
f = open(pathname, 'r')
flags = set(f.read().splitlines())
f.close()
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
new_flags = set()
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if not "-" in flag:
continue
if flag not in flags:
new_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
l = list(new_excluded_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
if len(new_flags) != 0:
print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
l = list(new_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
return list(flags)
def flags_to_re(flags):
"""turn the list of all flags we found into a regex find both - and _ versions"""
dashRE = re.compile('[-_]')
flagREs = []
for flag in flags:
# turn all flag names into regexs which will find both types
newre = dashRE.sub('[-_]', flag)
# only match if there is not a leading or trailing alphanumeric character
flagREs.append("[^\w${]" + newre + "[^\w]")
# turn that list of regex strings into a single large RE
flagRE = "|".join(flagREs)
flagRE = re.compile(flagRE)
return flagRE
def load_exceptions(rootdir):
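# Read hack/verify-flags/exceptions.txt into a set of (filename, line) tuples, unless --skip-exceptions was given.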
exceptions = set()
if args.skip_exceptions:
return exceptions
exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
exception_file = open(exception_filename, 'r')
for exception in exception_file.read().splitlines():
out = exception.split(":", 1)
if len(out) != 2:
print("Invalid line in exceptions file: %s" % exception)
continue
filename = out[0]
line = out[1]
exceptions.add((filename, line))
return exceptions
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
exceptions = load_exceptions(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
files = normalize_files(rootdir, files)
flags = get_flags(rootdir, files)
flagRE = flags_to_re(flags)
bad_lines = []
# walk all the file looking for any flag that was declared and now has an _
for pathname in files:
relname = os.path.relpath(pathname, rootdir)
f = open(pathname, 'r')
for line in f.read().splitlines():
if line_has_bad_flag(line, flagRE):
if (relname, line) not in exceptions:
bad_lines.append((relname, line))
f.close()
if len(bad_lines) != 0:
if not args.skip_exceptions:
print("Found illegal 'flag' usage. If these are false negatives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
bad_lines.sort()
for (relname, line) in bad_lines:
print("%s:%s" % (relname, line))
return 1
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
jazkarta/edx-platform-for-isc | common/lib/xmodule/xmodule/modulestore/tests/test_contentstore.py | 87 | 8284 | """
Test contentstore.mongo functionality
"""
import logging
from uuid import uuid4
import unittest
import mimetypes
from tempfile import mkdtemp
import path
import shutil
from opaque_keys.edx.locator import CourseLocator, AssetLocator
from opaque_keys.edx.keys import AssetKey
from xmodule.tests import DATA_DIR
from xmodule.contentstore.mongo import MongoContentStore
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
import ddt
from __builtin__ import delattr
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
log = logging.getLogger(__name__)
HOST = MONGO_HOST
PORT = MONGO_PORT_NUM
DB = 'test_mongo_%s' % uuid4().hex[:5]
@ddt.ddt
class TestContentstore(unittest.TestCase):
"""
Test the methods in contentstore.mongo using deprecated and non-deprecated keys
"""
# these 2 class vars hold the original 'deprecated' values so that tearDownClass can restore behavior once the tests are done
asset_deprecated = None
ssck_deprecated = None
@classmethod
def tearDownClass(cls):
"""
Restores deprecated values
"""
if cls.asset_deprecated is not None:
setattr(AssetLocator, 'deprecated', cls.asset_deprecated)
else:
delattr(AssetLocator, 'deprecated')
if cls.ssck_deprecated is not None:
setattr(CourseLocator, 'deprecated', cls.ssck_deprecated)
else:
delattr(CourseLocator, 'deprecated')
return super(TestContentstore, cls).tearDownClass()
def set_up_assets(self, deprecated):
"""
Setup contentstore w/ proper overriding of deprecated.
"""
# since MongoModuleStore and MongoContentStore are basically assumed to be together, create this class
# as well
self.contentstore = MongoContentStore(HOST, DB, port=PORT)
self.addCleanup(self.contentstore._drop_database) # pylint: disable=protected-access
setattr(AssetLocator, 'deprecated', deprecated)
setattr(CourseLocator, 'deprecated', deprecated)
self.course1_key = CourseLocator('test', 'asset_test', '2014_07')
self.course2_key = CourseLocator('test', 'asset_test2', '2014_07')
self.course1_files = ['contains.sh', 'picture1.jpg', 'picture2.jpg']
self.course2_files = ['picture1.jpg', 'picture3.jpg', 'door_2.ogg']
def load_assets(course_key, files):
locked = False
for filename in files:
asset_key = course_key.make_asset_key('asset', filename)
self.save_asset(filename, asset_key, filename, locked)
locked = not locked
load_assets(self.course1_key, self.course1_files)
load_assets(self.course2_key, self.course2_files)
def save_asset(self, filename, asset_key, displayname, locked):
"""
Load and save the given file.
"""
with open("{}/static/{}".format(DATA_DIR, filename), "rb") as f:
content = StaticContent(
asset_key, displayname, mimetypes.guess_type(filename)[0], f.read(),
locked=locked
)
self.contentstore.save(content)
@ddt.data(True, False)
def test_delete(self, deprecated):
"""
Test that deleting assets works
"""
self.set_up_assets(deprecated)
asset_key = self.course1_key.make_asset_key('asset', self.course1_files[0])
self.contentstore.delete(asset_key)
with self.assertRaises(NotFoundError):
self.contentstore.find(asset_key)
# ensure deleting a non-existent file is a noop
self.contentstore.delete(asset_key)
@ddt.data(True, False)
def test_find(self, deprecated):
"""
Test using find
"""
self.set_up_assets(deprecated)
asset_key = self.course1_key.make_asset_key('asset', self.course1_files[0])
self.assertIsNotNone(self.contentstore.find(asset_key), "Could not find {}".format(asset_key))
self.assertIsNotNone(self.contentstore.find(asset_key, as_stream=True), "Could not find {}".format(asset_key))
unknown_asset = self.course1_key.make_asset_key('asset', 'no_such_file.gif')
with self.assertRaises(NotFoundError):
self.contentstore.find(unknown_asset)
self.assertIsNone(
self.contentstore.find(unknown_asset, throw_on_not_found=False),
"Found unknown asset {}".format(unknown_asset)
)
@ddt.data(True, False)
def test_export_for_course(self, deprecated):
"""
Test export
"""
self.set_up_assets(deprecated)
root_dir = path.path(mkdtemp())
try:
self.contentstore.export_all_for_course(
self.course1_key, root_dir,
path.path(root_dir / "policy.json"),
)
for filename in self.course1_files:
filepath = path.path(root_dir / filename)
self.assertTrue(filepath.isfile(), "{} is not a file".format(filepath))
for filename in self.course2_files:
if filename not in self.course1_files:
filepath = path.path(root_dir / filename)
self.assertFalse(filepath.isfile(), "{} was unexpectedly exported as a file".format(filepath))
finally:
shutil.rmtree(root_dir)
@ddt.data(True, False)
def test_get_all_content(self, deprecated):
"""
Test get_all_content_for_course
"""
self.set_up_assets(deprecated)
course1_assets, count = self.contentstore.get_all_content_for_course(self.course1_key)
self.assertEqual(count, len(self.course1_files), course1_assets)
for asset in course1_assets:
parsed = AssetKey.from_string(asset['filename'])
self.assertIn(parsed.name, self.course1_files)
course1_assets, __ = self.contentstore.get_all_content_for_course(self.course1_key, 1, 1)
self.assertEqual(len(course1_assets), 1, course1_assets)
fake_course = CourseLocator('test', 'fake', 'non')
course_assets, count = self.contentstore.get_all_content_for_course(fake_course)
self.assertEqual(count, 0)
self.assertEqual(course_assets, [])
@ddt.data(True, False)
def test_attrs(self, deprecated):
"""
Test setting and getting attrs
"""
self.set_up_assets(deprecated)
for filename in self.course1_files:
asset_key = self.course1_key.make_asset_key('asset', filename)
prelocked = self.contentstore.get_attr(asset_key, 'locked', False)
self.contentstore.set_attr(asset_key, 'locked', not prelocked)
self.assertEqual(self.contentstore.get_attr(asset_key, 'locked', False), not prelocked)
@ddt.data(True, False)
def test_copy_assets(self, deprecated):
"""
copy_all_course_assets
"""
self.set_up_assets(deprecated)
dest_course = CourseLocator('test', 'destination', 'copy')
self.contentstore.copy_all_course_assets(self.course1_key, dest_course)
for filename in self.course1_files:
asset_key = self.course1_key.make_asset_key('asset', filename)
dest_key = dest_course.make_asset_key('asset', filename)
source = self.contentstore.find(asset_key)
copied = self.contentstore.find(dest_key)
for propname in ['name', 'content_type', 'length', 'locked']:
self.assertEqual(getattr(source, propname), getattr(copied, propname))
__, count = self.contentstore.get_all_content_for_course(dest_course)
self.assertEqual(count, len(self.course1_files))
@ddt.data(True, False)
def test_delete_assets(self, deprecated):
"""
delete_all_course_assets
"""
self.set_up_assets(deprecated)
self.contentstore.delete_all_course_assets(self.course1_key)
__, count = self.contentstore.get_all_content_for_course(self.course1_key)
self.assertEqual(count, 0)
# ensure it didn't remove any from other course
__, count = self.contentstore.get_all_content_for_course(self.course2_key)
self.assertEqual(count, len(self.course2_files))
| agpl-3.0 |
markhamstra/spark | python/pyspark/sql/column.py | 28 | 25024 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import json
if sys.version >= '3':
basestring = str
long = int
from pyspark import copy_func, since
from pyspark.context import SparkContext
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.types import *
__all__ = ["Column"]
def _create_column_from_literal(literal):
sc = SparkContext._active_spark_context
return sc._jvm.functions.lit(literal)
def _create_column_from_name(name):
sc = SparkContext._active_spark_context
return sc._jvm.functions.col(name)
def _to_java_column(col):
if isinstance(col, Column):
jcol = col._jc
elif isinstance(col, basestring):
jcol = _create_column_from_name(col)
else:
raise TypeError(
"Invalid argument, not a string or column: "
"{0} of type {1}. "
"For column literals, use 'lit', 'array', 'struct' or 'create_map' "
"function.".format(col, type(col)))
return jcol
def _to_seq(sc, cols, converter=None):
"""
Convert a list of Column (or names) into a JVM Seq of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toSeq(cols)
def _to_list(sc, cols, converter=None):
"""
Convert a list of Column (or names) into a JVM (Scala) List of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toList(cols)
def _unary_op(name, doc="unary operator"):
""" Create a method for given unary operator """
def _(self):
jc = getattr(self._jc, name)()
return Column(jc)
_.__doc__ = doc
return _
def _func_op(name, doc=''):
def _(self):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(self._jc)
return Column(jc)
_.__doc__ = doc
return _
def _bin_func_op(name, reverse=False, doc="binary function"):
def _(self, other):
sc = SparkContext._active_spark_context
fn = getattr(sc._jvm.functions, name)
jc = other._jc if isinstance(other, Column) else _create_column_from_literal(other)
njc = fn(self._jc, jc) if not reverse else fn(jc, self._jc)
return Column(njc)
_.__doc__ = doc
return _
def _bin_op(name, doc="binary operator"):
""" Create a method for given binary operator
"""
def _(self, other):
jc = other._jc if isinstance(other, Column) else other
njc = getattr(self._jc, name)(jc)
return Column(njc)
_.__doc__ = doc
return _
def _reverse_op(name, doc="binary operator"):
""" Create a method for binary operator (this object is on right side)
"""
def _(self, other):
jother = _create_column_from_literal(other)
jc = getattr(jother, name)(self._jc)
return Column(jc)
_.__doc__ = doc
return _
class Column(object):
"""
A column in a DataFrame.
:class:`Column` instances can be created by::
# 1. Select a column out of a DataFrame
df.colName
df["colName"]
# 2. Create from an expression
df.colName + 1
1 / df.colName
.. versionadded:: 1.3
"""
def __init__(self, jc):
self._jc = jc
# arithmetic operators
__neg__ = _func_op("negate")
__add__ = _bin_op("plus")
__sub__ = _bin_op("minus")
__mul__ = _bin_op("multiply")
__div__ = _bin_op("divide")
__truediv__ = _bin_op("divide")
__mod__ = _bin_op("mod")
__radd__ = _bin_op("plus")
__rsub__ = _reverse_op("minus")
__rmul__ = _bin_op("multiply")
__rdiv__ = _reverse_op("divide")
__rtruediv__ = _reverse_op("divide")
__rmod__ = _reverse_op("mod")
__pow__ = _bin_func_op("pow")
__rpow__ = _bin_func_op("pow", reverse=True)
# comparison operators
__eq__ = _bin_op("equalTo")
__ne__ = _bin_op("notEqual")
__lt__ = _bin_op("lt")
__le__ = _bin_op("leq")
__ge__ = _bin_op("geq")
__gt__ = _bin_op("gt")
_eqNullSafe_doc = """
Equality test that is safe for null values.
:param other: a value or :class:`Column`
>>> from pyspark.sql import Row
>>> df1 = spark.createDataFrame([
... Row(id=1, value='foo'),
... Row(id=2, value=None)
... ])
>>> df1.select(
... df1['value'] == 'foo',
... df1['value'].eqNullSafe('foo'),
... df1['value'].eqNullSafe(None)
... ).show()
+-------------+---------------+----------------+
|(value = foo)|(value <=> foo)|(value <=> NULL)|
+-------------+---------------+----------------+
| true| true| false|
| null| false| true|
+-------------+---------------+----------------+
>>> df2 = spark.createDataFrame([
... Row(value = 'bar'),
... Row(value = None)
... ])
>>> df1.join(df2, df1["value"] == df2["value"]).count()
0
>>> df1.join(df2, df1["value"].eqNullSafe(df2["value"])).count()
1
>>> df2 = spark.createDataFrame([
... Row(id=1, value=float('NaN')),
... Row(id=2, value=42.0),
... Row(id=3, value=None)
... ])
>>> df2.select(
... df2['value'].eqNullSafe(None),
... df2['value'].eqNullSafe(float('NaN')),
... df2['value'].eqNullSafe(42.0)
... ).show()
+----------------+---------------+----------------+
|(value <=> NULL)|(value <=> NaN)|(value <=> 42.0)|
+----------------+---------------+----------------+
| false| true| false|
| false| false| true|
| true| false| false|
+----------------+---------------+----------------+
.. note:: Unlike Pandas, PySpark doesn't consider NaN values to be NULL.
See the `NaN Semantics`_ for details.
.. _NaN Semantics:
https://spark.apache.org/docs/latest/sql-programming-guide.html#nan-semantics
.. versionadded:: 2.3.0
"""
eqNullSafe = _bin_op("eqNullSafe", _eqNullSafe_doc)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _bin_op('and')
__or__ = _bin_op('or')
__invert__ = _func_op('not')
__rand__ = _bin_op("and")
__ror__ = _bin_op("or")
# container operators
def __contains__(self, item):
raise ValueError("Cannot apply 'in' operator against a column: please use 'contains' "
"in a string column or 'array_contains' function for an array column.")
# bitwise operators
_bitwiseOR_doc = """
Compute bitwise OR of this expression with another expression.
:param other: a value or :class:`Column` to calculate bitwise or(|) against
this :class:`Column`.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseOR(df.b)).collect()
[Row((a | b)=235)]
"""
_bitwiseAND_doc = """
Compute bitwise AND of this expression with another expression.
:param other: a value or :class:`Column` to calculate bitwise and(&) against
this :class:`Column`.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseAND(df.b)).collect()
[Row((a & b)=10)]
"""
_bitwiseXOR_doc = """
Compute bitwise XOR of this expression with another expression.
:param other: a value or :class:`Column` to calculate bitwise xor(^) against
this :class:`Column`.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseXOR(df.b)).collect()
[Row((a ^ b)=225)]
"""
bitwiseOR = _bin_op("bitwiseOR", _bitwiseOR_doc)
bitwiseAND = _bin_op("bitwiseAND", _bitwiseAND_doc)
bitwiseXOR = _bin_op("bitwiseXOR", _bitwiseXOR_doc)
@since(1.3)
def getItem(self, key):
"""
An expression that gets an item at position ``ordinal`` out of a list,
or gets an item by key out of a dict.
>>> df = spark.createDataFrame([([1, 2], {"key": "value"})], ["l", "d"])
>>> df.select(df.l.getItem(0), df.d.getItem("key")).show()
+----+------+
|l[0]|d[key]|
+----+------+
| 1| value|
+----+------+
>>> df.select(df.l[0], df.d["key"]).show()
+----+------+
|l[0]|d[key]|
+----+------+
| 1| value|
+----+------+
"""
return self[key]
@since(1.3)
def getField(self, name):
"""
An expression that gets a field by name in a StructField.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(r=Row(a=1, b="b"))])
>>> df.select(df.r.getField("b")).show()
+---+
|r.b|
+---+
| b|
+---+
>>> df.select(df.r.a).show()
+---+
|r.a|
+---+
| 1|
+---+
"""
return self[name]
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
return self.getField(item)
def __getitem__(self, k):
if isinstance(k, slice):
if k.step is not None:
raise ValueError("slice with step is not supported.")
return self.substr(k.start, k.stop)
else:
return _bin_op("apply")(self, k)
def __iter__(self):
raise TypeError("Column is not iterable")
# string methods
_contains_doc = """
Contains the other element. Returns a boolean :class:`Column` based on a string match.
:param other: string in line
>>> df.filter(df.name.contains('o')).collect()
[Row(age=5, name=u'Bob')]
"""
_rlike_doc = """
SQL RLIKE expression (LIKE with Regex). Returns a boolean :class:`Column` based on a regex
match.
:param other: an extended regex expression
>>> df.filter(df.name.rlike('ice$')).collect()
[Row(age=2, name=u'Alice')]
"""
_like_doc = """
SQL like expression. Returns a boolean :class:`Column` based on a SQL LIKE match.
:param other: a SQL LIKE pattern
See :func:`rlike` for a regex version
>>> df.filter(df.name.like('Al%')).collect()
[Row(age=2, name=u'Alice')]
"""
_startswith_doc = """
String starts with. Returns a boolean :class:`Column` based on a string match.
:param other: string at start of line (do not use a regex `^`)
>>> df.filter(df.name.startswith('Al')).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter(df.name.startswith('^Al')).collect()
[]
"""
_endswith_doc = """
String ends with. Returns a boolean :class:`Column` based on a string match.
:param other: string at end of line (do not use a regex `$`)
>>> df.filter(df.name.endswith('ice')).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter(df.name.endswith('ice$')).collect()
[]
"""
contains = ignore_unicode_prefix(_bin_op("contains", _contains_doc))
rlike = ignore_unicode_prefix(_bin_op("rlike", _rlike_doc))
like = ignore_unicode_prefix(_bin_op("like", _like_doc))
startswith = ignore_unicode_prefix(_bin_op("startsWith", _startswith_doc))
endswith = ignore_unicode_prefix(_bin_op("endsWith", _endswith_doc))
@ignore_unicode_prefix
@since(1.3)
def substr(self, startPos, length):
"""
Return a :class:`Column` which is a substring of the column.
:param startPos: start position (int or Column)
:param length: length of the substring (int or Column)
>>> df.select(df.name.substr(1, 3).alias("col")).collect()
[Row(col=u'Ali'), Row(col=u'Bob')]
"""
if type(startPos) != type(length):
raise TypeError(
"startPos and length must be the same type. "
"Got {startPos_t} and {length_t}, respectively."
.format(
startPos_t=type(startPos),
length_t=type(length),
))
if isinstance(startPos, int):
jc = self._jc.substr(startPos, length)
elif isinstance(startPos, Column):
jc = self._jc.substr(startPos._jc, length._jc)
else:
raise TypeError("Unexpected type: %s" % type(startPos))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def isin(self, *cols):
"""
A boolean expression that is evaluated to true if the value of this
expression is contained by the evaluated values of the arguments.
>>> df[df.name.isin("Bob", "Mike")].collect()
[Row(age=5, name=u'Bob')]
>>> df[df.age.isin([1, 2, 3])].collect()
[Row(age=2, name=u'Alice')]
"""
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
cols = [c._jc if isinstance(c, Column) else _create_column_from_literal(c) for c in cols]
sc = SparkContext._active_spark_context
jc = getattr(self._jc, "isin")(_to_seq(sc, cols))
return Column(jc)
# order
_asc_doc = """
Returns a sort expression based on ascending order of the column.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc()).collect()
[Row(name=u'Alice'), Row(name=u'Tom')]
"""
_asc_nulls_first_doc = """
Returns a sort expression based on ascending order of the column, and null values
return before non-null values.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc_nulls_first()).collect()
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')]
.. versionadded:: 2.4
"""
_asc_nulls_last_doc = """
Returns a sort expression based on ascending order of the column, and null values
appear after non-null values.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc_nulls_last()).collect()
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)]
.. versionadded:: 2.4
"""
_desc_doc = """
Returns a sort expression based on the descending order of the column.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc()).collect()
[Row(name=u'Tom'), Row(name=u'Alice')]
"""
_desc_nulls_first_doc = """
Returns a sort expression based on the descending order of the column, and null values
appear before non-null values.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc_nulls_first()).collect()
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')]
.. versionadded:: 2.4
"""
_desc_nulls_last_doc = """
Returns a sort expression based on the descending order of the column, and null values
appear after non-null values.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc_nulls_last()).collect()
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)]
.. versionadded:: 2.4
"""
asc = ignore_unicode_prefix(_unary_op("asc", _asc_doc))
asc_nulls_first = ignore_unicode_prefix(_unary_op("asc_nulls_first", _asc_nulls_first_doc))
asc_nulls_last = ignore_unicode_prefix(_unary_op("asc_nulls_last", _asc_nulls_last_doc))
desc = ignore_unicode_prefix(_unary_op("desc", _desc_doc))
desc_nulls_first = ignore_unicode_prefix(_unary_op("desc_nulls_first", _desc_nulls_first_doc))
desc_nulls_last = ignore_unicode_prefix(_unary_op("desc_nulls_last", _desc_nulls_last_doc))
_isNull_doc = """
True if the current expression is null.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(name=u'Tom', height=80), Row(name=u'Alice', height=None)])
>>> df.filter(df.height.isNull()).collect()
[Row(height=None, name=u'Alice')]
"""
_isNotNull_doc = """
True if the current expression is NOT null.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(name=u'Tom', height=80), Row(name=u'Alice', height=None)])
>>> df.filter(df.height.isNotNull()).collect()
[Row(height=80, name=u'Tom')]
"""
isNull = ignore_unicode_prefix(_unary_op("isNull", _isNull_doc))
isNotNull = ignore_unicode_prefix(_unary_op("isNotNull", _isNotNull_doc))
@since(1.3)
def alias(self, *alias, **kwargs):
"""
Returns this column aliased with a new name or names (in the case of expressions that
return more than one column, such as explode).
:param alias: strings of desired column names (collects all positional arguments passed)
:param metadata: a dict of information to be stored in ``metadata`` attribute of the
corresponding :class: `StructField` (optional, keyword only argument)
.. versionchanged:: 2.2
Added optional ``metadata`` argument.
>>> df.select(df.age.alias("age2")).collect()
[Row(age2=2), Row(age2=5)]
>>> df.select(df.age.alias("age3", metadata={'max': 99})).schema['age3'].metadata['max']
99
"""
metadata = kwargs.pop('metadata', None)
assert not kwargs, 'Unexpected kwargs were passed: %s' % kwargs
sc = SparkContext._active_spark_context
if len(alias) == 1:
if metadata:
jmeta = sc._jvm.org.apache.spark.sql.types.Metadata.fromJson(
json.dumps(metadata))
return Column(getattr(self._jc, "as")(alias[0], jmeta))
else:
return Column(getattr(self._jc, "as")(alias[0]))
else:
if metadata:
raise ValueError('metadata can only be provided for a single column')
return Column(getattr(self._jc, "as")(_to_seq(sc, list(alias))))
name = copy_func(alias, sinceversion=2.0, doc=":func:`name` is an alias for :func:`alias`.")
@ignore_unicode_prefix
@since(1.3)
def cast(self, dataType):
""" Convert the column into type ``dataType``.
>>> df.select(df.age.cast("string").alias('ages')).collect()
[Row(ages=u'2'), Row(ages=u'5')]
>>> df.select(df.age.cast(StringType()).alias('ages')).collect()
[Row(ages=u'2'), Row(ages=u'5')]
"""
if isinstance(dataType, basestring):
jc = self._jc.cast(dataType)
elif isinstance(dataType, DataType):
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
jdt = spark._jsparkSession.parseDataType(dataType.json())
jc = self._jc.cast(jdt)
else:
raise TypeError("unexpected type: %s" % type(dataType))
return Column(jc)
astype = copy_func(cast, sinceversion=1.4, doc=":func:`astype` is an alias for :func:`cast`.")
@since(1.3)
def between(self, lowerBound, upperBound):
"""
A boolean expression that is evaluated to true if the value of this
expression is between the given columns.
>>> df.select(df.name, df.age.between(2, 4)).show()
+-----+---------------------------+
| name|((age >= 2) AND (age <= 4))|
+-----+---------------------------+
|Alice| true|
| Bob| false|
+-----+---------------------------+
"""
return (self >= lowerBound) & (self <= upperBound)
@since(1.4)
def when(self, condition, value):
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> from pyspark.sql import functions as F
>>> df.select(df.name, F.when(df.age > 4, 1).when(df.age < 3, -1).otherwise(0)).show()
+-----+------------------------------------------------------------+
| name|CASE WHEN (age > 4) THEN 1 WHEN (age < 3) THEN -1 ELSE 0 END|
+-----+------------------------------------------------------------+
|Alice| -1|
| Bob| 1|
+-----+------------------------------------------------------------+
"""
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = self._jc.when(condition._jc, v)
return Column(jc)
@since(1.4)
def otherwise(self, value):
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param value: a literal value, or a :class:`Column` expression.
>>> from pyspark.sql import functions as F
>>> df.select(df.name, F.when(df.age > 3, 1).otherwise(0)).show()
+-----+-------------------------------------+
| name|CASE WHEN (age > 3) THEN 1 ELSE 0 END|
+-----+-------------------------------------+
|Alice| 0|
| Bob| 1|
+-----+-------------------------------------+
"""
v = value._jc if isinstance(value, Column) else value
jc = self._jc.otherwise(v)
return Column(jc)
@since(1.4)
def over(self, window):
"""
Define a windowing column.
:param window: a :class:`WindowSpec`
:return: a Column
>>> from pyspark.sql import Window
>>> window = Window.partitionBy("name").orderBy("age").rowsBetween(-1, 1)
>>> from pyspark.sql.functions import rank, min
>>> # df.select(rank().over(window), min('age').over(window))
"""
from pyspark.sql.window import WindowSpec
if not isinstance(window, WindowSpec):
raise TypeError("window should be WindowSpec")
jc = self._jc.over(window._jspec)
return Column(jc)
def __nonzero__(self):
raise ValueError("Cannot convert column into bool: please use '&' for 'and', '|' for 'or', "
"'~' for 'not' when building DataFrame boolean expressions.")
__bool__ = __nonzero__
def __repr__(self):
return 'Column<%s>' % self._jc.toString().encode('utf8')
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.column
globs = pyspark.sql.column.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.column tests")\
.getOrCreate()
sc = spark.sparkContext
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
(failure_count, test_count) = doctest.testmod(
pyspark.sql.column, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
ivoire/DataTag | DataTag/config.py | 1 | 5663 | # -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2015 Rémi Duraffort
# This file is part of DataTag.
#
# DataTag is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DataTag is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with DataTag. If not, see <http://www.gnu.org/licenses/>
from __future__ import unicode_literals
import yaml
class CategoryConf(object):
def __init__(self, name, description):
self.name = name
self.description = description
class MediaConf(object):
def __init__(self, pattern, tags, description):
self.pattern = pattern
self.tags = tags
self.description = description
class TagConf(object):
def __init__(self, name, description, shortname, groups, category, public):
self.name = name
self.description = description
self.shortname = shortname
self.groups = groups
self.category = category
self.public = public
class Configuration(object):
def __init__(self):
self.medias = []
self.tags = {}
self.categories = {}
self.exclude = []
self.default_groups = []
def load(self, filename):
try:
# Load the configuration file
with open(filename, 'r') as fin:
y_conf = yaml.safe_load(fin)  # safe_load: the configuration is plain YAML, no custom tags needed
# Load the medias
for media in y_conf.get('medias', []):
pattern = media['pattern']
if not isinstance(pattern, list):
pattern = [pattern]
self.medias.append(MediaConf(pattern,
media.get('tags', []),
media.get('description', None)))
# Load the tags
tags = y_conf.get('tags', {})
for tag_name in tags:
tag = tags[tag_name]
self.tags[tag_name] = TagConf(tag_name,
tag.get('description', None),
tag.get('shortname', None),
set(tag.get('groups', [])),
tag.get('category', None),
tag.get('public', False))
# Load categories
categories = y_conf.get('categories', {})
for category_name in categories:
category = categories[category_name]
self.categories[category_name] = CategoryConf(
category_name,
category.get('description', None))
# Load excludes and default groups
for exclude in y_conf.get('exclude', []):
self.exclude.append(exclude)
for group_name in y_conf.get('defaults', {}).get('groups', []):
self.default_groups.append(group_name)
except IOError:
pass
def media_tags(self):
tags = set()
for pattern in self.medias:
tags.update(pattern.tags)
return tags
def tag_set(self):
return set(self.tags.keys())
def dump(self, filename):
medias = []
tags = {}
categories = {}
# Create the list of media dicts
for media in self.medias:
new_media = {'pattern': media.pattern}
if media.tags:
new_media['tags'] = media.tags
if media.description:
new_media['description'] = media.description
medias.append(new_media)
# Create the list of categories
for cat_name in self.categories:
cat = self.categories[cat_name]
categories[cat_name] = {}
if cat.description:
categories[cat_name]['description'] = cat.description
# Create the list of tags dict
for tag_name in self.tags:
tag = self.tags[tag_name]
tags[tag.name] = {}
if tag.description:
tags[tag.name]['description'] = tag.description
if tag.shortname:
tags[tag.name]['shortname'] = tag.shortname
if tag.groups:
tags[tag.name]['groups'] = list(tag.groups)
if tag.category:
tags[tag.name]['category'] = tag.category
if tag.public:
tags[tag.name]['public'] = True
# Create the final dict
to_dump = {}
if medias:
to_dump['medias'] = medias
if categories:
to_dump['categories'] = categories
if tags:
to_dump['tags'] = tags
if self.exclude:
to_dump['exclude'] = self.exclude
if self.default_groups:
to_dump['defaults'] = dict()
to_dump['defaults']['groups'] = self.default_groups
with open(filename, 'w') as fout:
yaml.dump(to_dump, fout,
default_flow_style=False, default_style=None, indent=1)
| agpl-3.0 |
dmarteau/QGIS | python/plugins/processing/algs/gdal/sieve.py | 15 | 5672 | # -*- coding: utf-8 -*-
"""
***************************************************************************
sieve.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class sieve(GdalAlgorithm):
INPUT = 'INPUT'
THRESHOLD = 'THRESHOLD'
EIGHT_CONNECTEDNESS = 'EIGHT_CONNECTEDNESS'
NO_MASK = 'NO_MASK'
MASK_LAYER = 'MASK_LAYER'
EXTRA = 'EXTRA'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterNumber(self.THRESHOLD,
self.tr('Threshold'),
type=QgsProcessingParameterNumber.Integer,
minValue=0,
defaultValue=10))
self.addParameter(QgsProcessingParameterBoolean(self.EIGHT_CONNECTEDNESS,
self.tr('Use 8-connectedness'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.NO_MASK,
self.tr('Do not use the default validity mask for the input band'),
defaultValue=False))
self.addParameter(QgsProcessingParameterRasterLayer(self.MASK_LAYER,
self.tr('Validity mask'),
optional=True))
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Sieved')))
def name(self):
return 'sieve'
def displayName(self):
return self.tr('Sieve')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'sieve.png'))
def commandName(self):
return 'gdal_sieve'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
arguments = [
'-st',
str(self.parameterAsInt(parameters, self.THRESHOLD, context)),
]
if self.parameterAsBoolean(parameters, self.EIGHT_CONNECTEDNESS, context):
arguments.append('-8')
else:
arguments.append('-4')
if self.parameterAsBoolean(parameters, self.NO_MASK, context):
arguments.append('-nomask')
mask = self.parameterAsRasterLayer(parameters, self.MASK_LAYER, context)
if mask:
arguments.append('-mask')
arguments.append(mask.source())
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
raster = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if raster is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
arguments.append(raster.source())
arguments.append(out)
return [self.commandName() + ('.bat' if isWindows() else '.py'), GdalUtils.escapeAndJoin(arguments)]
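    # Illustrative example (an assumption, not produced by this file): with the
    # default threshold of 10, 4-connectedness and a GeoTIFF destination, the
    # generated call is roughly
    #   gdal_sieve.py -st 10 -4 -of GTiff input.tif output.tif
    # (gdal_sieve.bat is used instead on Windows).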
| gpl-2.0 |
stefanhenneking/mxnet | python/mxnet/executor_manager.py | 38 | 17449 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-locals, too-many-arguments, too-many-statements
"""Executor manager."""
from __future__ import absolute_import
import logging
import numpy as np
from .base import mx_real_t
from . import ndarray as nd
from .context import cpu
from .io import DataDesc
def _split_input_slice(batch_size, work_load_list):
"""Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
In case of too many splits, leading to some empty slices.
"""
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
begin = int(min((end, batch_size)))
end = int(min((begin + batch_num, batch_size)))
if begin >= end:
raise ValueError('Too many slices. Some splits are empty.')
slices.append(slice(begin, end))
return slices
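# Illustrative examples (not part of the original module) of how work loads map
# to slices, assuming the rounding behaviour shown above:
#   _split_input_slice(10, [1, 1])  ->  [slice(0, 5), slice(5, 10)]
#   _split_input_slice(10, [2, 3])  ->  [slice(0, 4), slice(4, 10)]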
def _check_arguments(symbol):
"""Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
"""
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
if name in arg_set:
            raise ValueError(('Found duplicated argument name \"%s\", ' +
                              'please make the weight name non-duplicated (using name arguments), ' +
                              'arguments are %s') % (name, str(arg_names)))
arg_set.add(name)
aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
if name in aux_set:
raise ValueError(
                ('Found duplicated auxiliary param name \"%s\", ' +
                 'please make the weight name non-duplicated (using name arguments), ' +
'arguments are %s, auxiliary params are %s'
) % (name, str(arg_names), str(aux_names)))
aux_set.add(name)
def _load_general(data, targets):
"""Load a list of arrays into a list of arrays specified by slices."""
for d_src, d_targets in zip(data, targets):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
else:
assert d_targets[-1][0].stop == d_src.shape[0], \
"Batch size miss match. Expected %d, got %d"%( \
d_targets[-1][0].stop, d_src.shape[0])
for slice_idx, d_dst in d_targets:
d_src[slice_idx].copyto(d_dst)
def _load_data(batch, targets):
"""Load data into sliced arrays."""
_load_general(batch.data, targets)
def _load_label(batch, targets):
"""Load label into sliced arrays."""
_load_general(batch.label, targets)
# pylint: disable=too-many-branches
def _bind_exec(sym, ctx, input_shapes, param_names, need_grad=False,
base_exec=None, shared_data_arrays=None, input_types=None, logger=logging):
"""bind executor for bucketing, potentially sharing data with an existing executor."""
arg_shape, _, aux_shape = sym.infer_shape(**input_shapes)
assert(arg_shape is not None)
if input_types is None:
input_types = {k: mx_real_t for k in input_shapes.keys()}
arg_types, _, aux_types = sym.infer_type(**input_types)
assert(arg_types is not None)
arg_arrays = []
    grad_arrays = {} if need_grad is not False else None
arg_names = sym.list_arguments()
if need_grad is False:
need_grad = set()
elif need_grad is True:
need_grad = set(arg_names) - set(input_shapes.keys())
elif isinstance(need_grad, set):
pass
else:
raise AssertionError("need_grad must be boolean or set.")
grad_req = {name:('write' if name in need_grad else 'null') for name in arg_names}
# create or borrow arguments and gradients
for i, name in enumerate(arg_names):
        if name not in param_names:
# data or label
if shared_data_arrays is not None and \
name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape[i]):
# good, we can share this memory
assert(arg_types[i] == arg_arr.dtype)
arg_arr = arg_arr.reshape(arg_shape[i])
else:
logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape[i])) +
(', which is larger than already allocated ') +
('shape %s' % (arg_arr.shape,)) +
('. Need to re-allocate. Consider putting ') +
('default_bucket_key to be the bucket taking the largest ') +
('input for better memory sharing.'))
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
# replace existing shared array because the new one is bigger
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
if shared_data_arrays is not None:
shared_data_arrays[name] = arg_arr
arg_arrays.append(arg_arr)
else:
# model parameter
if base_exec is None:
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
if name in need_grad:
grad_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
grad_arrays[name] = grad_arr
else:
arg_arr = base_exec.arg_dict[name]
assert arg_arr.shape == arg_shape[i]
assert arg_arr.dtype == arg_types[i]
if name in need_grad:
grad_arrays[name] = base_exec.grad_dict[name]
arg_arrays.append(arg_arr)
# create or borrow aux variables
if base_exec is None:
aux_arrays = [nd.zeros(s, ctx, dtype=t) for s, t in zip(aux_shape, aux_types)]
else:
for i, a in enumerate(base_exec.aux_arrays):
assert aux_shape[i] == a.shape
assert aux_types[i] == a.dtype
aux_arrays = [a for a in base_exec.aux_arrays]
executor = sym.bind(ctx=ctx, args=arg_arrays, args_grad=grad_arrays,
aux_states=aux_arrays,
grad_req=grad_req, shared_exec=base_exec)
return executor
class DataParallelExecutorGroup(object):
"""A group of executors living on different devices, for data parallelization.
Parameters
----------
sym: Symbol
The network configuration.
arg_names: list of str
Equals `sym.list_arguments()`
param_names: list of str
List of names of all trainable parameters.
ctx: list of Context
List of devices for training (data parallelization).
slices: list of int
Describes how the data parallelization splits data into different devices.
train_data: DataIter (or DataBatch)
The dataset for training. It could be any object with `provide_data` and
`provide_label` properties. Loading of actual data is not necessarily needed
at this stage.
    shared_group: DataParallelExecutorGroup
An existing executor group, if to share parameters with it.
"""
def __init__(self, sym, arg_names, param_names, ctx, slices, train_data, shared_group=None):
# make sure the architecture is valid
_check_arguments(sym)
if shared_group is None:
self.shared_data_arrays = [{} for _ in ctx]
else:
self.shared_data_arrays = shared_group.shared_data_arrays
self.data_names = [x[0] for x in train_data.provide_data]
self.label_names = [x[0] for x in train_data.provide_label]
self.aux_names = sym.list_auxiliary_states()
self.param_idx = [i for i in range(len(arg_names)) if arg_names[i] in param_names]
self.param_names = [arg_names[i] for i in self.param_idx]
self.train_execs = []
for i, ctxi in enumerate(ctx):
data_shapes = {}
data_types = {}
for x in train_data.provide_data + train_data.provide_label:
data_shapes[x[0]] = tuple([slices[i].stop - slices[i].start] + list(x[1][1:]))
if isinstance(x, DataDesc):
data_types[x.name] = x.dtype
else:
data_types[x[0]] = mx_real_t
shared_exec = None if shared_group is None else shared_group.train_execs[i]
train_exec = _bind_exec(sym, ctxi, data_shapes, self.param_names,
need_grad=True, base_exec=shared_exec,
shared_data_arrays=self.shared_data_arrays[i],
input_types=data_types)
self.train_execs.append(train_exec)
# data structure
self.data_arrays = [[(slices[i], e.arg_dict[name]) for i, e in enumerate(self.train_execs)]
for name in self.data_names]
self.label_arrays = [[(slices[i], e.arg_dict[name]) for i, e in enumerate(self.train_execs)]
for name in self.label_names]
self.param_arrays = [[e.arg_arrays[i] for e in self.train_execs]
for i in self.param_idx]
self.grad_arrays = [[e.grad_arrays[i] for e in self.train_execs]
for i in self.param_idx]
self.aux_arrays = [[e.aux_arrays[i] for e in self.train_execs]
for i in range(len(self.aux_names))]
self.slices = slices
def load_data_batch(self, data_batch):
"""Load data and labels into arrays."""
_load_data(data_batch, self.data_arrays)
_load_label(data_batch, self.label_arrays)
def forward(self, is_train=False):
"""Perform a forward pass on each executor."""
for texec in self.train_execs:
texec.forward(is_train=is_train)
def backward(self):
"""Perform a backward pass on each executor."""
for texec in self.train_execs:
texec.backward()
def update_metric(self, metric, labels):
"""Update evaluation metric with label and current outputs."""
for texec, islice in zip(self.train_execs, self.slices):
labels_slice = [label[islice] for label in labels]
metric.update(labels_slice, texec.outputs)
class DataParallelExecutorManager(object):
""" Helper class to manage multiple executors for data parallelism.
Parameters
----------
symbol : Symbol
Output symbol.
ctx : list of Context
Devices to run on.
param_names: list of str
Name of all trainable parameters of the network.
arg_names: list of str
Name of all arguments of the network.
aux_names: list of str
Name of all auxiliary states of the network.
train_data : DataIter
Training data iterator.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ctx.
logger : logging logger
When not specified, default logger will be used.
    sym_gen : A function that generates new Symbols depending on different
input shapes. Used only for bucketing.
"""
def __init__(self, symbol, ctx, train_data,
arg_names, param_names, aux_names,
work_load_list=None, logger=None, sym_gen=None):
if logger is None:
logger = logging
# preparation
num_device = len(ctx)
logger.info('Start training with %s', str(ctx))
if work_load_list is None:
work_load_list = [1] * num_device
assert isinstance(work_load_list, list) and len(work_load_list) == num_device, \
"Invalid settings for work load. "
slices = _split_input_slice(train_data.batch_size, work_load_list)
self.slices = slices
self.arg_names = arg_names
self.param_names = param_names
self.aux_names = aux_names
self.ctx = ctx
self.execgrp = DataParallelExecutorGroup(symbol, self.arg_names, self.param_names, self.ctx,
self.slices, train_data)
self.symbol = symbol
self.sym_gen = sym_gen
self.curr_execgrp = None # this is set when data is loaded
if self.sym_gen is not None:
self.execgrp_bucket = {train_data.default_bucket_key: self.execgrp}
def install_monitor(self, monitor):
"""Install monitor on all executors."""
if self.sym_gen is not None:
raise NotImplementedError("Monitoring is not implemented for bucketing")
for train_exec in self.execgrp.train_execs:
monitor.install(train_exec)
def set_params(self, arg_params, aux_params):
"""Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
"""
for texec in self.execgrp.train_execs:
texec.copy_params_from(arg_params, aux_params)
def copy_to(self, arg_params, aux_params):
""" Copy data from each executor to ```arg_params`` and ``aux_params``.
Parameters
----------
arg_params : list of NDArray
Target parameter arrays.
aux_params : list of NDArray
Target aux arrays.
Notes
-----
        - This function will update the NDArrays in arg_params and aux_params in place.
"""
for name, block in zip(self.param_names, self.param_arrays):
weight = sum(w.copyto(cpu()) for w in block) / len(block)
weight.astype(arg_params[name].dtype).copyto(arg_params[name])
for name, block in zip(self.aux_names, self.aux_arrays):
weight = sum(w.copyto(cpu()) for w in block) / len(block)
weight.astype(aux_params[name].dtype).copyto(aux_params[name])
@property
def param_arrays(self):
"""Shared parameter arrays."""
# param arrays should be shared by all executor groups
return self.execgrp.param_arrays
@property
def grad_arrays(self):
"""Shared gradient arrays."""
# grad arrays should be shared by all executor groups
return self.execgrp.grad_arrays
@property
def aux_arrays(self):
"""Shared aux states."""
# aux arrays are also shared by all executor groups
return self.execgrp.aux_arrays
def load_data_batch(self, data_batch):
"""Load data and labels into arrays."""
if self.sym_gen is not None:
key = data_batch.bucket_key
if key not in self.execgrp_bucket:
# create new bucket entry
symbol = self.sym_gen(key)
execgrp = DataParallelExecutorGroup(symbol, self.arg_names,
self.param_names, self.ctx,
self.slices, data_batch,
shared_group=self.execgrp)
self.execgrp_bucket[key] = execgrp
self.curr_execgrp = self.execgrp_bucket[key]
else:
self.curr_execgrp = self.execgrp
self.curr_execgrp.load_data_batch(data_batch)
def forward(self, is_train=False):
"""Run forward on the current executor."""
self.curr_execgrp.forward(is_train=is_train)
def backward(self):
"""Run backward on the current executor."""
self.curr_execgrp.backward()
def update_metric(self, metric, labels):
"""Update metric with the current executor."""
self.curr_execgrp.update_metric(metric, labels)
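# A minimal usage sketch (illustrative only; `data_iter`, `metric` and the
# surrounding training loop are assumptions, not part of this module):
#
#   manager = DataParallelExecutorManager(symbol, ctx, data_iter,
#                                         arg_names, param_names, aux_names)
#   manager.set_params(arg_params, aux_params)
#   for batch in data_iter:
#       manager.load_data_batch(batch)
#       manager.forward(is_train=True)
#       manager.backward()
#       manager.update_metric(metric, batch.label)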
| apache-2.0 |
jayceyxc/hue | desktop/core/ext-py/Django-1.6.10/tests/datetimes/tests.py | 49 | 3213 | from __future__ import absolute_import
import datetime
try:
import pytz
except ImportError:
pytz = None
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.unittest import skipIf
from .models import Article, Comment, Category
class DateTimesTests(TestCase):
def test_related_model_traverse(self):
a1 = Article.objects.create(
title="First one",
pub_date=datetime.datetime(2005, 7, 28, 9, 0, 0),
)
a2 = Article.objects.create(
title="Another one",
pub_date=datetime.datetime(2010, 7, 28, 10, 0, 0),
)
a3 = Article.objects.create(
title="Third one, in the first day",
pub_date=datetime.datetime(2005, 7, 28, 17, 0, 0),
)
a1.comments.create(
text="Im the HULK!",
pub_date=datetime.datetime(2005, 7, 28, 9, 30, 0),
)
a1.comments.create(
text="HULK SMASH!",
pub_date=datetime.datetime(2005, 7, 29, 1, 30, 0),
)
a2.comments.create(
text="LMAO",
pub_date=datetime.datetime(2010, 7, 28, 10, 10, 10),
)
a3.comments.create(
text="+1",
pub_date=datetime.datetime(2005, 8, 29, 10, 10, 10),
)
c = Category.objects.create(name="serious-news")
c.articles.add(a1, a3)
self.assertQuerysetEqual(
Comment.objects.datetimes("article__pub_date", "year"), [
datetime.datetime(2005, 1, 1),
datetime.datetime(2010, 1, 1),
],
lambda d: d,
)
self.assertQuerysetEqual(
Comment.objects.datetimes("article__pub_date", "month"), [
datetime.datetime(2005, 7, 1),
datetime.datetime(2010, 7, 1),
],
lambda d: d
)
self.assertQuerysetEqual(
Comment.objects.datetimes("article__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
datetime.datetime(2010, 7, 28),
],
lambda d: d
)
self.assertQuerysetEqual(
Article.objects.datetimes("comments__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
datetime.datetime(2005, 7, 29),
datetime.datetime(2005, 8, 29),
datetime.datetime(2010, 7, 28),
],
lambda d: d
)
self.assertQuerysetEqual(
Article.objects.datetimes("comments__approval_date", "day"), []
)
self.assertQuerysetEqual(
Category.objects.datetimes("articles__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
],
lambda d: d,
)
@skipIf(pytz is None, "this test requires pytz")
@override_settings(USE_TZ=True)
def test_21432(self):
now = timezone.localtime(timezone.now().replace(microsecond=0))
Article.objects.create(title="First one", pub_date=now)
qs = Article.objects.datetimes('pub_date', 'second')
self.assertEqual(qs[0], now)
| apache-2.0 |
2ndQuadrant/ansible | lib/ansible/modules/cloud/google/gcp_container_cluster_facts.py | 5 | 17700 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_container_cluster_facts
description:
- Gather facts for GCP Cluster
short_description: Gather facts for GCP Cluster
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
location:
description:
- The location where the cluster is deployed.
required: true
aliases:
- region
- zone
version_added: 2.8
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a cluster facts"
gcp_container_cluster_facts:
location: us-central1-a
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- The name of this cluster. The name must be unique within this project and
location, and can be up to 40 characters. Must be Lowercase letters, numbers,
and hyphens only. Must start with a letter. Must end with a number or a letter.
returned: success
type: str
description:
description:
- An optional description of this cluster.
returned: success
type: str
initialNodeCount:
description:
- The number of nodes to create in this cluster. You must ensure that your Compute
Engine resource quota is sufficient for this number of instances. You must
also have available firewall and routes quota. For requests, this field should
only be used in lieu of a "nodePool" object, since this configuration (along
with the "nodeConfig") will be used to create a "NodePool" object with an
auto-generated name. Do not use this and a nodePool at the same time.
returned: success
type: int
nodeConfig:
description:
- Parameters used in creating the cluster's nodes.
- For requests, this field should only be used in lieu of a "nodePool" object,
since this configuration (along with the "initialNodeCount") will be used
to create a "NodePool" object with an auto-generated name. Do not use this
and a nodePool at the same time. For responses, this field will be populated
with the node configuration of the first node pool. If unspecified, the defaults
are used.
returned: success
type: complex
contains:
machineType:
description:
- The name of a Google Compute Engine machine type (e.g.
- n1-standard-1). If unspecified, the default machine type is n1-standard-1.
returned: success
type: str
diskSizeGb:
description:
- Size of the disk attached to each node, specified in GB. The smallest
allowed disk size is 10GB. If unspecified, the default disk size is 100GB.
returned: success
type: int
oauthScopes:
description:
- The set of Google API scopes to be made available on all of the node VMs
under the "default" service account.
- 'The following scopes are recommended, but not required, and by default
are not included: U(https://www.googleapis.com/auth/compute) is required
for mounting persistent storage on your nodes.'
- U(https://www.googleapis.com/auth/devstorage.read_only) is required for
communicating with gcr.io (the Google Container Registry).
- If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring
are enabled, in which case their required scopes will be added.
returned: success
type: list
serviceAccount:
description:
- The Google Cloud Platform Service Account to be used by the node VMs.
If no Service Account is specified, the "default" service account is used.
returned: success
type: str
metadata:
description:
- The metadata key/value pairs assigned to instances in the cluster.
- 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with any other
metadata keys for the project or be one of the four reserved keys: "instance-template",
"kube-env", "startup-script", and "user-data" Values are free-form strings,
and only have meaning as interpreted by the image running in the instance.
The only restriction placed on them is that each value''s size must be
less than or equal to 32 KB.'
- The total size of all keys and values must be less than 512 KB.
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
imageType:
description:
- The image type to use for this node. Note that for a given image type,
the latest version of it will be used.
returned: success
type: str
labels:
description:
- 'The map of Kubernetes labels (key/value pairs) to be applied to each
node. These will added in addition to any default label(s) that Kubernetes
may apply to the node. In case of conflict in label keys, the applied
set may differ depending on the Kubernetes version -- it''s best to assume
the behavior is undefined and conflicts should be avoided. For more information,
including usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html)
An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
localSsdCount:
description:
- The number of local SSD disks to be attached to the node.
- 'The limit for this value is dependant upon the maximum number of disks
available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits)
for more information.'
returned: success
type: int
tags:
description:
- The list of instance tags applied to all nodes. Tags are used to identify
valid sources or targets for network firewalls and are specified by the
client during cluster or node pool creation. Each tag within the list
must comply with RFC1035.
returned: success
type: list
preemptible:
description:
- 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
for more information about preemptible VM instances.'
returned: success
type: bool
masterAuth:
description:
- The authentication information for accessing the master endpoint.
returned: success
type: complex
contains:
username:
description:
- The username to use for HTTP basic authentication to the master endpoint.
returned: success
type: str
password:
description:
- The password to use for HTTP basic authentication to the master endpoint.
Because the master endpoint is open to the Internet, you should create
a strong password.
returned: success
type: str
clusterCaCertificate:
description:
- Base64-encoded public certificate that is the root of trust for the cluster.
returned: success
type: str
clientCertificate:
description:
- Base64-encoded public certificate used by clients to authenticate to the
cluster endpoint.
returned: success
type: str
clientKey:
description:
- Base64-encoded private key used by clients to authenticate to the cluster
endpoint.
returned: success
type: str
loggingService:
description:
- 'The logging service the cluster should use to write logs. Currently available
options: logging.googleapis.com - the Google Cloud Logging service.'
- none - no logs will be exported from the cluster.
- if left as an empty string,logging.googleapis.com will be used.
returned: success
type: str
monitoringService:
description:
- The monitoring service the cluster should use to write metrics.
- 'Currently available options: monitoring.googleapis.com - the Google Cloud
Monitoring service.'
- none - no metrics will be exported from the cluster.
- if left as an empty string, monitoring.googleapis.com will be used.
returned: success
type: str
network:
description:
- The name of the Google Compute Engine network to which the cluster is connected.
If left unspecified, the default network will be used.
returned: success
type: str
privateClusterConfig:
description:
- Configuration for a private cluster.
returned: success
type: complex
contains:
enablePrivateNodes:
description:
- Whether nodes have internal IP addresses only. If enabled, all nodes are
given only RFC 1918 private addresses and communicate with the master
via private networking.
returned: success
type: bool
enablePrivateEndpoint:
description:
- Whether the master's internal IP address is used as the cluster endpoint.
returned: success
type: bool
masterIpv4CidrBlock:
description:
- The IP range in CIDR notation to use for the hosted master network. This
range will be used for assigning internal IP addresses to the master or
set of masters, as well as the ILB VIP. This range must not overlap with
any other ranges in use within the cluster's network.
returned: success
type: str
privateEndpoint:
description:
- The internal IP address of this cluster's master endpoint.
returned: success
type: str
publicEndpoint:
description:
- The external IP address of this cluster's master endpoint.
returned: success
type: str
clusterIpv4Cidr:
description:
- The IP address range of the container pods in this cluster, in CIDR notation
(e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify
a /14 block in 10.0.0.0/8.
returned: success
type: str
addonsConfig:
description:
- Configurations for the various addons available to run in the cluster.
returned: success
type: complex
contains:
httpLoadBalancing:
description:
- Configuration for the HTTP (L7) load balancing controller addon, which
makes it easy to set up HTTP load balancers for services in a cluster.
returned: success
type: complex
contains:
disabled:
description:
- Whether the HTTP Load Balancing controller is enabled in the cluster.
When enabled, it runs a small pod in the cluster that manages the
load balancers.
returned: success
type: bool
horizontalPodAutoscaling:
description:
- Configuration for the horizontal pod autoscaling feature, which increases
or decreases the number of replica pods a replication controller has based
on the resource usage of the existing pods.
returned: success
type: complex
contains:
disabled:
description:
- Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.
When enabled, it ensures that a Heapster pod is running in the cluster,
which is also used by the Cloud Monitoring service.
returned: success
type: bool
subnetwork:
description:
- The name of the Google Compute Engine subnetwork to which the cluster is connected.
returned: success
type: str
endpoint:
description:
- The IP address of this cluster's master endpoint.
- The endpoint can be accessed from the internet at https://username:password@endpoint/
See the masterAuth property of this resource for username and password information.
returned: success
type: str
initialClusterVersion:
description:
- The software version of the master endpoint and kubelets used in the cluster
when it was first created. The version can be upgraded over time.
returned: success
type: str
currentMasterVersion:
description:
- The current software version of the master endpoint.
returned: success
type: str
currentNodeVersion:
description:
- The current version of the node software components. If they are currently
at multiple versions because they're in the process of being upgraded, this
reflects the minimum version of all nodes.
returned: success
type: str
createTime:
description:
- The time the cluster was created, in RFC3339 text format.
returned: success
type: str
nodeIpv4CidrSize:
description:
- The size of the address space on each node for hosting containers.
- This is provisioned from within the container_ipv4_cidr range.
returned: success
type: int
servicesIpv4Cidr:
description:
- The IP address range of the Kubernetes services in this cluster, in CIDR notation
(e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from
the container CIDR.
returned: success
type: str
currentNodeCount:
description:
- The number of nodes currently in the cluster.
returned: success
type: int
expireTime:
description:
- The time the cluster will be automatically deleted in RFC3339 text format.
returned: success
type: str
location:
description:
- The location where the cluster is deployed.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(location=dict(required=True, type='str', aliases=['region', 'zone'])))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
items = fetch_list(module, collection(module))
if items.get('clusters'):
items = items.get('clusters')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'container')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 |
0x726d77/storm | storm-client/src/py/storm/DistributedRPC.py | 22 | 9644 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import logging
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
self.send_execute(functionName, funcArgs)
return self.recv_execute()
def send_execute(self, functionName, funcArgs):
self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
args = execute_args()
args.functionName = functionName
args.funcArgs = funcArgs
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = execute_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["execute"] = Processor.process_execute
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_execute(self, seqid, iprot, oprot):
args = execute_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_result()
try:
result.success = self._handler.execute(args.functionName, args.funcArgs)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except DRPCExecutionException as e:
msg_type = TMessageType.REPLY
result.e = e
except AuthorizationException as aze:
msg_type = TMessageType.REPLY
result.aze = aze
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("execute", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class execute_args:
"""
Attributes:
- functionName
- funcArgs
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'functionName', None, None, ), # 1
(2, TType.STRING, 'funcArgs', None, None, ), # 2
)
def __init__(self, functionName=None, funcArgs=None,):
self.functionName = functionName
self.funcArgs = funcArgs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.functionName = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.funcArgs = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_args')
if self.functionName is not None:
oprot.writeFieldBegin('functionName', TType.STRING, 1)
oprot.writeString(self.functionName.encode('utf-8'))
oprot.writeFieldEnd()
if self.funcArgs is not None:
oprot.writeFieldBegin('funcArgs', TType.STRING, 2)
oprot.writeString(self.funcArgs.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.functionName)
value = (value * 31) ^ hash(self.funcArgs)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (DRPCExecutionException, DRPCExecutionException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = DRPCExecutionException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8'))
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.e)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| apache-2.0 |
hfp/tensorflow-xsmm | tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py | 29 | 12269 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""L2HMC compatible with TensorFlow's eager execution.
Reference [Generalizing Hamiltonian Monte Carlo with Neural
Networks](https://arxiv.org/pdf/1711.09268.pdf)
Code adapted from the released TensorFlow graph implementation by original
authors https://github.com/brain-research/l2hmc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.l2hmc import neural_nets
class Dynamics(tf.keras.Model):
"""Dynamics engine of naive L2HMC sampler."""
def __init__(self,
x_dim,
minus_loglikelihood_fn,
n_steps=25,
eps=.1,
np_seed=1):
"""Initialization.
Args:
x_dim: dimensionality of observed data
      minus_loglikelihood_fn: negative log-likelihood function used as the potential energy
n_steps: number of leapfrog steps within each transition
      eps: initial value of the learnable step size scale
np_seed: Random seed for numpy; used to control sampled masks.
"""
super(Dynamics, self).__init__()
npr.seed(np_seed)
self.x_dim = x_dim
self.potential = minus_loglikelihood_fn
self.n_steps = n_steps
self._construct_time()
self._construct_masks()
self.position_fn = neural_nets.GenericNet(x_dim, factor=2.)
self.momentum_fn = neural_nets.GenericNet(x_dim, factor=1.)
self.eps = tf.Variable(
initial_value=eps, name="eps", dtype=tf.float32, trainable=True)
def apply_transition(self, position):
"""Propose a new state and perform the accept or reject step."""
# Simulate dynamics both forward and backward;
# Use sampled Bernoulli masks to compute the actual solutions
position_f, momentum_f, accept_prob_f = self.transition_kernel(
position, forward=True)
position_b, momentum_b, accept_prob_b = self.transition_kernel(
position, forward=False)
# Decide direction uniformly
batch_size = tf.shape(position)[0]
forward_mask = tf.cast(tf.random_uniform((batch_size,)) > .5, tf.float32)
backward_mask = 1. - forward_mask
# Obtain proposed states
position_post = (
forward_mask[:, None] * position_f +
backward_mask[:, None] * position_b)
momentum_post = (
forward_mask[:, None] * momentum_f +
backward_mask[:, None] * momentum_b)
# Probability of accepting the proposed states
accept_prob = forward_mask * accept_prob_f + backward_mask * accept_prob_b
# Accept or reject step
accept_mask = tf.cast(
accept_prob > tf.random_uniform(tf.shape(accept_prob)), tf.float32)
reject_mask = 1. - accept_mask
# Samples after accept/reject step
position_out = (
accept_mask[:, None] * position_post + reject_mask[:, None] * position)
return position_post, momentum_post, accept_prob, position_out
def transition_kernel(self, position, forward=True):
"""Transition kernel of augmented leapfrog integrator."""
lf_fn = self._forward_lf if forward else self._backward_lf
# Resample momentum
momentum = tf.random_normal(tf.shape(position))
position_post, momentum_post = position, momentum
sumlogdet = 0.
# Apply augmented leapfrog steps
for i in range(self.n_steps):
position_post, momentum_post, logdet = lf_fn(position_post, momentum_post,
i)
sumlogdet += logdet
accept_prob = self._compute_accept_prob(position, momentum, position_post,
momentum_post, sumlogdet)
return position_post, momentum_post, accept_prob
def _forward_lf(self, position, momentum, i):
"""One forward augmented leapfrog step. See eq (5-6) in paper."""
t = self._get_time(i)
mask, mask_inv = self._get_mask(i)
sumlogdet = 0.
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
mask, mask_inv)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
mask_inv, mask)
sumlogdet += logdet
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
return position, momentum, sumlogdet
def _backward_lf(self, position, momentum, i):
"""One backward augmented leapfrog step. See Appendix A in paper."""
# Reversed index/sinusoidal time
t = self._get_time(self.n_steps - i - 1)
mask, mask_inv = self._get_mask(self.n_steps - i - 1)
sumlogdet = 0.
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
mask_inv, mask)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
mask, mask_inv)
sumlogdet += logdet
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
return position, momentum, sumlogdet
def _update_momentum_forward(self, position, momentum, t):
"""Update v in the forward leapfrog step."""
grad = self.grad_potential(position)
scale, translation, transformed = self.momentum_fn([position, grad, t])
scale *= .5 * self.eps
transformed *= self.eps
momentum = (
momentum * tf.exp(scale) -
.5 * self.eps * (tf.exp(transformed) * grad - translation))
return momentum, tf.reduce_sum(scale, axis=1)
def _update_position_forward(self, position, momentum, t, mask, mask_inv):
"""Update x in the forward leapfrog step."""
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= self.eps
transformed *= self.eps
position = (
mask * position +
mask_inv * (position * tf.exp(scale) + self.eps *
(tf.exp(transformed) * momentum + translation)))
return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _update_momentum_backward(self, position, momentum, t):
"""Update v in the backward leapfrog step. Inverting the forward update."""
grad = self.grad_potential(position)
scale, translation, transformed = self.momentum_fn([position, grad, t])
scale *= -.5 * self.eps
transformed *= self.eps
momentum = (
tf.exp(scale) * (momentum + .5 * self.eps *
(tf.exp(transformed) * grad - translation)))
return momentum, tf.reduce_sum(scale, axis=1)
def _update_position_backward(self, position, momentum, t, mask, mask_inv):
"""Update x in the backward leapfrog step. Inverting the forward update."""
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= -self.eps
transformed *= self.eps
position = (
mask * position + mask_inv * tf.exp(scale) *
(position - self.eps * (tf.exp(transformed) * momentum + translation)))
return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _compute_accept_prob(self, position, momentum, position_post,
momentum_post, sumlogdet):
"""Compute the prob of accepting the proposed state given old state."""
old_hamil = self.hamiltonian(position, momentum)
new_hamil = self.hamiltonian(position_post, momentum_post)
prob = tf.exp(tf.minimum(old_hamil - new_hamil + sumlogdet, 0.))
# Ensure numerical stability as well as correct gradients
return tf.where(tf.is_finite(prob), prob, tf.zeros_like(prob))
def _construct_time(self):
"""Convert leapfrog step index into sinusoidal time."""
self.ts = []
for i in range(self.n_steps):
t = tf.constant(
[
np.cos(2 * np.pi * i / self.n_steps),
np.sin(2 * np.pi * i / self.n_steps)
],
dtype=tf.float32)
self.ts.append(t[None, :])
def _get_time(self, i):
"""Get sinusoidal time for i-th augmented leapfrog step."""
return self.ts[i]
def _construct_masks(self):
"""Construct different binary masks for different time steps."""
self.masks = []
for _ in range(self.n_steps):
      # Need to use npr here because tf would generate different random
# values across different `sess.run`
idx = npr.permutation(np.arange(self.x_dim))[:self.x_dim // 2]
mask = np.zeros((self.x_dim,))
mask[idx] = 1.
mask = tf.constant(mask, dtype=tf.float32)
self.masks.append(mask[None, :])
def _get_mask(self, i):
"""Get binary masks for i-th augmented leapfrog step."""
m = self.masks[i]
return m, 1. - m
def kinetic(self, v):
"""Compute the kinetic energy."""
return .5 * tf.reduce_sum(v**2, axis=1)
def hamiltonian(self, position, momentum):
"""Compute the overall Hamiltonian."""
return self.potential(position) + self.kinetic(momentum)
def grad_potential(self, position, check_numerics=True):
"""Get gradient of potential function at current location."""
if tf.executing_eagerly():
grad = tfe.gradients_function(self.potential)(position)[0]
else:
grad = tf.gradients(self.potential(position), position)[0]
return grad
# Examples of unnormalized log densities
def get_scg_energy_fn():
"""Get energy function for 2d strongly correlated Gaussian."""
# Avoid recreating tf constants on each invocation of gradients
mu = tf.constant([0., 0.])
sigma = tf.constant([[50.05, -49.95], [-49.95, 50.05]])
sigma_inv = tf.matrix_inverse(sigma)
def energy(x):
"""Unnormalized minus log density of 2d strongly correlated Gaussian."""
xmmu = x - mu
return .5 * tf.diag_part(
tf.matmul(tf.matmul(xmmu, sigma_inv), tf.transpose(xmmu)))
return energy, mu, sigma
def get_rw_energy_fn():
"""Get energy function for rough well distribution."""
# For small eta, the density underlying the rough-well energy is very close to
# a unit Gaussian; however, the gradient is greatly affected by the small
# cosine perturbations
eta = 1e-2
mu = tf.constant([0., 0.])
sigma = tf.constant([[1., 0.], [0., 1.]])
def energy(x):
ip = tf.reduce_sum(x**2., axis=1)
return .5 * ip + eta * tf.reduce_sum(tf.cos(x / eta), axis=1)
return energy, mu, sigma
# Loss function
def compute_loss(dynamics, x, scale=.1, eps=1e-4):
"""Compute loss defined in equation (8)."""
z = tf.random_normal(tf.shape(x)) # Auxiliary variable
x_, _, x_accept_prob, x_out = dynamics.apply_transition(x)
z_, _, z_accept_prob, _ = dynamics.apply_transition(z)
# Add eps for numerical stability; following released impl
x_loss = tf.reduce_sum((x - x_)**2, axis=1) * x_accept_prob + eps
z_loss = tf.reduce_sum((z - z_)**2, axis=1) * z_accept_prob + eps
loss = tf.reduce_mean(
(1. / x_loss + 1. / z_loss) * scale - (x_loss + z_loss) / scale, axis=0)
return loss, x_out, x_accept_prob
def loss_and_grads(dynamics, x, loss_fn=compute_loss):
"""Obtain loss value and gradients."""
with tf.GradientTape() as tape:
loss_val, out, accept_prob = loss_fn(dynamics, x)
grads = tape.gradient(loss_val, dynamics.trainable_variables)
return loss_val, grads, out, accept_prob
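# A minimal training-step sketch (illustrative; the optimizer, batch size and
# choice of energy function are assumptions, not part of this module):
#
#   energy_fn, _, _ = get_scg_energy_fn()
#   dynamics = Dynamics(x_dim=2, minus_loglikelihood_fn=energy_fn)
#   optimizer = tf.train.AdamOptimizer(1e-3)
#   samples = tf.random_normal([64, 2])
#   loss, grads, samples, accept_prob = loss_and_grads(dynamics, samples)
#   optimizer.apply_gradients(zip(grads, dynamics.trainable_variables))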
| apache-2.0 |
ymero/tornado | tornado/__init__.py | 75 | 1130 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Tornado web server and tools."""
from __future__ import absolute_import, division, print_function, with_statement
# version is a human-readable version number.
# version_info is a four-tuple for programmatic comparison. The first
# three numbers are the components of the version number. The fourth
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
version = "4.3.dev1"
version_info = (4, 3, 0, -100)
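# Illustrative use of the tuple (an example, not part of the original file):
#   if version_info >= (4, 2, 0, 0):
#       ...  # safe to rely on features added in Tornado 4.2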
| apache-2.0 |
sureshsundriyal/pysqlitebkup | pysqlitebkup.py | 1 | 4083 | #! /usr/bin/env python
import ctypes
from ctypes.util import find_library
__author__ = 'Suresh Sundriyal'
__license__ = 'CC0 - No rights reserved.'
__version__ = '0.0.1'
__credits__ = [ 'Joongi Kim: https://gist.github.com/achimnol/3021995',
'sqlite3.org: http://www.sqlite.org/backup.html' ]
SQLITE_OK = 0
SQLITE_ERROR = 1
SQLITE_BUSY = 5
SQLITE_LOCKED = 6
SQLITE_DONE = 101
SQLITE_OPEN_READONLY = 1
SQLITE_OPEN_READWRITE = 2
SQLITE_OPEN_CREATE = 4
sqlite = ctypes.CDLL(find_library('sqlite3'))
sqlite.sqlite3_backup_init.restype = ctypes.c_void_p
class BackupInitError(Exception):
pass
class BackupFailedError(Exception):
pass
class FileOpenError(Exception):
pass
class UninitializedError(Exception):
pass
def _openFile(fileAttributes):
fileName, ptr, mode = fileAttributes
fileName_p = ctypes.c_char_p(fileName.encode('utf-8'))
rc = sqlite.sqlite3_open_v2(fileName_p, ctypes.byref(ptr),
mode, None)
if (rc != SQLITE_OK or ptr.value is None):
raise FileOpenError("Unable to open file(%s), rc(%s)" % (
fileName, rc))
class dbbackup(object):
def __init__(self, src, dst):
self.src = src
self.dst = dst
self.p_src_db = ctypes.c_void_p(None)
self.p_dst_db = ctypes.c_void_p(None)
self.p_backup = ctypes.c_void_p(None)
self.finished = False
self.remaining = None
self.pagecount = None
def __enter__(self):
self.backupInit()
return self
def __exit__(self, type, value, traceback):
self.backupFinish()
def backupInit(self):
# We do this for the side-effect of opening both the files and not
# having boilerplate code and the fact that map is generally faster
# than a for loop.
list(map(_openFile,
[(self.src, self.p_src_db, SQLITE_OPEN_READONLY),
(self.dst, self.p_dst_db,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE)]))
dbType = 'main'.encode('utf-8')
self.p_backup = ctypes.c_void_p(sqlite.sqlite3_backup_init(
self.p_dst_db, dbType,
self.p_src_db, dbType))
if self.p_backup.value is None:
raise BackupInitError("Failed to backup_init")
def backupFinish(self):
if self.p_backup.value is not None:
sqlite.sqlite3_backup_finish(self.p_backup)
rc = sqlite.sqlite3_errcode(self.p_dst_db)
if self.p_dst_db.value is not None:
sqlite.sqlite3_close(self.p_dst_db)
if self.p_src_db.value is not None:
sqlite.sqlite3_close(self.p_src_db)
if rc != SQLITE_OK:
raise BackupFailedError("Failed to backup db: rc(%s)" % rc)
def step(self, size=5):
if self.p_backup.value is None:
raise UninitializedError(
"step called without calling backupInit first")
rc = sqlite.sqlite3_backup_step(self.p_backup, size)
self.remaining = sqlite.sqlite3_backup_remaining(self.p_backup)
self.pagecount = sqlite.sqlite3_backup_pagecount(self.p_backup)
if rc == SQLITE_DONE:
self.finished = True
if rc in (SQLITE_OK, SQLITE_BUSY, SQLITE_LOCKED):
# sleep for 250 ms before continuing.
sqlite.sqlite3_sleep(250)
def backup(self, stepSize=5):
import os
__unlink = True
if os.path.exists(self.dst):
__unlink = False
try:
while not self.finished:
self.step(stepSize)
except:
if __unlink:
try:
os.unlink(self.dst)
                except OSError:
                    pass
raise
if __name__ == '__main__':
import sys
import logging
try:
with dbbackup(sys.argv[1], sys.argv[2]) as p:
p.backup(20)
except:
logging.exception("Failed to backup sqlite db")
| cc0-1.0 |
PeterFaiman/ruby-grpc-minimal | src/python/grpcio_tests/tests/testing/_server_application.py | 12 | 2824 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example gRPC Python-using server-side application."""
import grpc
# requests_pb2 is a semantic dependency of this module.
from tests.testing import _application_common
from tests.testing.proto import requests_pb2 # pylint: disable=unused-import
from tests.testing.proto import services_pb2
from tests.testing.proto import services_pb2_grpc
class FirstServiceServicer(services_pb2_grpc.FirstServiceServicer):
"""Services RPCs."""
def UnUn(self, request, context):
if _application_common.UNARY_UNARY_REQUEST == request:
return _application_common.UNARY_UNARY_RESPONSE
else:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Something is wrong with your request!')
return services_pb2.Down()
def UnStre(self, request, context):
if _application_common.UNARY_STREAM_REQUEST != request:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Something is wrong with your request!')
return
yield services_pb2.Strange()
def StreUn(self, request_iterator, context):
context.send_initial_metadata((
('server_application_metadata_key', 'Hi there!',),))
for request in request_iterator:
if request != _application_common.STREAM_UNARY_REQUEST:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Something is wrong with your request!')
return services_pb2.Strange()
elif not context.is_active():
return services_pb2.Strange()
else:
return _application_common.STREAM_UNARY_RESPONSE
def StreStre(self, request_iterator, context):
for request in request_iterator:
if request != _application_common.STREAM_STREAM_REQUEST:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Something is wrong with your request!')
return
elif not context.is_active():
return
else:
yield _application_common.STREAM_STREAM_RESPONSE
yield _application_common.STREAM_STREAM_RESPONSE
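# Illustrative sketch, not part of the original module: how this servicer would
# typically be attached to a server. The add_FirstServiceServicer_to_server
# helper name assumes the standard grpcio code-generation convention; the port
# number is an arbitrary placeholder.
def _example_serve(port=50051):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    services_pb2_grpc.add_FirstServiceServicer_to_server(FirstServiceServicer(),
                                                         server)
    server.add_insecure_port('[::]:{}'.format(port))
    server.start()
    return server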
| apache-2.0 |
PantherHackers/PantherBot | scripts/rage.py | 1 | 1230 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import upsidedown
import sys
from response import Response
from pb_logging import PBLogger
logger = PBLogger("Rage")
# flips text using the upsidedown module and has a donger for emphasis
def run(response, args=[]):
response_obj = Response(sys.modules[__name__])
toFlip = ''
donger = '(ノಠ益ಠ)ノ彡'
for n in range(0, len(args)):
toFlip += args[n] + " "
if toFlip == '':
toFlip = unicode('┻━┻', "utf-8")
try:
donger = unicode(donger, "utf-8")
        logger.info(toFlip[:15] + "...")  # slicing already copes with short strings
flippedmsg = upsidedown.transform(toFlip)
response_obj.messages_to_send.append(donger + flippedmsg)
except Exception as e:
logger.error("Error in flip: " + str(e))
response_obj.messages_to_send.append("Sorry, I can't seem to flip right now, or you gave an invalid argument")
return response_obj
def return_alias():
alias_list = ["rage"]
return alias_list
def is_admin_command():
return False
def help_preview():
return "!rage <Optional:String>"
def help_text():
return "Rage flips the text or table because you really want the world to know that you're upset." | mpl-2.0 |
andreabedini/PyTables | tables/exceptions.py | 5 | 11309 | # -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: December 17, 2004
# Author: Francesc Alted - [email protected]
#
# $Id$
#
########################################################################
"""Declare exceptions and warnings that are specific to PyTables."""
from __future__ import absolute_import
import six
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
import os
import warnings
import traceback
class HDF5ExtError(RuntimeError):
"""A low level HDF5 operation failed.
This exception is raised the low level PyTables components used for
accessing HDF5 files. It usually signals that something is not
going well in the HDF5 library or even at the Input/Output level.
Errors in the HDF5 C library may be accompanied by an extensive
HDF5 back trace on standard error (see also
:func:`tables.silence_hdf5_messages`).
.. versionchanged:: 2.4
Parameters
----------
message
error message
h5bt
This parameter (keyword only) controls the HDF5 back trace
        handling. Any keyword arguments other than h5bt are ignored.
* if set to False the HDF5 back trace is ignored and the
:attr:`HDF5ExtError.h5backtrace` attribute is set to None
* if set to True the back trace is retrieved from the HDF5
library and stored in the :attr:`HDF5ExtError.h5backtrace`
attribute as a list of tuples
* if set to "VERBOSE" (default) the HDF5 back trace is
stored in the :attr:`HDF5ExtError.h5backtrace` attribute
and also included in the string representation of the
exception
* if not set (or set to None) the default policy is used
(see :attr:`HDF5ExtError.DEFAULT_H5_BACKTRACE_POLICY`)
"""
# NOTE: in order to avoid circular dependencies between modules the
# _dump_h5_backtrace method is set at initialization time in
# the utilsExtenion.
_dump_h5_backtrace = None
DEFAULT_H5_BACKTRACE_POLICY = "VERBOSE"
"""Default policy for HDF5 backtrace handling
* if set to False the HDF5 back trace is ignored and the
:attr:`HDF5ExtError.h5backtrace` attribute is set to None
* if set to True the back trace is retrieved from the HDF5
library and stored in the :attr:`HDF5ExtError.h5backtrace`
attribute as a list of tuples
* if set to "VERBOSE" (default) the HDF5 back trace is
stored in the :attr:`HDF5ExtError.h5backtrace` attribute
and also included in the string representation of the
exception
This parameter can be set using the
:envvar:`PT_DEFAULT_H5_BACKTRACE_POLICY` environment variable.
Allowed values are "IGNORE" (or "FALSE"), "SAVE" (or "TRUE") and
"VERBOSE" to set the policy to False, True and "VERBOSE"
respectively. The special value "DEFAULT" can be used to reset
the policy to the default value
.. versionadded:: 2.4
"""
@classmethod
def set_policy_from_env(cls):
envmap = {
"IGNORE": False,
"FALSE": False,
"SAVE": True,
"TRUE": True,
"VERBOSE": "VERBOSE",
"DEFAULT": "VERBOSE",
}
oldvalue = cls.DEFAULT_H5_BACKTRACE_POLICY
envvalue = os.environ.get("PT_DEFAULT_H5_BACKTRACE_POLICY", "DEFAULT")
try:
newvalue = envmap[envvalue.upper()]
except KeyError:
warnings.warn("Invalid value for the environment variable "
"'PT_DEFAULT_H5_BACKTRACE_POLICY'. The default "
"policy for HDF5 back trace management in PyTables "
"will be: '%s'" % oldvalue)
else:
cls.DEFAULT_H5_BACKTRACE_POLICY = newvalue
return oldvalue
def __init__(self, *args, **kargs):
super(HDF5ExtError, self).__init__(*args)
self._h5bt_policy = kargs.get('h5bt', self.DEFAULT_H5_BACKTRACE_POLICY)
if self._h5bt_policy and self._dump_h5_backtrace is not None:
self.h5backtrace = self._dump_h5_backtrace()
"""HDF5 back trace.
Contains the HDF5 back trace as a (possibly empty) list of
tuples. Each tuple has the following format::
(filename, line number, function name, text)
Depending on the value of the *h5bt* parameter passed to the
initializer the h5backtrace attribute can be set to None.
This means that the HDF5 back trace has been simply ignored
(not retrieved from the HDF5 C library error stack) or that
there has been an error (silently ignored) during the HDF5 back
trace retrieval.
.. versionadded:: 2.4
See Also
--------
traceback.format_list : :func:`traceback.format_list`
"""
# XXX: check _dump_h5_backtrace failures
else:
self.h5backtrace = None
def __str__(self):
"""Returns a sting representation of the exception.
The actual result depends on policy set in the initializer
:meth:`HDF5ExtError.__init__`.
.. versionadded:: 2.4
"""
verbose = bool(self._h5bt_policy in ('VERBOSE', 'verbose'))
if verbose and self.h5backtrace:
bt = "\n".join([
"HDF5 error back trace\n",
self.format_h5_backtrace(),
"End of HDF5 error back trace"
])
if len(self.args) == 1 and isinstance(self.args[0], six.string_types):
msg = super(HDF5ExtError, self).__str__()
msg = "%s\n\n%s" % (bt, msg)
elif self.h5backtrace[-1][-1]:
msg = "%s\n\n%s" % (bt, self.h5backtrace[-1][-1])
else:
msg = bt
else:
msg = super(HDF5ExtError, self).__str__()
return msg
def format_h5_backtrace(self, backtrace=None):
"""Convert the HDF5 trace back represented as a list of tuples.
(see :attr:`HDF5ExtError.h5backtrace`) into a string.
.. versionadded:: 2.4
"""
if backtrace is None:
backtrace = self.h5backtrace
if backtrace is None:
return 'No HDF5 back trace available'
else:
return ''.join(traceback.format_list(backtrace))
# Initialize the policy for HDF5 back trace handling
HDF5ExtError.set_policy_from_env()
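# Illustrative sketch, not part of the original module: how calling code might
# surface the saved HDF5 back trace. `operation` is a hypothetical callable
# performing some low level HDF5 access that may raise HDF5ExtError.
def _example_report_hdf5_error(operation):
    try:
        operation()
    except HDF5ExtError as exc:
        # format_h5_backtrace() copes with h5backtrace being None (policy off)
        return exc.format_h5_backtrace()
    return None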
# The following exceptions are concretions of the ``ValueError`` exceptions
# raised by ``file`` objects on certain operations.
class ClosedNodeError(ValueError):
"""The operation can not be completed because the node is closed.
For instance, listing the children of a closed group is not allowed.
"""
pass
class ClosedFileError(ValueError):
"""The operation can not be completed because the hosting file is closed.
For instance, getting an existing node from a closed file is not
allowed.
"""
pass
class FileModeError(ValueError):
"""The operation can not be carried out because the mode in which the
hosting file is opened is not adequate.
For instance, removing an existing leaf from a read-only file is not
allowed.
"""
pass
class NodeError(AttributeError, LookupError):
"""Invalid hierarchy manipulation operation requested.
This exception is raised when the user requests an operation on the
hierarchy which can not be run because of the current layout of the
tree. This includes accessing nonexistent nodes, moving or copying
or creating over an existing node, non-recursively removing groups
with children, and other similarly invalid operations.
A node in a PyTables database cannot be simply overwritten by
    replacing it. Instead, the old node must be removed explicitly
before another one can take its place. This is done to protect
    interactive users from inadvertently deleting whole trees of data by
a single erroneous command.
"""
pass
class NoSuchNodeError(NodeError):
"""An operation was requested on a node that does not exist.
This exception is raised when an operation gets a path name or a
``(where, name)`` pair leading to a nonexistent node.
"""
pass
class UndoRedoError(Exception):
"""Problems with doing/redoing actions with Undo/Redo feature.
This exception indicates a problem related to the Undo/Redo
mechanism, such as trying to undo or redo actions with this
mechanism disabled, or going to a nonexistent mark.
"""
pass
class UndoRedoWarning(Warning):
"""Issued when an action not supporting Undo/Redo is run.
This warning is only shown when the Undo/Redo mechanism is enabled.
"""
pass
class NaturalNameWarning(Warning):
"""Issued when a non-pythonic name is given for a node.
This is not an error and may even be very useful in certain
contexts, but one should be aware that such nodes cannot be
accessed using natural naming (instead, ``getattr()`` must be
used explicitly).
"""
pass
class PerformanceWarning(Warning):
"""Warning for operations which may cause a performance drop.
This warning is issued when an operation is made on the database
which may cause it to slow down on future operations (i.e. making
the node tree grow too much).
"""
pass
class FlavorError(ValueError):
"""Unsupported or unavailable flavor or flavor conversion.
This exception is raised when an unsupported or unavailable flavor
is given to a dataset, or when a conversion of data between two
given flavors is not supported nor available.
"""
pass
class FlavorWarning(Warning):
"""Unsupported or unavailable flavor conversion.
This warning is issued when a conversion of data between two given
flavors is not supported nor available, and raising an error would
render the data inaccessible (e.g. on a dataset of an unavailable
flavor in a read-only file).
See the `FlavorError` class for more information.
"""
pass
class FiltersWarning(Warning):
"""Unavailable filters.
This warning is issued when a valid filter is specified but it is
not available in the system. It may mean that an available default
filter is to be used instead.
"""
pass
class OldIndexWarning(Warning):
"""Unsupported index format.
This warning is issued when an index in an unsupported format is
found. The index will be marked as invalid and will behave as if
    it doesn't exist.
"""
pass
class DataTypeWarning(Warning):
"""Unsupported data type.
This warning is issued when an unsupported HDF5 data type is found
(normally in a file created with other tool than PyTables).
"""
pass
class ExperimentalFeatureWarning(Warning):
"""Generic warning for experimental features.
This warning is issued when using a functionality that is still
experimental and that users have to use with care.
"""
pass
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
| bsd-3-clause |
petrus-v/odoo | openerp/addons/base/ir/ir_qweb.py | 38 | 64677 | # -*- coding: utf-8 -*-
import collections
import cStringIO
import datetime
import hashlib
import json
import itertools
import logging
import math
import os
import re
import sys
import textwrap
import uuid
from subprocess import Popen, PIPE
from urlparse import urlparse
import babel
import babel.dates
import werkzeug
from lxml import etree, html
from PIL import Image
import psycopg2
import openerp.http
import openerp.tools
from openerp.tools.func import lazy_property
import openerp.tools.lru
from openerp.http import request
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import osv, orm, fields
from openerp.tools import html_escape as escape
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MAX_CSS_RULES = 4095
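# Older Internet Explorer releases (up to IE9) silently ignore CSS selectors
# beyond 4095 per stylesheet; AssetsBundle.css_page splits generated CSS into
# pages of at most MAX_CSS_RULES selectors to stay under that limit.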
#--------------------------------------------------------------------
# QWeb template engine
#--------------------------------------------------------------------
class QWebException(Exception):
def __init__(self, message, **kw):
Exception.__init__(self, message)
self.qweb = dict(kw)
def pretty_xml(self):
if 'node' not in self.qweb:
return ''
return etree.tostring(self.qweb['node'], pretty_print=True)
class QWebTemplateNotFound(QWebException):
pass
def raise_qweb_exception(etype=None, **kw):
if etype is None:
etype = QWebException
orig_type, original, tb = sys.exc_info()
try:
raise etype, original, tb
except etype, e:
for k, v in kw.items():
e.qweb[k] = v
# Will use `raise foo from bar` in python 3 and rename cause to __cause__
e.qweb['cause'] = original
raise
def _build_attribute(name, value):
value = escape(value)
if isinstance(name, unicode): name = name.encode('utf-8')
if isinstance(value, unicode): value = value.encode('utf-8')
return ' %s="%s"' % (name, value)
class QWebContext(dict):
def __init__(self, cr, uid, data, loader=None, templates=None, context=None):
self.cr = cr
self.uid = uid
self.loader = loader
self.templates = templates or {}
self.context = context
dic = dict(data)
super(QWebContext, self).__init__(dic)
self['defined'] = lambda key: key in self
def safe_eval(self, expr):
locals_dict = collections.defaultdict(lambda: None)
locals_dict.update(self)
locals_dict.pop('cr', None)
locals_dict.pop('loader', None)
return eval(expr, None, locals_dict, nocopy=True, locals_builtins=True)
def copy(self):
""" Clones the current context, conserving all data and metadata
(loader, template cache, ...)
"""
return QWebContext(self.cr, self.uid, dict.copy(self),
loader=self.loader,
templates=self.templates,
context=self.context)
def __copy__(self):
return self.copy()
class QWeb(orm.AbstractModel):
""" Base QWeb rendering engine
* to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and
create new models called :samp:`ir.qweb.field.{widget}`
* alternatively, override :meth:`~.get_converter_for` and return an
arbitrary model to use as field converter
Beware that if you need extensions or alterations which could be
incompatible with other subsystems, you should create a local object
inheriting from ``ir.qweb`` and customize that.
"""
_name = 'ir.qweb'
_void_elements = frozenset([
u'area', u'base', u'br', u'col', u'embed', u'hr', u'img', u'input',
u'keygen', u'link', u'menuitem', u'meta', u'param', u'source',
u'track', u'wbr'])
_format_regex = re.compile(
'(?:'
# ruby-style pattern
'#\{(.+?)\}'
')|(?:'
# jinja-style pattern
'\{\{(.+?)\}\}'
')')
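    # e.g. both "/report/#{doc.id}" (ruby style) and "/report/{{ doc.id }}"
    # (jinja style) are picked up by eval_format and t-attf-* attributes.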
def __init__(self, pool, cr):
super(QWeb, self).__init__(pool, cr)
self._render_tag = self.prefixed_methods('render_tag_')
self._render_att = self.prefixed_methods('render_att_')
def prefixed_methods(self, prefix):
""" Extracts all methods prefixed by ``prefix``, and returns a mapping
of (t-name, method) where the t-name is the method name with prefix
removed and underscore converted to dashes
:param str prefix:
:return: dict
"""
n_prefix = len(prefix)
return dict(
(name[n_prefix:].replace('_', '-'), getattr(type(self), name))
for name in dir(self)
if name.startswith(prefix)
)
def register_tag(self, tag, func):
self._render_tag[tag] = func
def add_template(self, qwebcontext, name, node):
"""Add a parsed template in the context. Used to preprocess templates."""
qwebcontext.templates[name] = node
def load_document(self, document, res_id, qwebcontext):
"""
Loads an XML document and installs any contained template in the engine
:type document: a parsed lxml.etree element, an unparsed XML document
(as a string) or the path of an XML file to load
"""
if not isinstance(document, basestring):
# assume lxml.etree.Element
dom = document
elif document.startswith("<?xml"):
dom = etree.fromstring(document)
else:
dom = etree.parse(document).getroot()
for node in dom:
if node.get('t-name'):
name = str(node.get("t-name"))
self.add_template(qwebcontext, name, node)
if res_id and node.tag == "t":
self.add_template(qwebcontext, res_id, node)
res_id = None
def get_template(self, name, qwebcontext):
""" Tries to fetch the template ``name``, either gets it from the
context's template cache or loads one with the context's loader (if
any).
:raises QWebTemplateNotFound: if the template can not be found or loaded
"""
origin_template = qwebcontext.get('__caller__') or qwebcontext['__stack__'][0]
if qwebcontext.loader and name not in qwebcontext.templates:
try:
xml_doc = qwebcontext.loader(name)
except ValueError:
raise_qweb_exception(QWebTemplateNotFound, message="Loader could not find template %r" % name, template=origin_template)
self.load_document(xml_doc, isinstance(name, (int, long)) and name or None, qwebcontext=qwebcontext)
if name in qwebcontext.templates:
return qwebcontext.templates[name]
raise QWebTemplateNotFound("Template %r not found" % name, template=origin_template)
def eval(self, expr, qwebcontext):
try:
return qwebcontext.safe_eval(expr)
except Exception:
template = qwebcontext.get('__template__')
raise_qweb_exception(message="Could not evaluate expression %r" % expr, expression=expr, template=template)
def eval_object(self, expr, qwebcontext):
return self.eval(expr, qwebcontext)
def eval_str(self, expr, qwebcontext):
if expr == "0":
return qwebcontext.get(0, '')
val = self.eval(expr, qwebcontext)
if isinstance(val, unicode):
return val.encode("utf8")
if val is False or val is None:
return ''
return str(val)
def eval_format(self, expr, qwebcontext):
expr, replacements = self._format_regex.subn(
lambda m: self.eval_str(m.group(1) or m.group(2), qwebcontext),
expr
)
if replacements:
return expr
try:
return str(expr % qwebcontext)
except Exception:
template = qwebcontext.get('__template__')
raise_qweb_exception(message="Format error for expression %r" % expr, expression=expr, template=template)
def eval_bool(self, expr, qwebcontext):
return int(bool(self.eval(expr, qwebcontext)))
def render(self, cr, uid, id_or_xml_id, qwebcontext=None, loader=None, context=None):
""" render(cr, uid, id_or_xml_id, qwebcontext=None, loader=None, context=None)
Renders the template specified by the provided template name
:param qwebcontext: context for rendering the template
:type qwebcontext: dict or :class:`QWebContext` instance
:param loader: if ``qwebcontext`` is a dict, loader set into the
context instantiated for rendering
"""
if qwebcontext is None:
qwebcontext = {}
if not isinstance(qwebcontext, QWebContext):
qwebcontext = QWebContext(cr, uid, qwebcontext, loader=loader, context=context)
qwebcontext['__template__'] = id_or_xml_id
stack = qwebcontext.get('__stack__', [])
if stack:
qwebcontext['__caller__'] = stack[-1]
stack.append(id_or_xml_id)
qwebcontext['__stack__'] = stack
qwebcontext['xmlid'] = str(stack[0]) # Temporary fix
return self.render_node(self.get_template(id_or_xml_id, qwebcontext), qwebcontext)
def render_node(self, element, qwebcontext):
generated_attributes = ""
t_render = None
template_attributes = {}
for (attribute_name, attribute_value) in element.attrib.iteritems():
attribute_name = unicode(attribute_name)
if attribute_name == "groups":
cr = qwebcontext.get('request') and qwebcontext['request'].cr or None
uid = qwebcontext.get('request') and qwebcontext['request'].uid or None
can_see = self.user_has_groups(cr, uid, groups=attribute_value) if cr and uid else False
if not can_see:
return ''
attribute_value = attribute_value.encode("utf8")
if attribute_name.startswith("t-"):
for attribute in self._render_att:
if attribute_name[2:].startswith(attribute):
attrs = self._render_att[attribute](
self, element, attribute_name, attribute_value, qwebcontext)
for att, val in attrs:
if not val: continue
generated_attributes += self.render_attribute(element, att, val, qwebcontext)
break
else:
if attribute_name[2:] in self._render_tag:
t_render = attribute_name[2:]
template_attributes[attribute_name[2:]] = attribute_value
else:
generated_attributes += self.render_attribute(element, attribute_name, attribute_value, qwebcontext)
if 'debug' in template_attributes:
debugger = template_attributes.get('debug', 'pdb')
__import__(debugger).set_trace() # pdb, ipdb, pudb, ...
if t_render:
result = self._render_tag[t_render](self, element, template_attributes, generated_attributes, qwebcontext)
else:
result = self.render_element(element, template_attributes, generated_attributes, qwebcontext)
if element.tail:
result += element.tail.encode('utf-8')
if isinstance(result, unicode):
return result.encode('utf-8')
return result
def render_element(self, element, template_attributes, generated_attributes, qwebcontext, inner=None):
# element: element
# template_attributes: t-* attributes
# generated_attributes: generated attributes
# qwebcontext: values
# inner: optional innerXml
if inner:
g_inner = inner.encode('utf-8') if isinstance(inner, unicode) else inner
else:
g_inner = [] if element.text is None else [element.text.encode('utf-8')]
for current_node in element.iterchildren(tag=etree.Element):
try:
g_inner.append(self.render_node(current_node, qwebcontext))
except QWebException:
raise
except Exception:
template = qwebcontext.get('__template__')
raise_qweb_exception(message="Could not render element %r" % element.tag, node=element, template=template)
name = unicode(element.tag)
inner = "".join(g_inner)
trim = template_attributes.get("trim", 0)
if trim == 0:
pass
elif trim == 'left':
inner = inner.lstrip()
elif trim == 'right':
inner = inner.rstrip()
elif trim == 'both':
inner = inner.strip()
if name == "t":
return inner
elif len(inner) or name not in self._void_elements:
return "<%s%s>%s</%s>" % tuple(
qwebcontext if isinstance(qwebcontext, str) else qwebcontext.encode('utf-8')
for qwebcontext in (name, generated_attributes, inner, name)
)
else:
return "<%s%s/>" % (name.encode("utf-8"), generated_attributes)
def render_attribute(self, element, name, value, qwebcontext):
return _build_attribute(name, value)
# Attributes
def render_att_att(self, element, attribute_name, attribute_value, qwebcontext):
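        # Handles the three attribute forms, e.g.:
        #   t-att-class="expr"         -> evaluated value
        #   t-attf-href="/p/#{obj.id}" -> format-string value
        #   t-att="mapping_or_pair"    -> one or more (name, value) pairs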
if attribute_name.startswith("t-attf-"):
return [(attribute_name[7:], self.eval_format(attribute_value, qwebcontext))]
if attribute_name.startswith("t-att-"):
return [(attribute_name[6:], self.eval(attribute_value, qwebcontext))]
result = self.eval_object(attribute_value, qwebcontext)
if isinstance(result, collections.Mapping):
return result.iteritems()
# assume tuple
return [result]
# Tags
def render_tag_raw(self, element, template_attributes, generated_attributes, qwebcontext):
inner = self.eval_str(template_attributes["raw"], qwebcontext)
return self.render_element(element, template_attributes, generated_attributes, qwebcontext, inner)
def render_tag_esc(self, element, template_attributes, generated_attributes, qwebcontext):
options = json.loads(template_attributes.get('esc-options') or '{}')
widget = self.get_widget_for(options.get('widget'))
inner = widget.format(template_attributes['esc'], options, qwebcontext)
return self.render_element(element, template_attributes, generated_attributes, qwebcontext, inner)
def _iterate(self, iterable):
if isinstance (iterable, collections.Mapping):
return iterable.iteritems()
return itertools.izip(*itertools.tee(iterable))
def render_tag_foreach(self, element, template_attributes, generated_attributes, qwebcontext):
expr = template_attributes["foreach"]
enum = self.eval_object(expr, qwebcontext)
if enum is None:
template = qwebcontext.get('__template__')
raise QWebException("foreach enumerator %r is not defined while rendering template %r" % (expr, template), template=template)
if isinstance(enum, int):
enum = range(enum)
varname = template_attributes['as'].replace('.', '_')
copy_qwebcontext = qwebcontext.copy()
size = None
if isinstance(enum, collections.Sized):
size = len(enum)
copy_qwebcontext["%s_size" % varname] = size
copy_qwebcontext["%s_all" % varname] = enum
ru = []
for index, (item, value) in enumerate(self._iterate(enum)):
copy_qwebcontext.update({
varname: item,
'%s_value' % varname: value,
'%s_index' % varname: index,
'%s_first' % varname: index == 0,
})
if size is not None:
copy_qwebcontext['%s_last' % varname] = index + 1 == size
if index % 2:
copy_qwebcontext.update({
'%s_parity' % varname: 'odd',
'%s_even' % varname: False,
'%s_odd' % varname: True,
})
else:
copy_qwebcontext.update({
'%s_parity' % varname: 'even',
'%s_even' % varname: True,
'%s_odd' % varname: False,
})
ru.append(self.render_element(element, template_attributes, generated_attributes, copy_qwebcontext))
for k in qwebcontext.keys():
qwebcontext[k] = copy_qwebcontext[k]
return "".join(ru)
def render_tag_if(self, element, template_attributes, generated_attributes, qwebcontext):
if self.eval_bool(template_attributes["if"], qwebcontext):
return self.render_element(element, template_attributes, generated_attributes, qwebcontext)
return ""
def render_tag_call(self, element, template_attributes, generated_attributes, qwebcontext):
d = qwebcontext.copy()
d[0] = self.render_element(element, template_attributes, generated_attributes, d)
cr = d.get('request') and d['request'].cr or None
uid = d.get('request') and d['request'].uid or None
template = self.eval_format(template_attributes["call"], d)
try:
template = int(template)
except ValueError:
pass
return self.render(cr, uid, template, d)
def render_tag_call_assets(self, element, template_attributes, generated_attributes, qwebcontext):
""" This special 't-call' tag can be used in order to aggregate/minify javascript and css assets"""
if len(element):
            # An asset bundle is rendered in two different contexts (when generating the
            # html and when generating the bundle itself), so it must be qwebcontext free;
            # even the '0' variable is forbidden
template = qwebcontext.get('__template__')
raise QWebException("t-call-assets cannot contain children nodes", template=template)
xmlid = template_attributes['call-assets']
cr, uid, context = [getattr(qwebcontext, attr) for attr in ('cr', 'uid', 'context')]
bundle = AssetsBundle(xmlid, cr=cr, uid=uid, context=context, registry=self.pool)
css = self.get_attr_bool(template_attributes.get('css'), default=True)
js = self.get_attr_bool(template_attributes.get('js'), default=True)
return bundle.to_html(css=css, js=js, debug=bool(qwebcontext.get('debug')))
def render_tag_set(self, element, template_attributes, generated_attributes, qwebcontext):
if "value" in template_attributes:
qwebcontext[template_attributes["set"]] = self.eval_object(template_attributes["value"], qwebcontext)
elif "valuef" in template_attributes:
qwebcontext[template_attributes["set"]] = self.eval_format(template_attributes["valuef"], qwebcontext)
else:
qwebcontext[template_attributes["set"]] = self.render_element(element, template_attributes, generated_attributes, qwebcontext)
return ""
def render_tag_field(self, element, template_attributes, generated_attributes, qwebcontext):
""" eg: <span t-record="browse_record(res.partner, 1)" t-field="phone">+1 555 555 8069</span>"""
node_name = element.tag
assert node_name not in ("table", "tbody", "thead", "tfoot", "tr", "td",
"li", "ul", "ol", "dl", "dt", "dd"),\
"RTE widgets do not work correctly on %r elements" % node_name
assert node_name != 't',\
"t-field can not be used on a t element, provide an actual HTML node"
record, field_name = template_attributes["field"].rsplit('.', 1)
record = self.eval_object(record, qwebcontext)
field = record._fields[field_name]
options = json.loads(template_attributes.get('field-options') or '{}')
field_type = get_field_type(field, options)
converter = self.get_converter_for(field_type)
return converter.to_html(qwebcontext.cr, qwebcontext.uid, field_name, record, options,
element, template_attributes, generated_attributes, qwebcontext, context=qwebcontext.context)
def get_converter_for(self, field_type):
""" returns a :class:`~openerp.models.Model` used to render a
``t-field``.
By default, tries to get the model named
:samp:`ir.qweb.field.{field_type}`, falling back on ``ir.qweb.field``.
:param str field_type: type or widget of field to render
"""
return self.pool.get('ir.qweb.field.' + field_type, self.pool['ir.qweb.field'])
def get_widget_for(self, widget):
""" returns a :class:`~openerp.models.Model` used to render a
``t-esc``
:param str widget: name of the widget to use, or ``None``
"""
widget_model = ('ir.qweb.widget.' + widget) if widget else 'ir.qweb.widget'
return self.pool.get(widget_model) or self.pool['ir.qweb.widget']
def get_attr_bool(self, attr, default=False):
if attr:
attr = attr.lower()
if attr in ('false', '0'):
return False
elif attr in ('true', '1'):
return True
return default
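# Illustrative sketch, not part of the original module: typical use of the
# engine through the model registry. All names below (registry, cr, uid, the
# template xml id and the values dict) are hypothetical placeholders.
def _example_render_template(registry, cr, uid, xml_id, values):
    # `values` is wrapped in a QWebContext; its keys become the variables
    # available to t-esc / t-if / t-foreach expressions in the template.
    return registry['ir.qweb'].render(cr, uid, xml_id, values)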
#--------------------------------------------------------------------
# QWeb Fields converters
#--------------------------------------------------------------------
class FieldConverter(osv.AbstractModel):
""" Used to convert a t-field specification into an output HTML field.
:meth:`~.to_html` is the entry point of this conversion from QWeb, it:
* converts the record value to html using :meth:`~.record_to_html`
* generates the metadata attributes (``data-oe-``) to set on the root
result node
* generates the root result node itself through :meth:`~.render_element`
"""
_name = 'ir.qweb.field'
def attributes(self, cr, uid, field_name, record, options,
source_element, g_att, t_att, qweb_context,
context=None):
""" attributes(cr, uid, field_name, record, options, source_element, g_att, t_att, qweb_context, context=None)
Generates the metadata attributes (prefixed by ``data-oe-`` for the
root node of the field conversion. Attribute values are escaped by the
parent.
The default attributes are:
* ``model``, the name of the record's model
* ``id`` the id of the record to which the field belongs
* ``field`` the name of the converted field
* ``type`` the logical field type (widget, may not match the field's
``type``, may not be any Field subclass name)
* ``translate``, a boolean flag (``0`` or ``1``) denoting whether the
field is translatable
* ``expression``, the original expression
:returns: iterable of (attribute name, attribute value) pairs.
"""
field = record._fields[field_name]
field_type = get_field_type(field, options)
return [
('data-oe-model', record._name),
('data-oe-id', record.id),
('data-oe-field', field_name),
('data-oe-type', field_type),
('data-oe-expression', t_att['field']),
]
def value_to_html(self, cr, uid, value, field, options=None, context=None):
""" value_to_html(cr, uid, value, field, options=None, context=None)
Converts a single value to its HTML version/output
"""
if not value: return ''
return value
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
""" record_to_html(cr, uid, field_name, record, options=None, context=None)
Converts the specified field of the browse_record ``record`` to HTML
"""
field = record._fields[field_name]
return self.value_to_html(
cr, uid, record[field_name], field, options=options, context=context)
def to_html(self, cr, uid, field_name, record, options,
source_element, t_att, g_att, qweb_context, context=None):
""" to_html(cr, uid, field_name, record, options, source_element, t_att, g_att, qweb_context, context=None)
Converts a ``t-field`` to its HTML output. A ``t-field`` may be
extended by a ``t-field-options``, which is a JSON-serialized mapping
of configuration values.
A default configuration key is ``widget`` which can override the
field's own ``_type``.
"""
try:
content = self.record_to_html(cr, uid, field_name, record, options, context=context)
if options.get('html-escape', True):
content = escape(content)
elif hasattr(content, '__html__'):
content = content.__html__()
except Exception:
_logger.warning("Could not get field %s for model %s",
field_name, record._name, exc_info=True)
content = None
inherit_branding = context and context.get('inherit_branding')
if not inherit_branding and context and context.get('inherit_branding_auto'):
inherit_branding = self.pool['ir.model.access'].check(cr, uid, record._name, 'write', False, context=context)
if inherit_branding:
# add branding attributes
g_att += ''.join(
_build_attribute(name, value)
for name, value in self.attributes(
cr, uid, field_name, record, options,
source_element, g_att, t_att, qweb_context, context=context)
)
return self.render_element(cr, uid, source_element, t_att, g_att,
qweb_context, content)
def qweb_object(self):
return self.pool['ir.qweb']
def render_element(self, cr, uid, source_element, t_att, g_att,
qweb_context, content):
""" render_element(cr, uid, source_element, t_att, g_att, qweb_context, content)
Final rendering hook, by default just calls ir.qweb's ``render_element``
"""
return self.qweb_object().render_element(
source_element, t_att, g_att, qweb_context, content or '')
def user_lang(self, cr, uid, context):
""" user_lang(cr, uid, context)
Fetches the res.lang object corresponding to the language code stored
        in the user's context. Falls back to en_US if no lang is present in the
context *or the language code is not valid*.
:returns: res.lang browse_record
"""
if context is None: context = {}
lang_code = context.get('lang') or 'en_US'
Lang = self.pool['res.lang']
lang_ids = Lang.search(cr, uid, [('code', '=', lang_code)], context=context) \
or Lang.search(cr, uid, [('code', '=', 'en_US')], context=context)
return Lang.browse(cr, uid, lang_ids[0], context=context)
class FloatConverter(osv.AbstractModel):
_name = 'ir.qweb.field.float'
_inherit = 'ir.qweb.field'
def precision(self, cr, uid, field, options=None, context=None):
_, precision = field.digits or (None, None)
return precision
def value_to_html(self, cr, uid, value, field, options=None, context=None):
if context is None:
context = {}
precision = self.precision(cr, uid, field, options=options, context=context)
fmt = '%f' if precision is None else '%.{precision}f'
lang_code = context.get('lang') or 'en_US'
lang = self.pool['res.lang']
formatted = lang.format(cr, uid, [lang_code], fmt.format(precision=precision), value, grouping=True)
# %f does not strip trailing zeroes. %g does but its precision causes
# it to switch to scientific notation starting at a million *and* to
# strip decimals. So use %f and if no precision was specified manually
# strip trailing 0.
if precision is None:
formatted = re.sub(r'(?:(0|\d+?)0+)$', r'\1', formatted)
return formatted
class DateConverter(osv.AbstractModel):
_name = 'ir.qweb.field.date'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
if not value or len(value)<10: return ''
lang = self.user_lang(cr, uid, context=context)
locale = babel.Locale.parse(lang.code)
if isinstance(value, basestring):
value = datetime.datetime.strptime(
value[:10], openerp.tools.DEFAULT_SERVER_DATE_FORMAT)
if options and 'format' in options:
pattern = options['format']
else:
strftime_pattern = lang.date_format
pattern = openerp.tools.posix_to_ldml(strftime_pattern, locale=locale)
return babel.dates.format_date(
value, format=pattern,
locale=locale)
class DateTimeConverter(osv.AbstractModel):
_name = 'ir.qweb.field.datetime'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
if not value: return ''
lang = self.user_lang(cr, uid, context=context)
locale = babel.Locale.parse(lang.code)
if isinstance(value, basestring):
value = datetime.datetime.strptime(
value, openerp.tools.DEFAULT_SERVER_DATETIME_FORMAT)
value = fields.datetime.context_timestamp(
cr, uid, timestamp=value, context=context)
if options and 'format' in options:
pattern = options['format']
else:
strftime_pattern = (u"%s %s" % (lang.date_format, lang.time_format))
pattern = openerp.tools.posix_to_ldml(strftime_pattern, locale=locale)
if options and options.get('hide_seconds'):
pattern = pattern.replace(":ss", "").replace(":s", "")
return babel.dates.format_datetime(value, format=pattern, locale=locale)
def record_to_html(self, cr, uid, field_name, record, options, context=None):
        field = record._fields[field_name]
value = record[field_name]
return self.value_to_html(
cr, uid, value, field, options=options, context=dict(context, **record.env.context))
class TextConverter(osv.AbstractModel):
_name = 'ir.qweb.field.text'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
"""
Escapes the value and converts newlines to br. This is bullshit.
"""
if not value: return ''
return nl2br(value, options=options)
class SelectionConverter(osv.AbstractModel):
_name = 'ir.qweb.field.selection'
_inherit = 'ir.qweb.field'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
value = record[field_name]
if not value: return ''
field = record._fields[field_name]
selection = dict(field.get_description(record.env)['selection'])
return self.value_to_html(
cr, uid, selection[value], field, options=options)
class ManyToOneConverter(osv.AbstractModel):
_name = 'ir.qweb.field.many2one'
_inherit = 'ir.qweb.field'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
[read] = record.read([field_name])
if not read[field_name]: return ''
_, value = read[field_name]
return nl2br(value, options=options)
class HTMLConverter(osv.AbstractModel):
_name = 'ir.qweb.field.html'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
return HTMLSafe(value or '')
class ImageConverter(osv.AbstractModel):
""" ``image`` widget rendering, inserts a data:uri-using image tag in the
document. May be overridden by e.g. the website module to generate links
instead.
    .. todo:: what happens if different outputs need different converters? e.g.
reports may need embedded images or FS links whereas website
needs website-aware
"""
_name = 'ir.qweb.field.image'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
try:
image = Image.open(cStringIO.StringIO(value.decode('base64')))
image.verify()
except IOError:
raise ValueError("Non-image binary fields can not be converted to HTML")
except: # image.verify() throws "suitable exceptions", I have no idea what they are
raise ValueError("Invalid image content")
return HTMLSafe('<img src="data:%s;base64,%s">' % (Image.MIME[image.format], value))
class MonetaryConverter(osv.AbstractModel):
""" ``monetary`` converter, has a mandatory option
``display_currency``.
The currency is used for formatting *and rounding* of the float value. It
is assumed that the linked res_currency has a non-empty rounding value and
res.currency's ``round`` method is used to perform rounding.
.. note:: the monetary converter internally adds the qweb context to its
options mapping, so that the context is available to callees.
It's set under the ``_qweb_context`` key.
"""
_name = 'ir.qweb.field.monetary'
_inherit = 'ir.qweb.field'
def to_html(self, cr, uid, field_name, record, options,
source_element, t_att, g_att, qweb_context, context=None):
options['_qweb_context'] = qweb_context
return super(MonetaryConverter, self).to_html(
cr, uid, field_name, record, options,
source_element, t_att, g_att, qweb_context, context=context)
def record_to_html(self, cr, uid, field_name, record, options, context=None):
if context is None:
context = {}
Currency = self.pool['res.currency']
display_currency = self.display_currency(cr, uid, options['display_currency'], options)
# lang.format mandates a sprintf-style format. These formats are non-
# minimal (they have a default fixed precision instead), and
# lang.format will not set one by default. currency.round will not
# provide one either. So we need to generate a precision value
# (integer > 0) from the currency's rounding (a float generally < 1.0).
#
# The log10 of the rounding should be the number of digits involved if
# negative, if positive clamp to 0 digits and call it a day.
# nb: int() ~ floor(), we want nearest rounding instead
precision = int(math.floor(math.log10(display_currency.rounding)))
fmt = "%.{0}f".format(-precision if precision < 0 else 0)
from_amount = record[field_name]
if options.get('from_currency'):
from_currency = self.display_currency(cr, uid, options['from_currency'], options)
from_amount = Currency.compute(cr, uid, from_currency.id, display_currency.id, from_amount)
lang_code = context.get('lang') or 'en_US'
lang = self.pool['res.lang']
formatted_amount = lang.format(cr, uid, [lang_code],
fmt, Currency.round(cr, uid, display_currency, from_amount),
grouping=True, monetary=True)
pre = post = u''
if display_currency.position == 'before':
pre = u'{symbol}\N{NO-BREAK SPACE}'
else:
post = u'\N{NO-BREAK SPACE}{symbol}'
return HTMLSafe(u'{pre}<span class="oe_currency_value">{0}</span>{post}'.format(
formatted_amount,
pre=pre, post=post,
).format(
symbol=display_currency.symbol,
))
def display_currency(self, cr, uid, currency, options):
return self.qweb_object().eval_object(
currency, options['_qweb_context'])
TIMEDELTA_UNITS = (
('year', 3600 * 24 * 365),
('month', 3600 * 24 * 30),
('week', 3600 * 24 * 7),
('day', 3600 * 24),
('hour', 3600),
('minute', 60),
('second', 1)
)
class DurationConverter(osv.AbstractModel):
""" ``duration`` converter, to display integral or fractional values as
human-readable time spans (e.g. 1.5 as "1 hour 30 minutes").
Can be used on any numerical field.
Has a mandatory option ``unit`` which can be one of ``second``, ``minute``,
``hour``, ``day``, ``week`` or ``year``, used to interpret the numerical
field value before converting it.
Sub-second values will be ignored.
"""
_name = 'ir.qweb.field.duration'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
units = dict(TIMEDELTA_UNITS)
if value < 0:
raise ValueError(_("Durations can't be negative"))
if not options or options.get('unit') not in units:
raise ValueError(_("A unit must be provided to duration widgets"))
locale = babel.Locale.parse(
self.user_lang(cr, uid, context=context).code)
factor = units[options['unit']]
sections = []
r = value * factor
for unit, secs_per_unit in TIMEDELTA_UNITS:
v, r = divmod(r, secs_per_unit)
if not v: continue
section = babel.dates.format_timedelta(
v*secs_per_unit, threshold=1, locale=locale)
if section:
sections.append(section)
return ' '.join(sections)
class RelativeDatetimeConverter(osv.AbstractModel):
_name = 'ir.qweb.field.relative'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
parse_format = openerp.tools.DEFAULT_SERVER_DATETIME_FORMAT
locale = babel.Locale.parse(
self.user_lang(cr, uid, context=context).code)
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, parse_format)
# value should be a naive datetime in UTC. So is fields.Datetime.now()
reference = datetime.datetime.strptime(field.now(), parse_format)
return babel.dates.format_timedelta(
value - reference, add_direction=True, locale=locale)
class Contact(orm.AbstractModel):
_name = 'ir.qweb.field.contact'
_inherit = 'ir.qweb.field.many2one'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
if context is None:
context = {}
if options is None:
options = {}
opf = options.get('fields') or ["name", "address", "phone", "mobile", "fax", "email"]
value_rec = record[field_name]
if not value_rec:
return None
value_rec = value_rec.sudo().with_context(show_address=True)
value = value_rec.name_get()[0][1]
val = {
'name': value.split("\n")[0],
'address': escape("\n".join(value.split("\n")[1:])),
'phone': value_rec.phone,
'mobile': value_rec.mobile,
'fax': value_rec.fax,
'city': value_rec.city,
'country_id': value_rec.country_id.display_name,
'website': value_rec.website,
'email': value_rec.email,
'fields': opf,
'object': value_rec,
'options': options
}
html = self.pool["ir.ui.view"].render(cr, uid, "base.contact", val, engine='ir.qweb', context=context).decode('utf8')
return HTMLSafe(html)
class QwebView(orm.AbstractModel):
_name = 'ir.qweb.field.qweb'
_inherit = 'ir.qweb.field.many2one'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
if not getattr(record, field_name):
return None
view = getattr(record, field_name)
if view._model._name != "ir.ui.view":
_logger.warning("%s.%s must be a 'ir.ui.view' model." % (record, field_name))
return None
ctx = (context or {}).copy()
ctx['object'] = record
html = view.render(ctx, engine='ir.qweb', context=ctx).decode('utf8')
return HTMLSafe(html)
class QwebWidget(osv.AbstractModel):
_name = 'ir.qweb.widget'
def _format(self, inner, options, qwebcontext):
return self.pool['ir.qweb'].eval_str(inner, qwebcontext)
def format(self, inner, options, qwebcontext):
return escape(self._format(inner, options, qwebcontext))
class QwebWidgetMonetary(osv.AbstractModel):
_name = 'ir.qweb.widget.monetary'
_inherit = 'ir.qweb.widget'
def _format(self, inner, options, qwebcontext):
inner = self.pool['ir.qweb'].eval(inner, qwebcontext)
display = self.pool['ir.qweb'].eval_object(options['display_currency'], qwebcontext)
precision = int(round(math.log10(display.rounding)))
fmt = "%.{0}f".format(-precision if precision < 0 else 0)
lang_code = qwebcontext.context.get('lang') or 'en_US'
formatted_amount = self.pool['res.lang'].format(
qwebcontext.cr, qwebcontext.uid, [lang_code], fmt, inner, grouping=True, monetary=True
)
pre = post = u''
if display.position == 'before':
pre = u'{symbol}\N{NO-BREAK SPACE}'
else:
post = u'\N{NO-BREAK SPACE}{symbol}'
return u'{pre}{0}{post}'.format(
formatted_amount, pre=pre, post=post
).format(symbol=display.symbol,)
class HTMLSafe(object):
""" HTMLSafe string wrapper, Werkzeug's escape() has special handling for
objects with a ``__html__`` methods but AFAIK does not provide any such
object.
Wrapping a string in HTML will prevent its escaping
"""
__slots__ = ['string']
def __init__(self, string):
self.string = string
def __html__(self):
return self.string
def __str__(self):
s = self.string
if isinstance(s, unicode):
return s.encode('utf-8')
return s
def __unicode__(self):
s = self.string
if isinstance(s, str):
return s.decode('utf-8')
return s
def nl2br(string, options=None):
""" Converts newlines to HTML linebreaks in ``string``. Automatically
escapes content unless options['html-escape'] is set to False, and returns
the result wrapped in an HTMLSafe object.
:param str string:
:param dict options:
:rtype: HTMLSafe
"""
if options is None: options = {}
if options.get('html-escape', True):
string = escape(string)
return HTMLSafe(string.replace('\n', '<br>\n'))
def get_field_type(field, options):
""" Gets a t-field's effective type from the field definition and its options """
return options.get('widget', field.type)
class AssetError(Exception):
pass
class AssetNotFound(AssetError):
pass
class AssetsBundle(object):
# Sass installation:
#
# sudo gem install sass compass bootstrap-sass
#
# If the following error is encountered:
# 'ERROR: Cannot load compass.'
# Use this:
# sudo gem install compass --pre
cmd_sass = ['sass', '--stdin', '-t', 'compressed', '--unix-newlines', '--compass', '-r', 'bootstrap-sass']
rx_css_import = re.compile("(@import[^;{]+;?)", re.M)
rx_sass_import = re.compile("""(@import\s?['"]([^'"]+)['"])""")
rx_css_split = re.compile("\/\*\! ([a-f0-9-]+) \*\/")
def __init__(self, xmlid, debug=False, cr=None, uid=None, context=None, registry=None):
self.xmlid = xmlid
self.cr = request.cr if cr is None else cr
self.uid = request.uid if uid is None else uid
self.context = request.context if context is None else context
self.registry = request.registry if registry is None else registry
self.javascripts = []
self.stylesheets = []
self.css_errors = []
self.remains = []
self._checksum = None
context = self.context.copy()
context['inherit_branding'] = False
context['inherit_branding_auto'] = False
context['rendering_bundle'] = True
self.html = self.registry['ir.ui.view'].render(self.cr, self.uid, xmlid, context=context)
self.parse()
def parse(self):
fragments = html.fragments_fromstring(self.html)
for el in fragments:
if isinstance(el, basestring):
self.remains.append(el)
elif isinstance(el, html.HtmlElement):
src = el.get('src', '')
href = el.get('href', '')
atype = el.get('type')
media = el.get('media')
if el.tag == 'style':
if atype == 'text/sass' or src.endswith('.sass'):
self.stylesheets.append(SassAsset(self, inline=el.text, media=media))
else:
self.stylesheets.append(StylesheetAsset(self, inline=el.text, media=media))
elif el.tag == 'link' and el.get('rel') == 'stylesheet' and self.can_aggregate(href):
if href.endswith('.sass') or atype == 'text/sass':
self.stylesheets.append(SassAsset(self, url=href, media=media))
else:
self.stylesheets.append(StylesheetAsset(self, url=href, media=media))
elif el.tag == 'script' and not src:
self.javascripts.append(JavascriptAsset(self, inline=el.text))
elif el.tag == 'script' and self.can_aggregate(src):
self.javascripts.append(JavascriptAsset(self, url=src))
else:
self.remains.append(html.tostring(el))
else:
try:
self.remains.append(html.tostring(el))
except Exception:
                    # not yet implemented
raise NotImplementedError
def can_aggregate(self, url):
return not urlparse(url).netloc and not url.startswith(('/web/css', '/web/js'))
def to_html(self, sep=None, css=True, js=True, debug=False):
if sep is None:
sep = '\n '
response = []
if debug:
if css and self.stylesheets:
self.compile_sass()
for style in self.stylesheets:
response.append(style.to_html())
if js:
for jscript in self.javascripts:
response.append(jscript.to_html())
else:
url_for = self.context.get('url_for', lambda url: url)
if css and self.stylesheets:
suffix = ''
if request:
ua = request.httprequest.user_agent
if ua.browser == "msie" and int((ua.version or '0').split('.')[0]) < 10:
suffix = '.0'
href = '/web/css%s/%s/%s' % (suffix, self.xmlid, self.version)
response.append('<link href="%s" rel="stylesheet"/>' % url_for(href))
if js:
src = '/web/js/%s/%s' % (self.xmlid, self.version)
response.append('<script type="text/javascript" src="%s"></script>' % url_for(src))
response.extend(self.remains)
return sep + sep.join(response)
@lazy_property
def last_modified(self):
"""Returns last modified date of linked files"""
return max(itertools.chain(
(asset.last_modified for asset in self.javascripts),
(asset.last_modified for asset in self.stylesheets),
))
@lazy_property
def version(self):
return self.checksum[0:7]
@lazy_property
def checksum(self):
"""
Not really a full checksum.
We compute a SHA1 on the rendered bundle + max linked files last_modified date
"""
check = self.html + str(self.last_modified)
return hashlib.sha1(check).hexdigest()
def js(self):
content = self.get_cache('js')
if content is None:
content = ';\n'.join(asset.minify() for asset in self.javascripts)
self.set_cache('js', content)
return content
def css(self, page_number=None):
if page_number is not None:
return self.css_page(page_number)
content = self.get_cache('css')
if content is None:
self.compile_sass()
content = '\n'.join(asset.minify() for asset in self.stylesheets)
if self.css_errors:
msg = '\n'.join(self.css_errors)
content += self.css_message(msg.replace('\n', '\\A '))
# move up all @import rules to the top
matches = []
def push(matchobj):
matches.append(matchobj.group(0))
return ''
content = re.sub(self.rx_css_import, push, content)
matches.append(content)
content = u'\n'.join(matches)
if not self.css_errors:
self.set_cache('css', content)
content = content.encode('utf-8')
return content
def css_page(self, page_number):
content = self.get_cache('css.%d' % (page_number,))
if page_number:
return content
if content is None:
css = self.css().decode('utf-8')
re_rules = '([^{]+\{(?:[^{}]|\{[^{}]*\})*\})'
re_selectors = '()(?:\s*@media\s*[^{]*\{)?(?:\s*(?:[^,{]*(?:,|\{(?:[^}]*\}))))'
css_url = '@import url(\'/web/css.%%d/%s/%s\');' % (self.xmlid, self.version)
pages = [[]]
page = pages[0]
page_selectors = 0
for rule in re.findall(re_rules, css):
selectors = len(re.findall(re_selectors, rule))
if page_selectors + selectors < MAX_CSS_RULES:
page_selectors += selectors
page.append(rule)
else:
pages.append([rule])
page = pages[-1]
page_selectors = selectors
if len(pages) == 1:
pages = []
for idx, page in enumerate(pages):
self.set_cache("css.%d" % (idx+1), ''.join(page))
content = '\n'.join(css_url % i for i in range(1,len(pages)+1))
self.set_cache("css.0", content)
if not content:
return self.css()
return content
def get_cache(self, type):
content = None
domain = [('url', '=', '/web/%s/%s/%s' % (type, self.xmlid, self.version))]
bundle = self.registry['ir.attachment'].search_read(self.cr, openerp.SUPERUSER_ID, domain, ['datas'], context=self.context)
if bundle and bundle[0]['datas']:
content = bundle[0]['datas'].decode('base64')
return content
def set_cache(self, type, content):
ira = self.registry['ir.attachment']
url_prefix = '/web/%s/%s/' % (type, self.xmlid)
# Invalidate previous caches
try:
with self.cr.savepoint():
domain = [('url', '=like', url_prefix + '%')]
oids = ira.search(self.cr, openerp.SUPERUSER_ID, domain, context=self.context)
if oids:
ira.unlink(self.cr, openerp.SUPERUSER_ID, oids, context=self.context)
url = url_prefix + self.version
ira.create(self.cr, openerp.SUPERUSER_ID, dict(
datas=content.encode('utf8').encode('base64'),
type='binary',
name=url,
url=url,
), context=self.context)
except psycopg2.Error:
pass
def css_message(self, message):
return """
body:before {
background: #ffc;
width: 100%%;
font-size: 14px;
font-family: monospace;
white-space: pre;
content: "%s";
}
""" % message.replace('"', '\\"')
def compile_sass(self):
"""
Checks if the bundle contains any sass content, then compiles it to css.
Css compilation is done at the bundle level and not in the assets
        because they are potentially interdependent.
"""
sass = [asset for asset in self.stylesheets if isinstance(asset, SassAsset)]
if not sass:
return
source = '\n'.join([asset.get_source() for asset in sass])
# move up all @import rules to the top and exclude file imports
imports = []
def push(matchobj):
ref = matchobj.group(2)
line = '@import "%s"' % ref
if '.' not in ref and line not in imports and not ref.startswith(('.', '/', '~')):
imports.append(line)
return ''
source = re.sub(self.rx_sass_import, push, source)
imports.append(source)
source = u'\n'.join(imports)
try:
compiler = Popen(self.cmd_sass, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except Exception:
msg = "Could not find 'sass' program needed to compile sass/scss files"
_logger.error(msg)
self.css_errors.append(msg)
return
result = compiler.communicate(input=source.encode('utf-8'))
if compiler.returncode:
error = self.get_sass_error(''.join(result), source=source)
_logger.warning(error)
self.css_errors.append(error)
return
compiled = result[0].strip().decode('utf8')
fragments = self.rx_css_split.split(compiled)[1:]
while fragments:
asset_id = fragments.pop(0)
asset = next(asset for asset in sass if asset.id == asset_id)
asset._content = fragments.pop(0)
def get_sass_error(self, stderr, source=None):
# TODO: try to find out which asset the error belongs to
error = stderr.split('Load paths')[0].replace(' Use --trace for backtrace.', '')
error += "This error occured while compiling the bundle '%s' containing:" % self.xmlid
for asset in self.stylesheets:
if isinstance(asset, SassAsset):
error += '\n - %s' % (asset.url if asset.url else '<inline sass>')
return error
class WebAsset(object):
html_url = '%s'
def __init__(self, bundle, inline=None, url=None):
self.id = str(uuid.uuid4())
self.bundle = bundle
self.inline = inline
self.url = url
self.cr = bundle.cr
self.uid = bundle.uid
self.registry = bundle.registry
self.context = bundle.context
self._content = None
self._filename = None
self._ir_attach = None
name = '<inline asset>' if inline else url
self.name = "%s defined in bundle '%s'" % (name, bundle.xmlid)
if not inline and not url:
raise Exception("An asset should either be inlined or url linked")
def stat(self):
if not (self.inline or self._filename or self._ir_attach):
addon = filter(None, self.url.split('/'))[0]
try:
# Test url against modules static assets
mpath = openerp.http.addons_manifest[addon]['addons_path']
self._filename = mpath + self.url.replace('/', os.path.sep)
except Exception:
try:
# Test url against ir.attachments
fields = ['__last_update', 'datas', 'mimetype']
domain = [('type', '=', 'binary'), ('url', '=', self.url)]
ira = self.registry['ir.attachment']
attach = ira.search_read(self.cr, openerp.SUPERUSER_ID, domain, fields, context=self.context)
self._ir_attach = attach[0]
except Exception:
raise AssetNotFound("Could not find %s" % self.name)
def to_html(self):
raise NotImplementedError()
@lazy_property
def last_modified(self):
try:
self.stat()
if self._filename:
return datetime.datetime.fromtimestamp(os.path.getmtime(self._filename))
elif self._ir_attach:
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
last_update = self._ir_attach['__last_update']
try:
return datetime.datetime.strptime(last_update, server_format + '.%f')
except ValueError:
return datetime.datetime.strptime(last_update, server_format)
except Exception:
pass
return datetime.datetime(1970, 1, 1)
@property
def content(self):
if not self._content:
self._content = self.inline or self._fetch_content()
return self._content
def _fetch_content(self):
""" Fetch content from file or database"""
try:
self.stat()
if self._filename:
with open(self._filename, 'rb') as fp:
return fp.read().decode('utf-8')
else:
return self._ir_attach['datas'].decode('base64')
except UnicodeDecodeError:
raise AssetError('%s is not utf-8 encoded.' % self.name)
except IOError:
raise AssetNotFound('File %s does not exist.' % self.name)
except:
raise AssetError('Could not get content for %s.' % self.name)
def minify(self):
return self.content
def with_header(self, content=None):
if content is None:
content = self.content
return '\n/* %s */\n%s' % (self.name, content)
class JavascriptAsset(WebAsset):
def minify(self):
return self.with_header(rjsmin(self.content))
def _fetch_content(self):
try:
return super(JavascriptAsset, self)._fetch_content()
except AssetError, e:
return "console.error(%s);" % json.dumps(e.message)
def to_html(self):
if self.url:
return '<script type="text/javascript" src="%s"></script>' % (self.html_url % self.url)
else:
return '<script type="text/javascript" charset="utf-8">%s</script>' % self.with_header()
class StylesheetAsset(WebAsset):
rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U)
rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U)
rx_sourceMap = re.compile(r'(/\*# sourceMappingURL=.*)', re.U)
rx_charset = re.compile(r'(@charset "[^"]+";)', re.U)
def __init__(self, *args, **kw):
self.media = kw.pop('media', None)
super(StylesheetAsset, self).__init__(*args, **kw)
@property
def content(self):
content = super(StylesheetAsset, self).content
if self.media:
content = '@media %s { %s }' % (self.media, content)
return content
def _fetch_content(self):
try:
content = super(StylesheetAsset, self)._fetch_content()
web_dir = os.path.dirname(self.url)
content = self.rx_import.sub(
r"""@import \1%s/""" % (web_dir,),
content,
)
content = self.rx_url.sub(
r"url(\1%s/" % (web_dir,),
content,
)
# remove charset declarations, we only support utf-8
content = self.rx_charset.sub('', content)
except AssetError, e:
self.bundle.css_errors.append(e.message)
return ''
return content
def minify(self):
# remove existing sourcemaps, they make no sense after re-minification
content = self.rx_sourceMap.sub('', self.content)
# comments
content = re.sub(r'/\*.*?\*/', '', content, flags=re.S)
# space
content = re.sub(r'\s+', ' ', content)
content = re.sub(r' *([{}]) *', r'\1', content)
return self.with_header(content)
def to_html(self):
media = (' media="%s"' % werkzeug.utils.escape(self.media)) if self.media else ''
if self.url:
href = self.html_url % self.url
return '<link rel="stylesheet" href="%s" type="text/css"%s/>' % (href, media)
else:
return '<style type="text/css"%s>%s</style>' % (media, self.with_header())
class SassAsset(StylesheetAsset):
html_url = '%s.css'
rx_indent = re.compile(r'^( +|\t+)', re.M)
indent = None
reindent = ' '
def minify(self):
return self.with_header()
def to_html(self):
if self.url:
try:
ira = self.registry['ir.attachment']
url = self.html_url % self.url
domain = [('type', '=', 'binary'), ('url', '=', self.url)]
with self.cr.savepoint():
ira_id = ira.search(self.cr, openerp.SUPERUSER_ID, domain, context=self.context)
if ira_id:
# TODO: update only if needed
ira.write(self.cr, openerp.SUPERUSER_ID, [ira_id], {'datas': self.content},
context=self.context)
else:
ira.create(self.cr, openerp.SUPERUSER_ID, dict(
datas=self.content.encode('utf8').encode('base64'),
mimetype='text/css',
type='binary',
name=url,
url=url,
), context=self.context)
except psycopg2.Error:
pass
return super(SassAsset, self).to_html()
def get_source(self):
content = textwrap.dedent(self.inline or self._fetch_content())
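# The substitution below captures the file's own indentation unit on the first
# match and replaces it with the target ``reindent`` unit; if both already
# agree, StopIteration aborts the substitution and the source is kept as is.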
def fix_indent(m):
ind = m.group()
if self.indent is None:
self.indent = ind
if self.indent == self.reindent:
# Don't reindent the file if its indentation is already the target one (reindent)
raise StopIteration()
return ind.replace(self.indent, self.reindent)
try:
content = self.rx_indent.sub(fix_indent, content)
except StopIteration:
pass
return "/*! %s */\n%s" % (self.id, content)
def rjsmin(script):
""" Minify js with a clever regex.
Taken from http://opensource.perlig.de/rjsmin
Apache License, Version 2.0 """
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
(groups[6] and ' ') or
(groups[7] and ' ') or
''
)
result = re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#'
r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-'
r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^'
r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|'
r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0'
r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0'
r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:'
r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
).strip()
return result
# vim:et:
| agpl-3.0 |
hlin117/statsmodels | examples/python/regression_plots.py | 33 | 9585 |
## Regression Plots
from __future__ import print_function
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import ols
### Duncan's Prestige Dataset
#### Load the Data
# We can use a utility function to load any R dataset available from the great <a href="http://vincentarelbundock.github.com/Rdatasets/">Rdatasets package</a>.
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
prestige.head()
prestige_model = ols("prestige ~ income + education", data=prestige).fit()
print(prestige_model.summary())
#### Influence plots
# Influence plots show the (externally) studentized residuals vs. the leverage of each observation as measured by the hat matrix.
#
# Externally studentized residuals are residuals that are scaled by their standard deviation where
#
# $$var(\\hat{\epsilon}_i)=\hat{\sigma}^2_i(1-h_{ii})$$
#
# with
#
# $$\hat{\sigma}^2_i=\frac{1}{n - p - 1}\sum_{j}^{n}\hat{\epsilon}_j^2 \quad \forall \; j \neq i$$
#
# $n$ is the number of observations and $p$ is the number of regressors. $h_{ii}$ is the $i$-th diagonal element of the hat matrix
#
# $$H=X(X^{\;\prime}X)^{-1}X^{\;\prime}$$
#
# The influence of each point can be visualized by the criterion keyword argument. Options are Cook's distance and DFFITS, two measures of influence.
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.influence_plot(prestige_model, ax=ax, criterion="cooks")
# As you can see there are a few worrisome observations. Both contractor and reporter have low leverage but a large residual. <br />
# RR.engineer has small residual and large leverage. Conductor and minister have both high leverage and large residuals, and, <br />
# therefore, large influence.
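# The numbers behind the influence plot can be cross-checked on the results
# object. Minimal sketch; the OLSInfluence attribute and column names used
# below are assumptions and not part of the original example.
influence = prestige_model.get_influence()
infl_frame = influence.summary_frame()
print(infl_frame[["hat_diag", "student_resid", "cooks_d"]].sort_values("cooks_d", ascending=False).head())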
#### Partial Regression Plots
# Since we are doing multivariate regressions, we cannot just look at individual bivariate plots to discern relationships. <br />
# Instead, we want to look at the relationship of the dependent variable and independent variables conditional on the other <br />
# independent variables. We can do this through using partial regression plots, otherwise known as added variable plots. <br />
#
# In a partial regression plot, to discern the relationship between the response variable and the $k$-th variable, we compute <br />
# the residuals by regressing the response variable versus the independent variables excluding $X_k$. We can denote this by <br />
# $X_{\sim k}$. We then compute the residuals by regressing $X_k$ on $X_{\sim k}$. The partial regression plot is the plot <br />
# of the former versus the latter residuals. <br />
#
# The notable points of this plot are that the fitted line has slope $\beta_k$ and intercept zero. The residuals of this plot <br />
# are the same as those of the least squares fit of the original model with full $X$. You can discern the effects of the <br />
# individual data values on the estimation of a coefficient easily. If obs_labels is True, then these points are annotated <br />
# with their observation label. You can also see the violation of underlying assumptions such as homoskedasticity and <br />
# linearity.
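# The slope property stated above can be verified by hand (minimal sketch,
# using only OLS fits on the residual series; no additional API is assumed):
resid_y = ols("prestige ~ education", data=prestige).fit().resid
resid_x = ols("income ~ education", data=prestige).fit().resid
partial_fit = sm.OLS(resid_y, resid_x).fit()
print(np.asarray(partial_fit.params)[0], prestige_model.params["income"])  # identical slopes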
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("prestige", "income", ["income", "education"], data=prestige, ax=ax)
ax = fig.axes[0]
ax.set_xlim(-2e-15, 1e-14)
ax.set_ylim(-25, 30);
fix, ax = plt.subplots(figsize=(12,14))
fig = sm.graphics.plot_partregress("prestige", "income", ["education"], data=prestige, ax=ax)
# As you can see the partial regression plot confirms the influence of conductor, minister, and RR.engineer on the partial relationship between income and prestige. The cases greatly decrease the effect of income on prestige. Dropping these cases confirms this.
subset = ~prestige.index.isin(["conductor", "RR.engineer", "minister"])
prestige_model2 = ols("prestige ~ income + education", data=prestige, subset=subset).fit()
print(prestige_model2.summary())
# For a quick check of all the regressors, you can use plot_partregress_grid. These plots will not label the <br />
# points, but you can use them to identify problems and then use plot_partregress to get more information.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(prestige_model, fig=fig)
#### Component-Component plus Residual (CCPR) Plots
# The CCPR plot provides a way to judge the effect of one regressor on the <br />
# response variable by taking into account the effects of the other <br />
# independent variables. The partial residuals plot is defined as <br />
# $\text{Residuals} + B_iX_i \text{ }\text{ }$ versus $X_i$. The component adds $B_iX_i$ versus <br />
# $X_i$ to show where the fitted line would lie. Care should be taken if $X_i$ <br />
# is highly correlated with any of the other independent variables. If this <br />
# is the case, the variance evident in the plot will be an underestimate of <br />
# the true variance.
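# The plotted quantity is easy to form by hand (minimal sketch; parameter and
# column names are taken from the prestige model above):
b_edu = prestige_model.params["education"]
partial_resid = prestige_model.resid + b_edu * prestige["education"]
# Plotting partial_resid against prestige["education"] reproduces the scatter
# that plot_ccpr draws below, with b_edu * education as the component line.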
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_ccpr(prestige_model, "education", ax=ax)
# As you can see the relationship between the variation in prestige explained by education conditional on income seems to be linear, though you can see there are some observations that are exerting considerable influence on the relationship. We can quickly look at more than one variable by using plot_ccpr_grid.
fig = plt.figure(figsize=(12, 8))
fig = sm.graphics.plot_ccpr_grid(prestige_model, fig=fig)
#### Regression Plots
# The plot_regress_exog function is a convenience function that gives a 2x2 plot containing the dependent variable and fitted values with confidence intervals vs. the independent variable chosen, the residuals of the model vs. the chosen independent variable, a partial regression plot, and a CCPR plot. This function can be used for quickly checking modeling assumptions with respect to a single regressor.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_regress_exog(prestige_model, "education", fig=fig)
#### Fit Plot
# The plot_fit function plots the fitted values versus a chosen independent variable. It includes prediction confidence intervals and optionally plots the true dependent variable.
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_fit(prestige_model, "education", ax=ax)
### Statewide Crime 2009 Dataset
# Compare the following to http://www.ats.ucla.edu/stat/stata/webbooks/reg/chapter4/statareg_self_assessment_answers4.htm
#
# Though the data here is not the same as in that example. You could run that example by uncommenting the necessary cells below.
#dta = pd.read_csv("http://www.stat.ufl.edu/~aa/social/csv_files/statewide-crime-2.csv")
#dta = dta.set_index("State", inplace=True).dropna()
#dta.rename(columns={"VR" : "crime",
# "MR" : "murder",
# "M" : "pctmetro",
# "W" : "pctwhite",
# "H" : "pcths",
# "P" : "poverty",
# "S" : "single"
# }, inplace=True)
#
#crime_model = ols("murder ~ pctmetro + poverty + pcths + single", data=dta).fit()
dta = sm.datasets.statecrime.load_pandas().data
crime_model = ols("murder ~ urban + poverty + hs_grad + single", data=dta).fit()
print(crime_model.summary())
#### Partial Regression Plots
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(crime_model, fig=fig)
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("murder", "hs_grad", ["urban", "poverty", "single"], ax=ax, data=dta)
#### Leverage-Resid<sup>2</sup> Plot
# Closely related to the influence_plot is the leverage-resid<sup>2</sup> plot.
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.plot_leverage_resid2(crime_model, ax=ax)
#### Influence Plot
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.influence_plot(crime_model, ax=ax)
#### Using robust regression to correct for outliers.
# Part of the problem here in recreating the Stata results is that M-estimators are not robust to leverage points. MM-estimators should do better with this example.
from statsmodels.formula.api import rlm
rob_crime_model = rlm("murder ~ urban + poverty + hs_grad + single", data=dta,
M=sm.robust.norms.TukeyBiweight(3)).fit(conv="weights")
print(rob_crime_model.summary())
#rob_crime_model = rlm("murder ~ pctmetro + poverty + pcths + single", data=dta, M=sm.robust.norms.TukeyBiweight()).fit(conv="weights")
#print(rob_crime_model.summary())
# There aren't yet influence diagnostics as part of RLM, but we can recreate them. (This depends on the status of [issue #888](https://github.com/statsmodels/statsmodels/issues/808))
weights = rob_crime_model.weights
idx = weights > 0
X = rob_crime_model.model.exog[idx]
ww = weights[idx] / weights[idx].mean()
hat_matrix_diag = ww*(X*np.linalg.pinv(X).T).sum(1)
resid = rob_crime_model.resid
resid2 = resid**2
resid2 /= resid2.sum()
nobs = int(idx.sum())
hm = hat_matrix_diag.mean()
rm = resid2.mean()
from statsmodels.graphics import utils
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(resid2[idx], hat_matrix_diag, 'o')
ax = utils.annotate_axes(range(nobs), labels=rob_crime_model.model.data.row_labels[idx],
points=lzip(resid2[idx], hat_matrix_diag), offset_points=[(-5,5)]*nobs,
size="large", ax=ax)
ax.set_xlabel("resid2")
ax.set_ylabel("leverage")
ylim = ax.get_ylim()
ax.vlines(rm, *ylim)
xlim = ax.get_xlim()
ax.hlines(hm, *xlim)
ax.margins(0,0)
| bsd-3-clause |
Jmainguy/ansible-modules-core | utilities/logic/include_vars.py | 28 | 1982 | # -*- mode: python -*-
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author: "Benno Joy (@bennojoy)"
module: include_vars
short_description: Load variables from files, dynamically within a task.
description:
- Loads variables from a YAML/JSON file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from.
options:
file:
version_added: "2.2"
description:
- The file name from which variables should be loaded.
- If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook.
name:
version_added: "2.2"
description:
- The name of a variable into which to assign the included vars; if omitted (null) they will be made top level vars.
default: null
free-form:
description:
- This module allows you to specify the 'file' option directly w/o any other options.
notes:
- The file is always required either as the explicit option or using the free-form.
version_added: "1.4"
'''
EXAMPLES = """
# Include vars of stuff.yml into the 'stuff' variable (2.2).
- include_vars:
file: stuff.yml
name: stuff
# Conditionally load variables into 'plans' when x is 0, otherwise do not. (2.2)
- include_vars: file=contingency_plan.yml name=plans
when: x == 0
# Load a variable file based on the OS type, or a default if not found.
- include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_distribution }}.yml"
- "{{ ansible_os_family }}.yml"
- "default.yml"
# bare include (free-form)
- include_vars: myvars.yml
"""
| gpl-3.0 |
alekz112/statsmodels | statsmodels/tsa/statespace/model.py | 6 | 3282 | """
State Space Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from .representation import Representation
from .kalman_filter import KalmanFilter
import statsmodels.tsa.base.tsa_model as tsbase
class Model(KalmanFilter, Representation, tsbase.TimeSeriesModel):
"""
State space representation of a time series process, with Kalman filter and
Statsmodels integration.
This intermediate class joins the state space representation and filtering
classes with the Statsmodels `TimeSeriesModel`.
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
k_states : int
The dimension of the unobserved state process.
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k. Default is no
exogenous regressors.
dates : array-like of datetime, optional
An array-like object of datetime objects. If a Pandas object is given
for endog, it is assumed to have a DateIndex.
freq : str, optional
The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',
'M', 'A', or 'Q'. This is optional if dates are given.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices, for Kalman filtering options, for Kalman smoothing
options, or for Simulation smoothing options.
See `Representation`, `KalmanFilter`, and `KalmanSmoother` for more
details.
See Also
--------
statsmodels.tsa.statespace.tsa.base.tsa_model.TimeSeriesModel
statsmodels.tsa.statespace.mlemodel.MLEModel
statsmodels.tsa.statespace.kalman_filter.KalmanFilter
statsmodels.tsa.statespace.representation.Representation
"""
def __init__(self, endog, k_states, exog=None, dates=None, freq=None,
**kwargs):
# Initialize the model base
tsbase.TimeSeriesModel.__init__(self, endog=endog, exog=exog,
dates=dates, freq=freq, missing='none')
# Need to modify the endog variable
endog = self.endog
# Base class may allow 1-dim data, whereas we need 2-dim
if endog.ndim == 1:
endog.shape = (endog.shape[0], 1) # this will be C-contiguous
# Base classes data may be either C-ordered or F-ordered - we want it
# to be C-ordered since it will also be in shape (nobs, k_endog), and
# then we can just transpose it.
if not endog.flags['C_CONTIGUOUS']:
# TODO this breaks the reference link between the model endog
# variable and the original object - do we need a warn('')?
# This will happen often with Pandas DataFrames, which are often
# Fortran-ordered and in the long format
endog = np.ascontiguousarray(endog)
# Now endog is C-ordered and in long format (nobs x k_endog). To get
# F-ordered and in wide format just need to transpose.
endog = endog.T
# Initialize the statespace representation
super(Model, self).__init__(endog.shape[0], k_states, **kwargs)
# Bind the data to the model
self.bind(endog)
| bsd-3-clause |
mindw/numpy | numpy/distutils/command/build_scripts.py | 264 | 1731 | """ Modified version of build_scripts that handles building scripts from functions.
"""
from __future__ import division, absolute_import, print_function
from distutils.command.build_scripts import build_scripts as old_build_scripts
from numpy.distutils import log
from numpy.distutils.misc_util import is_string
class build_scripts(old_build_scripts):
def generate_scripts(self, scripts):
new_scripts = []
func_scripts = []
for script in scripts:
if is_string(script):
new_scripts.append(script)
else:
func_scripts.append(script)
if not func_scripts:
return new_scripts
build_dir = self.build_dir
self.mkpath(build_dir)
for func in func_scripts:
script = func(build_dir)
if not script:
continue
if is_string(script):
log.info(" adding '%s' to scripts" % (script,))
new_scripts.append(script)
else:
[log.info(" adding '%s' to scripts" % (s,)) for s in script]
new_scripts.extend(list(script))
return new_scripts
def run (self):
if not self.scripts:
return
self.scripts = self.generate_scripts(self.scripts)
# Now make sure that the distribution object has this list of scripts.
# setuptools' develop command requires that this be a list of filenames,
# not functions.
self.distribution.scripts = self.scripts
return old_build_scripts.run(self)
def get_source_files(self):
from numpy.distutils.misc_util import get_script_files
return get_script_files(self.scripts)
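# Sketch of the kind of non-string entry generate_scripts() accepts (a
# hypothetical example, not part of numpy): a callable receiving the build
# directory and returning the path(s) of the script(s) it created there.
#
#     def make_hello_script(build_dir):
#         import os
#         target = os.path.join(build_dir, 'hello')
#         with open(target, 'w') as f:
#             f.write('#!/usr/bin/env python\nprint("hello")\n')
#         return target
#
# Such a callable is typically registered through
# numpy.distutils.misc_util.Configuration.add_scripts(make_hello_script).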
| bsd-3-clause |
benhc123/p2pool | p2pool/test/test_p2p.py | 269 | 2724 | import random
from twisted.internet import defer, endpoints, protocol, reactor
from twisted.trial import unittest
from p2pool import networks, p2p
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_sharereq(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
def handle_share_hashes(self, hashes, peer):
peer.get_shares(
hashes=[hashes[0]],
parents=5,
stops=[],
).chainDeferred(self.df)
df = defer.Deferred()
n = MyNode(df)
n.start()
try:
yield df
finally:
yield n.stop()
@defer.inlineCallbacks
def test_tx_limit(self):
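# Send three dummy transactions with ~0.9 MB scripts to the connected node
# and require that the connection is dropped within one second of sending.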
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
self.sent_time = 0
@defer.inlineCallbacks
def got_conn(self, conn):
p2p.Node.got_conn(self, conn)
yield deferral.sleep(.5)
new_mining_txs = dict(self.mining_txs_var.value)
for i in xrange(3):
huge_tx = dict(
version=0,
tx_ins=[],
tx_outs=[dict(
value=0,
script='x'*900000,
)],
lock_time=i,
)
new_mining_txs[bitcoin_data.hash256(bitcoin_data.tx_type.pack(huge_tx))] = huge_tx
self.mining_txs_var.set(new_mining_txs)
self.sent_time = reactor.seconds()
def lost_conn(self, conn, reason):
self.df.callback(None)
try:
p2p.Protocol.max_remembered_txs_size *= 10
df = defer.Deferred()
n = MyNode(df)
n.start()
yield df
if not (n.sent_time <= reactor.seconds() <= n.sent_time + 1):
raise ValueError('node did not disconnect within 1 second of receiving too much tx data')
yield n.stop()
finally:
p2p.Protocol.max_remembered_txs_size //= 10
| gpl-3.0 |
sengupta/spritzbot | spritzbot/processor.py | 1 | 2789 | import os
import re
import imp
import json
class TweetProcessor:
plugins = {}
commands = {}
base_path = os.path.dirname(os.path.realpath(__file__))
plugin_path = os.path.join(base_path, "plugins")
def __init__(self):
self.load_plugins()
def load_plugins(self):
"""Loads plugins and associated commands."""
# Filename pattern that we want to load.
re_plugin = re.compile('[^.].*\.py$')
for plugin_module in os.listdir(self.plugin_path):
if re_plugin.match(plugin_module):
# Get the module's name
name = plugin_module[:-3]
plugin_info = imp.find_module(name, [self.plugin_path])
plugin = imp.load_module(name, *plugin_info)
self.plugins.update({name:plugin})
for command in plugin.commands():
status_type = command['type']
triggers = command['triggers']
if self.commands.has_key(status_type):
self.commands[status_type].append({'plugin':name,'triggers':triggers})
else:
self.commands[status_type] = [{'plugin':name,'triggers':triggers}]
def process(self, data):
"""Processes the status/tweet and hands over to appropriate plugins."""
try:
status = json.loads(data)
except:
return None
for status_type in self.commands:
# see if it is of type 'text' or 'friends' or something else
if status.has_key(status_type):
# if it is, find out the modules associated with it
commands = self.commands[status_type]
# for each module that handles say 'text',
for command in commands:
# for triggers that should send data to process
# in that module,
triggers = command['triggers']
for t in triggers:
# compiled regex match:
if t.match(data):
# currently, we're just printing the output
# later there will be facility to reply
# or better - send a tweepy api object to the
# processing module so it can take actions
# independently.
print self.plugins[command['plugin']].process(status)
if __name__ == '__main__':
tp = TweetProcessor()
tweet = r"""{"text":"Chai craving!","id":190207791800135680}"""
friends = r"""{"friends":[123,456,789]}"""
tp.process(friends)
tp.process(tweet)
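# A minimal plugin for the plugins/ directory, sketched from how load_plugins()
# and process() use the plugin API (the trigger pattern and reply text are made
# up for illustration):
#
#     import re
#
#     def commands():
#         return [{'type': 'text', 'triggers': [re.compile('.*chai', re.I)]}]
#
#     def process(status):
#         return "Someone mentioned chai: %s" % status.get('text')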
| bsd-2-clause |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_peer_express_route_circuit_connections_operations.py | 1 | 9352 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PeerExpressRouteCircuitConnectionsOperations:
"""PeerExpressRouteCircuitConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs
) -> "_models.PeerExpressRouteCircuitConnection":
"""Gets the specified Peer Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the peer express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeerExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.PeerExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PeerExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> AsyncIterable["_models.PeerExpressRouteCircuitConnectionListResult"]:
"""Gets all global reach peer connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PeerExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.PeerExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PeerExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections'} # type: ignore
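# Hypothetical usage sketch (not part of the generated code): this operation
# group is normally reached through the versioned async NetworkManagementClient;
# the client class and attribute names below are assumptions based on the usual
# azure-mgmt-network layout.
#
#     client = NetworkManagementClient(credential, subscription_id)
#     ops = client.peer_express_route_circuit_connections
#     connection = await ops.get(resource_group, circuit, peering, connection_name)
#     async for conn in ops.list(resource_group, circuit, peering):
#         print(conn.name)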
| mit |
aringh/odl | odl/discr/partition.py | 1 | 51284 | # Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Partitons of interval products based on rectilinear grids.
A partition of a set is a finite collection of nonempty, pairwise
disjoint subsets whose union is the original set. The partitions
considered here are based on hypercubes, i.e. the tensor products
of partitions of intervals.
"""
from __future__ import print_function, division, absolute_import
from builtins import object
import numpy as np
from odl.discr.grid import RectGrid, uniform_grid_fromintv
from odl.set import IntervalProd
from odl.util import (
normalized_index_expression, normalized_nodes_on_bdry,
normalized_scalar_param_list, safe_int_conv,
signature_string, indent, array_str, npy_printoptions)
__all__ = ('RectPartition', 'uniform_partition_fromintv',
'uniform_partition_fromgrid', 'uniform_partition',
'nonuniform_partition')
class RectPartition(object):
"""Rectangular partition by hypercubes based on `RectGrid`.
In 1d, a partition of an interval is implicitly defined by a
collection of points x[0], ..., x[N-1] (a grid) which are chosen to
lie in the center of the subintervals. The i-th subinterval is thus
given by
``I[i] = [(x[i-1]+x[i])/2, (x[i]+x[i+1])/2]``
"""
def __init__(self, intv_prod, grid):
"""Initialize a new instance.
Parameters
----------
intv_prod : `IntervalProd`
Set to be partitioned
grid : `RectGrid`
Spatial points supporting the partition. They must be
contained in ``intv_prod``.
"""
super(RectPartition, self).__init__()
if not isinstance(intv_prod, IntervalProd):
raise TypeError('{!r} is not an IntervalProd instance'
''.format(intv_prod))
if not isinstance(grid, RectGrid):
raise TypeError('{!r} is not a RectGrid instance'
''.format(grid))
# More conclusive error than the one from contains_set
if intv_prod.ndim != grid.ndim:
raise ValueError('interval product {} is {}-dimensional while '
'grid {} is {}-dimensional'
''.format(intv_prod, intv_prod.ndim,
grid, grid.ndim))
if not intv_prod.contains_set(grid):
raise ValueError('{} is not contained in {}'
''.format(grid, intv_prod))
self.__set = intv_prod
self.__grid = grid
# Initialize the cell boundaries, the defining property of partitions
bdry_vecs = []
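# Interior boundaries are the midpoints between neighbouring grid points;
# the outermost boundaries are taken from the partitioned set itself, so the
# first and last cell may be cropped or extended relative to the grid.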
for ax, vec in enumerate(self.grid.coord_vectors):
bdry = np.empty(len(vec) + 1)
bdry[1:-1] = (vec[1:] + vec[:-1]) / 2.0
bdry[0] = self.min()[ax]
bdry[-1] = self.max()[ax]
bdry_vecs.append(bdry)
self.__cell_boundary_vecs = tuple(bdry_vecs)
# Initialize nodes_on_bdry
left_on_bdry = np.isclose(self.grid.min_pt, self.set.min_pt)[:, None]
right_on_bdry = np.isclose(self.grid.max_pt, self.set.max_pt)[:, None]
on_bdry = np.hstack([left_on_bdry, right_on_bdry]).tolist()
self.__nodes_on_bdry = tuple(tuple(r) for r in on_bdry)
@property
def cell_boundary_vecs(self):
"""Return the cell boundaries as coordinate vectors.
Examples
--------
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. ]), array([-1. , -0.5, 1. , 2. ]))
"""
return self.__cell_boundary_vecs
@property
def set(self):
"""Partitioned set, an `IntervalProd`."""
return self.__set
@property
def nodes_on_bdry(self):
"""Encoding of grid points lying on the boundary.
Examples
--------
Using global option (default ``False``):
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3])
>>> part.nodes_on_bdry
False
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=True)
>>> part.nodes_on_bdry
True
``False`` in axis 0, ``True`` in axis 1:
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=[False, True])
>>> part.nodes_on_bdry
(False, True)
In axis 0, ``False`` left and ``True`` right, in axis 1 ``False``:
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=[[False, True],
... False])
>>> part.nodes_on_bdry
((False, True), False)
"""
if self.size == 0:
return True
nodes_on_bdry = []
for on_bdry in self.nodes_on_bdry_byaxis:
left, right = on_bdry
if left == right:
nodes_on_bdry.append(left)
else:
nodes_on_bdry.append((left, right))
if all(on_bdry == nodes_on_bdry[0] for on_bdry in nodes_on_bdry[1:]):
return nodes_on_bdry[0]
else:
return tuple(nodes_on_bdry)
@property
def nodes_on_bdry_byaxis(self):
"""Nested tuple of booleans for `nodes_on_bdry`.
This attribute is equivalent to `nodes_on_bdry`, but always in
the form of a nested tuple.
"""
return self.__nodes_on_bdry
# IntervalProd related pass-through methods and derived properties
# min, max and extent are for duck-typing purposes
@property
def min_pt(self):
"""Minimum coordinates of the partitioned set."""
return self.set.min_pt
@property
def max_pt(self):
"""Maximum coordinates of the partitioned set."""
return self.set.max_pt
@property
def mid_pt(self):
"""Midpoint of the partitioned set."""
return self.set.mid_pt
def min(self):
"""Return the minimum point of the partitioned set.
See Also
--------
odl.set.domain.IntervalProd.min
"""
return self.set.min()
def max(self):
"""Return the maximum point of the partitioned set.
See Also
--------
odl.set.domain.IntervalProd.max
"""
return self.set.max()
@property
def extent(self):
"""Return a vector containing the total extent (max - min)."""
return self.set.extent
@property
def grid(self):
"""`RectGrid` defining this partition."""
return self.__grid
# RectGrid related pass-through methods and derived properties
@property
def is_uniform_byaxis(self):
"""Boolean tuple showing uniformity of ``self.grid`` per axis.
Examples
--------
>>> part = nonuniform_partition([0, 1, 3], [1, 2, 3])
>>> part.is_uniform_byaxis
(False, True)
"""
return self.grid.is_uniform_byaxis
@property
def is_uniform(self):
"""``True`` if `grid` is uniform."""
return self.grid.is_uniform
@property
def has_isotropic_cells(self):
"""``True`` if `grid` is uniform and `cell sides` are all equal.
Always ``True`` for 1D partitions.
Examples
--------
>>> part = uniform_partition([0, -1], [1, 1], (5, 10))
>>> part.has_isotropic_cells
True
>>> part = uniform_partition([0, -1], [1, 1], (5, 5))
>>> part.has_isotropic_cells
False
"""
return self.is_uniform and np.allclose(self.cell_sides[:-1],
self.cell_sides[1:])
@property
def ndim(self):
"""Number of dimensions."""
return self.grid.ndim
@property
def shape(self):
"""Number of cells per axis, equal to ``self.grid.shape``."""
return self.grid.shape
@property
def size(self):
"""Total number of cells, equal to ``self.grid.size``."""
return self.grid.size
def __len__(self):
"""Return ``len(self)``.
Total number of cells along the first dimension.
Examples
--------
>>> partition = odl.uniform_partition([0, 0, 0],
... [1, 1, 1],
... shape=(2, 3, 4))
>>> len(partition)
2
See Also
--------
size : The total number of cells.
"""
return len(self.grid)
def points(self, order='C'):
"""Return the sampling grid points.
See Also
--------
RectGrid.points
"""
return self.grid.points(order)
@property
def meshgrid(self):
"""Return the sparse meshgrid of sampling points."""
return self.grid.meshgrid
@property
def coord_vectors(self):
"""Coordinate vectors of the grid."""
return self.grid.coord_vectors
# Further derived methods / properties
@property
def boundary_cell_fractions(self):
"""Return a tuple of contained fractions of boundary cells.
Since the outermost grid points can have any distance to the
boundary of the partitioned set, the "natural" outermost cell
around these points can either be cropped or extended. This
property is a tuple of (float, float) tuples, one entry per
dimension, where the fractions of the left- and rightmost
cells inside the set are stored. If a grid point lies exactly
on the boundary, the value is 1/2 since the cell is cut in half.
Otherwise, any value larger than 1/2 is possible.
Returns
-------
on_bdry : tuple of 2-tuples of floats
Each 2-tuple contains the fraction of the leftmost
(first entry) and rightmost (second entry) cell in the
partitioned set in the corresponding dimension.
See Also
--------
cell_boundary_vecs
Examples
--------
We create a partition of the rectangle [0, 1.5] x [-2, 2] with
the grid points [0, 1] x [-1, 0, 2]. The "natural" cells at the
boundary would be:
[-0.5, 0.5] and [0.5, 1.5] in the first axis
[-1.5, -0.5] and [1, 3] in the second axis
Thus, in the first axis, the fractions contained in [0, 1.5]
are 0.5 and 1, and in the second axis, [-2, 2] contains the
fractions 1.5 and 0.5.
>>> rect = odl.IntervalProd([0, -2], [1.5, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.boundary_cell_fractions
((0.5, 1.0), (1.5, 0.5))
"""
frac_list = []
for ax, (cvec, bmin, bmax) in enumerate(zip(
self.grid.coord_vectors, self.set.min_pt, self.set.max_pt)):
# Degenerate axes have a value of 1.0 (this is used as weight
# in integration formulas later)
if len(cvec) == 1:
frac_list.append((1.0, 1.0))
else:
left_frac = 0.5 + (cvec[0] - bmin) / (cvec[1] - cvec[0])
right_frac = 0.5 + (bmax - cvec[-1]) / (cvec[-1] - cvec[-2])
frac_list.append((left_frac, right_frac))
return tuple(frac_list)
@property
def cell_sizes_vecs(self):
"""Return the cell sizes as coordinate vectors.
Returns
-------
csizes : tuple of `numpy.ndarray`'s
The cell sizes per axis. The length of the vectors is the
same as the corresponding ``grid.coord_vectors``.
For axes with 1 grid point, cell size is set to 0.0.
Examples
--------
We create a partition of the rectangle [0, 1] x [-1, 2] into
2 x 3 cells with the grid points [0, 1] x [-1, 0, 2]. This
implies that the cell boundaries are given as
[0, 0.5, 1] x [-1, -0.5, 1, 2], hence the cell size vectors
are [0.5, 0.5] x [0.5, 1.5, 1]:
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. ]), array([-1. , -0.5, 1. , 2. ]))
>>> part.cell_sizes_vecs
(array([ 0.5, 0.5]), array([ 0.5, 1.5, 1. ]))
"""
csizes = []
for ax, cvec in enumerate(self.grid.coord_vectors):
if len(cvec) == 1:
csizes.append(np.array([0.0]))
else:
csize = np.empty_like(cvec)
csize[1:-1] = (cvec[2:] - cvec[:-2]) / 2.0
csize[0] = (cvec[0] + cvec[1]) / 2 - self.min()[ax]
csize[-1] = self.max()[ax] - (cvec[-2] + cvec[-1]) / 2
csizes.append(csize)
return tuple(csizes)
@property
def cell_sides(self):
"""Side lengths of all 'inner' cells of a uniform partition.
Only defined if ``self.grid`` is uniform.
Examples
--------
We create a partition of the rectangle [0, 1] x [-1, 2] into
3 x 3 cells, where the grid points lie on the boundary. This
means that the grid points are [0, 0.5, 1] x [-1, 0.5, 2],
i.e. the inner cell has side lengths 0.5 x 1.5:
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.uniform_grid([0, -1], [1, 2], (3, 3))
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_sides
array([ 0.5, 1.5])
"""
sides = self.grid.stride
sides[sides == 0] = self.extent[sides == 0]
return sides
@property
def cell_volume(self):
"""Volume of the 'inner' cells of a uniform partition.
Only defined if ``self.grid`` is uniform.
Examples
--------
We create a partition of the rectangle [0, 1] x [-1, 2] into
3 x 3 cells, where the grid points lie on the boundary. This
means that the grid points are [0, 0.5, 1] x [-1, 0.5, 2],
i.e. the inner cell has side lengths 0.5 x 1.5:
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.uniform_grid([0, -1], [1, 2], (3, 3))
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_sides
array([ 0.5, 1.5])
>>> part.cell_volume
0.75
"""
return 0.0 if self.size == 0 else float(np.prod(self.cell_sides))
def approx_equals(self, other, atol):
"""Return ``True`` in case of approximate equality.
Returns
-------
approx_eq : bool
``True`` if ``other`` is a `RectPartition` instance with
``self.set == other.set`` up to ``atol`` and
``self.grid == other.other`` up to ``atol``, ``False`` otherwise.
"""
if other is self:
return True
elif not isinstance(other, RectPartition):
return False
else:
return (self.set.approx_equals(other.set, atol=atol) and
self.grid.approx_equals(other.grid, atol=atol))
def __eq__(self, other):
"""Return ``self == other``."""
# Implemented separately for performance reasons
if other is self:
return True
# Optimized version for exact equality
return (type(other) is type(self) and
self.set == other.set and
self.grid == other.grid)
def __hash__(self):
"""Return ``hash(self)``."""
return hash((type(self), self.set, self.grid))
def __ne__(self, other):
"""Return ``self != other``."""
return not (self == other)
def __getitem__(self, indices):
"""Return ``self[indices]``.
Parameters
----------
indices : index expression
Object determining which parts of the partition to extract.
``None`` (new axis) and empty axes are not supported.
Examples
--------
Take every second grid point. Note that is is in general non-uniform:
>>> partition = odl.uniform_partition(0, 10, 10)
>>> partition[::2]
nonuniform_partition(
[ 0.5, 2.5, 4.5, 6.5, 8.5],
min_pt=0.0, max_pt=10.0
)
A more advanced example is:
>>> intvp = odl.IntervalProd([-1, 1, 4, 2], [3, 6, 5, 7])
>>> grid = odl.RectGrid([-1, 0, 3], [2, 4], [5], [2, 4, 7])
>>> part = odl.RectPartition(intvp, grid)
>>> part
nonuniform_partition(
[-1., 0., 3.],
[ 2., 4.],
[ 5.],
[ 2., 4., 7.],
min_pt=[-1., 1., 4., 2.], max_pt=[ 3., 6., 5., 7.]
)
Take an advanced slice (every second along the first axis,
the last in the last axis and everything in between):
>>> part[::2, ..., -1]
nonuniform_partition(
[-1., 3.],
[ 2., 4.],
[ 5.],
[ 7.],
min_pt=[-1. , 1. , 4. , 5.5], max_pt=[ 3., 6., 5., 7.]
)
Too few indices are filled up with an ellipsis from the right:
>>> part[1]
nonuniform_partition(
[ 0.],
[ 2., 4.],
[ 5.],
[ 2., 4., 7.],
min_pt=[-0.5, 1. , 4. , 2. ], max_pt=[ 1.5, 6. , 5. , 7. ]
)
Colons etc work as expected:
>>> part[:] == part
True
>>> part[:, :, :] == part
True
>>> part[...] == part
True
"""
# Special case of index list: slice along first axis
if isinstance(indices, list):
if indices == []:
new_min_pt = new_max_pt = []
else:
new_min_pt = [self.cell_boundary_vecs[0][:-1][indices][0]]
new_max_pt = [self.cell_boundary_vecs[0][1:][indices][-1]]
for cvec in self.cell_boundary_vecs[1:]:
new_min_pt.append(cvec[0])
new_max_pt.append(cvec[-1])
new_intvp = IntervalProd(new_min_pt, new_max_pt)
new_grid = self.grid[indices]
return RectPartition(new_intvp, new_grid)
indices = normalized_index_expression(indices, self.shape,
int_to_slice=True)
# Build the new partition
new_min_pt, new_max_pt = [], []
for cvec, idx in zip(self.cell_boundary_vecs, indices):
# Determine the subinterval min_pt and max_pt vectors. Take the
# first min_pt as new min_pt and the last max_pt as new max_pt.
if isinstance(idx, slice):
# Only use the slice to extract min and max without using
# the step size. This is in order for expressions like
# self[::2] to not change the maximum.
idx = slice(idx.start, idx.stop, None)
sub_min_pt = cvec[:-1][idx]
sub_max_pt = cvec[1:][idx]
new_min_pt.append(sub_min_pt[0])
new_max_pt.append(sub_max_pt[-1])
new_intvp = IntervalProd(new_min_pt, new_max_pt)
new_grid = self.grid[indices]
return RectPartition(new_intvp, new_grid)
def insert(self, index, *parts):
"""Return a copy with ``parts`` inserted before ``index``.
The given partitions are inserted (as a block) into ``self``,
yielding a new partition whose number of dimensions is the sum of
the numbers of dimensions of all involved partitions.
Note that no changes are made in-place.
Parameters
----------
index : int
Index of the dimension before which ``other`` is to
be inserted. Negative indices count backwards from
``self.ndim``.
part1, ..., partN : `RectPartition`
Partitions to be inserted into ``self``.
Returns
-------
newpart : `RectPartition`
The enlarged partition.
Examples
--------
>>> part1 = odl.uniform_partition([0, -1], [1, 2], (3, 3))
>>> part2 = odl.uniform_partition(0, 1, 5)
>>> part1.insert(1, part2)
uniform_partition([ 0., 0., -1.], [ 1., 1., 2.], (3, 5, 3))
See Also
--------
append
"""
if not all(isinstance(p, RectPartition) for p in parts):
raise TypeError('`parts` must all be `RectPartition` instances, '
'got ({})'
''.format(', '.join(repr(p) for p in parts)))
newgrid = self.grid.insert(index, *(p.grid for p in parts))
newset = self.set.insert(index, *(p.set for p in parts))
return RectPartition(newset, newgrid)
def append(self, *parts):
"""Insert ``parts`` at the end as a block.
Parameters
----------
part1, ..., partN : `RectPartition`
Partitions to be appended to ``self``.
Returns
-------
newpart : `RectPartition`
The enlarged partition.
Examples
--------
>>> part1 = odl.uniform_partition(-1, 2, 3)
>>> part2 = odl.uniform_partition(0, 1, 5)
>>> part1.append(part2)
uniform_partition([-1., 0.], [ 2., 1.], (3, 5))
>>> part1.append(part2, part2)
uniform_partition([-1., 0., 0.], [ 2., 1., 1.], (3, 5, 5))
See Also
--------
insert
"""
return self.insert(self.ndim, *parts)
def squeeze(self, axis=None):
"""Return the partition with removed degenerate (length 1) dimensions.
Parameters
----------
axis : None or index expression, optional
Subset of the axes to squeeze. Default: All axes.
Returns
-------
squeezed : `RectPartition`
Squeezed partition.
Examples
--------
>>> p = odl.uniform_partition([0, -1], [1, 2], (3, 1))
>>> p.squeeze()
uniform_partition(0.0, 1.0, 3)
The axis argument can be used to only squeeze some axes (if applicable)
>>> p.squeeze(axis=0)
uniform_partition([ 0., -1.], [ 1., 2.], (3, 1))
Notes
-----
This is not equivalent to
``RectPartiton(self.set.squeeze(), self.grid.squeeze())`` since the
definition of degenerate is different in sets and grids. This method
follows the definition used in grids, that is, an axis is degenerate if
it has only one element.
See Also
--------
RectGrid.squeeze
IntervalProd.squeeze
"""
if axis is None:
rng = range(self.ndim)
else:
rng = list(np.atleast_1d(np.arange(self.ndim)[axis]))
new_indcs = [i for i in range(self.ndim)
if i not in rng or self.grid.nondegen_byaxis[i]]
newset = self.set[new_indcs]
return RectPartition(newset, self.grid.squeeze(axis))
def index(self, value, floating=False):
"""Return the index of a value in the domain.
Parameters
----------
value : ``self.set`` element
Point whose index to find.
floating : bool, optional
If True, then the index should also give the position inside the
voxel. This is given by returning the integer valued index of the
voxel plus the distance from the left cell boundary as a fraction
of the full cell size.
Returns
-------
index : int, float, tuple of int or tuple of float
Index of the value, as counted from the left.
If ``self.ndim > 1`` the result is a tuple, else a scalar.
If ``floating=True`` the scalar is a float, else an int.
Examples
--------
Get the indices of start and end:
>>> p = odl.uniform_partition(0, 2, 5)
>>> p.index(0)
0
>>> p.index(2)
4
For points inside voxels, the index of the containing cell is returned:
>>> p.index(0.2)
0
By using the ``floating`` argument, partial positions inside the voxels
can instead be determined:
>>> p.index(0.2, floating=True)
0.5
These indices work with indexing, extracting the voxel in which the
point lies:
>>> p[p.index(0.1)]
uniform_partition(0.0, 0.4, 1)
The same principle also works in higher dimensions:
>>> p = uniform_partition([0, -1], [1, 2], (4, 1))
>>> p.index([0.5, 2])
(2, 0)
>>> p[p.index([0.5, 2])]
uniform_partition([ 0.5, -1. ], [ 0.75, 2. ], (1, 1))
"""
value = np.atleast_1d(self.set.element(value))
result = []
for val, cell_bdry_vec in zip(value, self.cell_boundary_vecs):
ind = np.searchsorted(cell_bdry_vec, val)
if floating:
if cell_bdry_vec[ind] == val:
# Value is on top of edge
result.append(float(ind))
else:
# interpolate between
csize = float(cell_bdry_vec[ind] - cell_bdry_vec[ind - 1])
result.append(ind - (cell_bdry_vec[ind] - val) / csize)
else:
if cell_bdry_vec[ind] == val and ind != len(cell_bdry_vec) - 1:
# Value is on top of edge, but not last edge
result.append(ind)
else:
result.append(ind - 1)
if self.ndim == 1:
result = result[0]
else:
result = tuple(result)
return result
@property
def byaxis(self):
"""Object to index ``self`` along axes.
Examples
--------
Indexing with integers or slices:
>>> p = odl.uniform_partition([0, 1, 2], [1, 3, 5], (3, 5, 6))
>>> p.byaxis[0]
uniform_partition(0.0, 1.0, 3)
>>> p.byaxis[1]
uniform_partition(1.0, 3.0, 5)
>>> p.byaxis[2]
uniform_partition(2.0, 5.0, 6)
>>> p.byaxis[:] == p
True
>>> p.byaxis[1:]
uniform_partition([ 1., 2.], [ 3., 5.], (5, 6))
Lists can be used to stack subpartitions arbitrarily:
>>> p.byaxis[[0, 2, 0]]
uniform_partition([ 0., 2., 0.], [ 1., 5., 1.], (3, 6, 3))
"""
partition = self
class RectPartitionByAxis(object):
"""Helper class for accessing `RectPartition` by axis."""
def __getitem__(self, indices):
"""Return ``self[indices]``."""
try:
iter(indices)
except TypeError:
# Slice or integer
slc = np.zeros(partition.ndim, dtype=object)
slc[indices] = slice(None)
squeeze_axes = np.where(slc == 0)[0]
newpart = partition[tuple(slc)].squeeze(squeeze_axes)
else:
# Sequence, stack together from single-integer indexing
indices = [int(i) for i in indices]
byaxis = partition.byaxis
parts = [byaxis[i] for i in indices]
if not parts:
newpart = uniform_partition([], [], ())
else:
newpart = parts[0].append(*(parts[1:]))
return newpart
def __repr__(self):
"""Return ``repr(self)``.
Examples
--------
>>> p = odl.uniform_partition(0, 1, 5)
>>> p.byaxis
uniform_partition(0, 1, 5).byaxis
"""
return '{!r}.byaxis'.format(partition)
return RectPartitionByAxis()
def __repr__(self):
"""Return ``repr(self)``."""
if self.ndim == 0:
return 'uniform_partition([], [], ())'
bdry_fracs = np.vstack(self.boundary_cell_fractions)
default_bdry_fracs = np.all(np.isclose(bdry_fracs, 0.5) |
np.isclose(bdry_fracs, 1.0))
# Get default shifts of min_pt and max_pt from corresponding
# grid points
csizes_l = np.fromiter((s[0] for s in self.cell_sizes_vecs),
dtype=float)
csizes_r = np.fromiter((s[-1] for s in self.cell_sizes_vecs),
dtype=float)
shift_l = ((bdry_fracs[:, 0].astype(float).squeeze() - 0.5) *
csizes_l)
shift_r = ((bdry_fracs[:, 1].astype(float).squeeze() - 0.5) *
csizes_r)
if self.is_uniform and default_bdry_fracs:
ctor = 'uniform_partition'
if self.ndim == 1:
posargs = [self.min_pt[0], self.max_pt[0], self.shape[0]]
posmod = [':.4', ':.4', '']
else:
posargs = [self.min_pt, self.max_pt, self.shape]
posmod = [array_str, array_str, '']
optargs = [('nodes_on_bdry', self.nodes_on_bdry, False)]
with npy_printoptions(precision=4):
sig_str = signature_string(posargs, optargs, mod=[posmod, ''])
return '{}({})'.format(ctor, sig_str)
else:
ctor = 'nonuniform_partition'
posargs = self.coord_vectors
posmod = array_str
optargs = []
# Defaults with and without nodes_on_bdry option
nodes_def_min_pt = self.grid.min_pt - shift_l
nodes_def_max_pt = self.grid.max_pt + shift_r
def_min_pt = self.grid.min_pt - 0.5 * csizes_l
def_max_pt = self.grid.max_pt + 0.5 * csizes_r
            # Since min/max_pt and nodes_on_bdry are mutually exclusive, we
            # need to distinguish a couple of cases here
optmod = []
if (np.allclose(self.min_pt, nodes_def_min_pt) and
np.allclose(self.max_pt, nodes_def_max_pt)):
# Append nodes_on_bdry to list of optional args
optargs.append(('nodes_on_bdry', self.nodes_on_bdry, False))
optmod.append('')
else:
# Append min/max_pt to list of optional args if not
# default (need check manually because array comparison is
# ambiguous)
if not np.allclose(self.min_pt, def_min_pt):
if self.ndim == 1:
optargs.append(('min_pt', self.min_pt[0], None))
optmod.append(':.4')
else:
with npy_printoptions(precision=4):
optargs.append(
('min_pt', array_str(self.min_pt), ''))
optmod.append('!s')
if not np.allclose(self.max_pt, def_max_pt):
if self.ndim == 1:
optargs.append(('max_pt', self.max_pt[0], None))
optmod.append(':.4')
else:
with npy_printoptions(precision=4):
optargs.append(
('max_pt', array_str(self.max_pt), ''))
optmod.append('!s')
sig_str = signature_string(posargs, optargs, mod=[posmod, optmod],
sep=[',\n', ', ', ',\n'])
return '{}(\n{}\n)'.format(ctor, indent(sig_str))
def __str__(self):
"""Return ``str(self)``."""
return repr(self)
def uniform_partition_fromintv(intv_prod, shape, nodes_on_bdry=False):
"""Return a partition of an interval product into equally sized cells.
Parameters
----------
intv_prod : `IntervalProd`
Interval product to be partitioned
shape : int or sequence of ints
Number of nodes per axis. For 1d intervals, a single integer
can be specified.
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
an entry may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``intv_prod.ndim``.
A single boolean is interpreted as a global choice for all
boundaries.
See Also
--------
uniform_partition_fromgrid
Examples
--------
By default, no grid points are placed on the boundary:
>>> interval = odl.IntervalProd(0, 1)
>>> part = odl.uniform_partition_fromintv(interval, 4)
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0.125, 0.375, 0.625, 0.875]),)
This can be changed with the nodes_on_bdry parameter:
>>> part = odl.uniform_partition_fromintv(interval, 3,
... nodes_on_bdry=True)
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
We can specify this per axis, too. In this case we choose both
in the first axis and only the rightmost in the second:
>>> rect = odl.IntervalProd([0, 0], [1, 1])
>>> part = odl.uniform_partition_fromintv(
... rect, (3, 3), nodes_on_bdry=(True, (False, True)))
...
>>> part.cell_boundary_vecs[0] # first axis, as above
array([ 0. , 0.25, 0.75, 1. ])
>>> part.grid.coord_vectors[0]
array([ 0. , 0.5, 1. ])
>>> part.cell_boundary_vecs[1] # second, asymmetric axis
array([ 0. , 0.4, 0.8, 1. ])
>>> part.grid.coord_vectors[1]
array([ 0.2, 0.6, 1. ])
"""
grid = uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=nodes_on_bdry)
return RectPartition(intv_prod, grid)
def uniform_partition_fromgrid(grid, min_pt=None, max_pt=None):
"""Return a partition of an interval product based on a given grid.
This method is complementary to `uniform_partition_fromintv` in that
it infers the set to be partitioned from a given grid and optional
parameters for ``min_pt`` and ``max_pt`` of the set.
Parameters
----------
grid : `RectGrid`
Grid on which the partition is based
min_pt, max_pt : float, sequence of floats, or dict, optional
Spatial points defining the lower/upper limits of the intervals
to be partitioned. The points can be specified in two ways:
float or sequence: The values are used directly as ``min_pt``
and/or ``max_pt``.
dict: Index-value pairs specifying an axis and a spatial
coordinate to be used in that axis. In axes which are not a key
in the dictionary, the coordinate for the vector is calculated
as::
min_pt = x[0] - (x[1] - x[0]) / 2
max_pt = x[-1] + (x[-1] - x[-2]) / 2
See ``Examples`` below.
In general, ``min_pt`` may not be larger than ``grid.min_pt``,
and ``max_pt`` not smaller than ``grid.max_pt`` in any component.
``None`` is equivalent to an empty dictionary, i.e. the values
are calculated in each dimension.
See Also
--------
uniform_partition_fromintv
Examples
--------
Have ``min_pt`` and ``max_pt`` of the bounding box automatically
calculated:
>>> grid = odl.uniform_grid(0, 1, 3)
>>> grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
>>> part = odl.uniform_partition_fromgrid(grid)
>>> part.cell_boundary_vecs
(array([-0.25, 0.25, 0.75, 1.25]),)
``min_pt`` and ``max_pt`` can be given explicitly:
>>> part = odl.uniform_partition_fromgrid(grid, min_pt=0, max_pt=1)
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.75, 1. ]),)
Using dictionaries, selective axes can be explicitly set. The
keys refer to axes, the values to the coordinates to use:
>>> grid = odl.uniform_grid([0, 0], [1, 1], (3, 3))
>>> part = odl.uniform_partition_fromgrid(grid,
... min_pt={0: -1}, max_pt={-1: 3})
>>> part.cell_boundary_vecs[0]
array([-1. , 0.25, 0.75, 1.25])
>>> part.cell_boundary_vecs[1]
array([-0.25, 0.25, 0.75, 3. ])
"""
# Make dictionaries from `min_pt` and `max_pt` and fill with `None` where
# no value is given (taking negative indices into account)
if min_pt is None:
min_pt = {i: None for i in range(grid.ndim)}
elif not hasattr(min_pt, 'items'): # array-like
min_pt = np.atleast_1d(min_pt)
min_pt = {i: float(v) for i, v in enumerate(min_pt)}
else:
min_pt.update({i: None for i in range(grid.ndim)
if i not in min_pt and i - grid.ndim not in min_pt})
if max_pt is None:
max_pt = {i: None for i in range(grid.ndim)}
elif not hasattr(max_pt, 'items'):
max_pt = np.atleast_1d(max_pt)
max_pt = {i: float(v) for i, v in enumerate(max_pt)}
else:
max_pt.update({i: None for i in range(grid.ndim)
if i not in max_pt and i - grid.ndim not in max_pt})
# Set the values in the vectors by computing (None) or directly from the
# given vectors (otherwise).
min_pt_vec = np.empty(grid.ndim)
for ax, xmin in min_pt.items():
if xmin is None:
cvec = grid.coord_vectors[ax]
if len(cvec) == 1:
raise ValueError('in axis {}: cannot calculate `min_pt` with '
'only 1 grid point'.format(ax))
min_pt_vec[ax] = cvec[0] - (cvec[1] - cvec[0]) / 2
else:
min_pt_vec[ax] = xmin
max_pt_vec = np.empty(grid.ndim)
for ax, xmax in max_pt.items():
if xmax is None:
cvec = grid.coord_vectors[ax]
if len(cvec) == 1:
raise ValueError('in axis {}: cannot calculate `max_pt` with '
'only 1 grid point'.format(ax))
max_pt_vec[ax] = cvec[-1] + (cvec[-1] - cvec[-2]) / 2
else:
max_pt_vec[ax] = xmax
return RectPartition(IntervalProd(min_pt_vec, max_pt_vec), grid)
def uniform_partition(min_pt=None, max_pt=None, shape=None, cell_sides=None,
nodes_on_bdry=False):
"""Return a partition with equally sized cells.
Parameters
----------
min_pt, max_pt : float or sequence of float, optional
Vectors defining the lower/upper limits of the intervals in an
`IntervalProd` (a rectangular box). ``None`` entries mean
"compute the value".
shape : int or sequence of ints, optional
Number of nodes per axis. ``None`` entries mean
"compute the value".
cell_sides : float or sequence of floats, optional
Side length of the partition cells per axis. ``None`` entries mean
"compute the value".
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
an entry may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``array.ndim``.
A single boolean is interpreted as a global choice for all
boundaries.
Notes
-----
In each axis, 3 of the 4 possible parameters ``min_pt``, ``max_pt``,
``shape`` and ``cell_sides`` must be given. If all four are
provided, they are checked for consistency.
See Also
--------
uniform_partition_fromintv : partition an existing set
uniform_partition_fromgrid : use an existing grid as basis
Examples
--------
Any combination of three of the four parameters can be used for
creation of a partition:
>>> part = odl.uniform_partition(min_pt=0, max_pt=2, shape=4)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(min_pt=0, shape=4, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(max_pt=2, shape=4, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(min_pt=0, max_pt=2, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
In higher dimensions, the parameters can be given differently in
each axis. Where ``None`` is given, the value will be computed:
>>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
... shape=[4, 2])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
>>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
... shape=[None, 2], cell_sides=[0.25, None])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
>>> part = odl.uniform_partition(min_pt=[0, None], max_pt=[None, 2],
... shape=[4, 2], cell_sides=[0.25, 1])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
By default, no grid points are placed on the boundary:
>>> part = odl.uniform_partition(0, 1, 4)
>>> part.nodes_on_bdry
False
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0.125, 0.375, 0.625, 0.875]),)
This can be changed with the nodes_on_bdry parameter:
>>> part = odl.uniform_partition(0, 1, 3, nodes_on_bdry=True)
>>> part.nodes_on_bdry
True
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
We can specify this per axis, too. In this case we choose both
in the first axis and only the rightmost in the second:
>>> part = odl.uniform_partition([0, 0], [1, 1], (3, 3),
... nodes_on_bdry=(True, (False, True)))
...
>>> part.cell_boundary_vecs[0] # first axis, as above
array([ 0. , 0.25, 0.75, 1. ])
>>> part.grid.coord_vectors[0]
array([ 0. , 0.5, 1. ])
>>> part.cell_boundary_vecs[1] # second, asymmetric axis
array([ 0. , 0.4, 0.8, 1. ])
>>> part.grid.coord_vectors[1]
array([ 0.2, 0.6, 1. ])
"""
# Normalize partition parameters
    # np.size(None) == 1, which would wrongly yield ndim = 1 when the given
    # parameters have size 0, hence `None` entries are excluded here.
sizes = [np.size(p) for p in (min_pt, max_pt, shape, cell_sides)
if p is not None]
ndim = int(np.max(sizes))
min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
keep_none=True)
max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
keep_none=True)
shape = normalized_scalar_param_list(shape, ndim, param_conv=safe_int_conv,
keep_none=True)
cell_sides = normalized_scalar_param_list(cell_sides, ndim,
param_conv=float, keep_none=True)
nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)
# Calculate the missing parameters in min_pt, max_pt, shape
for i, (xmin, xmax, n, dx, on_bdry) in enumerate(
zip(min_pt, max_pt, shape, cell_sides, nodes_on_bdry)):
num_params = sum(p is not None for p in (xmin, xmax, n, dx))
if num_params < 3:
raise ValueError('in axis {}: expected at least 3 of the '
'parameters `min_pt`, `max_pt`, `shape`, '
'`cell_sides`, got {}'
''.format(i, num_params))
# Unpack the tuple if possible, else use bool globally for this axis
try:
bdry_l, bdry_r = on_bdry
except TypeError:
bdry_l = bdry_r = on_bdry
# For each node on the boundary, we subtract 1/2 from the number of
# full cells between min_pt and max_pt.
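        # Worked example (values assumed for illustration): with n = 4,
        # dx = 0.5 and no nodes on the boundary the extent is 4 * 0.5 = 2.0;
        # with one node on the boundary it shrinks to (4 - 0.5) * 0.5 = 1.75.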
if xmin is None:
min_pt[i] = xmax - (n - sum([bdry_l, bdry_r]) / 2.0) * dx
elif xmax is None:
max_pt[i] = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
elif n is None:
            # Add the boundary correction since (xmax - xmin) / dx gives the
            # reduced number of cells.
n_calc = (xmax - xmin) / dx + sum([bdry_l, bdry_r]) / 2.0
n_round = int(round(n_calc))
if abs(n_calc - n_round) > 1e-5:
raise ValueError('in axis {}: calculated number of nodes '
'{} = ({} - {}) / {} too far from integer'
''.format(i, n_calc, xmax, xmin, dx))
shape[i] = n_round
elif dx is None:
pass
else:
xmax_calc = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
if not np.isclose(xmax, xmax_calc):
raise ValueError('in axis {}: calculated endpoint '
'{} = {} + {} * {} too far from given '
'endpoint {}.'
''.format(i, xmax_calc, xmin, n, dx, xmax))
return uniform_partition_fromintv(
IntervalProd(min_pt, max_pt), shape, nodes_on_bdry)
def nonuniform_partition(*coord_vecs, **kwargs):
"""Return a partition with un-equally sized cells.
Parameters
----------
coord_vecs1, ... coord_vecsN : `array-like`
Arrays of coordinates of the mid-points of the partition cells.
min_pt, max_pt : float or sequence of floats, optional
Vectors defining the lower/upper limits of the intervals in an
`IntervalProd` (a rectangular box). ``None`` entries mean
"compute the value".
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
an entry may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``array.ndim``.
A single boolean is interpreted as a global choice for all
boundaries.
Cannot be given with both min_pt and max_pt since they determine the
same thing.
Default: ``False``
See Also
--------
uniform_partition : uniformly spaced points
uniform_partition_fromintv : partition an existing set
uniform_partition_fromgrid : use an existing grid as basis
Examples
--------
With uniformly spaced points the result is the same as a
uniform partition:
>>> odl.nonuniform_partition([0, 1, 2, 3])
uniform_partition(-0.5, 3.5, 4)
>>> odl.nonuniform_partition([0, 1, 2, 3], [1, 2])
uniform_partition([-0.5, 0.5], [ 3.5, 2.5], (4, 2))
If the points are not uniformly spaced, a nonuniform partition is
created. Note that the containing interval is calculated by assuming
that the points are in the middle of the sub-intervals:
>>> odl.nonuniform_partition([0, 1, 3])
nonuniform_partition(
[ 0., 1., 3.]
)
Higher dimensional partitions are created by specifying the gridpoints
along each dimension:
>>> odl.nonuniform_partition([0, 1, 3], [1, 2])
nonuniform_partition(
[ 0., 1., 3.],
[ 1., 2.]
)
Partitions with a single element are by default degenerate
>>> odl.nonuniform_partition(1)
uniform_partition(1.0, 1.0, 1, nodes_on_bdry=True)
If the endpoints should be on the boundary, the ``nodes_on_bdry`` parameter
can be used:
>>> odl.nonuniform_partition([0, 1, 3], nodes_on_bdry=True)
nonuniform_partition(
[ 0., 1., 3.],
nodes_on_bdry=True
)
    Users can also manually specify the extent of the containing interval by
    using the ``min_pt`` and ``max_pt`` arguments:
>>> odl.nonuniform_partition([0, 1, 3], min_pt=-2, max_pt=3)
nonuniform_partition(
[ 0., 1., 3.],
min_pt=-2.0, max_pt=3.0
)
"""
# Get parameters from kwargs
min_pt = kwargs.pop('min_pt', None)
max_pt = kwargs.pop('max_pt', None)
nodes_on_bdry = kwargs.pop('nodes_on_bdry', False)
# np.size(None) == 1
sizes = [len(coord_vecs)] + [np.size(p) for p in (min_pt, max_pt)]
ndim = int(np.max(sizes))
min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
keep_none=True)
max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
keep_none=True)
nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)
# Calculate the missing parameters in min_pt, max_pt
for i, (xmin, xmax, (bdry_l, bdry_r), coords) in enumerate(
zip(min_pt, max_pt, nodes_on_bdry, coord_vecs)):
# Check input for redundancy
if xmin is not None and bdry_l:
raise ValueError('in axis {}: got both `min_pt` and '
'`nodes_on_bdry=True`'.format(i))
if xmax is not None and bdry_r:
raise ValueError('in axis {}: got both `max_pt` and '
'`nodes_on_bdry=True`'.format(i))
# Handle length 1 inputs
coords = np.array(coords, copy=False, ndmin=1)
# Compute boundary position if not given by user
if xmin is None:
if bdry_l or len(coords) == 1:
min_pt[i] = coords[0]
else:
min_pt[i] = coords[0] - (coords[1] - coords[0]) / 2.0
if xmax is None:
if bdry_r or len(coords) == 1:
max_pt[i] = coords[-1]
else:
max_pt[i] = coords[-1] + (coords[-1] - coords[-2]) / 2.0
interval = IntervalProd(min_pt, max_pt)
grid = RectGrid(*coord_vecs)
return RectPartition(interval, grid)
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests()
| mpl-2.0 |
ahmed-mahran/hue | desktop/core/ext-py/markdown/markdown/odict.py | 143 | 5157 | class OrderedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
Copied from Django's SortedDict with some modifications.
"""
def __new__(cls, *args, **kwargs):
instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
super(OrderedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
for key, value in data:
if key not in self.keyOrder:
self.keyOrder.append(key)
def __deepcopy__(self, memo):
from copy import deepcopy
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
super(OrderedDict, self).__setitem__(key, value)
if key not in self.keyOrder:
self.keyOrder.append(key)
def __delitem__(self, key):
super(OrderedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
for k in self.keyOrder:
yield k
def pop(self, k, *args):
result = super(OrderedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(OrderedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, super(OrderedDict, self).__getitem__(key)
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]
def itervalues(self):
for key in self.keyOrder:
yield super(OrderedDict, self).__getitem__(key)
def update(self, dict_):
for k, v in dict_.items():
self.__setitem__(k, v)
def setdefault(self, key, default):
if key not in self.keyOrder:
self.keyOrder.append(key)
return super(OrderedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Return the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Insert the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
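            # If the key was removed from an earlier position, the requested
            # insertion index shifts down by one.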
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(OrderedDict, self).__setitem__(key, value)
def copy(self):
"""Return a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replace the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(OrderedDict, self).clear()
self.keyOrder = []
def index(self, key):
""" Return the index of a given key. """
return self.keyOrder.index(key)
def index_for_location(self, location):
""" Return index or None for a given location. """
if location == '_begin':
i = 0
elif location == '_end':
i = None
elif location.startswith('<') or location.startswith('>'):
i = self.index(location[1:])
if location.startswith('>'):
if i >= len(self):
# last item
i = None
else:
i += 1
else:
raise ValueError('Not a valid location: "%s". Location key '
'must start with a ">" or "<".' % location)
return i
def add(self, key, value, location):
""" Insert by key location. """
i = self.index_for_location(location)
if i is not None:
self.insert(i, key, value)
else:
self.__setitem__(key, value)
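    # A minimal usage sketch of the location syntax (illustrative only, not
    # part of the original module):
    #
    #   od = OrderedDict()
    #   od.add('first', 1, '_begin')   # insert at the start
    #   od.add('last', 3, '_end')      # append at the end
    #   od.add('middle', 2, '>first')  # insert right after 'first'
    #   od.keys()  # -> ['first', 'middle', 'last']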
def link(self, key, location):
""" Change location of an existing item. """
n = self.keyOrder.index(key)
del self.keyOrder[n]
i = self.index_for_location(location)
try:
if i is not None:
self.keyOrder.insert(i, key)
else:
self.keyOrder.append(key)
        except Exception:
            # restore the previous position to prevent data loss and reraise
            self.keyOrder.insert(n, key)
            raise
| apache-2.0 |
techhat/libcloud | libcloud/test/common/test_openstack_identity.py | 8 | 33173 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import datetime
try:
import simplejson as json
except ImportError:
import json
from mock import Mock
from libcloud.utils.py3 import httplib
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.common.openstack_identity import AUTH_TOKEN_EXPIRES_GRACE_SECONDS
from libcloud.common.openstack_identity import get_class_for_auth_version
from libcloud.common.openstack_identity import OpenStackServiceCatalog
from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection
from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection
from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection_OIDC_access_token
from libcloud.common.openstack_identity import OpenStackIdentityUser
from libcloud.compute.drivers.openstack import OpenStack_1_0_NodeDriver
from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection_VOMS
from libcloud.test import unittest
from libcloud.test import MockHttp
from libcloud.test.secrets import OPENSTACK_PARAMS
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.compute.test_openstack import OpenStackMockHttp
from libcloud.test.compute.test_openstack import OpenStack_2_0_MockHttp
class OpenStackIdentityConnectionTestCase(unittest.TestCase):
def setUp(self):
OpenStackBaseConnection.auth_url = None
OpenStackBaseConnection.conn_class = OpenStackMockHttp
def test_auth_url_is_correctly_assembled(self):
tuples = [
('1.0', OpenStackMockHttp),
('1.1', OpenStackMockHttp),
('2.0', OpenStack_2_0_MockHttp),
('2.0_apikey', OpenStack_2_0_MockHttp),
('2.0_password', OpenStack_2_0_MockHttp)
]
APPEND = 0
NOTAPPEND = 1
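        # Each auth URL below is paired with a flag saying whether the
        # version-specific default path (e.g. /v2.0/tokens) should be
        # appended, plus the path expected otherwise.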
auth_urls = [
('https://auth.api.example.com', APPEND, ''),
('https://auth.api.example.com/', NOTAPPEND, '/'),
('https://auth.api.example.com/foo/bar', NOTAPPEND, '/foo/bar'),
('https://auth.api.example.com/foo/bar/', NOTAPPEND, '/foo/bar/')
]
actions = {
'1.0': '/v1.0',
'1.1': '/v1.1/auth',
'2.0': '/v2.0/tokens',
'2.0_apikey': '/v2.0/tokens',
'2.0_password': '/v2.0/tokens'
}
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
for (auth_version, mock_http_class) in tuples:
for (url, should_append_default_path, expected_path) in auth_urls:
connection = \
self._get_mock_connection(mock_http_class=mock_http_class,
auth_url=url)
auth_url = connection.auth_url
cls = get_class_for_auth_version(auth_version=auth_version)
osa = cls(auth_url=auth_url,
user_id=user_id,
key=key,
parent_conn=connection)
                try:
                    osa = osa.authenticate()
                except Exception:
                    pass
if (should_append_default_path == APPEND):
expected_path = actions[auth_version]
self.assertEqual(osa.action, expected_path)
def test_basic_authentication(self):
tuples = [
('1.0', OpenStackMockHttp),
('1.1', OpenStackMockHttp),
('2.0', OpenStack_2_0_MockHttp),
('2.0_apikey', OpenStack_2_0_MockHttp),
('2.0_password', OpenStack_2_0_MockHttp)
]
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
for (auth_version, mock_http_class) in tuples:
connection = \
self._get_mock_connection(mock_http_class=mock_http_class)
auth_url = connection.auth_url
cls = get_class_for_auth_version(auth_version=auth_version)
osa = cls(auth_url=auth_url, user_id=user_id, key=key,
parent_conn=connection)
self.assertEqual(osa.urls, {})
self.assertEqual(osa.auth_token, None)
self.assertEqual(osa.auth_user_info, None)
osa = osa.authenticate()
self.assertTrue(len(osa.urls) >= 1)
self.assertTrue(osa.auth_token is not None)
if auth_version in ['1.1', '2.0', '2.0_apikey', '2.0_password']:
self.assertTrue(osa.auth_token_expires is not None)
if auth_version in ['2.0', '2.0_apikey', '2.0_password']:
self.assertTrue(osa.auth_user_info is not None)
def test_token_expiration_and_force_reauthentication(self):
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
connection = self._get_mock_connection(OpenStack_2_0_MockHttp)
auth_url = connection.auth_url
yesterday = datetime.datetime.today() - datetime.timedelta(1)
tomorrow = datetime.datetime.today() + datetime.timedelta(1)
osa = OpenStackIdentity_2_0_Connection(auth_url=auth_url,
user_id=user_id,
key=key,
parent_conn=connection)
mocked_auth_method = Mock(wraps=osa._authenticate_2_0_with_body)
osa._authenticate_2_0_with_body = mocked_auth_method
# Force re-auth, expired token
osa.auth_token = None
osa.auth_token_expires = yesterday
count = 5
for i in range(0, count):
osa.authenticate(force=True)
self.assertEqual(mocked_auth_method.call_count, count)
# No force reauth, expired token
osa.auth_token = None
osa.auth_token_expires = yesterday
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
osa.authenticate(force=False)
self.assertEqual(mocked_auth_method.call_count, 1)
# No force reauth, valid / non-expired token
osa.auth_token = None
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
osa.authenticate(force=False)
if i == 0:
osa.auth_token_expires = tomorrow
self.assertEqual(mocked_auth_method.call_count, 1)
# No force reauth, valid / non-expired token which is about to expire in
# less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS
soon = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1)
osa.auth_token = None
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
if i == 0:
osa.auth_token_expires = soon
osa.authenticate(force=False)
self.assertEqual(mocked_auth_method.call_count, 1)
def _get_mock_connection(self, mock_http_class, auth_url=None):
OpenStackBaseConnection.conn_class = mock_http_class
if auth_url is None:
auth_url = "https://auth.api.example.com"
OpenStackBaseConnection.auth_url = auth_url
connection = OpenStackBaseConnection(*OPENSTACK_PARAMS)
connection._ex_force_base_url = "https://www.foo.com"
connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS)
return connection
class OpenStackIdentity_2_0_ConnectionTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_2_0_MockHttp
mock_cls.type = None
OpenStackIdentity_2_0_Connection.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_2_0_Connection(auth_url='http://none',
user_id='test',
key='test',
tenant_name='test')
self.auth_instance.auth_token = 'mock'
def test_list_projects(self):
result = self.auth_instance.list_projects()
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, 'a')
self.assertEqual(result[0].name, 'test')
self.assertEqual(result[0].description, 'test project')
self.assertTrue(result[0].enabled)
class OpenStackIdentity_3_0_ConnectionTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_3_0_MockHttp
mock_cls.type = None
OpenStackIdentity_3_0_Connection.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
tenant_name='test')
self.auth_instance.auth_token = 'mock'
def test_token_scope_argument(self):
# Invalid token_scope value
expected_msg = 'Invalid value for "token_scope" argument: foo'
self.assertRaisesRegexp(ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='foo')
# Missing tenant_name
expected_msg = 'Must provide tenant_name and domain_name argument'
self.assertRaisesRegexp(ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='project')
# Missing domain_name
expected_msg = 'Must provide domain_name argument'
self.assertRaisesRegexp(ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='domain',
domain_name=None)
# Scope to project all ok
OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
token_scope='project',
tenant_name='test',
domain_name='Default')
# Scope to domain
OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
token_scope='domain',
tenant_name=None,
domain_name='Default')
def test_authenticate(self):
auth = OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test_user_id',
key='test_key',
token_scope='project',
tenant_name="test_tenant",
domain_name='test_domain')
auth.authenticate()
def test_list_supported_versions(self):
OpenStackIdentity_3_0_MockHttp.type = 'v3'
versions = self.auth_instance.list_supported_versions()
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].version, 'v2.0')
self.assertEqual(versions[0].url,
'http://192.168.18.100:5000/v2.0/')
self.assertEqual(versions[1].version, 'v3.0')
self.assertEqual(versions[1].url,
'http://192.168.18.100:5000/v3/')
def test_list_domains(self):
domains = self.auth_instance.list_domains()
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0].id, 'default')
self.assertEqual(domains[0].name, 'Default')
self.assertTrue(domains[0].enabled)
def test_list_projects(self):
projects = self.auth_instance.list_projects()
self.assertEqual(len(projects), 4)
self.assertEqual(projects[0].id, 'a')
self.assertEqual(projects[0].domain_id, 'default')
self.assertTrue(projects[0].enabled)
self.assertEqual(projects[0].description, 'Test project')
def test_list_users(self):
users = self.auth_instance.list_users()
self.assertEqual(len(users), 12)
self.assertEqual(users[0].id, 'a')
self.assertEqual(users[0].domain_id, 'default')
self.assertEqual(users[0].enabled, True)
self.assertEqual(users[0].email, 'openstack-test@localhost')
def test_list_roles(self):
roles = self.auth_instance.list_roles()
self.assertEqual(len(roles), 2)
self.assertEqual(roles[1].id, 'b')
self.assertEqual(roles[1].name, 'admin')
def test_list_user_projects(self):
user = self.auth_instance.list_users()[0]
projects = self.auth_instance.list_user_projects(user=user)
self.assertEqual(len(projects), 0)
def test_list_user_domain_roles(self):
user = self.auth_instance.list_users()[0]
domain = self.auth_instance.list_domains()[0]
roles = self.auth_instance.list_user_domain_roles(domain=domain,
user=user)
self.assertEqual(len(roles), 1)
self.assertEqual(roles[0].name, 'admin')
def test_get_domain(self):
domain = self.auth_instance.get_domain(domain_id='default')
self.assertEqual(domain.name, 'Default')
def test_create_user(self):
user = self.auth_instance.create_user(email='test2@localhost', password='test1',
name='test2', domain_id='default')
self.assertEqual(user.id, 'c')
self.assertEqual(user.name, 'test2')
def test_enable_user(self):
user = self.auth_instance.list_users()[0]
result = self.auth_instance.enable_user(user=user)
self.assertTrue(isinstance(result, OpenStackIdentityUser))
def test_disable_user(self):
user = self.auth_instance.list_users()[0]
result = self.auth_instance.disable_user(user=user)
self.assertTrue(isinstance(result, OpenStackIdentityUser))
def test_grant_domain_role_to_user(self):
domain = self.auth_instance.list_domains()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.grant_domain_role_to_user(domain=domain,
role=role,
user=user)
self.assertTrue(result)
def test_revoke_domain_role_from_user(self):
domain = self.auth_instance.list_domains()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.revoke_domain_role_from_user(domain=domain,
role=role,
user=user)
self.assertTrue(result)
def test_grant_project_role_to_user(self):
project = self.auth_instance.list_projects()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.grant_project_role_to_user(project=project,
role=role,
user=user)
self.assertTrue(result)
def test_revoke_project_role_from_user(self):
project = self.auth_instance.list_projects()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.revoke_project_role_from_user(project=project,
role=role,
user=user)
self.assertTrue(result)
class OpenStackIdentity_3_0_Connection_OIDC_access_tokenTests(
unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_3_0_MockHttp
mock_cls.type = None
OpenStackIdentity_3_0_Connection_OIDC_access_token.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none',
user_id='idp',
key='token',
tenant_name='oidc',
domain_name='test_domain')
self.auth_instance.auth_token = 'mock'
def test_authenticate(self):
auth = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none',
user_id='idp',
key='token',
token_scope='project',
tenant_name="oidc",
domain_name='test_domain')
auth.authenticate()
class OpenStackIdentity_2_0_Connection_VOMSTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_2_0_Connection_VOMSMockHttp
mock_cls.type = None
OpenStackIdentity_2_0_Connection_VOMS.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none',
user_id=None,
key='/tmp/proxy.pem',
tenant_name='VO')
self.auth_instance.auth_token = 'mock'
def test_authenticate(self):
auth = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none',
user_id=None,
key='/tmp/proxy.pem',
token_scope='test',
tenant_name="VO")
auth.authenticate()
class OpenStackServiceCatalogTestCase(unittest.TestCase):
fixtures = ComputeFileFixtures('openstack')
def test_parsing_auth_v1_1(self):
data = self.fixtures.load('_v1_1__auth.json')
data = json.loads(data)
service_catalog = data['auth']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='1.0')
entries = catalog.get_entries()
self.assertEqual(len(entries), 3)
entry = [e for e in entries if e.service_type == 'cloudFilesCDN'][0]
self.assertEqual(entry.service_type, 'cloudFilesCDN')
self.assertEqual(entry.service_name, None)
self.assertEqual(len(entry.endpoints), 2)
self.assertEqual(entry.endpoints[0].region, 'ORD')
self.assertEqual(entry.endpoints[0].url,
'https://cdn2.clouddrive.com/v1/MossoCloudFS')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
self.assertEqual(entry.endpoints[1].region, 'LON')
self.assertEqual(entry.endpoints[1].endpoint_type, 'external')
def test_parsing_auth_v2(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
entries = catalog.get_entries()
self.assertEqual(len(entries), 6)
entry = [e for e in entries if e.service_name == 'cloudServers'][0]
self.assertEqual(entry.service_type, 'compute')
self.assertEqual(entry.service_name, 'cloudServers')
self.assertEqual(len(entry.endpoints), 1)
self.assertEqual(entry.endpoints[0].region, None)
self.assertEqual(entry.endpoints[0].url,
'https://servers.api.rackspacecloud.com/v1.0/1337')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
def test_parsing_auth_v3(self):
data = self.fixtures.load('_v3__auth.json')
data = json.loads(data)
service_catalog = data['token']['catalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='3.x')
entries = catalog.get_entries()
self.assertEqual(len(entries), 6)
entry = [e for e in entries if e.service_type == 'volume'][0]
self.assertEqual(entry.service_type, 'volume')
self.assertEqual(entry.service_name, None)
self.assertEqual(len(entry.endpoints), 3)
self.assertEqual(entry.endpoints[0].region, 'regionOne')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
self.assertEqual(entry.endpoints[1].region, 'regionOne')
self.assertEqual(entry.endpoints[1].endpoint_type, 'admin')
self.assertEqual(entry.endpoints[2].region, 'regionOne')
self.assertEqual(entry.endpoints[2].endpoint_type, 'internal')
def test_get_public_urls(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
public_urls = catalog.get_public_urls(service_type='object-store')
expected_urls = ['https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111',
'https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111']
self.assertEqual(public_urls, expected_urls)
def test_get_regions(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
regions = catalog.get_regions(service_type='object-store')
self.assertEqual(regions, ['LON', 'ORD'])
regions = catalog.get_regions(service_type='invalid')
self.assertEqual(regions, [])
def test_get_service_types(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
service_types = catalog.get_service_types()
self.assertEqual(service_types, ['compute', 'object-store',
'rax:object-cdn'])
service_types = catalog.get_service_types(region='ORD')
self.assertEqual(service_types, ['rax:object-cdn'])
def test_get_service_names(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
service_names = catalog.get_service_names()
self.assertEqual(service_names, ['cloudFiles', 'cloudFilesCDN',
'cloudServers',
'cloudServersOpenStack',
'cloudServersPreprod',
'nova'])
service_names = catalog.get_service_names(service_type='compute')
self.assertEqual(service_names, ['cloudServers',
'cloudServersOpenStack',
'cloudServersPreprod',
'nova'])
class OpenStackIdentity_2_0_MockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v2')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v2_0_tenants(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v2_0_tenants.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
class OpenStackIdentity_3_0_MockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v3')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v3(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_versions.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_domains.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_projects(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_projects.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_auth_tokens(self, method, url, body, headers):
if method == 'POST':
status = httplib.OK
data = json.loads(body)
if 'password' in data['auth']['identity']:
if data['auth']['identity']['password']['user']['domain']['name'] != 'test_domain' or \
data['auth']['scope']['project']['domain']['name'] != 'test_domain':
status = httplib.UNAUTHORIZED
body = ComputeFileFixtures('openstack').load('_v3__auth.json')
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (status, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_users(self, method, url, body, headers):
if method == 'GET':
# list users
body = self.fixtures.load('v3_users.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
elif method == 'POST':
# create user
body = self.fixtures.load('v3_create_user.json')
return (httplib.CREATED, body, self.json_content_headers,
httplib.responses[httplib.CREATED])
raise NotImplementedError()
def _v3_users_a(self, method, url, body, headers):
if method == 'PATCH':
# enable / disable user
body = self.fixtures.load('v3_users_a.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_roles(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_roles.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains_default_users_a_roles_a(self, method, url, body, headers):
if method == 'PUT':
# grant domain role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'DELETE':
# revoke domain role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError()
def _v3_projects_a_users_a_roles_a(self, method, url, body, headers):
if method == 'PUT':
# grant project role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'DELETE':
# revoke project role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError()
def _v3_domains_default(self, method, url, body, headers):
if method == 'GET':
# get domain
body = self.fixtures.load('v3_domains_default.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_users_a_projects(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = self.fixtures.load('v3_users_a_projects.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains_default_users_a_roles(self, method, url, body, headers):
if method == 'GET':
# get user domain roles
body = self.fixtures.load('v3_domains_default_users_a_roles.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_OS_FEDERATION_identity_providers_idp_protocols_oidc_auth(self, method, url, body, headers):
if method == 'GET':
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_OS_FEDERATION_projects(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = json.dumps({"projects": [{"id": "project_id"}]})
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
class OpenStackIdentity_2_0_Connection_VOMSMockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v2')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v2_0_tokens(self, method, url, body, headers):
if method == 'POST':
status = httplib.UNAUTHORIZED
data = json.loads(body)
if 'voms' in data['auth'] and data['auth']['voms'] is True:
status = httplib.OK
body = ComputeFileFixtures('openstack').load('_v2_0__auth.json')
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (status, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v2_0_tenants(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = json.dumps({"tenant": [{"name": "tenant_name"}]})
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
mozilla/relman-auto-nag | auto_nag/round_robin.py | 2 | 6925 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from random import randint
from dateutil.relativedelta import relativedelta
from libmozdata import utils as lmdutils
from libmozdata.bugzilla import BugzillaUser
from auto_nag import logger, utils
from auto_nag.people import People
from auto_nag.round_robin_calendar import Calendar
class RoundRobin(object):
_instances = {}
def __init__(self, rr=None, people=None, teams=None):
self.people = People.get_instance() if people is None else people
self.components_by_triager = {}
self.all_calendars = []
self.feed(teams, rr=rr)
self.nicks = {}
self.erroneous_bzmail = {}
utils.init_random()
@staticmethod
def get_instance(teams=None):
if teams is None:
if None not in RoundRobin._instances:
RoundRobin._instances[None] = RoundRobin()
return RoundRobin._instances[None]
teams = tuple(teams)
if teams not in RoundRobin._instances:
RoundRobin._instances[teams] = RoundRobin(teams=teams)
return RoundRobin._instances[teams]
def get_calendar(self, team, data):
fallback = data["fallback"]
strategies = set(data["components"].values())
res = {}
for strategy in strategies:
url = data[strategy]["calendar"]
res[strategy] = Calendar.get(url, fallback, team, people=self.people)
return res
def feed(self, teams, rr=None):
self.data = {}
filenames = {}
if rr is None:
rr = {}
for team, path in utils.get_config(
"round-robin", "teams", default={}
).items():
if teams is not None and team not in teams:
continue
with open("./auto_nag/scripts/configs/{}".format(path), "r") as In:
rr[team] = json.load(In)
filenames[team] = path
        # rr is a dictionary:
        # - doc -> documentation
        # - components -> dictionary: Product::Component -> strategy name
        # - each strategy name -> dictionary {calendar: url}
# Get all the strategies for each team
for team, data in rr.items():
calendars = self.get_calendar(team, data)
self.all_calendars += list(calendars.values())
        # finally self.data is a dictionary:
        # - Product::Component -> Calendar (the calendar knows its fallback,
        #   i.e. who to nag when nobody is on rotation)
for pc, strategy in data["components"].items():
self.data[pc] = calendars[strategy]
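    # The per-team JSON config consumed above might look roughly like this
    # (shape only; all field values here are assumptions):
    #
    #   {
    #       "doc": "Who triages which component",
    #       "fallback": "manager@example.com",
    #       "components": {"Core::DOM": "default"},
    #       "default": {"calendar": "https://example.com/rotation.ics"}
    #   }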
def get_components(self):
return list(self.data.keys())
def get_components_for_triager(self, triager):
return self.components_by_triager[triager]
def add_component_for_triager(self, component, triagers):
if not isinstance(triagers, list):
triagers = [triagers]
for triager in triagers:
if triager in self.components_by_triager:
self.components_by_triager[triager].add(component)
else:
self.components_by_triager[triager] = {component}
def get_fallback(self, bug):
pc = bug["product"] + "::" + bug["component"]
if pc not in self.data:
mail = bug.get("triage_owner")
else:
cal = self.data[pc]
mail = cal.get_fallback_bzmail()
return self.people.get_moz_mail(mail)
def get_erroneous_bzmail(self):
return self.erroneous_bzmail
def add_erroneous_bzmail(self, bzmail, prod_comp, cal):
logger.error(f"No nick for {bzmail} for {prod_comp}")
fb = cal.get_fallback_mozmail()
if fb not in self.erroneous_bzmail:
self.erroneous_bzmail[fb] = {bzmail}
else:
self.erroneous_bzmail[fb].add(bzmail)
def get_nick(self, bzmail, prod_comp, cal):
if bzmail not in self.nicks:
def handler(user):
self.nicks[bzmail] = user["nick"]
BugzillaUser(user_names=[bzmail], user_handler=handler).wait()
if bzmail not in self.nicks:
self.add_erroneous_bzmail(bzmail, prod_comp, cal)
return None
return self.nicks[bzmail]
def get(self, bug, date, only_one=True, has_nick=True):
pc = bug["product"] + "::" + bug["component"]
if pc not in self.data:
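            # No calendar is configured for this component: fall back to the
            # bug's triage owner.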
mail = bug.get("triage_owner")
nick = bug.get("triage_owner_detail", {}).get("nick")
if utils.is_no_assignee(mail):
mail, nick = None, None
if mail is None:
logger.error("No triage owner for {}".format(pc))
self.add_component_for_triager(pc, mail)
if has_nick:
return mail, nick if only_one else [(mail, nick)]
return mail if only_one else [mail]
cal = self.data[pc]
persons = cal.get_persons(date)
fb = cal.get_fallback_bzmail()
if not persons or all(p is None for _, p in persons):
# the fallback is the triage owner
self.add_component_for_triager(pc, [fb])
return (fb, self.get_nick(fb, pc, cal)) if has_nick else fb
bzmails = []
for _, p in persons:
bzmails.append(fb if p is None else p)
self.add_component_for_triager(pc, bzmails)
if only_one:
bzmail = bzmails[randint(0, len(bzmails) - 1)]
if has_nick:
nick = self.get_nick(bzmail, pc, cal)
return bzmail, nick
return bzmail
if has_nick:
return [(bzmail, self.get_nick(bzmail, pc, cal)) for bzmail in bzmails]
return bzmails
def get_who_to_nag(self, date):
fallbacks = {}
date = lmdutils.get_date_ymd(date)
days = utils.get_config("round-robin", "days_to_nag", 7)
next_date = date + relativedelta(days=days)
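        # Look ``days`` ahead so fallbacks get warned before a hole in the
        # rotation actually occurs.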
for cal in self.all_calendars:
persons = cal.get_persons(next_date)
if persons and all(p is not None for _, p in persons):
continue
name = cal.get_team_name()
fb = cal.get_fallback_mozmail()
if fb not in fallbacks:
fallbacks[fb] = {}
if name not in fallbacks[fb]:
fallbacks[fb][name] = {"nobody": False, "persons": []}
info = fallbacks[fb][name]
if not persons:
info["nobody"] = True
else:
people_names = [n for n, p in persons if p is None]
if people_names:
info["persons"] += people_names
return fallbacks
| bsd-3-clause |
mszewczy/odoo | addons/event/wizard/__init__.py | 435 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
overtherain/scriptfile | software/googleAppEngine/lib/jinja2/jinja2/nodes.py | 122 | 28750 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from itertools import chain, izip
from collections import deque
from jinja2.utils import Markup, MethodType, FunctionType
#: the types we support for context functions
_context_function_types = (FunctionType, MethodType)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
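# Rough illustration of what the metaclass provides (not part of the original
# module): if a base node defines ``fields = ('a',)`` and a subclass declares
# ``fields = ('b',)``, the subclass ends up with the combined tuple
# ('a', 'b') and accepts both values as positional constructor arguments.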
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(object):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are three major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
__metaclass__ = NodeType
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
iter(attributes).next())
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes.  If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will all generate nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
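# Added illustration (not part of the original module): fields are passed to a
# node constructor positionally and attributes as keyword arguments, e.g.::
#
#     from jinja2 import Environment
#     env = Environment()
#     node = Add(Const(1), Const(2), lineno=1).set_environment(env)
#     node.left.value     # -> 1
#     node.as_const()     # -> 3
#
# ``Add`` and ``Const`` are defined further below; this is only a sketch of the
# construction convention described in the :class:`Node` docstring.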
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
complex values such as lists too. Only constants with a safe
representation (objects where ``eval(repr(x)) == x`` is true).
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
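# Illustrative note (added, not in the original source): from_untrusted accepts
# only values with a safe repr, e.g.
#     Const.from_untrusted([1, 2, 3]).as_const()   # -> [1, 2, 3]
#     Const.from_untrusted(object())               # raises Impossible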
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
    and `dyn_kwargs` have to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if isinstance(obj, _context_function_types):
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return obj(*args, **kwargs)
except Exception:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(unicode(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
| mit |
xianggong/m2c_unit_test | test/operator/remainder_char8char8/compile.py | 1861 | 4430 | #!/usr/bin/python
import os
import subprocess
import re
def runCommand(command):
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.wait()
return iter(p.stdout.readline, b'')
def dumpRunCommand(command, dump_file_name, postfix):
dumpFile = open(dump_file_name + postfix, "w+")
dumpFile.write(command + "\n")
for line in runCommand(command.split()):
dumpFile.write(line)
def rmFile(file_name):
cmd = "rm -rf " + file_name
runCommand(cmd.split())
def rnm_ir(file_name):
    # Prefix all unnamed variables and labels with 'tmp_'
ir_file_name = file_name + ".ll"
if os.path.isfile(ir_file_name):
        fo = open(ir_file_name, "r+")  # "rw+" is not a documented mode; "r+" reads then rewrites in place
lines = fo.readlines()
fo.seek(0)
fo.truncate()
for line in lines:
# Add entry block identifier
if "define" in line:
line += "entry:\n"
# Rename all unnamed variables
line = re.sub('\%([0-9]+)',
r'%tmp_\1',
line.rstrip())
# Also rename branch name
line = re.sub('(\;\ \<label\>\:)([0-9]+)',
r'tmp_\2:',
line.rstrip())
fo.write(line + '\n')
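# Example of the renaming performed above (hypothetical IR lines, added for
# illustration):
#     "%12 = add i32 %1, %2"   becomes   "%tmp_12 = add i32 %tmp_1, %tmp_2"
#     "; <label>:14"           becomes   "tmp_14:"
# i.e. every unnamed LLVM value and branch label gets a 'tmp_' prefix.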
def gen_ir(file_name):
# Directories
root_dir = '../../../'
header_dir = root_dir + "inc/"
# Headers
header = " -I " + header_dir
header += " -include " + header_dir + "m2c_buildin_fix.h "
header += " -include " + header_dir + "clc/clc.h "
header += " -D cl_clang_storage_class_specifiers "
gen_ir = "clang -S -emit-llvm -O0 -target r600-- -mcpu=verde "
cmd_gen_ir = gen_ir + header + file_name + ".cl"
dumpRunCommand(cmd_gen_ir, file_name, ".clang.log")
def asm_ir(file_name):
if os.path.isfile(file_name + ".ll"):
# Command to assemble IR to bitcode
gen_bc = "llvm-as "
gen_bc_src = file_name + ".ll"
gen_bc_dst = file_name + ".bc"
cmd_gen_bc = gen_bc + gen_bc_src + " -o " + gen_bc_dst
runCommand(cmd_gen_bc.split())
def opt_bc(file_name):
if os.path.isfile(file_name + ".bc"):
# Command to optmize bitcode
opt_bc = "opt --mem2reg "
opt_ir_src = file_name + ".bc"
opt_ir_dst = file_name + ".opt.bc"
cmd_opt_bc = opt_bc + opt_ir_src + " -o " + opt_ir_dst
runCommand(cmd_opt_bc.split())
def dis_bc(file_name):
if os.path.isfile(file_name + ".bc"):
# Command to disassemble bitcode
dis_bc = "llvm-dis "
dis_ir_src = file_name + ".opt.bc"
dis_ir_dst = file_name + ".opt.ll"
cmd_dis_bc = dis_bc + dis_ir_src + " -o " + dis_ir_dst
runCommand(cmd_dis_bc.split())
def m2c_gen(file_name):
if os.path.isfile(file_name + ".opt.bc"):
        # Command to generate SI assembly from the optimized bitcode
m2c_gen = "m2c --llvm2si "
m2c_gen_src = file_name + ".opt.bc"
cmd_m2c_gen = m2c_gen + m2c_gen_src
dumpRunCommand(cmd_m2c_gen, file_name, ".m2c.llvm2si.log")
# Remove file if size is 0
if os.path.isfile(file_name + ".opt.s"):
if os.path.getsize(file_name + ".opt.s") == 0:
rmFile(file_name + ".opt.s")
def m2c_bin(file_name):
if os.path.isfile(file_name + ".opt.s"):
        # Command to assemble the SI assembly into a kernel binary
m2c_bin = "m2c --si2bin "
m2c_bin_src = file_name + ".opt.s"
cmd_m2c_bin = m2c_bin + m2c_bin_src
dumpRunCommand(cmd_m2c_bin, file_name, ".m2c.si2bin.log")
def main():
# Commands
for file in os.listdir("./"):
if file.endswith(".cl"):
file_name = os.path.splitext(file)[0]
# Execute commands
gen_ir(file_name)
rnm_ir(file_name)
asm_ir(file_name)
opt_bc(file_name)
dis_bc(file_name)
m2c_gen(file_name)
m2c_bin(file_name)
if __name__ == "__main__":
main()
| gpl-2.0 |
chenss/ChatRoom | 14.5 已经能运行(虽然有很多Warning)的Django-nonrel框架/django/core/files/uploadhandler.py | 136 | 7193 | """
Base file upload handler classes, and the built-in concrete subclasses
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import TemporaryUploadedFile, InMemoryUploadedFile
from django.utils import importlib
__all__ = ['UploadFileException','StopUpload', 'SkipFile', 'FileUploadHandler',
'TemporaryFileUploadHandler', 'MemoryFileUploadHandler',
'load_handler', 'StopFutureHandlers']
class UploadFileException(Exception):
"""
Any error having to do with uploading files.
"""
pass
class StopUpload(UploadFileException):
"""
This exception is raised when an upload must abort.
"""
def __init__(self, connection_reset=False):
"""
        If ``connection_reset`` is ``True``, Django will halt the upload
without consuming the rest of the upload. This will cause the browser to
show a "connection reset" error.
"""
self.connection_reset = connection_reset
def __unicode__(self):
if self.connection_reset:
return u'StopUpload: Halt current upload.'
else:
return u'StopUpload: Consume request data, then halt.'
class SkipFile(UploadFileException):
"""
This exception is raised by an upload handler that wants to skip a given file.
"""
pass
class StopFutureHandlers(UploadFileException):
"""
    Upload handlers that have handled a file and do not want future handlers to
run should raise this exception instead of returning None.
"""
pass
class FileUploadHandler(object):
"""
Base class for streaming upload handlers.
"""
chunk_size = 64 * 2 ** 10 #: The default chunk size is 64 KB.
def __init__(self, request=None):
self.file_name = None
self.content_type = None
self.content_length = None
self.charset = None
self.request = request
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Handle the raw input from the client.
Parameters:
:input_data:
An object that supports reading via .read().
:META:
``request.META``.
:content_length:
The (integer) value of the Content-Length header from the
client.
:boundary: The boundary from the Content-Type header. Be sure to
prepend two '--'.
"""
pass
def new_file(self, field_name, file_name, content_type, content_length,
charset=None, content_type_extra=None):
"""
Signal that a new file has been started.
Warning: As with any data from the client, you should not trust
content_length (and sometimes won't even get it).
"""
self.field_name = field_name
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
self.charset = charset
if content_type_extra is None:
content_type_extra = {}
self.content_type_extra = content_type_extra
def receive_data_chunk(self, raw_data, start):
"""
Receive data from the streamed upload parser. ``start`` is the position
in the file of the chunk.
"""
raise NotImplementedError()
def file_complete(self, file_size):
"""
Signal that a file has completed. File size corresponds to the actual
size accumulated by all the chunks.
Subclasses should return a valid ``UploadedFile`` object.
"""
raise NotImplementedError()
def upload_complete(self):
"""
Signal that the upload is complete. Subclasses should perform cleanup
that is necessary for this handler.
"""
pass
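# A minimal custom handler might look like the sketch below (illustrative only,
# not part of Django; the class name and size limit are made up):
#
#     class RejectingUploadHandler(FileUploadHandler):
#         """Abort any upload whose declared size exceeds 1 MB."""
#         def new_file(self, *args, **kwargs):
#             super(RejectingUploadHandler, self).new_file(*args, **kwargs)
#             if self.content_length and self.content_length > 2 ** 20:
#                 raise StopUpload(connection_reset=True)
#         def receive_data_chunk(self, raw_data, start):
#             return raw_data   # pass the data on to the next handler
#         def file_complete(self, file_size):
#             return None       # let a later handler build the UploadedFile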
class TemporaryFileUploadHandler(FileUploadHandler):
"""
Upload handler that streams data into a temporary file.
"""
def __init__(self, *args, **kwargs):
super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs)
def new_file(self, file_name, *args, **kwargs):
"""
Create the file object to append to as data is coming in.
"""
super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs)
self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset)
def receive_data_chunk(self, raw_data, start):
self.file.write(raw_data)
def file_complete(self, file_size):
self.file.seek(0)
self.file.size = file_size
return self.file
class MemoryFileUploadHandler(FileUploadHandler):
"""
File upload handler to stream uploads into memory (used for small files).
"""
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Use the content_length to signal whether or not this handler should be in use.
"""
        # Check the content-length header to see if this handler should be used.
        # If the post is too large, we cannot use the Memory handler.
if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
self.activated = False
else:
self.activated = True
def new_file(self, *args, **kwargs):
super(MemoryFileUploadHandler, self).new_file(*args, **kwargs)
if self.activated:
self.file = StringIO()
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
"""
Add the data to the StringIO file.
"""
if self.activated:
self.file.write(raw_data)
else:
return raw_data
def file_complete(self, file_size):
"""
Return a file object if we're activated.
"""
if not self.activated:
return
self.file.seek(0)
return InMemoryUploadedFile(
file = self.file,
field_name = self.field_name,
name = self.file_name,
content_type = self.content_type,
size = file_size,
charset = self.charset
)
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = importlib.import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing upload handler module %s: "%s"' % (module, e))
except ValueError, e:
raise ImproperlyConfigured('Error importing upload handler module. Is FILE_UPLOAD_HANDLERS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" upload handler backend' % (module, attr))
return cls(*args, **kwargs)
| gpl-2.0 |
SUNY-Albany-CCI/INF_202_Survey | languages/fr.py | 13 | 7668 | # coding: utf8
{
'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%s %%{row} deleted': '%s rangées supprimées',
'%s %%{row} updated': '%s rangées mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'Administrative Interface': 'Administrative Interface',
'Administrative interface': "Interface d'administration",
'Ajax Recipes': 'Recettes Ajax',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Contrôleur',
'Copyright': 'Copyright',
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s select',
'db': 'db',
'DB Model': 'Modèle DB',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'E-mail',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'FAQ',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Group ID': 'Groupe ID',
'Groups': 'Groups',
'Hello World': 'Bonjour le monde',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Importer/Exporter',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Internal State': 'État interne',
'Introduction': 'Introduction',
'Invalid email': 'E-mail invalide',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Key': 'Key',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live chat': 'Chat live',
'Live Chat': 'Live Chat',
'login': 'connectez-vous',
'Login': 'Connectez-vous',
'logout': 'déconnectez-vous',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'Lost password?': 'Lost password?',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu modèle',
'My Sites': 'My Sites',
'Name': 'Nom',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'Object or table name': 'Object or table name',
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'Password': 'Mot de passe',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous 100 rows': '100 lignes précédentes',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Examples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID d'enregistrement",
'Record id': "id d'enregistrement",
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration identifier': 'Registration identifier',
'Registration key': "Clé d'enregistrement",
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Semantic': 'Sémantique',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'état',
'Statistics': 'Statistics',
'Stylesheet': 'Feuille de style',
'submit': 'submit',
'Submit': 'Soumettre',
'Support': 'Support',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "query" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Twitter': 'Twitter',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT pour construire des requêtes plus complexes.',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User ID': 'ID utilisateur',
'User Voice': 'User Voice',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenu',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'You are successfully running web2py': 'Vous roulez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You visited the url %s': "Vous avez visité l'URL %s",
}
| apache-2.0 |
etkirsch/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
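# Added note: with the uncommented settings above the grid has
# 3 (max_df) * 2 (ngram_range) * 2 (alpha) * 2 (penalty) = 24 combinations,
# i.e. roughly 24 * 3 = 72 pipeline fits with the default 3-fold CV
# (plus one final refit of the best candidate).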
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
maxamillion/anaconda | pyanaconda/ui/gui/spokes/advstorage/iscsi.py | 3 | 19069 | # iSCSI configuration dialog
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <[email protected]>
#
from IPy import IP
from collections import namedtuple
from gi.repository import GLib
from pyanaconda import constants
from pyanaconda.threads import threadMgr, AnacondaThread
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import escape_markup
from pyanaconda.i18n import _
from pyanaconda import nm
from pyanaconda.regexes import ISCSI_IQN_NAME_REGEX, ISCSI_EUI_NAME_REGEX
__all__ = ["ISCSIDialog"]
STYLE_NONE = 0
STYLE_CHAP = 1
STYLE_REVERSE_CHAP = 2
Credentials = namedtuple("Credentials", ["style",
"targetIP", "initiator", "username",
"password", "rUsername", "rPassword"])
NodeStoreRow = namedtuple("NodeStoreRow", ["selected", "notLoggedIn", "name", "iface", "portal"])
def discover_no_credentials(builder):
return Credentials(STYLE_NONE,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
"", "", "", "")
def discover_chap(builder):
return Credentials(STYLE_CHAP,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
builder.get_object("chapUsernameEntry").get_text(),
builder.get_object("chapPasswordEntry").get_text(),
"", "")
def discover_reverse_chap(builder):
return Credentials(STYLE_REVERSE_CHAP,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
builder.get_object("rchapUsernameEntry").get_text(),
builder.get_object("rchapPasswordEntry").get_text(),
builder.get_object("rchapReverseUsername").get_text(),
builder.get_object("rchapReversePassword").get_text())
# This list maps the current page from the authNotebook to a function to grab
# credentials out of the UI. This works as long as authNotebook keeps the
# filler page at the front.
discoverMap = [discover_no_credentials, discover_chap, discover_reverse_chap]
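# Illustration (added comment): the dialog code below dispatches on the active
# notebook page, e.g.
#     credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
# so page 1 (CHAP) ends up calling discover_chap() and reading the CHAP entries.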
def login_no_credentials(builder):
return Credentials(STYLE_NONE,
"", "",
"", "", "", "")
def login_chap(builder):
return Credentials(STYLE_CHAP,
"", "",
builder.get_object("loginChapUsernameEntry").get_text(),
builder.get_object("loginChapPasswordEntry").get_text(),
"", "")
def login_reverse_chap(builder):
return Credentials(STYLE_REVERSE_CHAP,
"", "",
builder.get_object("loginRchapUsernameEntry").get_text(),
builder.get_object("loginRchapPasswordEntry").get_text(),
builder.get_object("loginRchapReverseUsername").get_text(),
builder.get_object("loginRchapReversePassword").get_text())
# And this list maps the current page from the loginAuthNotebook to a function
# to grab credentials out of the UI. This works as long as loginAuthNotebook
# keeps the filler page at the front, and we check to make sure "Use the
# credentials from discovery" is not selected first.
loginMap = [login_no_credentials, login_chap, login_reverse_chap]
def credentials_valid(credentials):
if credentials.style == STYLE_NONE:
return True
elif credentials.style == STYLE_CHAP:
return credentials.username.strip() != "" and credentials.password != ""
elif credentials.style == STYLE_REVERSE_CHAP:
return credentials.username.strip() != "" and credentials.password != "" and \
credentials.rUsername.strip() != "" and credentials.rPassword != ""
class ISCSIDialog(GUIObject):
builderObjects = ["iscsiDialog", "nodeStore", "nodeStoreFiltered"]
mainWidgetName = "iscsiDialog"
uiFile = "spokes/advstorage/iscsi.glade"
def __init__(self, data, storage):
GUIObject.__init__(self, data)
self.storage = storage
self.iscsi = self.storage.iscsi()
self._discoveryError = None
self._loginError = False
self._discoveredNodes = []
self._update_devicetree = False
self._authTypeCombo = self.builder.get_object("authTypeCombo")
self._authNotebook = self.builder.get_object("authNotebook")
self._iscsiNotebook = self.builder.get_object("iscsiNotebook")
self._loginButton = self.builder.get_object("loginButton")
self._loginAuthTypeCombo = self.builder.get_object("loginAuthTypeCombo")
self._loginAuthNotebook = self.builder.get_object("loginAuthNotebook")
self._loginGrid = self.builder.get_object("loginGrid")
self._loginConditionNotebook = self.builder.get_object("loginConditionNotebook")
self._configureGrid = self.builder.get_object("configureGrid")
self._conditionNotebook = self.builder.get_object("conditionNotebook")
self._bindCheckbox = self.builder.get_object("bindCheckbutton")
self._startButton = self.builder.get_object("startButton")
self._okButton = self.builder.get_object("okButton")
self._cancelButton = self.builder.get_object("cancelButton")
self._initiatorEntry = self.builder.get_object("initiatorEntry")
self._store = self.builder.get_object("nodeStore")
self._storeFilter = self.builder.get_object("nodeStoreFiltered")
def refresh(self):
self._bindCheckbox.set_active(bool(self.iscsi.ifaces))
self._bindCheckbox.set_sensitive(self.iscsi.mode == "none")
self._authTypeCombo.set_active(0)
self._startButton.set_sensitive(True)
self._loginAuthTypeCombo.set_active(0)
self._storeFilter.set_visible_column(1)
self._initiatorEntry.set_text(self.iscsi.initiator)
self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
@property
def selectedNames(self):
return [itr[2] for itr in self._store if itr[0]]
def run(self):
rc = self.window.run()
self.window.destroy()
# We need to call this to get the device nodes to show up
# in our devicetree.
if self._update_devicetree:
self.storage.devicetree.populate()
return rc
##
## DISCOVERY
##
def on_auth_type_changed(self, widget, *args):
self._authNotebook.set_current_page(widget.get_active())
# When we change the notebook, we also need to reverify the credentials
# in order to set the Start button sensitivity.
self.on_discover_field_changed()
def _discover(self, credentials, bind):
# This needs to be in its own thread, not marked with gtk_action_* because it's
# called from on_start_clicked, which is in the GTK main loop. Those decorators
# won't do anything special in that case.
if not self.iscsi.initiatorSet:
self.iscsi.initiator = credentials.initiator
# interfaces created here affect nodes that iscsi.discover would return
if self.iscsi.mode == "none" and not bind:
self.iscsi.delete_interfaces()
elif (self.iscsi.mode == "bind"
or self.iscsi.mode == "none" and bind):
activated = set(nm.nm_activated_devices())
created = set(self.iscsi.ifaces.values())
self.iscsi.create_interfaces(activated - created)
try:
self._discoveredNodes = self.iscsi.discover(credentials.targetIP,
username=credentials.username,
password=credentials.password,
r_username=credentials.rUsername,
r_password=credentials.rPassword)
except IOError as e:
self._discoveryError = str(e)
return
if len(self._discoveredNodes) == 0:
self._discoveryError = "No nodes discovered."
def _check_discover(self, *args):
if threadMgr.get(constants.THREAD_ISCSI_DISCOVER):
return True
# When iscsi discovery is done, update the UI. We don't need to worry
# about the user escaping from the dialog because all the buttons are
# marked insensitive.
spinner = self.builder.get_object("waitSpinner")
spinner.stop()
if self._discoveryError:
# Failure. Display some error message and leave the user on the
# dialog to try again.
self.builder.get_object("discoveryErrorLabel").set_text(self._discoveryError)
self._discoveryError = None
self._conditionNotebook.set_current_page(2)
self._set_configure_sensitive(True)
else:
# Success. Now populate the node store and kick the user on over to
# that subscreen.
self._add_nodes(self._discoveredNodes)
self._iscsiNotebook.set_current_page(1)
# If some form of login credentials were used for discovery,
# default to using the same for login.
if self._authTypeCombo.get_active() != 0:
self._loginAuthTypeCombo.set_active(3)
# We always want to enable this button, in case the user's had enough.
self._cancelButton.set_sensitive(True)
return False
def _set_configure_sensitive(self, sensitivity):
for child in self._configureGrid.get_children():
if child == self._initiatorEntry:
self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
elif child == self._bindCheckbox:
self._bindCheckbox.set_sensitive(sensitivity and self.iscsi.mode == "none")
elif child != self._conditionNotebook:
child.set_sensitive(sensitivity)
def on_start_clicked(self, *args):
# First, update some widgets to not be usable while discovery happens.
self._startButton.hide()
self._cancelButton.set_sensitive(False)
self._okButton.set_sensitive(False)
self._conditionNotebook.set_current_page(1)
self._set_configure_sensitive(False)
self._initiatorEntry.set_sensitive(False)
# Now get the node discovery credentials.
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
discoveredLabelText = _("The following nodes were discovered using the iSCSI initiator "\
"<b>%(initiatorName)s</b> using the target IP address "\
"<b>%(targetAddress)s</b>. Please select which nodes you "\
"wish to log into:") % \
{"initiatorName": escape_markup(credentials.initiator),
"targetAddress": escape_markup(credentials.targetIP)}
discoveredLabel = self.builder.get_object("discoveredLabel")
discoveredLabel.set_markup(discoveredLabelText)
bind = self._bindCheckbox.get_active()
spinner = self.builder.get_object("waitSpinner")
spinner.start()
threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_DISCOVER, target=self._discover,
args=(credentials, bind)))
GLib.timeout_add(250, self._check_discover)
# When the initiator name, ip address, and any auth fields are filled in
# valid, only then should the Start button be made sensitive.
def _target_ip_valid(self):
widget = self.builder.get_object("targetEntry")
text = widget.get_text()
try:
IP(text)
return True
except ValueError:
return False
def _initiator_name_valid(self):
widget = self.builder.get_object("initiatorEntry")
text = widget.get_text()
stripped = text.strip()
#iSCSI Naming Standards: RFC 3720 and RFC 3721
#iSCSI Name validation using regex. Name should either match IQN format or EUI format.
return bool(ISCSI_IQN_NAME_REGEX.match(stripped) or ISCSI_EUI_NAME_REGEX.match(stripped))
def on_discover_field_changed(self, *args):
# Make up a credentials object so we can test if it's valid.
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
sensitive = self._target_ip_valid() and self._initiator_name_valid() and credentials_valid(credentials)
self._startButton.set_sensitive(sensitive)
##
## LOGGING IN
##
def _add_nodes(self, nodes):
for node in nodes:
iface = self.iscsi.ifaces.get(node.iface, node.iface)
portal = "%s:%s" % (node.address, node.port)
self._store.append([False, True, node.name, iface, portal])
# We should select the first node by default.
self._store[0][0] = True
def on_login_type_changed(self, widget, *args):
self._loginAuthNotebook.set_current_page(widget.get_active())
# When we change the notebook, we also need to reverify the credentials
# in order to set the Log In button sensitivity.
self.on_login_field_changed()
def on_row_toggled(self, button, path):
if not path:
return
        # Toggle the selection state of just this row.
itr = self._storeFilter.get_iter(path)
itr = self._storeFilter.convert_iter_to_child_iter(itr)
self._store[itr][0] = not self._store[itr][0]
def _login(self, credentials):
for row in self._store:
obj = NodeStoreRow(*row)
if not obj.selected:
continue
for node in self._discoveredNodes:
if obj.notLoggedIn and node.name == obj.name \
and obj.portal == "%s:%s" % (node.address, node.port):
# when binding interfaces match also interface
if self.iscsi.ifaces and \
obj.iface != self.iscsi.ifaces[node.iface]:
continue
(rc, msg) = self.iscsi.log_into_node(node,
username=credentials.username,
password=credentials.password,
r_username=credentials.rUsername,
r_password=credentials.rPassword)
if not rc:
self._loginError = msg
return
self._update_devicetree = True
row[1] = False
def _check_login(self, *args):
if threadMgr.get(constants.THREAD_ISCSI_LOGIN):
return True
spinner = self.builder.get_object("loginSpinner")
spinner.stop()
spinner.hide()
if self._loginError:
self.builder.get_object("loginErrorLabel").set_text(self._loginError)
self._loginError = None
self._loginConditionNotebook.set_current_page(1)
self._cancelButton.set_sensitive(True)
self._loginButton.set_sensitive(True)
else:
anyLeft = False
self._loginConditionNotebook.set_current_page(0)
# Select the now-first target for the user in case they want to
# log into another one.
for row in self._store:
if row[1]:
row[0] = True
anyLeft = True
# And make the login button sensitive if there are any more
# nodes to login to.
self._loginButton.set_sensitive(True)
break
self._okButton.set_sensitive(True)
# Once a node has been logged into, it doesn't make much sense to let
# the user cancel. Cancel what, exactly?
self._cancelButton.set_sensitive(False)
if not anyLeft:
self.window.response(1)
self._set_login_sensitive(True)
return False
def _set_login_sensitive(self, sensitivity):
for child in self._loginGrid.get_children():
if child != self._loginConditionNotebook:
child.set_sensitive(sensitivity)
def on_login_clicked(self, *args):
        # Make the buttons insensitive while we work.
self._okButton.set_sensitive(False)
self._cancelButton.set_sensitive(False)
self._loginButton.set_sensitive(False)
self._loginConditionNotebook.set_current_page(0)
self._set_login_sensitive(False)
spinner = self.builder.get_object("loginSpinner")
spinner.start()
spinner.set_visible(True)
spinner.show()
# Are we reusing the credentials from the discovery step? If so, grab them
# out of the UI again here. They should still be there.
page = self._loginAuthNotebook.get_current_page()
if page == 3:
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
else:
credentials = loginMap[page](self.builder)
threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_LOGIN, target=self._login,
args=(credentials,)))
GLib.timeout_add(250, self._check_login)
def on_login_field_changed(self, *args):
# Make up a credentials object so we can test if it's valid.
page = self._loginAuthNotebook.get_current_page()
if page == 3:
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
else:
credentials = loginMap[page](self.builder)
self._loginButton.set_sensitive(credentials_valid(credentials))
| gpl-2.0 |
gaperez64/acacia4aiger | source/acacia_plus/library_linker.py | 1 | 16085 | # This file is part of Acacia+, a tool for synthesis of reactive systems using antichain-based techniques
# Copyright (C) 2011-2013 UMONS-ULB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from ctypes import *
import os
from constants import *
#### STRUCTURES ####
#### GList C structure
class GList(Structure):
pass
GList._fields_ = [("data", c_void_p),
("next", POINTER(GList)),
("pred", POINTER(GList))]
#### GNode C structure
class GNode(Structure):
pass
GNode._fields_ = [("data", POINTER(c_void_p)),
("next", POINTER(GNode)),
("pred", POINTER(GNode)),
("parent", POINTER(GNode)),
("children", POINTER(GNode))]
#### AlphabetInfo C structure
class AlphabetInfo(Structure):
_fields_ = [("input_size", c_int),
("output_size", c_int),
("input", POINTER(c_char_p)),
("output", POINTER(c_char_p)),
("sigma_input_size", c_int),
("sigma_output_size", c_int),
("sigma_input", POINTER(POINTER(c_ubyte))),
("sigma_output", POINTER(POINTER(c_ubyte)))]
#### Label C structure
class Label(Structure):
_fields_ = [("disjunction_size", c_int),
("disjunction", POINTER(POINTER(c_ubyte)))]
#### TBUCW_tran C structure
class TBUCW_tran(Structure):
pass
#### TBUCW_state C structure
class TBUCW_state(Structure):
_fields_ = [("state_label", c_int),
("nb_in_tran", c_int),
("nb_out_tran", c_int),
("in_tran", POINTER(POINTER(TBUCW_tran))),
("out_tran", POINTER(POINTER(TBUCW_tran))),
("is_accepting", c_byte),
("player", c_byte),
("unbounded", c_byte),
("is_complete", c_byte),
("is_trash", c_byte)]
TBUCW_tran._fields_ = [("state_from", POINTER(TBUCW_state)),
("state_to", POINTER(TBUCW_state)),
("label", POINTER(Label))]
#### TBUCW C structure
class TBUCW(Structure):
_fields_ = [("nb_states", c_int),
("initial_state_index", c_int),
("alphabet", POINTER(AlphabetInfo)),
("v_I", POINTER(POINTER(c_int))),
("v_O", POINTER(POINTER(c_int))),
("dimension", c_int),
("states", POINTER(POINTER(TBUCW_state)))]
#### Antichain C structure
class Antichain(Structure):
_fields_ = [("size", c_int),
("incomparable_elements", POINTER(GList))]
#### SafetyGame C structure
class SafetyGame(Structure):
_fields_ = [("positions_O", POINTER(Antichain)),
("positions_I", POINTER(Antichain)),
("first_to_play", c_byte)]
#### CFInfo C structure
class CFInfo(Structure):
_fields_ = [("starting_player", c_byte),
("composition_size", c_int),
("cf_range_size_sum", c_int),
("k_value", POINTER(c_int)),
("nb_lost_bits", POINTER(c_int)),
("nb_states_by_integer", POINTER(c_int)),
("cf_range_size", POINTER(c_int)),
("end_index_starting_p", POINTER(c_int)),
("start_index_other_p", POINTER(c_int)),
("first_state_other_p_index", POINTER(c_int)),
("automaton", POINTER(POINTER(TBUCW)))]
#### CountingFunction C structure
class CountingFunction(Structure):
_fields_ = [("player", c_byte),
("sum_of_counters", c_int),
("max_counter", c_int),
("mapping", POINTER(c_ubyte)),
("info", POINTER(GNode))]
#### Vector C structure
class Vector(Structure):
_fields_ = [("dimension", c_int),
("max_value", POINTER(c_int)),
("values", POINTER(c_int))]
#### Tuple C structure
class Tuple(Structure):
_fields_ = [("cf", POINTER(CountingFunction)),
("credits", POINTER(Vector))]
#### OtfurResult C structure
class OtfurResult(Structure):
_fields_ = [("winning_positions", POINTER(SafetyGame)),
("otfur_time", c_float),
("winning_positions_computation_time", c_float),
("nb_cf_passed", c_int),
("nb_iter", c_int)]
#### TSTransition C structure
class TSTransition(Structure):
_fields_ = [("from", c_int),
("to", c_int),
("label", POINTER(c_char))]
#### TSState C structure
class TSState(Structure):
_fields_ = [("player", c_byte),
("nb_tr", c_int),
("transitions", POINTER(GList))]
#### TransitionSystem C structure
class TransitionSystem(Structure):
_fields_ = [("nb_states_PO", c_int),
("nb_states_PI", c_int),
("size_states_PO", c_int),
("size_states_PI", c_int),
("nb_initial_states", c_int),
("initial_states", POINTER(c_int)),
("states", POINTER(POINTER(TSState)))]
#### FUNCTIONS LOADING ####
if os.uname()[0] == "Darwin":
lib = cdll.LoadLibrary(MAIN_DIR_PATH +
"lib/acacia_plus.dylib")
elif os.uname()[0] == "Linux":
lib = cdll.LoadLibrary(MAIN_DIR_PATH +
"lib/acacia_plus.so")
else:
print "OS not supported"
exit(0)
##TBUCW
init_tbucw_c = lib.init_tbucw
init_tbucw_c.argtypes = [c_int]
init_tbucw_c.restype = POINTER(TBUCW)
add_state_c = lib.add_state
add_state_c.argtypes = [POINTER(TBUCW), c_int, c_int, c_int, c_byte, c_byte, c_byte, c_byte]
add_state_c.restype = None
add_tran_c = lib.add_tran
add_tran_c.argtypes = [POINTER(TBUCW), c_char_p, c_int, c_int, c_int]
add_tran_c.restype = None
set_initial_state_c = lib.set_initial_state
set_initial_state_c.argtypes = [POINTER(TBUCW), c_int]
set_initial_state_c.restype = None
set_is_accepting_c = lib.set_is_accepting
set_is_accepting_c.argtypes = [POINTER(TBUCW), c_int, c_byte]
set_is_accepting_c.restype = None
set_alphabet_c = lib.set_alphabet
set_alphabet_c.argtypes = [POINTER(TBUCW), POINTER(AlphabetInfo)]
set_alphabet_c.restype = POINTER(TBUCW)
report_accepting_states_c = lib.report_accepting_states
report_accepting_states_c.argtypes = [POINTER(TBUCW)]
report_accepting_states_c.restype = None
duplicate_all_tran_c = lib.duplicate_all_tran
duplicate_all_tran_c.argtypes = [POINTER(TBUCW)]
duplicate_all_tran_c.restype = None
set_is_complete_c = lib.set_is_complete
set_is_complete_c.argtypes = [POINTER(TBUCW)]
set_is_complete_c.restype = None
is_accepting_c = lib.is_accepting
is_accepting_c.argtypes = [POINTER(TBUCW), c_int]
is_accepting_c.restype = c_byte
is_complete_c = lib.is_complete
is_complete_c.argtypes = [POINTER(TBUCW), c_int]
is_complete_c.restype = c_byte
get_player_id_c = lib.get_player_id
get_player_id_c.argtypes = [POINTER(TBUCW), c_int]
get_player_id_c.restype = c_byte
get_formula_c = lib.get_formula
get_formula_c.argtypes = [POINTER(AlphabetInfo), c_byte, c_int]
get_formula_c.restype = c_char_p
get_tbucw_size_c = lib.get_tbucw_size
get_tbucw_size_c.argtypes = [POINTER(TBUCW)]
get_tbucw_size_c.restype = c_int
init_alphabet_c = lib.init_alphabet
init_alphabet_c.argtypes = [c_int, c_int]
init_alphabet_c.restype = POINTER(AlphabetInfo)
add_input_prop_c = lib.add_input_prop
add_input_prop_c.argtypes = [POINTER(AlphabetInfo), c_char_p]
add_input_prop_c.restype = None
add_output_prop_c = lib.add_output_prop
add_output_prop_c.argtypes = [POINTER(AlphabetInfo), c_char_p]
add_output_prop_c.restype = None
compute_alphabets_c = lib.compute_alphabets
compute_alphabets_c.argtypes = [POINTER(AlphabetInfo)]
compute_alphabets_c.restype = None
get_succ_from_sigma_index_c = lib.get_succ_from_sigma_index
get_succ_from_sigma_index_c.argtypes = [POINTER(TBUCW), c_int, c_int]
get_succ_from_sigma_index_c.restype = POINTER(c_int)
get_all_succ_c = lib.get_all_succ
get_all_succ_c.argtypes = [POINTER(TBUCW), c_int]
get_all_succ_c.restype = POINTER(c_int)
print_tbucw_c = lib.print_tbucw
print_tbucw_c.argtypes = [POINTER(TBUCW)]
print_tbucw_c.restype = None
print_tbucw_stats_c = lib.print_tbucw_stats
print_tbucw_stats_c.argtypes = [POINTER(TBUCW)]
print_tbucw_stats_c.restype = None
print_formula_c = lib.print_formula
print_formula_c.argtypes = [POINTER(TBUCW), POINTER(c_ubyte), c_int, POINTER(c_char_p)]
print_formula_c.restype = None
free_tbucw_c = lib.free_tbucw
free_tbucw_c.argtypes = [POINTER(TBUCW)]
free_tbucw_c.restype = None
optimize_tbucw_c = lib.optimize_tbucw
optimize_tbucw_c.argtypes = [POINTER(TBUCW), POINTER(c_byte)]
optimize_tbucw_c.restype = POINTER(TBUCW)
reset_tbucw_states_labels_c = lib.reset_tbucw_states_labels
reset_tbucw_states_labels_c.argtypes = [POINTER(TBUCW)]
reset_tbucw_states_labels_c.restype = None
set_weight_function_c = lib.set_weight_function
set_weight_function_c.argtypes = [POINTER(TBUCW), c_byte, POINTER(POINTER(c_int))]
set_weight_function_c.restype = POINTER(TBUCW)
set_dimension_c = lib.set_dimension
set_dimension_c.argtypes = [POINTER(TBUCW), c_int]
set_dimension_c.restype = POINTER(TBUCW)
##GList
is_link_null_c = lib.is_link_null
is_link_null_c.argtypes = [POINTER(GList)]
is_link_null_c.restype = c_byte
get_link_data_c = lib.get_link_data
get_link_data_c.argtypes = [POINTER(GList)]
get_link_data_c.restype = POINTER(Tuple)
## CountingFunction
build_cf_info_c = lib.build_cf_info
build_cf_info_c.argtypes = [POINTER(TBUCW), c_int]
build_cf_info_c.restype = POINTER(GNode)
compose_cf_info_c = lib.compose_cf_info
compose_cf_info_c.argtypes = [POINTER(POINTER(GNode)), c_int]
compose_cf_info_c.restype = POINTER(GNode)
##Tuple
build_initial_tuple_c = lib.build_initial_tuple
build_initial_tuple_c.argtypes = [POINTER(GNode), c_int, POINTER(c_int)]
build_initial_tuple_c.restype = POINTER(Tuple)
set_not_defined_tuple_c = lib.set_not_defined_tuple
set_not_defined_tuple_c.argtypes = None
set_not_defined_tuple_c.restype = None
compare_tuples_c = lib.compare_tuples
compare_tuples_c.argtypes = [POINTER(Tuple), POINTER(Tuple)]
compare_tuples_c.restype = c_byte
tuple_succ_c = lib.tuple_succ
tuple_succ_c.argtypes = [POINTER(Tuple), c_int, POINTER(AlphabetInfo)]
tuple_succ_c.restype = POINTER(Tuple)
clone_tuple_c = lib.clone_tuple
clone_tuple_c.argtypes = [POINTER(Tuple)]
clone_tuple_c.restype = c_void_p
compose_tuples_c = lib.compose_tuples
compose_tuples_c.argtypes = [POINTER(POINTER(Tuple)), c_int, POINTER(GNode)]
compose_tuples_c.restype = c_void_p
print_tuple_c = lib.print_tuple
print_tuple_c.argtypes = [POINTER(Tuple)]
print_tuple_c.restype = None
free_tuple_full_c = lib.free_tuple_full
free_tuple_full_c.argtypes = [POINTER(Tuple)]
free_tuple_full_c.restype = None
free_not_defined_tuple_c = lib.free_not_defined_tuple
free_not_defined_tuple_c.argtypes = None
free_not_defined_tuple_c.restype = None
##Antichain
PRINT_ELEMENT_FUNC = CFUNCTYPE(None, c_void_p)
PRINT_TUPLE_FUNC = CFUNCTYPE(None, POINTER(Tuple))
COMPARE_TUPLES_FUNC = CFUNCTYPE(c_byte, POINTER(Tuple), POINTER(Tuple))
FREE_TUPLE_FULL_FUNC = CFUNCTYPE(None, POINTER(Tuple))
CLONE_TUPLE_FUNC = CFUNCTYPE(c_void_p, POINTER(Tuple))
COMPOSE_TUPLES_FUNC = CFUNCTYPE(c_void_p, POINTER(POINTER(Tuple)), c_int, POINTER(GNode))
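# Hedged illustration (not part of the original bindings): the CFUNCTYPE
# prototypes above let plain Python callables be handed to the C side as
# callbacks; the wrapper object must be kept alive for as long as C may call
# it. A hypothetical wrapper for print_antichain_c could look like:
#
#     def _print_tuple(tuple_ptr):
#         print_tuple_c(tuple_ptr)
#     _print_tuple_cb = PRINT_TUPLE_FUNC(_print_tuple)
#     # print_antichain_c(some_antichain, _print_tuple_cb)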
compare_antichains_c = lib.compare_antichains
compare_antichains_c.argtypes = [POINTER(Antichain), POINTER(Antichain), COMPARE_TUPLES_FUNC]
compare_antichains_c.restype = c_byte
contains_element_c = lib.contains_element
contains_element_c.argtypes = [POINTER(Antichain), c_void_p, COMPARE_TUPLES_FUNC]
contains_element_c.restype = c_byte
compose_antichains_c = lib.compose_antichains
compose_antichains_c.argtypes = [POINTER(POINTER(Antichain)), c_int, COMPOSE_TUPLES_FUNC, POINTER(GNode)]
compose_antichains_c.restype = POINTER(Antichain)
clone_antichain_c = lib.clone_antichain
clone_antichain_c.argtypes = [POINTER(Antichain), CLONE_TUPLE_FUNC]
clone_antichain_c.restype = POINTER(Antichain)
free_antichain_full_c = lib.free_antichain_full
free_antichain_full_c.argtypes = [POINTER(Antichain), FREE_TUPLE_FULL_FUNC]
free_antichain_full_c.restype = None
print_antichain_c = lib.print_antichain
print_antichain_c.argtypes = [POINTER(Antichain), PRINT_TUPLE_FUNC]
print_antichain_c.restype = None
##BackwardAlgorithm
build_start_antichain_c = lib.build_start_antichain
build_start_antichain_c.argtypes = [c_byte, POINTER(GNode)]
build_start_antichain_c.restype = POINTER(Antichain)
pre_c = lib.pre
pre_c.argtypes = [POINTER(Antichain), POINTER(Antichain), c_byte, POINTER(AlphabetInfo)]
pre_c.restype = POINTER(Antichain)
pre_crit_c = lib.pre_crit
pre_crit_c.argtypes = [POINTER(Antichain), POINTER(Antichain), POINTER(c_int), POINTER(AlphabetInfo)]
pre_crit_c.restype = POINTER(Antichain)
compute_critical_set_c = lib.compute_critical_set
compute_critical_set_c.argtypes = [POINTER(Antichain), POINTER(AlphabetInfo)]
compute_critical_set_c.restype = POINTER(c_int)
##SafetyGame
new_safety_game_c = lib.new_safety_game
new_safety_game_c.argtypes = [POINTER(Antichain), POINTER(Antichain), c_byte]
new_safety_game_c.restype = POINTER(SafetyGame)
add_credits_to_safety_game_c = lib.add_credits_to_safety_game
add_credits_to_safety_game_c.argtypes = [POINTER(SafetyGame), c_int, POINTER(c_int)]
add_credits_to_safety_game_c.restype = POINTER(SafetyGame)
free_safety_game_c = lib.free_safety_game
free_safety_game_c.argtypes = [POINTER(SafetyGame)]
free_safety_game_c.restype = None
##Cache
initialize_cache_c = lib.initialize_cache
initialize_cache_c.argtypes = None
initialize_cache_c.restype = None
initialize_cache_critical_set_c = lib.initialize_cache_critical_set
initialize_cache_critical_set_c.argtypes = None
initialize_cache_critical_set_c.restype = None
clean_cache_c = lib.clean_cache
clean_cache_c.argtypes = None
clean_cache_c.restype = None
clean_cache_critical_set_c = lib.clean_cache_critical_set
clean_cache_critical_set_c.argtypes = None
clean_cache_critical_set_c.restype = None
##ForwardAlgorithm
otfur_c = lib.otfur
otfur_c.argtypes = [POINTER(Antichain), POINTER(Antichain), POINTER(GNode), POINTER(AlphabetInfo), c_byte, c_int, POINTER(c_int)]
otfur_c.restype = POINTER(OtfurResult)
##Synthesis
extract_strategies_from_safety_game_c = lib.extract_strategies_from_safety_game
extract_strategies_from_safety_game_c.argtypes = [POINTER(SafetyGame), POINTER(AlphabetInfo), c_byte, c_byte, c_byte]
extract_strategies_from_safety_game_c.restype = POINTER(TransitionSystem)
has_a_winning_strategy_c = lib.has_a_winning_strategy
has_a_winning_strategy_c.argtypes = [POINTER(SafetyGame), POINTER(AlphabetInfo), c_byte]
has_a_winning_strategy_c.restype = c_byte
##TransitionSystem
get_ts_state_c = lib.get_ts_state
get_ts_state_c.argtypes = [POINTER(TransitionSystem), c_int]
get_ts_state_c.restype = POINTER(TSState)
get_ts_transition_from_link_data_c = lib.get_ts_transition_from_link
get_ts_transition_from_link_data_c.argtypes = [POINTER(GList)]
get_ts_transition_from_link_data_c.restype = POINTER(TSTransition)
is_ts_state_null_c = lib.is_ts_state_null
is_ts_state_null_c.argtypes = [POINTER(TSState)]
is_ts_state_null_c.restype = c_byte
free_transition_system_c = lib.free_transition_system
free_transition_system_c.argtypes = [POINTER(TransitionSystem)]
free_transition_system_c.restype = None
#MemoryManagement
free_c = lib.free_memory
free_c.argtypes = [c_void_p]
free_c.restype = None
| gpl-3.0 |
mbucas/python-route53 | doc_src/conf.py | 4 | 8017 | # -*- coding: utf-8 -*-
#
# python-route53 documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 4 21:03:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-route53'
copyright = u'2012, Greg Taylor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-route53doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python-route53.tex', u'python-route53 Documentation',
u'Greg Taylor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python-route53', u'python-route53 Documentation',
[u'Greg Taylor'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python-route53', u'python-route53 Documentation',
u'Greg Taylor', 'python-route53', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit |
karthiks1995/dejavu | dejavu/fingerprint.py | 15 | 5828 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
FINGERPRINT_REDUCTION = 20
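# Worked numbers for the defaults above (illustrative, not from the original
# source): with DEFAULT_FS = 44100 and DEFAULT_WINDOW_SIZE = 4096 each FFT
# window spans 4096 / 44100 ~= 92.9 ms with a frequency resolution of
# 44100 / 4096 ~= 10.8 Hz per bin; DEFAULT_OVERLAP_RATIO = 0.5 gives a hop of
# 2048 samples, i.e. roughly 21.5 spectrogram frames per second of audio.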
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
locally sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks
detected_peaks = local_max - eroded_background
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = [x for x in peaks if x[2] > amp_min] # freq, time, amp
# get indices for frequency and time
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks):
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
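# --- Hedged usage sketch (added for illustration; not part of the original
# module, and assuming the Python 2 era dependencies this file targets). It
# fabricates one second of a 440 Hz tone so the example is self-contained;
# real callers would pass decoded audio samples instead.
if __name__ == "__main__":
    test_samples = (np.sin(2 * np.pi * 440 * np.arange(DEFAULT_FS) / float(DEFAULT_FS))
                    * (2 ** 14)).astype(np.int16)
    test_hashes = list(fingerprint(test_samples))
    print "generated %d hashes from the test tone" % len(test_hashes)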
| mit |
mdkent/percona-xtrabackup | test/kewpie/percona_tests/xtrabackup_disabled/ib_slave_test.py | 42 | 7074 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
import time
import unittest
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [[],[]]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
def setUp(self):
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
# remove backup path
if os.path.exists(backup_path):
shutil.rmtree(backup_path)
os.mkdir(backup_path)
def test_basic1(self):
if servers[0].type not in ['mysql','percona']:
return
else:
self.servers = servers
innobackupex = test_executor.system_manager.innobackupex_path
xtrabackup = test_executor.system_manager.xtrabackup_path
master_server = servers[0] # assumption that this is 'master'
slave_server = servers[1]
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
output_path = os.path.join(master_server.vardir, 'innobackupex.out')
exec_path = os.path.dirname(innobackupex)
# populate our server with a test bed
test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
#self.assertEqual(retcode, 0, msg=output)
# take a backup
cmd = [ innobackupex
,"--defaults-file=%s" %master_server.cnf_file
,"--user=root"
,"--socket=%s" %master_server.socket_file
,"--slave-info"
," --ibbackup=%s" %xtrabackup
,backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
main_backup_path = self.find_backup_path(output)
self.assertEqual(retcode, 0, msg = output)
# shutdown our slave server
slave_server.stop()
# prepare our backup
cmd = [ innobackupex
, "--apply-log"
, "--use-memory=500M"
, "--ibbackup=%s" %xtrabackup
, main_backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode, 0, msg = output)
# remove old datadir
shutil.rmtree(slave_server.datadir)
os.mkdir(slave_server.datadir)
# restore from backup
cmd = [ innobackupex
, "--defaults-file=%s" %slave_server.cnf_file
, "--copy-back"
, "--ibbackup=%s" %xtrabackup
, main_backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode,0, msg = output)
# get binlog info for slave
slave_file_name = 'xtrabackup_binlog_pos_innodb'
"""
for slave_file in ['xtrabackup_slave_info', 'xtrabackup_binlog_pos_innodb']:
slave_file_path = os.path.join(slave_server.datadir,slave_file)
with open(slave_file_path,'r') as slave_data:
print "File: %s" %slave_file
for line in slave_data:
print line, '<<<<'
# end test code
"""
slave_file_path = os.path.join(slave_server.datadir,slave_file_name)
slave_file = open(slave_file_path,'r')
binlog_file, binlog_pos = slave_file.readline().strip().split('\t')
binlog_file = os.path.basename(binlog_file)
slave_file.close()
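        # Format note (illustrative values, not taken from a real run): the
        # xtrabackup_binlog_pos_innodb file parsed above holds one
        # tab-separated line such as
        #   /var/lib/mysql/mysql-bin.000003<TAB>4057
        # from which only the binlog file basename and the position are kept.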
# restart server (and ensure it doesn't crash)
slave_server.start()
self.assertEqual( slave_server.status, 1
, msg = 'Server failed restart from restored datadir...')
# update our slave's master info/ start replication
# we don't use server.set_master() method as we want
# to use binlog info produced by xtrabackup
# TODO: add these as parameters?
query = ("CHANGE MASTER TO "
"MASTER_HOST='127.0.0.1',"
"MASTER_USER='root',"
"MASTER_PASSWORD='',"
"MASTER_PORT=%d,"
"MASTER_LOG_FILE='%s',"
"MASTER_LOG_POS=%d" % ( master_server.master_port
, binlog_file
, int(binlog_pos)))
retcode, result_set = self.execute_query(query, slave_server)
self.assertEqual(retcode, 0, msg=result_set)
# TODO: check the slave status?
# /implement method to handle the check?
slave_server.slave_start()
# compare master/slave states
result = self.check_slaves_by_checksum(master_server,[slave_server])
self.assertEqual(result,None,msg=result)
# create a new table on the master
query = ("CREATE TABLE t1 "
"(col1 int NOT NULL AUTO_INCREMENT PRIMARY KEY )"
)
retcode, result_set = self.execute_query(query, master_server)
# insert some rows
query = "INSERT INTO t1 VALUES (),(),(),(),()"
retcode, result_set = self.execute_query(query, master_server)
self.assertEqual(retcode,0,msg=result_set)
# wait a bit for the slave
# TODO: proper poll routine
time.sleep(5)
for query in ["SHOW CREATE TABLE t1"
,"SELECT * FROM t1"]:
diff = self.check_slaves_by_query(master_server, [slave_server], query)
self.assertEqual(diff,None,msg=diff)
| gpl-2.0 |
Callek/build-relengapi | relengapi/blueprints/slaveloan/tasks.py | 1 | 12195 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import random
import socket
from furl import furl
import bzrest
import requests
from requests import RequestException
from flask import current_app
from functools import wraps
from redo import retry
from relengapi.blueprints.slaveloan import bugzilla
from relengapi.blueprints.slaveloan import slave_mappings
from relengapi.blueprints.slaveloan.model import History
from relengapi.blueprints.slaveloan.model import Loans
from relengapi.blueprints.slaveloan.model import Machines
from relengapi.blueprints.slaveloan.model import ManualActions
from relengapi.lib.celery import task
from relengapi.util import tz
import celery
import structlog
logger = structlog.get_logger()
def add_task_to_history(loanid, msg):
session = current_app.db.session('relengapi')
l = session.query(Loans).get(loanid)
history = History(for_loan=l,
timestamp=tz.utcnow(),
msg=msg)
session.add(history)
session.commit()
logger.debug("Log_line: %s" % msg)
def add_to_history(before=None, after=None):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
bound_task = None
loanid = kwargs.get("loanid", None)
if args and isinstance(args[0], celery.Task):
bound_task = args[0]
if before:
add_task_to_history(loanid, before.format(**locals()))
retval = f(*args, **kwargs)
if after:
add_task_to_history(loanid, after.format(**locals()))
return retval
return wrapper
return decorator
@task(bind=True)
@add_to_history(
before="Choosing an inhouse machine based on slavealloc",
after="Chose inhouse machine {retval!s}")
def choose_inhouse_machine(self, loanid, loan_class):
logger.debug("Choosing inhouse machine")
url = furl(current_app.config.get("SLAVEALLOC_URL", None))
# XXX: ToDo raise fatal if no slavealloc
url.path.add("slaves")
url.args["enabled"] = 1
try:
all_slaves = requests.get(str(url)).json()
except RequestException as exc:
logger.exception("Exception: %s" % exc)
self.retry(exc=exc)
# pylint silence
# available_slaves = filter(slave_mappings.slave_filter(loan_class), all_slaves)
available_slaves = [slave for slave in all_slaves
if slave_mappings.slave_filter(loan_class)(slave)]
chosen = random.choice(available_slaves)
logger.debug("Chosen Slave = %s" % chosen)
return chosen['name']
@task(bind=True)
@add_to_history(
before="Identifying aws machine name to use",
after="Chose aws machine {retval!s}")
def choose_aws_machine(self, loanid, loan_class):
logger.debug("Choosing aws machine name")
# We use foo-$user_shortname$N where $N is optional only if
# there exists another active loan with the foo-$user prefix
l = Loans.query.get(loanid)
prefix = slave_mappings.slavetype_to_awsprefix(loan_class)
user_shortname = l.human.ldap.split("@")[0]
bare_name = prefix + "-" + user_shortname
similar_loans = Loans.query \
.filter(Loans.machine_id == Machines.id) \
.filter(Machines.fqdn.like(bare_name + "%")) \
.filter(~Loans.status.in_(["COMPLETE"])) \
.order_by(Machines.fqdn.desc())
if similar_loans.count():
existing_aws_loan = similar_loans.first().machine.fqdn
shortname = existing_aws_loan.split(".")[0]
        this_name = bare_name + str(int(shortname[len(bare_name):]) + 1)
else:
this_name = bare_name
logger.debug("Chosen Slave Name = %s" % this_name)
return this_name
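# Naming example for choose_aws_machine (hypothetical values, added for
# illustration): with an AWS prefix of "dev-linux64-ec2" and a loanee of
# "jsmith@mozilla.com", the first loan is named "dev-linux64-ec2-jsmith"; per
# the comment above, a numeric suffix is appended only when an active loan
# with that prefix already exists, e.g. "dev-linux64-ec2-jsmith2".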
@task(bind=True, max_retries=None)
@add_to_history(
before="Identifying FQDN and IP of {args[1]}",
after="Acquired FQDN and IP")
def fixup_machine(self, machine, loanid):
try:
fqdn = socket.getfqdn("%s.build.mozilla.org" % machine)
ipaddress = socket.gethostbyname("%s.build.mozilla.org" % machine)
session = current_app.db.session('relengapi')
m = Machines.as_unique(session,
fqdn=fqdn,
ipaddress=ipaddress)
# Re-check validity of fqdn and ip
if m.fqdn != fqdn:
m.fqdn = fqdn
if m.ipaddress != ipaddress:
m.ipaddress = ipaddress
l = session.query(Loans).get(loanid)
l.machine = m
session.commit()
except Exception as exc: # pylint: disable=W0703
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True)
@add_to_history(
before="Setup tracking bug for {args[1]}",
after="Tracking bug {retval!s} linked with loan")
def bmo_set_tracking_bug(self, machine, loanid):
try:
l = Loans.query.get(loanid)
assert l.bug_id
bug_comment = "Being loaned to %s in Bug %s" % (l.human.ldap, l.bug_id)
tracking_bug = bugzilla.ProblemTrackingBug(machine, loadInfo=False)
try:
tracking_bug.refresh()
except bzrest.errors.BugNotFound:
logger.info("Couldn't find bug, creating it...")
tracking_bug.create(comment=bug_comment, depends_on=l.bug_id)
if tracking_bug.data:
data = {
"depends_on": {
"add": [l.bug_id],
},
}
if not tracking_bug.data["is_open"]:
data["status"] = "REOPENED"
tracking_bug.add_comment(bug_comment, data=data)
if not tracking_bug.id:
raise ValueError("Unexpected result from bmo, retry")
return tracking_bug.id
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True, max_retries=None)
@add_to_history(
before="Disabling in slavealloc (via slaveapi)",
after="Disable request sent to slavealloc (via slaveapi)")
def slavealloc_disable(self, machine, loanid):
try:
url = furl(current_app.config.get("SLAVEAPI_URL", None))
url.path.add(machine).add("actions").add("disable")
loan_bug = Loans.query.get(loanid).bug_id
postdata = dict(reason="Being loaned on slaveloan bug %s" % loan_bug)
retry(requests.post, args=(str(url),), kwargs=dict(data=postdata)).json()
return machine
except Exception as exc: # pylint: disable=W0703
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True)
@add_to_history(
before="Filing the loan bug if needed",
after="Loan is tracked in bug {retval!s}")
def bmo_file_loan_bug(self, loanid, slavetype, *args, **kwargs):
try:
session = current_app.db.session('relengapi')
l = session.query(Loans).get(loanid)
if l.bug_id:
# Nothing to do, bug ID passed in
return l.bug_id
bmo_id = l.human.bugzilla
bug_id = bugzilla.create_loan_bug(loan_id=loanid,
slavetype=slavetype,
bugzilla_username=bmo_id)
if not bug_id:
raise ValueError("Unexpected result from bmo, retry")
l.bug_id = bug_id
session.commit()
return bug_id
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True)
@add_to_history(
after="Waiting for a human to perform {kwargs[action_name]} (id {retval!s})")
def register_action_needed(self, loanid, action_name):
if not action_name:
raise ValueError("must supply an action name")
try:
session = current_app.db.session('relengapi')
l = session.query(Loans).get(loanid)
if action_name == "add_to_vpn":
action_message = (
"Add user (%s) and machine (%s) to the VPN. "
"Following https://wiki.mozilla.org/ReleaseEngineering/How_To/Update_VPN_ACL"
% (l.human.ldap, l.machine.fqdn)
)
elif action_name == "create_aws_system":
action_message = (
"Create an aws machine for %s of the type requested (see loan history)."
" Following "
"https://wiki.mozilla.org/ReleaseEngineering/How_To/Loan_a_Slave#AWS_machines"
% (l.human.ldap,)
)
elif action_name == "clean_secrets":
action_message = (
"Clean secrets from the machine. See instructions at "
"https://wiki.mozilla.org/ReleaseEngineering/How_To/Loan_a_Slave#Cleaning"
)
elif action_name == "notify_complete":
action_message = (
"Notify the loanee in e-mail and the loan bug (Bug %s) that the loan is ready. "
"See template text for both in "
"https://wiki.mozilla.org/ReleaseEngineering/How_To/Loan_a_Slave#Notifying"
% l.bug_id
)
elif action_name == "gpo_switch":
action_message = (
"Need to switch host (%s) to be in the Loaner GPO group. Follow "
"https://wiki.mozilla.org/ReleaseEngineering/How_To/Loan_a_Slave"
"#t-xp32-ix.2C_t-w732-ix.2C_t-w864-ix.2C_w64-ix-slave "
"for more information"
% (l.machine.fqdn)
)
else:
raise ValueError("Invalid action name")
action = ManualActions(for_loan=l,
timestamp_start=tz.utcnow(),
msg=action_message)
session.add(action)
session.commit()
return action.id
except ValueError:
raise # Don't indefinitely retry in this case
except Exception as exc:
self.retry(exc=exc)
@task(bind=True, max_retries=None, default_retry_delay=60)
@add_to_history(
after="Noticed that a human performed pending action (id {args[1]}), continuing")
def waitfor_action(self, action_id, loanid):
try:
action = ManualActions.query.get(action_id)
if not action.timestamp_complete:
raise Exception("Retry me")
except Exception as exc:
logger.debug("Retrying...")
self.retry(exc=exc)
@task(bind=True, max_retries=None)
@add_to_history(
before="Calling slaveapi's disable method to disable from buildbot",
after="Disable request sent")
def start_disable_slave(self, machine, loanid):
try:
url = furl(current_app.config.get("SLAVEAPI_URL", None))
url.path.add(machine).add("actions").add("shutdown_buildslave")
ret = retry(requests.post, args=(str(url),), ).json()
return (ret["requestid"], machine)
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True, max_retries=None)
@add_to_history(
after="Noticed that machine was disabled (or waiting timed out)")
def waitfor_disable_slave(self, data, loanid):
requestid, machine = data
try:
url = furl(current_app.config.get("SLAVEAPI_URL", None))
url.path.add(machine).add("actions").add("shutdown_buildslave")
url.args["requestid"] = requestid
ret = retry(requests.get, args=(str(url),), kwargs=dict()).json()
if ret["state"] in (0, 1):
# 0 = PENDING, 1 = RUNNING (3=Failed and 2=Success)
raise Exception("Continue waiting for disabled slave")
except Exception as exc:
self.retry(exc=exc)
@task(bind=True, max_retries=None)
@add_to_history(
after="Marked loan as ACTIVE")
def mark_loan_status(self, loanid, status):
try:
session = current_app.db.session('relengapi')
l = session.query(Loans).get(loanid)
l.status = status
session.commit()
except Exception as exc:
self.retry(exc=exc)
@task()
def dummy_task(*args, **kwargs):
pass
bmo_file_gpo_bug = dummy_task
bmo_waitfor_bug = dummy_task
clean_secrets = dummy_task
update_loan_bug_with_details = dummy_task
email_loan_details = dummy_task
| mpl-2.0 |
VishvajitP/Django-facebook | docs/docs_env/Lib/encodings/mac_greek.py | 593 | 13977 | """ Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-greek',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xb9' # 0x81 -> SUPERSCRIPT ONE
u'\xb2' # 0x82 -> SUPERSCRIPT TWO
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xb3' # 0x84 -> SUPERSCRIPT THREE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0385' # 0x87 -> GREEK DIALYTIKA TONOS
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0384' # 0x8B -> GREEK TONOS
u'\xa8' # 0x8C -> DIAERESIS
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xa3' # 0x92 -> POUND SIGN
u'\u2122' # 0x93 -> TRADE MARK SIGN
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u2022' # 0x96 -> BULLET
u'\xbd' # 0x97 -> VULGAR FRACTION ONE HALF
u'\u2030' # 0x98 -> PER MILLE SIGN
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xa6' # 0x9B -> BROKEN BAR
u'\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA
u'\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA
u'\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA
u'\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI
u'\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA
u'\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\xa7' # 0xAC -> SECTION SIGN
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xb0' # 0xAE -> DEGREE SIGN
u'\xb7' # 0xAF -> MIDDLE DOT
u'\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA
u'\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA
u'\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA
u'\u039c' # 0xBB -> GREEK CAPITAL LETTER MU
u'\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI
u'\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA
u'\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU
u'\xac' # 0xC2 -> NOT SIGN
u'\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON
u'\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON
u'\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI
u'\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2015' # 0xD1 -> HORIZONTAL BAR
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u03ad' # 0xDB -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI
u'\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA
u'\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03be' # 0xEA -> GREEK SMALL LETTER XI
u'\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xED -> GREEK SMALL LETTER MU
u'\u03bd' # 0xEE -> GREEK SMALL LETTER NU
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA
u'\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA
u'\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI
u'\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON
u'\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA
u'\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
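### Hedged usage illustration (not part of the generated file): the Codec
### methods above delegate to codecs.charmap_encode/charmap_decode with the
### tables defined in this module, e.g. a hypothetical round trip:
###
###     data, _ = codecs.charmap_encode(u'\u0391\u03b8\u03ae\u03bd\u03b1', 'strict', encoding_table)
###     text, _ = codecs.charmap_decode(data, 'strict', decoding_table)
###     assert text == u'\u0391\u03b8\u03ae\u03bd\u03b1'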
| bsd-3-clause |
kaiweifan/neutron | neutron/plugins/nec/drivers/__init__.py | 9 | 1528 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DRIVER_PATH = "neutron.plugins.nec.drivers.%s"
DRIVER_LIST = {
'trema': DRIVER_PATH % "trema.TremaPortBaseDriver",
'trema_port': DRIVER_PATH % "trema.TremaPortBaseDriver",
'trema_portmac': DRIVER_PATH % "trema.TremaPortMACBaseDriver",
'trema_mac': DRIVER_PATH % "trema.TremaMACBaseDriver",
'pfc': DRIVER_PATH % "pfc.PFCV4Driver",
'pfc_v3': DRIVER_PATH % "pfc.PFCV3Driver",
'pfc_v4': DRIVER_PATH % "pfc.PFCV4Driver",
'pfc_v5': DRIVER_PATH % "pfc.PFCV5Driver",
}
def get_driver(driver_name):
LOG.info(_("Loading OFC driver: %s"), driver_name)
driver_klass = DRIVER_LIST.get(driver_name) or driver_name
return importutils.import_class(driver_klass)
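# --- Illustrative usage (not part of the original module) ---
# A hedged sketch, assuming a deployment where this package is importable;
# 'trema' is simply one of the aliases defined in DRIVER_LIST above, and
# conf_ofc is a hypothetical configuration object.
#
#     from neutron.plugins.nec import drivers
#     ofc_driver_class = drivers.get_driver('trema')   # TremaPortBaseDriver
#     ofc_driver = ofc_driver_class(conf_ofc)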
| apache-2.0 |
zhjunlang/kbengine | kbe/src/lib/python/Lib/tkinter/test/runtktests.py | 71 | 2271 | """
Use this module to get and run all tk tests.
tkinter tests should live in a package inside the directory where this file
lives, like test_tkinter.
Extensions also should live in packages following the same rule as above.
"""
import os
import sys
import unittest
import importlib
import test.support
this_dir_path = os.path.abspath(os.path.dirname(__file__))
def is_package(path):
for name in os.listdir(path):
        if name in ('__init__.py', '__init__.pyc', '__init__.pyo'):
return True
return False
def get_tests_modules(basepath=this_dir_path, gui=True, packages=None):
"""This will import and yield modules whose names start with test_
and are inside packages found in the path starting at basepath.
If packages is specified it should contain package names that
want their tests collected.
"""
py_ext = '.py'
for dirpath, dirnames, filenames in os.walk(basepath):
for dirname in list(dirnames):
if dirname[0] == '.':
dirnames.remove(dirname)
if is_package(dirpath) and filenames:
pkg_name = dirpath[len(basepath) + len(os.sep):].replace('/', '.')
if packages and pkg_name not in packages:
continue
filenames = filter(
lambda x: x.startswith('test_') and x.endswith(py_ext),
filenames)
for name in filenames:
try:
yield importlib.import_module(
".%s.%s" % (pkg_name, name[:-len(py_ext)]),
"tkinter.test")
except test.support.ResourceDenied:
if gui:
raise
def get_tests(text=True, gui=True, packages=None):
"""Yield all the tests in the modules found by get_tests_modules.
    If gui is False, only tests that do not require a GUI will be
    returned."""
attrs = []
if text:
attrs.append('tests_nogui')
if gui:
attrs.append('tests_gui')
for module in get_tests_modules(gui=gui, packages=packages):
for attr in attrs:
for test in getattr(module, attr, ()):
yield test
if __name__ == "__main__":
test.support.run_unittest(*get_tests())
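# --- Illustrative usage (not part of the original module) ---
# A hedged sketch: collect only the non-GUI tests of a single test package and
# run them; 'test_ttk' is used here purely as an example package name.
#
#     import test.support
#     from tkinter.test import runtktests
#     test.support.run_unittest(
#         *runtktests.get_tests(gui=False, packages=['test_ttk']))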
| lgpl-3.0 |
lepisma/gaze | src/helper.py | 1 | 3299 | """
Helper functions
"""
import numpy as np
def xgrad(gray_image):
"""
Returns the X gradient of grayscale image,
imitating MatLab's gradient function
Parameters
----------
gray_image : numpy.ndarray
Grayscale image
Returns
-------
numpy.ndarray
X gradient of image
"""
gray = np.array(gray_image, dtype = np.float32)
grad = np.column_stack(((gray[:, 1] - gray[:, 0]), \
(gray[:, 2 :] - gray[:, 0 : -2]) / 2, \
(gray[:, -1] - gray[:, -2])))
return grad
def ygrad(gray_image):
"""
Returns the Y gradient of grayscale image,
imitating MatLab's gradient function
Parameters
----------
gray_image : numpy.ndarray
Grayscale image
Returns
-------
numpy.ndarray
Y gradient of image
"""
grad = xgrad(gray_image.T).T
return grad
def test_possible_centers(pos_x, pos_y, weight, grad_x, grad_y, out_image):
"""
Calculates the dot product between
- Vector from all possible centers to gradient origin
- Gradient vector at the given point of gradient origin
Parameters
----------
pos_x, pos_y : int
Position of gradient origin
weight : float
Weight of gradient
grad_x, grad_y : int
Value of gradients at pos
out_image : numpy.ndarray
Accumulator matrix (of same size as image) to keep track of
cumulative sum of dot products
"""
rows, columns = out_image.shape
x_accu = np.tile(np.linspace(1, columns - 1, columns), [rows, 1])
y_accu = np.tile(np.linspace(1, rows - 1, rows), [columns, 1]).T
x_accu = pos_x - x_accu
y_accu = pos_y - y_accu
mag = np.sqrt((x_accu ** 2) + (y_accu ** 2))
# Normalize
x_accu /= mag
y_accu /= mag
x_accu[np.isnan(x_accu)] = 0
y_accu[np.isnan(y_accu)] = 0
# Dot product
prod = (x_accu * grad_x) + (y_accu * grad_y)
prod[prod < 0] = 0
out_image += prod * prod * weight
return
def find_center(grad_x, grad_y, out_image):
"""
Finds the center of eye from given grayscale image's gradients
Parameters
----------
grad_x : numpy.ndarray
Array of x gradients
grad_y : numpy.ndarray
Array of y gradients
Returns
-------
(x, y) : tuple
The pixel index of eye's center, relative to grad images
"""
rows, columns = grad_x.shape
#pos_list = coords(np.arange(rows), np.arange(columns))
x_pos = np.repeat(np.arange(rows), columns)
y_pos = np.tile(np.arange(columns), rows)
x_grad = grad_x.ravel(order = 'F')
y_grad = grad_y.ravel(order = 'F')
v_possible_centers = np.vectorize(test_possible_centers, excluded = ["out_image"])
v_possible_centers(x_pos, y_pos, 1.0, x_grad, y_grad, out_image = out_image)
return np.unravel_index(out_image.argmax(), out_image.shape)
#out_image /= np.max(out_image)
#out_image *= 255
#return out_image
def coords(*arrays):
"""
Returns cartesian coordinate combinations from given arrays
"""
grid = np.meshgrid(*arrays)
coord_list = [entry.ravel() for entry in grid]
points = np.vstack(coord_list).T
return points.tolist()
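# --- Illustrative usage (not part of the original module) ---
# A minimal, self-contained sketch: build a synthetic grayscale patch with a
# dark circular "pupil", take its gradients, and let find_center locate it.
# The patch size and intensities are made-up values for demonstration only.
if __name__ == "__main__":
    size = 21
    y_idx, x_idx = np.mgrid[0:size, 0:size]
    # Bright background with a dark disc centred at (10, 10)
    patch = 255.0 - 200.0 * (((x_idx - 10) ** 2 + (y_idx - 10) ** 2) < 25)
    grad_x = xgrad(patch)
    grad_y = ygrad(patch)
    accumulator = np.zeros((size, size), dtype=np.float64)
    print(find_center(grad_x, grad_y, accumulator))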
| mit |
vinhlh/bite-project | deps/gdata-python-client/src/gdata/exif/__init__.py | 253 | 6981 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.exif, implementing the exif namespace in gdata
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module maps elements from the {EXIF} namespace[1] to GData objects.
These elements describe image data, using exif attributes[2].
Picasa Web Albums uses the exif namespace to represent Exif data encoded
in a photo [3].
Picasa Web Albums uses the following exif elements:
exif:distance
exif:exposure
exif:flash
exif:focallength
exif:fstop
exif:imageUniqueID
exif:iso
exif:make
exif:model
exif:tags
exif:time
[1]: http://schemas.google.com/photos/exif/2007.
[2]: http://en.wikipedia.org/wiki/Exif
[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference
"""
__author__ = u'[email protected]'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
class ExifBaseElement(atom.AtomBase):
"""Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag
""" % EXIF_NAMESPACE
_tag = ''
_namespace = EXIF_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, name=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Distance(ExifBaseElement):
"(float) The distance to the subject, e.g. 0.0"
_tag = 'distance'
def DistanceFromString(xml_string):
return atom.CreateClassFromXMLString(Distance, xml_string)
class Exposure(ExifBaseElement):
"(float) The exposure time used, e.g. 0.025 or 8.0E4"
_tag = 'exposure'
def ExposureFromString(xml_string):
return atom.CreateClassFromXMLString(Exposure, xml_string)
class Flash(ExifBaseElement):
"""(string) Boolean value indicating whether the flash was used.
The .text attribute will either be `true' or `false'
As a convenience, this object's .bool method will return what you want,
so you can say:
flash_used = bool(Flash)
"""
_tag = 'flash'
def __bool__(self):
if self.text.lower() in ('true','false'):
return self.text.lower() == 'true'
def FlashFromString(xml_string):
return atom.CreateClassFromXMLString(Flash, xml_string)
class Focallength(ExifBaseElement):
"(float) The focal length used, e.g. 23.7"
_tag = 'focallength'
def FocallengthFromString(xml_string):
return atom.CreateClassFromXMLString(Focallength, xml_string)
class Fstop(ExifBaseElement):
"(float) The fstop value used, e.g. 5.0"
_tag = 'fstop'
def FstopFromString(xml_string):
return atom.CreateClassFromXMLString(Fstop, xml_string)
class ImageUniqueID(ExifBaseElement):
"(string) The unique image ID for the photo. Generated by Google Photo servers"
_tag = 'imageUniqueID'
def ImageUniqueIDFromString(xml_string):
return atom.CreateClassFromXMLString(ImageUniqueID, xml_string)
class Iso(ExifBaseElement):
"(int) The iso equivalent value used, e.g. 200"
_tag = 'iso'
def IsoFromString(xml_string):
return atom.CreateClassFromXMLString(Iso, xml_string)
class Make(ExifBaseElement):
"(string) The make of the camera used, e.g. Fictitious Camera Company"
_tag = 'make'
def MakeFromString(xml_string):
return atom.CreateClassFromXMLString(Make, xml_string)
class Model(ExifBaseElement):
"(string) The model of the camera used,e.g AMAZING-100D"
_tag = 'model'
def ModelFromString(xml_string):
return atom.CreateClassFromXMLString(Model, xml_string)
class Time(ExifBaseElement):
"""(int) The date/time the photo was taken, e.g. 1180294337000.
Represented as the number of milliseconds since January 1st, 1970.
The value of this element will always be identical to the value
of the <gphoto:timestamp>.
Look at this object's .isoformat() for a human friendly datetime string:
photo_epoch = Time.text # 1180294337000
photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'
Alternatively:
photo_datetime = Time.datetime() # (requires python >= 2.3)
"""
_tag = 'time'
def isoformat(self):
"""(string) Return the timestamp as a ISO 8601 formatted string,
e.g. '2007-05-27T19:32:17.000Z'
"""
import time
epoch = float(self.text)/1000
return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
def datetime(self):
"""(datetime.datetime) Return the timestamp as a datetime.datetime object
Requires python 2.3
"""
import datetime
epoch = float(self.text)/1000
return datetime.datetime.fromtimestamp(epoch)
def TimeFromString(xml_string):
return atom.CreateClassFromXMLString(Time, xml_string)
class Tags(ExifBaseElement):
"""The container for all exif elements.
The <exif:tags> element can appear as a child of a photo entry.
"""
_tag = 'tags'
_children = atom.AtomBase._children.copy()
_children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop)
_children['{%s}make' % EXIF_NAMESPACE] = ('make', Make)
_children['{%s}model' % EXIF_NAMESPACE] = ('model', Model)
_children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance)
_children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure)
_children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash)
_children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength)
_children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso)
_children['{%s}time' % EXIF_NAMESPACE] = ('time', Time)
_children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID)
def __init__(self, extension_elements=None, extension_attributes=None, text=None):
ExifBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.fstop=None
self.make=None
self.model=None
self.distance=None
self.exposure=None
self.flash=None
self.focallength=None
self.iso=None
self.time=None
self.imageUniqueID=None
def TagsFromString(xml_string):
return atom.CreateClassFromXMLString(Tags, xml_string)
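# --- Illustrative usage (not part of the original module) ---
# A hedged sketch, assuming the legacy Python 2 ``atom``/``gdata`` packages are
# installed; the XML snippet is made up for demonstration and only exercises
# TagsFromString defined above.
if __name__ == '__main__':
  sample_xml = ('<exif:tags xmlns:exif="%s">'
                '<exif:fstop>5.0</exif:fstop>'
                '<exif:model>AMAZING-100D</exif:model>'
                '</exif:tags>' % EXIF_NAMESPACE)
  tags = TagsFromString(sample_xml)
  print tags.fstop.text, tags.model.text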
| apache-2.0 |
SteveXiSong/UW-Madison-ECE757-S15-MulticastSnooping | src/cpu/kvm/X86KvmCPU.py | 54 | 2012 | # Copyright (c) 2013 Andreas Sandberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from BaseKvmCPU import BaseKvmCPU
class X86KvmCPU(BaseKvmCPU):
type = 'X86KvmCPU'
cxx_header = "cpu/kvm/x86_cpu.hh"
@classmethod
def export_methods(cls, code):
code('''
void dumpFpuRegs();
void dumpIntRegs();
void dumpSpecRegs();
void dumpXCRs();
void dumpXSave();
void dumpVCpuEvents();
''')
useXSave = Param.Bool(True, "Use XSave to synchronize FPU/SIMD registers")
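# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of how a gem5 configuration script might instantiate this
# SimObject; the surrounding system setup (memory, KVM VM object, clock
# domains, and so on) is assumed and not shown here.
#
#     system.cpu = X86KvmCPU(cpu_id=0)
#     system.kvm_vm = KvmVM()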
| bsd-3-clause |
liberorbis/libernext | apps/frappe/frappe/widgets/form/meta.py | 25 | 5903 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# metadata
from __future__ import unicode_literals
import frappe, os
from frappe.model.meta import Meta
from frappe.modules import scrub, get_module_path, load_doctype_module
from frappe.model.workflow import get_workflow_name
from frappe.utils import get_html_format
from frappe.translate import make_dict_from_messages, extract_messages_from_code
from frappe.utils.jinja import render_include
######
def get_meta(doctype, cached=True):
if cached:
meta = frappe.cache().get_value("form_meta:" + doctype, lambda: FormMeta(doctype))
else:
meta = FormMeta(doctype)
if frappe.local.lang != 'en':
meta.set_translations(frappe.local.lang)
return meta
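# --- Illustrative usage (not part of the original module) ---
# A hedged sketch: fetch the cached form metadata for a doctype and serialize
# it for the client; "ToDo" is used here only as an example doctype name.
#
#     meta = get_meta("ToDo")
#     meta_dict = meta.as_dict()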
class FormMeta(Meta):
def __init__(self, doctype):
super(FormMeta, self).__init__(doctype)
self.load_assets()
def load_assets(self):
self.add_search_fields()
if not self.istable:
self.add_linked_with()
self.add_code()
self.load_print_formats()
self.load_workflows()
self.load_templates()
def as_dict(self, no_nulls=False):
d = super(FormMeta, self).as_dict(no_nulls=no_nulls)
for k in ("__js", "__css", "__list_js", "__calendar_js", "__map_js",
"__linked_with", "__messages", "__print_formats", "__workflow_docs",
"__form_grid_templates", "__listview_template"):
d[k] = self.get(k)
for i, df in enumerate(d.get("fields")):
for k in ("link_doctype", "search_fields"):
df[k] = self.get("fields")[i].get(k)
return d
def add_code(self):
path = os.path.join(get_module_path(self.module), 'doctype', scrub(self.name))
def _get_path(fname):
return os.path.join(path, scrub(fname))
self._add_code(_get_path(self.name + '.js'), '__js')
self._add_code(_get_path(self.name + '.css'), "__css")
self._add_code(_get_path(self.name + '_list.js'), '__list_js')
self._add_code(_get_path(self.name + '_calendar.js'), '__calendar_js')
listview_template = _get_path(self.name + '_list.html')
if os.path.exists(listview_template):
self.set("__listview_template", get_html_format(listview_template))
self.add_code_via_hook("doctype_js", "__js")
self.add_custom_script()
def _add_code(self, path, fieldname):
js = frappe.read_file(path)
if js:
self.set(fieldname, (self.get(fieldname) or "") + "\n\n" + render_include(js))
def add_code_via_hook(self, hook, fieldname):
for app_name in frappe.get_installed_apps():
code_hook = frappe.get_hooks(hook, default={}, app_name=app_name)
if not code_hook:
continue
files = code_hook.get(self.name, [])
if not isinstance(files, list):
files = [files]
for file in files:
path = frappe.get_app_path(app_name, *file.strip("/").split("/"))
self._add_code(path, fieldname)
def add_custom_script(self):
"""embed all require files"""
# custom script
custom = frappe.db.get_value("Custom Script", {"dt": self.name,
"script_type": "Client"}, "script") or ""
self.set("__js", (self.get('__js') or '') + "\n\n" + custom)
def add_search_fields(self):
"""add search fields found in the doctypes indicated by link fields' options"""
for df in self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]}):
if df.options:
search_fields = frappe.get_meta(df.options).search_fields
if search_fields:
df.search_fields = map(lambda sf: sf.strip(), search_fields.split(","))
def add_linked_with(self):
"""add list of doctypes this doctype is 'linked' with"""
links = frappe.db.sql("""select parent, fieldname from tabDocField
where (fieldtype="Link" and options=%s)
or (fieldtype="Select" and options=%s)""", (self.name, "link:"+ self.name))
links += frappe.db.sql("""select dt as parent, fieldname from `tabCustom Field`
where (fieldtype="Link" and options=%s)
or (fieldtype="Select" and options=%s)""", (self.name, "link:"+ self.name))
links = dict(links)
if not links:
return {}
ret = {}
for dt in links:
ret[dt] = { "fieldname": links[dt] }
for grand_parent, options in frappe.db.sql("""select parent, options from tabDocField
where fieldtype="Table"
and options in (select name from tabDocType
where istable=1 and name in (%s))""" % ", ".join(["%s"] * len(links)) ,tuple(links)):
ret[grand_parent] = {"child_doctype": options, "fieldname": links[options] }
if options in ret:
del ret[options]
self.set("__linked_with", ret)
def load_print_formats(self):
print_formats = frappe.db.sql("""select * FROM `tabPrint Format`
WHERE doc_type=%s AND docstatus<2 and ifnull(disabled, 0)=0""", (self.name,), as_dict=1,
update={"doctype":"Print Format"})
self.set("__print_formats", print_formats)
def load_workflows(self):
# get active workflow
workflow_name = get_workflow_name(self.name)
workflow_docs = []
if workflow_name and frappe.db.exists("Workflow", workflow_name):
workflow = frappe.get_doc("Workflow", workflow_name)
workflow_docs.append(workflow)
for d in workflow.get("workflow_document_states"):
workflow_docs.append(frappe.get_doc("Workflow State", d.state))
self.set("__workflow_docs", workflow_docs)
def load_templates(self):
module = load_doctype_module(self.name)
app = module.__name__.split(".")[0]
templates = {}
if hasattr(module, "form_grid_templates"):
for key, path in module.form_grid_templates.iteritems():
templates[key] = get_html_format(frappe.get_app_path(app, path))
self.set("__form_grid_templates", templates)
def set_translations(self, lang):
self.set("__messages", frappe.get_lang_dict("doctype", self.name))
# set translations for grid templates
if self.get("__form_grid_templates"):
for content in self.get("__form_grid_templates").values():
messages = extract_messages_from_code(content)
messages = make_dict_from_messages(messages)
self.get("__messages").update(messages)
| gpl-2.0 |
ashemedai/ansible | lib/ansible/modules/packaging/os/pkg5.py | 42 | 5050 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
options:
name:
description:
      - An FMRI of the package(s) to be installed/removed/updated.
- Multiple packages may be specified, separated by C(,).
required: true
state:
description:
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
package.
required: false
default: present
choices: [ present, latest, absent ]
accept_licenses:
description:
- Accept any licences.
required: false
default: false
choices: [ true, false ]
aliases: [ accept_licences, accept ]
'''
EXAMPLES = '''
# Install Vim:
- pkg5:
name: editor/vim
# Remove finger daemon:
- pkg5:
name: service/network/finger
state: absent
# Install several packages at once:
- pkg5:
name:
- /file/gnu-findutils
- /text/gnu-grep
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='list'),
state=dict(
default='present',
choices=[
'present',
'installed',
'latest',
'absent',
'uninstalled',
'removed',
]
),
accept_licenses=dict(
type='bool',
default=False,
aliases=['accept_licences', 'accept'],
),
)
)
params = module.params
packages = []
    # pkg(5) FMRIs include a comma before the release number, but
# AnsibleModule will have split this into multiple items for us.
# Try to spot where this has happened and fix it.
for fragment in params['name']:
if (
re.search('^\d+(?:\.\d+)*', fragment)
and packages and re.search('@[^,]*$', packages[-1])
):
packages[-1] += ',' + fragment
else:
packages.append(fragment)
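    # Illustrative example (not part of the original module): a made-up FMRI such
    # as editor/vim@2:7.4,5.11-0.175 arrives here split on the comma as
    # ['editor/vim@2:7.4', '5.11-0.175'], and the loop above stitches the two
    # fragments back into a single package name.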
if params['state'] in ['present', 'installed']:
ensure(module, 'present', packages, params)
elif params['state'] in ['latest']:
ensure(module, 'latest', packages, params)
elif params['state'] in ['absent', 'uninstalled', 'removed']:
ensure(module, 'absent', packages, params)
def ensure(module, state, packages, params):
response = {
'results': [],
'msg': '',
}
behaviour = {
'present': {
'filter': lambda p: not is_installed(module, p),
'subcommand': 'install',
},
'latest': {
'filter': lambda p: not is_latest(module, p),
'subcommand': 'install',
},
'absent': {
'filter': lambda p: is_installed(module, p),
'subcommand': 'uninstall',
},
}
if params['accept_licenses']:
accept_licenses = ['--accept']
else:
accept_licenses = []
to_modify = filter(behaviour[state]['filter'], packages)
if to_modify:
rc, out, err = module.run_command(
[
'pkg', behaviour[state]['subcommand']
]
+ accept_licenses
+ [
'-q', '--'
] + to_modify
)
response['rc'] = rc
response['results'].append(out)
response['msg'] += err
response['changed'] = True
if rc != 0:
module.fail_json(**response)
module.exit_json(**response)
def is_installed(module, package):
rc, out, err = module.run_command(['pkg', 'list', '--', package])
return not bool(int(rc))
def is_latest(module, package):
rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
return bool(int(rc))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
douglaswei/stock | fxcm/prepare/parse.py | 1 | 2352 | # coding=utf-8
import struct
import time
import os, sys
def parse_hst_csv(src_path, des_path, time_from=None, time_to=None):
content = open(src_path, 'rb').read()
    # Read the file header structure
    # Base version
    print "basic version: %i" % struct.unpack("i", content[0:4])[0]
    # Version information
    print "version: %s" % "".join(struct.unpack("64c", content[4:68]))
    # Currency pair (symbol) name
    print "detail: %s" % "".join(struct.unpack("12c", content[68:80]))
    # Period (in minutes)
    print "period: %i min" % struct.unpack("i", content[80:84])[0]
    # Number of decimal digits
    # print struct.unpack("i", content[84:88])
    # Base time reference
    # print struct.unpack("i", content[88:92])
    # Last synchronization time
    # print struct.unpack("i", content[92:96])
    # Reserved for future use
    # print struct.unpack("13i", content[96:148])
    # Loop over the bar records
content_len = len(content)
time_f = None if time_from is None else time.strptime(time_from, "%Y-%m-%d %H:%M")
time_t = None if time_to is None else time.strptime(time_to, "%Y-%m-%d %H:%M")
with open(des_path, "w") as des_file:
des_file.write("time,open,high,low,close,vol\n")
for tip in range(148, content_len, 60):
time_d = time.gmtime(struct.unpack("i", content[tip:tip + 4])[0])
# time_raw = time.strptime(time_d, "%a %b %d %H:%M:%S %Y")
if time_f is not None and time_f >= time_d:
continue
if time_to is not None and time_t < time_d:
continue
beg = struct.unpack("d", content[tip + 8:tip + 16])[0]
high = struct.unpack("d", content[tip + 16:tip + 24])[0]
low = struct.unpack("d", content[tip + 24:tip + 32])[0]
close = struct.unpack("d", content[tip + 32:tip + 40])[0]
vol = struct.unpack("i", content[tip + 40:tip + 44])[0]
des_file.write("%s,%f,%f,%f,%f,%d\n" % (time.strftime("%Y-%m-%d %H:%M:%S", time_d), beg, high, low, close, vol))
# print time.strftime("%Y-%m-%d-%H:%M", time_r), beg, high, low, close, vol
def process_data_dir(src_dir, des_dir, time_from=None, time_to=None):
for filename in os.listdir(src_dir):
if os.path.isfile(os.path.join(src_dir, filename)) and filename.endswith(".hst"):
src_file_path = os.path.join(src_dir, filename)
des_file_path = os.path.join(des_dir, filename.replace('.hst', '.csv'))
parse_hst_csv(src_file_path, des_file_path, time_from, time_to)
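# --- Illustrative usage (not part of the original module) ---
# A hedged sketch; the directory names and the date range below are made-up
# placeholders, not values taken from the original project.
if __name__ == '__main__':
    process_data_dir("./hst_data", "./csv_data",
                     time_from="2015-01-01 00:00", time_to="2015-06-30 23:59")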
| gpl-2.0 |
elit3ge/SickRage | lib/github/AuthenticatedUser.py | 70 | 46818 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.Gist
import github.Repository
import github.NamedUser
import github.Plan
import github.Organization
import github.UserKey
import github.Issue
import github.Event
import github.Authorization
import github.Notification
class AuthenticatedUser(github.GithubObject.CompletableGithubObject):
"""
This class represents AuthenticatedUsers as returned for example by http://developer.github.com/v3/todo
"""
@property
def avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def bio(self):
"""
:type: string
"""
self._completeIfNotSet(self._bio)
return self._bio.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def followers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._followers_url)
return self._followers_url.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def following_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._following_url)
return self._following_url.value
@property
def gists_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._gists_url)
return self._gists_url.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def hireable(self):
"""
:type: bool
"""
self._completeIfNotSet(self._hireable)
return self._hireable.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def organizations_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._organizations_url)
return self._organizations_url.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def received_events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._received_events_url)
return self._received_events_url.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def site_admin(self):
"""
:type: bool
"""
self._completeIfNotSet(self._site_admin)
return self._site_admin.value
@property
def starred_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._starred_url)
return self._starred_url.value
@property
def subscriptions_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscriptions_url)
return self._subscriptions_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_emails(self, *emails):
"""
:calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, (str, unicode)) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/emails",
input=post_parameters
)
def add_to_following(self, following):
"""
:calls: `PUT /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/following/" + following._identity
)
def add_to_starred(self, starred):
"""
:calls: `PUT /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(starred, github.Repository.Repository), starred
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/starred/" + starred._identity
)
def add_to_subscriptions(self, subscription):
"""
:calls: `PUT /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(subscription, github.Repository.Repository), subscription
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/subscriptions/" + subscription._identity
)
def add_to_watched(self, watched):
"""
:calls: `PUT /user/watched/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/watched/" + watched._identity
)
def create_authorization(self, scopes=github.GithubObject.NotSet, note=github.GithubObject.NotSet, note_url=github.GithubObject.NotSet, client_id=github.GithubObject.NotSet, client_secret=github.GithubObject.NotSet, onetime_password=None):
"""
:calls: `POST /authorizations <http://developer.github.com/v3/oauth>`_
:param scopes: list of string
:param note: string
:param note_url: string
:param client_id: string
:param client_secret: string
:param onetime_password: string
:rtype: :class:`github.Authorization.Authorization`
"""
assert scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in scopes), scopes
assert note is github.GithubObject.NotSet or isinstance(note, (str, unicode)), note
assert note_url is github.GithubObject.NotSet or isinstance(note_url, (str, unicode)), note_url
assert client_id is github.GithubObject.NotSet or isinstance(client_id, (str, unicode)), client_id
assert client_secret is github.GithubObject.NotSet or isinstance(client_secret, (str, unicode)), client_secret
assert onetime_password is None or isinstance(onetime_password, (str, unicode)), onetime_password
post_parameters = dict()
if scopes is not github.GithubObject.NotSet:
post_parameters["scopes"] = scopes
if note is not github.GithubObject.NotSet:
post_parameters["note"] = note
if note_url is not github.GithubObject.NotSet:
post_parameters["note_url"] = note_url
if client_id is not github.GithubObject.NotSet:
post_parameters["client_id"] = client_id
if client_secret is not github.GithubObject.NotSet:
post_parameters["client_secret"] = client_secret
if onetime_password is not None:
request_header = {'X-GitHub-OTP': onetime_password} # pragma no cover (Should be covered)
else:
request_header = None
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/authorizations",
input=post_parameters,
headers=request_header,
)
return github.Authorization.Authorization(self._requester, headers, data, completed=True)
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks"
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_gist(self, public, files, description=github.GithubObject.NotSet):
"""
:calls: `POST /gists <http://developer.github.com/v3/gists>`_
:param public: bool
:param files: dict of string to :class:`github.InputFileContent.InputFileContent`
:param description: string
:rtype: :class:`github.Gist.Gist`
"""
assert isinstance(public, bool), public
assert all(isinstance(element, github.InputFileContent) for element in files.itervalues()), files
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
post_parameters = {
"public": public,
"files": dict((key, value._identity) for key, value in files.iteritems()),
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/gists",
input=post_parameters
)
return github.Gist.Gist(self._requester, headers, data, completed=True)
def create_key(self, title, key):
"""
:calls: `POST /user/keys <http://developer.github.com/v3/users/keys>`_
:param title: string
:param key: string
:rtype: :class:`github.UserKey.UserKey`
"""
assert isinstance(title, (str, unicode)), title
assert isinstance(key, (str, unicode)), key
post_parameters = {
"title": title,
"key": key,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/keys",
input=post_parameters
)
return github.UserKey.UserKey(self._requester, headers, data, completed=True)
def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, gitignore_template=github.GithubObject.NotSet):
"""
:calls: `POST /user/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param auto_init: bool
:param gitignore_template: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def edit(self, name=github.GithubObject.NotSet, email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, location=github.GithubObject.NotSet, hireable=github.GithubObject.NotSet, bio=github.GithubObject.NotSet):
"""
:calls: `PATCH /user <http://developer.github.com/v3/users>`_
:param name: string
:param email: string
:param blog: string
:param company: string
:param location: string
:param hireable: bool
:param bio: string
:rtype: None
"""
assert name is github.GithubObject.NotSet or isinstance(name, (str, unicode)), name
assert email is github.GithubObject.NotSet or isinstance(email, (str, unicode)), email
assert blog is github.GithubObject.NotSet or isinstance(blog, (str, unicode)), blog
assert company is github.GithubObject.NotSet or isinstance(company, (str, unicode)), company
assert location is github.GithubObject.NotSet or isinstance(location, (str, unicode)), location
assert hireable is github.GithubObject.NotSet or isinstance(hireable, bool), hireable
assert bio is github.GithubObject.NotSet or isinstance(bio, (str, unicode)), bio
post_parameters = dict()
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if hireable is not github.GithubObject.NotSet:
post_parameters["hireable"] = hireable
if bio is not github.GithubObject.NotSet:
post_parameters["bio"] = bio
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
"/user",
input=post_parameters
)
self._useAttributes(data)
def get_authorization(self, id):
"""
:calls: `GET /authorizations/:id <http://developer.github.com/v3/oauth>`_
:param id: integer
:rtype: :class:`github.Authorization.Authorization`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/authorizations/" + str(id)
)
return github.Authorization.Authorization(self._requester, headers, data, completed=True)
def get_authorizations(self):
"""
:calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
"""
return github.PaginatedList.PaginatedList(
github.Authorization.Authorization,
self._requester,
"/authorizations",
None
)
def get_emails(self):
"""
:calls: `GET /user/emails <http://developer.github.com/v3/users/emails>`_
:rtype: list of string
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/user/emails"
)
return data
def get_events(self):
"""
:calls: `GET /events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/events",
None
)
def get_followers(self):
"""
:calls: `GET /user/followers <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
"/user/followers",
None
)
def get_following(self):
"""
:calls: `GET /user/following <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
"/user/following",
None
)
def get_gists(self):
"""
:calls: `GET /gists <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
"/gists",
None
)
def get_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /issues <http://developer.github.com/v3/issues>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, (str, unicode)), filter
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
"/issues",
url_parameters
)
def get_user_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /user/issues <http://developer.github.com/v3/issues>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, (str, unicode)), filter
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
"/issues",
url_parameters
)
def get_key(self, id):
"""
:calls: `GET /user/keys/:id <http://developer.github.com/v3/users/keys>`_
:param id: integer
:rtype: :class:`github.UserKey.UserKey`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/user/keys/" + str(id)
)
return github.UserKey.UserKey(self._requester, headers, data, completed=True)
def get_keys(self):
"""
:calls: `GET /user/keys <http://developer.github.com/v3/users/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.UserKey.UserKey`
"""
return github.PaginatedList.PaginatedList(
github.UserKey.UserKey,
self._requester,
"/user/keys",
None
)
def get_notification(self, id):
"""
:calls: `GET /notifications/threads/:id <http://developer.github.com/v3/activity/notifications>`_
:rtype: :class:`github.Notification.Notification`
"""
assert isinstance(id, (str, unicode)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/notifications/threads/" + id
)
return github.Notification.Notification(self._requester, headers, data, completed=True)
def get_notifications(self, all=github.GithubObject.NotSet, participating=github.GithubObject.NotSet):
"""
:calls: `GET /notifications <http://developer.github.com/v3/activity/notifications>`_
:param all: bool
:param participating: bool
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Notification.Notification`
"""
assert all is github.GithubObject.NotSet or isinstance(all, bool), all
assert participating is github.GithubObject.NotSet or isinstance(participating, bool), participating
params = dict()
if all is not github.GithubObject.NotSet:
params["all"] = all
if participating is not github.GithubObject.NotSet:
params["participating"] = participating
# TODO: implement parameter "since"
return github.PaginatedList.PaginatedList(
github.Notification.Notification,
self._requester,
"/notifications",
params
)
def get_organization_events(self, org):
"""
:calls: `GET /users/:user/events/orgs/:org <http://developer.github.com/v3/activity/events>`_
:param org: :class:`github.Organization.Organization`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
assert isinstance(org, github.Organization.Organization), org
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/users/" + self.login + "/events/orgs/" + org.login,
None
)
def get_orgs(self):
"""
:calls: `GET /user/orgs <http://developer.github.com/v3/orgs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
"""
return github.PaginatedList.PaginatedList(
github.Organization.Organization,
self._requester,
"/user/orgs",
None
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/repos/" + self.login + "/" + name
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(self, type=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
"""
:calls: `GET /user/repos <http://developer.github.com/v3/repos>`_
:param type: string
:param sort: string
:param direction: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert type is github.GithubObject.NotSet or isinstance(type, (str, unicode)), type
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
url_parameters = dict()
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/repos",
url_parameters
)
def get_starred(self):
"""
:calls: `GET /user/starred <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/starred",
None
)
def get_starred_gists(self):
"""
:calls: `GET /gists/starred <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
"/gists/starred",
None
)
def get_subscriptions(self):
"""
:calls: `GET /user/subscriptions <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/subscriptions",
None
)
def get_teams(self):
"""
:calls: `GET /user/teams <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
"/user/teams",
None
)
def get_watched(self):
"""
:calls: `GET /user/watched <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/watched",
None
)
def has_in_following(self, following):
"""
:calls: `GET /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(following, github.NamedUser.NamedUser), following
status, headers, data = self._requester.requestJson(
"GET",
"/user/following/" + following._identity
)
return status == 204
def has_in_starred(self, starred):
"""
:calls: `GET /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(starred, github.Repository.Repository), starred
status, headers, data = self._requester.requestJson(
"GET",
"/user/starred/" + starred._identity
)
return status == 204
def has_in_subscriptions(self, subscription):
"""
:calls: `GET /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(subscription, github.Repository.Repository), subscription
status, headers, data = self._requester.requestJson(
"GET",
"/user/subscriptions/" + subscription._identity
)
return status == 204
def has_in_watched(self, watched):
"""
:calls: `GET /user/watched/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param watched: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(watched, github.Repository.Repository), watched
status, headers, data = self._requester.requestJson(
"GET",
"/user/watched/" + watched._identity
)
return status == 204
def remove_from_emails(self, *emails):
"""
:calls: `DELETE /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, (str, unicode)) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/emails",
input=post_parameters
)
def remove_from_following(self, following):
"""
:calls: `DELETE /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/following/" + following._identity
)
def remove_from_starred(self, starred):
"""
:calls: `DELETE /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(starred, github.Repository.Repository), starred
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/starred/" + starred._identity
)
def remove_from_subscriptions(self, subscription):
"""
:calls: `DELETE /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(subscription, github.Repository.Repository), subscription
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/subscriptions/" + subscription._identity
)
def remove_from_watched(self, watched):
"""
:calls: `DELETE /user/watched/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/watched/" + watched._identity
)
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._bio = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._followers_url = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._following_url = github.GithubObject.NotSet
self._gists_url = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._hireable = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._organizations_url = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._received_events_url = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._site_admin = github.GithubObject.NotSet
self._starred_url = github.GithubObject.NotSet
self._subscriptions_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "bio" in attributes: # pragma no branch
self._bio = self._makeStringAttribute(attributes["bio"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "followers_url" in attributes: # pragma no branch
self._followers_url = self._makeStringAttribute(attributes["followers_url"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "following_url" in attributes: # pragma no branch
self._following_url = self._makeStringAttribute(attributes["following_url"])
if "gists_url" in attributes: # pragma no branch
self._gists_url = self._makeStringAttribute(attributes["gists_url"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "hireable" in attributes: # pragma no branch
self._hireable = self._makeBoolAttribute(attributes["hireable"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "organizations_url" in attributes: # pragma no branch
self._organizations_url = self._makeStringAttribute(attributes["organizations_url"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(attributes["owned_private_repos"])
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "received_events_url" in attributes: # pragma no branch
self._received_events_url = self._makeStringAttribute(attributes["received_events_url"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "site_admin" in attributes: # pragma no branch
self._site_admin = self._makeBoolAttribute(attributes["site_admin"])
if "starred_url" in attributes: # pragma no branch
self._starred_url = self._makeStringAttribute(attributes["starred_url"])
if "subscriptions_url" in attributes: # pragma no branch
self._subscriptions_url = self._makeStringAttribute(attributes["subscriptions_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(attributes["total_private_repos"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 |
fritsvanveen/QGIS | python/plugins/processing/algs/gdal/contour.py | 5 | 3823 | # -*- coding: utf-8 -*-
"""
***************************************************************************
contour.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class contour(GdalAlgorithm):
INPUT_RASTER = 'INPUT_RASTER'
OUTPUT_VECTOR = 'OUTPUT_VECTOR'
INTERVAL = 'INTERVAL'
FIELD_NAME = 'FIELD_NAME'
EXTRA = 'EXTRA'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'contour.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Contour')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Extraction')
self.addParameter(ParameterRaster(self.INPUT_RASTER,
self.tr('Input layer'), False))
self.addParameter(ParameterNumber(self.INTERVAL,
self.tr('Interval between contour lines'), 0.0,
99999999.999999, 10.0))
self.addParameter(ParameterString(self.FIELD_NAME,
self.tr('Attribute name (if not set, no elevation attribute is attached)'),
'ELEV', optional=True))
self.addParameter(ParameterString(self.EXTRA,
self.tr('Additional creation parameters'), '', optional=True))
self.addOutput(OutputVector(self.OUTPUT_VECTOR,
self.tr('Contours')))
def getConsoleCommands(self):
output = self.getOutputValue(self.OUTPUT_VECTOR)
interval = str(self.getParameterValue(self.INTERVAL))
fieldName = str(self.getParameterValue(self.FIELD_NAME))
extra = self.getParameterValue(self.EXTRA)
if extra is not None:
extra = str(extra)
arguments = []
if len(fieldName) > 0:
arguments.append('-a')
arguments.append(fieldName)
arguments.append('-i')
arguments.append(interval)
driver = GdalUtils.getVectorDriverFromFileName(output)
arguments.append('-f')
arguments.append(driver)
if extra and len(extra) > 0:
arguments.append(extra)
arguments.append(self.getParameterValue(self.INPUT_RASTER))
arguments.append(output)
return ['gdal_contour', GdalUtils.escapeAndJoin(arguments)]
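        # Illustration only (an assumed invocation, not produced by this file):
        # with INPUT_RASTER '/tmp/dem.tif', INTERVAL 10.0, FIELD_NAME 'ELEV' and
        # a '.shp' output, the returned command is roughly
        #   gdal_contour -a ELEV -i 10.0 -f "ESRI Shapefile" /tmp/dem.tif /tmp/contours.shp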
| gpl-2.0 |
jofomah/rules | testpy/waltzdb.py | 1 | 50347 | from durable.lang import *
import math
import datetime
import json
_fact_count = 0
def create_and_post(host, fact):
global _fact_count
fact['id'] = _fact_count
fact['sid'] = 1
host.post('waltzdb', fact)
_fact_count += 1
def create_and_assert(host, fact):
global _fact_count
fact['id'] = _fact_count
fact['sid'] = 1
host.assert_fact('waltzdb', fact)
_fact_count += 1
def get_x(val):
return math.floor(val / 100)
def get_y(val):
return val % 100
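# Descriptive note: points are packed into one integer as x * 100 + y, so the
# two helpers above simply unpack that encoding. Worked illustration (values
# taken from the line data asserted below): get_x(50003) == 500 and
# get_y(50003) == 3, i.e. the point (500, 3); get_x(4) == 0 and get_y(4) == 4.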
def get_angle(p1, p2):
delta_x = get_x(p2) - get_x(p1)
delta_y = get_y(p2) - get_y(p1)
if delta_x == 0:
if delta_y > 0:
return math.pi / 2
elif delta_y < 0:
return -math.pi / 2
elif delta_y == 0:
if delta_x > 0:
return 0
elif delta_x < 0:
return math.pi
else:
return math.atan2(delta_y, delta_x)
def get_inscribable_angle(base_point, p1, p2):
angle1 = get_angle(base_point, p1)
angle2 = get_angle(base_point, p2)
temp = math.fabs(angle1 - angle2)
if temp > math.pi:
return math.fabs(2 * math.pi - temp)
return temp
def make_3j_junction(j, base_point, p1, p2, p3):
angle12 = get_inscribable_angle(base_point, p1, p2)
angle13 = get_inscribable_angle(base_point, p1, p3)
angle23 = get_inscribable_angle(base_point, p2, p3)
sum1213 = angle12 + angle13
sum1223 = angle12 + angle23
sum1323 = angle13 + angle23
total = 0
if sum1213 < sum1223:
if sum1213 < sum1323:
total = sum1213
j['p2'] = p1; j['p1'] = p2; j['p3'] = p3
else:
total = sum1323
j['p2'] = p3; j['p1'] = p1; j['p3'] = p2
else:
if sum1223 < sum1323:
total = sum1223
j['p2'] = p2; j['p1'] = p1; j['p3'] = p3
else:
total = sum1323
j['p2'] = p3; j['p1'] = p1; j['p3'] = p2
if math.fabs(total - math.pi) < 0.001:
j['name'] = 'tee'
elif total > math.pi:
j['name'] = 'fork'
else:
j['name'] = 'arrow'
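# Descriptive note on the classification above: the junction name is decided by
# the smallest pair-sum of inscribed angles around base_point -- 'tee' when that
# sum is (nearly) pi, 'fork' when it exceeds pi, 'arrow' otherwise -- and p2 is
# set to the edge shared by the two angles in that smallest sum.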
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return unix_time(dt) * 1000.0
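# Overview of the statechart below, as read from its rules (a descriptive
# summary, not additional logic): start -> duplicate (every input line becomes
# two directed edges) -> detect_junctions (edges meeting at a point become '2j'
# or '3j' junctions) -> find_initial_boundary -> find_second_boundary (seed
# boundary edge labels) -> labeling, which cycles through visiting_3j /
# visiting_2j, marking, checking and remove_label until every junction is
# visited, then finishes in 'end'.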
with statechart('waltzdb'):
with state('start'):
@to('duplicate')
def starting(c):
c.s.gid = 1000
c.s.start_time = unix_time_millis(datetime.datetime.now())
with state('duplicate'):
@to('duplicate')
@when_all(cap(1000),
c.line << m.t == 'line')
def reverse_edges(c):
for frame in c.m:
print('Edge {0} {1}'.format(frame.line.p1, frame.line.p2))
print('Edge {0} {1}'.format(frame.line.p2, frame.line.p1))
c.post({'id': c.s.gid, 't': 'edge', 'p1': frame.line.p1, 'p2': frame.line.p2, 'joined': False})
c.post({'id': c.s.gid + 1, 't': 'edge', 'p1': frame.line.p2, 'p2': frame.line.p1, 'joined': False})
c.s.gid += 2
@to('detect_junctions')
@when_all(pri(1))
def done_reversing(c):
print('detect_junctions')
with state('detect_junctions'):
@to('detect_junctions')
@when_all(cap(1000),
c.e1 << (m.t == 'edge') & (m.joined == False),
c.e2 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2),
c.e3 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2) & (m.p2 != c.e2.p2))
def make_3_junction(c):
for frame in c.m:
j = {'id': c.s.gid, 't': 'junction', 'base_point': frame.e1.p1, 'j_t': '3j', 'visited': 'no'}
make_3j_junction(j, frame.e1.p1, frame.e1.p2, frame.e2.p2, frame.e3.p2)
print('Junction {0} {1} {2} {3} {4}'.format(j['name'], j['base_point'], j['p1'], j['p2'], j['p3']))
c.assert_fact(j)
frame.e1.id = c.s.gid + 1; frame.e1.joined = True; frame.e1.j_t = '3j'; c.assert_fact(frame.e1)
frame.e2.id = c.s.gid + 2; frame.e2.joined = True; frame.e2.j_t = '3j'; c.assert_fact(frame.e2)
frame.e3.id = c.s.gid + 3; frame.e3.joined = True; frame.e3.j_t = '3j'; c.assert_fact(frame.e3)
c.s.gid += 4
@to('detect_junctions')
@when_all(cap(1000),
c.e1 << (m.t == 'edge') & (m.joined == False),
c.e2 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2),
none((m.t == 'edge') & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2) & (m.p2 != c.e2.p2)))
def make_l(c):
for frame in c.m:
j = {'id': c.s.gid, 't': 'junction', 'base_point': frame.e1.p1, 'j_t': '2j', 'visited': 'no', 'name': 'L', 'p1': frame.e1.p2, 'p2': frame.e2.p2}
print('Junction L {0} {1} {2}'.format(frame.e1.p1, frame.e1.p2, frame.e2.p2))
c.assert_fact(j)
frame.e1.id = c.s.gid + 1; frame.e1.joined = True; frame.e1.j_t = '2j'; c.assert_fact(frame.e1)
frame.e2.id = c.s.gid + 2; frame.e2.joined = True; frame.e2.j_t = '2j'; c.assert_fact(frame.e2)
c.s.gid += 3
@to('find_initial_boundary')
@when_all(pri(1))
def done_detecting(c):
print('find_initial_boundary')
with state('find_initial_boundary'):
@to('find_second_boundary')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
none((m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no') & (m.base_point > c.j.base_point)))
def initial_boundary_junction_l(c):
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '1'})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': 'B', 'lid': '1'})
c.s.gid += 2
print('find_second_boundary')
@to('find_second_boundary')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.name == 'arrow') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
c.e3 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3),
none((m.t == 'junction') & (m.j_t == '3j') & (m.visited == 'no') & (m.base_point > c.j.base_point)))
def initial_boundary_junction_arrow(c):
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': '+', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': 'B', 'lid': '14'})
c.s.gid += 3
print('find_second_boundary')
with state('find_second_boundary'):
@to('labeling')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
none((m.t == 'junction') & (m.visited != 'no') & (m.base_point < c.j.base_point)))
def second_boundary_junction_l(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '1'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': 'B', 'lid': '1'})
c.s.gid += 3
print('labeling')
@to('labeling')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.name == 'arrow') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
c.e3 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3),
none((m.t == 'junction') & (m.visited != 'no') & (m.base_point < c.j.base_point)))
def second_boundary_junction_arrow(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': '+', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 3, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': 'B', 'lid': '14'})
c.s.gid += 4
print('labeling')
with state('labeling'):
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.visited == 'no'))
def start_visit_3_junction(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'now'; c.assert_fact(c.j)
c.s.gid += 1
print('visiting_3j')
@to('visiting_2j')
@when_all(pri(1),
c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'))
def start_visit_2_junction(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'now'; c.assert_fact(c.j)
c.s.gid += 1
print('visiting_2j')
@to('end')
@when_all(pri(2))
def done_labeling(c):
print('end {0}'.format(unix_time_millis(datetime.datetime.now()) - c.s.start_time))
with state('visiting_3j'):
def visit_3j(c):
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p1, c.l.n1, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p2, c.l.n2, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p3, c.l.n3, c.l.lid))
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': c.l.n1, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': c.l.n2, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': c.l.n3, 'lid': c.l.lid})
c.s.gid += 3
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_0(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_1(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_2(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_3(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_4(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_5(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_6(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_7(c):
visit_3j(c)
@to('marking')
@when_all(pri(1), (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'))
def end_visit(c):
print('marking')
with state('visiting_2j'):
def visit_2j(c):
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p1, c.l.n1, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p2, c.l.n2, c.l.lid))
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': c.l.n1, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': c.l.n2, 'lid': c.l.lid})
c.s.gid += 2
@to('visiting_2j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_2j_0(c):
visit_2j(c)
@to('visiting_2j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_2j_1(c):
visit_2j(c)
@to('visiting_2j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_2j_2(c):
visit_2j(c)
@to('visiting_2j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_2j_3(c):
visit_2j(c)
@to('marking')
@when_all(pri(1), (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'))
def end_visit(c):
print('marking')
with state('marking'):
@to('marking')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now'),
c.e << (m.t == 'edge') & (m.p2 == c.j.base_point),
c.junction << (m.t == 'junction') & (m.base_point == c.e.p1) & (m.visited == 'yes'))
def marking(c):
c.retract_fact(c.junction); c.junction.id = c.s.gid; c.junction.visited = 'check'; c.assert_fact(c.junction)
c.s.gid += 1
@to('marking')
@when_all(pri(1), c.j << (m.t == 'junction') & (m.visited == 'now'))
def stop_marking(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.s.gid += 1
@to('checking')
@when_all(pri(2))
def start_checking(c):
print('checking')
with state('checking'):
@to('remove_label')
@when_all(c.junction << (m.t == 'junction') & (m.visited == 'check'),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.junction.base_point),
c.j << (m.t == 'junction') & (m.base_point == c.el1.p2) & (m.visited == 'yes'),
none((m.t == 'edge_label') & (m.p1 == c.el1.p2) & (m.p2 == c.junction.base_point) & (m.label_name == c.el1.label_name)))
def checking(c):
print('remove_label')
c.assert_fact({'id': c.s.gid, 't': 'illegal', 'base_point': c.junction.base_point, 'lid': c.el1.lid})
c.s.gid += 1
@to('checking')
@when_all(pri(1), c.j << (m.t == 'junction') & (m.visited == 'check'))
def checking2(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.s.gid += 1
@to('labeling')
@when_all(pri(2))
def stop_checking(c):
print('labeling')
with state('remove_label'):
@to('checking')
@when_all(c.i << (m.t == 'illegal'),
c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.base_point == c.i.base_point),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1) & (m.lid == c.i.lid),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2) & (m.lid == c.i.lid),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3) & (m.lid == c.i.lid))
def remove_label_3j(c):
print('checking')
c.retract_fact(c.i)
c.retract_fact(c.el1)
c.retract_fact(c.el2)
c.retract_fact(c.el3)
@to('checking')
@when_all(c.i << (m.t == 'illegal'),
c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.base_point == c.i.base_point),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1) & (m.lid == c.i.lid),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2) & (m.lid == c.i.lid))
def remove_edge_2j(c):
print('checking')
c.retract_fact(c.i)
c.retract_fact(c.el1)
c.retract_fact(c.el2)
state('end')
@when_start
def start(host):
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'1' ,'n1':'B' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'2' ,'n1':'+' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'3' ,'n1':'B' ,'n2':'+'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'4' ,'n1':'-' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'5' ,'n1':'B' ,'n2':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'6' ,'n1':'+' ,'n2':'+' ,'n3':'+'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'7' ,'n1':'-' ,'n2':'-' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'8' ,'n1':'B' ,'n2':'-' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'9' ,'n1':'-' ,'n2':'B' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'10' ,'n1':'B' ,'n2':'B' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'11' ,'n1':'B' ,'n2':'+' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'12' ,'n1':'B' ,'n2':'-' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'13' ,'n1':'B' ,'n2':'B' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'14' ,'n1':'B' ,'n2':'+' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'15' ,'n1':'-' ,'n2':'+' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'16' ,'n1':'+' ,'n2':'-' ,'n3':'+'})
create_and_post(host, {'t':'line' ,'p1':50003 ,'p2':60003})
create_and_post(host, {'t':'line' ,'p1':30005 ,'p2':30006})
create_and_post(host, {'t':'line' ,'p1':80005 ,'p2':80006})
create_and_post(host, {'t':'line' ,'p1':50008 ,'p2':60008})
create_and_post(host, {'t':'line' ,'p1':0 ,'p2':20000})
create_and_post(host, {'t':'line' ,'p1':20000 ,'p2':30000})
create_and_post(host, {'t':'line' ,'p1':30000 ,'p2':40000})
create_and_post(host, {'t':'line' ,'p1':0 ,'p2':2})
create_and_post(host, {'t':'line' ,'p1':2 ,'p2':3})
create_and_post(host, {'t':'line' ,'p1':3 ,'p2':4})
create_and_post(host, {'t':'line' ,'p1':4 ,'p2':40004})
create_and_post(host, {'t':'line' ,'p1':40004 ,'p2':40000})
create_and_post(host, {'t':'line' ,'p1':40000 ,'p2':50001})
create_and_post(host, {'t':'line' ,'p1':50001 ,'p2':50002})
create_and_post(host, {'t':'line' ,'p1':50002 ,'p2':50003})
create_and_post(host, {'t':'line' ,'p1':50003 ,'p2':50005})
create_and_post(host, {'t':'line' ,'p1':50005 ,'p2':40004})
create_and_post(host, {'t':'line' ,'p1':50005 ,'p2':30005})
create_and_post(host, {'t':'line' ,'p1':30005 ,'p2':20005})
create_and_post(host, {'t':'line' ,'p1':20005 ,'p2':10005})
create_and_post(host, {'t':'line' ,'p1':10005 ,'p2':4})
create_and_post(host, {'t':'line' ,'p1':60000 ,'p2':80000})
create_and_post(host, {'t':'line' ,'p1':80000 ,'p2':90000})
create_and_post(host, {'t':'line' ,'p1':90000 ,'p2':100000})
create_and_post(host, {'t':'line' ,'p1':60000 ,'p2':60002})
create_and_post(host, {'t':'line' ,'p1':60002 ,'p2':60003})
create_and_post(host, {'t':'line' ,'p1':60003 ,'p2':60004})
create_and_post(host, {'t':'line' ,'p1':60004 ,'p2':100004})
create_and_post(host, {'t':'line' ,'p1':100004 ,'p2':100000})
create_and_post(host, {'t':'line' ,'p1':100000 ,'p2':110001})
create_and_post(host, {'t':'line' ,'p1':110001 ,'p2':110002})
create_and_post(host, {'t':'line' ,'p1':110002 ,'p2':110003})
create_and_post(host, {'t':'line' ,'p1':110003 ,'p2':110005})
create_and_post(host, {'t':'line' ,'p1':110005 ,'p2':100004})
create_and_post(host, {'t':'line' ,'p1':110005 ,'p2':90005})
create_and_post(host, {'t':'line' ,'p1':90005 ,'p2':80005})
create_and_post(host, {'t':'line' ,'p1':80005 ,'p2':70005})
create_and_post(host, {'t':'line' ,'p1':70005 ,'p2':60004})
create_and_post(host, {'t':'line' ,'p1':6 ,'p2':20006})
create_and_post(host, {'t':'line' ,'p1':20006 ,'p2':30006})
create_and_post(host, {'t':'line' ,'p1':30006 ,'p2':40006})
create_and_post(host, {'t':'line' ,'p1':6 ,'p2':8})
create_and_post(host, {'t':'line' ,'p1':8 ,'p2':9})
create_and_post(host, {'t':'line' ,'p1':9 ,'p2':10})
create_and_post(host, {'t':'line' ,'p1':10 ,'p2':40010})
create_and_post(host, {'t':'line' ,'p1':40010 ,'p2':40006})
create_and_post(host, {'t':'line' ,'p1':40006 ,'p2':50007})
create_and_post(host, {'t':'line' ,'p1':50007 ,'p2':50008})
create_and_post(host, {'t':'line' ,'p1':50008 ,'p2':50009})
create_and_post(host, {'t':'line' ,'p1':50009 ,'p2':50011})
create_and_post(host, {'t':'line' ,'p1':50011 ,'p2':40010})
create_and_post(host, {'t':'line' ,'p1':50011 ,'p2':30011})
create_and_post(host, {'t':'line' ,'p1':30011 ,'p2':20011})
create_and_post(host, {'t':'line' ,'p1':20011 ,'p2':10011})
create_and_post(host, {'t':'line' ,'p1':10011 ,'p2':10})
create_and_post(host, {'t':'line' ,'p1':60006 ,'p2':80006})
create_and_post(host, {'t':'line' ,'p1':80006 ,'p2':90006})
create_and_post(host, {'t':'line' ,'p1':90006 ,'p2':100006})
create_and_post(host, {'t':'line' ,'p1':60006 ,'p2':60008})
create_and_post(host, {'t':'line' ,'p1':60008 ,'p2':60009})
create_and_post(host, {'t':'line' ,'p1':60009 ,'p2':60010})
create_and_post(host, {'t':'line' ,'p1':60010 ,'p2':100010})
create_and_post(host, {'t':'line' ,'p1':100010 ,'p2':100006})
create_and_post(host, {'t':'line' ,'p1':100006 ,'p2':110007})
create_and_post(host, {'t':'line' ,'p1':110007 ,'p2':110008})
create_and_post(host, {'t':'line' ,'p1':110008 ,'p2':110009})
create_and_post(host, {'t':'line' ,'p1':110009 ,'p2':110011})
create_and_post(host, {'t':'line' ,'p1':110011 ,'p2':100010})
create_and_post(host, {'t':'line' ,'p1':110011 ,'p2':90011})
create_and_post(host, {'t':'line' ,'p1':90011 ,'p2':80011})
create_and_post(host, {'t':'line' ,'p1':80011 ,'p2':70011})
create_and_post(host, {'t':'line' ,'p1':70011 ,'p2':60010})
create_and_post(host, {'t':'line' ,'p1':170003 ,'p2':180003})
create_and_post(host, {'t':'line' ,'p1':150005 ,'p2':150006})
create_and_post(host, {'t':'line' ,'p1':200005 ,'p2':200006})
create_and_post(host, {'t':'line' ,'p1':170008 ,'p2':180008})
create_and_post(host, {'t':'line' ,'p1':120000 ,'p2':140000})
create_and_post(host, {'t':'line' ,'p1':140000 ,'p2':150000})
create_and_post(host, {'t':'line' ,'p1':150000 ,'p2':160000})
create_and_post(host, {'t':'line' ,'p1':120000 ,'p2':120002})
create_and_post(host, {'t':'line' ,'p1':120002 ,'p2':120003})
create_and_post(host, {'t':'line' ,'p1':120003 ,'p2':120004})
create_and_post(host, {'t':'line' ,'p1':120004 ,'p2':160004})
create_and_post(host, {'t':'line' ,'p1':160004 ,'p2':160000})
create_and_post(host, {'t':'line' ,'p1':160000 ,'p2':170001})
create_and_post(host, {'t':'line' ,'p1':170001 ,'p2':170002})
create_and_post(host, {'t':'line' ,'p1':170002 ,'p2':170003})
create_and_post(host, {'t':'line' ,'p1':170003 ,'p2':170005})
create_and_post(host, {'t':'line' ,'p1':170005 ,'p2':160004})
create_and_post(host, {'t':'line' ,'p1':170005 ,'p2':150005})
create_and_post(host, {'t':'line' ,'p1':150005 ,'p2':140005})
create_and_post(host, {'t':'line' ,'p1':140005 ,'p2':130005})
create_and_post(host, {'t':'line' ,'p1':130005 ,'p2':120004})
create_and_post(host, {'t':'line' ,'p1':180000 ,'p2':200000})
create_and_post(host, {'t':'line' ,'p1':200000 ,'p2':210000})
create_and_post(host, {'t':'line' ,'p1':210000 ,'p2':220000})
create_and_post(host, {'t':'line' ,'p1':180000 ,'p2':180002})
create_and_post(host, {'t':'line' ,'p1':180002 ,'p2':180003})
create_and_post(host, {'t':'line' ,'p1':180003 ,'p2':180004})
create_and_post(host, {'t':'line' ,'p1':180004 ,'p2':220004})
create_and_post(host, {'t':'line' ,'p1':220004 ,'p2':220000})
create_and_post(host, {'t':'line' ,'p1':220000 ,'p2':230001})
create_and_post(host, {'t':'line' ,'p1':230001 ,'p2':230002})
create_and_post(host, {'t':'line' ,'p1':230002 ,'p2':230003})
create_and_post(host, {'t':'line' ,'p1':230003 ,'p2':230005})
create_and_post(host, {'t':'line' ,'p1':230005 ,'p2':220004})
create_and_post(host, {'t':'line' ,'p1':230005 ,'p2':210005})
create_and_post(host, {'t':'line' ,'p1':210005 ,'p2':200005})
create_and_post(host, {'t':'line' ,'p1':200005 ,'p2':190005})
create_and_post(host, {'t':'line' ,'p1':190005 ,'p2':180004})
create_and_post(host, {'t':'line' ,'p1':120006 ,'p2':140006})
create_and_post(host, {'t':'line' ,'p1':140006 ,'p2':150006})
create_and_post(host, {'t':'line' ,'p1':150006 ,'p2':160006})
create_and_post(host, {'t':'line' ,'p1':120006 ,'p2':120008})
create_and_post(host, {'t':'line' ,'p1':120008 ,'p2':120009})
create_and_post(host, {'t':'line' ,'p1':120009 ,'p2':120010})
create_and_post(host, {'t':'line' ,'p1':120010 ,'p2':160010})
create_and_post(host, {'t':'line' ,'p1':160010 ,'p2':160006})
create_and_post(host, {'t':'line' ,'p1':160006 ,'p2':170007})
create_and_post(host, {'t':'line' ,'p1':170007 ,'p2':170008})
create_and_post(host, {'t':'line' ,'p1':170008 ,'p2':170009})
create_and_post(host, {'t':'line' ,'p1':170009 ,'p2':170011})
create_and_post(host, {'t':'line' ,'p1':170011 ,'p2':160010})
create_and_post(host, {'t':'line' ,'p1':170011 ,'p2':150011})
create_and_post(host, {'t':'line' ,'p1':150011 ,'p2':140011})
create_and_post(host, {'t':'line' ,'p1':140011 ,'p2':130011})
create_and_post(host, {'t':'line' ,'p1':130011 ,'p2':120010})
create_and_post(host, {'t':'line' ,'p1':180006 ,'p2':200006})
create_and_post(host, {'t':'line' ,'p1':200006 ,'p2':210006})
create_and_post(host, {'t':'line' ,'p1':210006 ,'p2':220006})
create_and_post(host, {'t':'line' ,'p1':180006 ,'p2':180008})
create_and_post(host, {'t':'line' ,'p1':180008 ,'p2':180009})
create_and_post(host, {'t':'line' ,'p1':180009 ,'p2':180010})
create_and_post(host, {'t':'line' ,'p1':180010 ,'p2':220010})
create_and_post(host, {'t':'line' ,'p1':220010 ,'p2':220006})
create_and_post(host, {'t':'line' ,'p1':220006 ,'p2':230007})
create_and_post(host, {'t':'line' ,'p1':230007 ,'p2':230008})
create_and_post(host, {'t':'line' ,'p1':230008 ,'p2':230009})
create_and_post(host, {'t':'line' ,'p1':230009 ,'p2':230011})
create_and_post(host, {'t':'line' ,'p1':230011 ,'p2':220010})
create_and_post(host, {'t':'line' ,'p1':230011 ,'p2':210011})
create_and_post(host, {'t':'line' ,'p1':210011 ,'p2':200011})
create_and_post(host, {'t':'line' ,'p1':200011 ,'p2':190011})
create_and_post(host, {'t':'line' ,'p1':190011 ,'p2':180010})
create_and_post(host, {'t':'line' ,'p1':110003 ,'p2':120003})
create_and_post(host, {'t':'line' ,'p1':90005 ,'p2':90006})
create_and_post(host, {'t':'line' ,'p1':140005 ,'p2':140006})
create_and_post(host, {'t':'line' ,'p1':110008 ,'p2':120008})
create_and_post(host, {'t':'line' ,'p1':290003 ,'p2':300003})
create_and_post(host, {'t':'line' ,'p1':270005 ,'p2':270006})
create_and_post(host, {'t':'line' ,'p1':320005 ,'p2':320006})
create_and_post(host, {'t':'line' ,'p1':290008 ,'p2':300008})
create_and_post(host, {'t':'line' ,'p1':240000 ,'p2':260000})
create_and_post(host, {'t':'line' ,'p1':260000 ,'p2':270000})
create_and_post(host, {'t':'line' ,'p1':270000 ,'p2':280000})
create_and_post(host, {'t':'line' ,'p1':240000 ,'p2':240002})
create_and_post(host, {'t':'line' ,'p1':240002 ,'p2':240003})
create_and_post(host, {'t':'line' ,'p1':240003 ,'p2':240004})
create_and_post(host, {'t':'line' ,'p1':240004 ,'p2':280004})
create_and_post(host, {'t':'line' ,'p1':280004 ,'p2':280000})
create_and_post(host, {'t':'line' ,'p1':280000 ,'p2':290001})
create_and_post(host, {'t':'line' ,'p1':290001 ,'p2':290002})
create_and_post(host, {'t':'line' ,'p1':290002 ,'p2':290003})
create_and_post(host, {'t':'line' ,'p1':290003 ,'p2':290005})
create_and_post(host, {'t':'line' ,'p1':290005 ,'p2':280004})
create_and_post(host, {'t':'line' ,'p1':290005 ,'p2':270005})
create_and_post(host, {'t':'line' ,'p1':270005 ,'p2':260005})
create_and_post(host, {'t':'line' ,'p1':260005 ,'p2':250005})
create_and_post(host, {'t':'line' ,'p1':250005 ,'p2':240004})
create_and_post(host, {'t':'line' ,'p1':300000 ,'p2':320000})
create_and_post(host, {'t':'line' ,'p1':320000 ,'p2':330000})
create_and_post(host, {'t':'line' ,'p1':330000 ,'p2':340000})
create_and_post(host, {'t':'line' ,'p1':300000 ,'p2':300002})
create_and_post(host, {'t':'line' ,'p1':300002 ,'p2':300003})
create_and_post(host, {'t':'line' ,'p1':300003 ,'p2':300004})
create_and_post(host, {'t':'line' ,'p1':300004 ,'p2':340004})
create_and_post(host, {'t':'line' ,'p1':340004 ,'p2':340000})
create_and_post(host, {'t':'line' ,'p1':340000 ,'p2':350001})
create_and_post(host, {'t':'line' ,'p1':350001 ,'p2':350002})
create_and_post(host, {'t':'line' ,'p1':350002 ,'p2':350003})
create_and_post(host, {'t':'line' ,'p1':350003 ,'p2':350005})
create_and_post(host, {'t':'line' ,'p1':350005 ,'p2':340004})
create_and_post(host, {'t':'line' ,'p1':350005 ,'p2':330005})
create_and_post(host, {'t':'line' ,'p1':330005 ,'p2':320005})
create_and_post(host, {'t':'line' ,'p1':320005 ,'p2':310005})
create_and_post(host, {'t':'line' ,'p1':310005 ,'p2':300004})
create_and_post(host, {'t':'line' ,'p1':240006 ,'p2':260006})
create_and_post(host, {'t':'line' ,'p1':260006 ,'p2':270006})
create_and_post(host, {'t':'line' ,'p1':270006 ,'p2':280006})
create_and_post(host, {'t':'line' ,'p1':240006 ,'p2':240008})
create_and_post(host, {'t':'line' ,'p1':240008 ,'p2':240009})
create_and_post(host, {'t':'line' ,'p1':240009 ,'p2':240010})
create_and_post(host, {'t':'line' ,'p1':240010 ,'p2':280010})
create_and_post(host, {'t':'line' ,'p1':280010 ,'p2':280006})
create_and_post(host, {'t':'line' ,'p1':280006 ,'p2':290007})
create_and_post(host, {'t':'line' ,'p1':290007 ,'p2':290008})
create_and_post(host, {'t':'line' ,'p1':290008 ,'p2':290009})
create_and_post(host, {'t':'line' ,'p1':290009 ,'p2':290011})
create_and_post(host, {'t':'line' ,'p1':290011 ,'p2':280010})
create_and_post(host, {'t':'line' ,'p1':290011 ,'p2':270011})
create_and_post(host, {'t':'line' ,'p1':270011 ,'p2':260011})
create_and_post(host, {'t':'line' ,'p1':260011 ,'p2':250011})
create_and_post(host, {'t':'line' ,'p1':250011 ,'p2':240010})
create_and_post(host, {'t':'line' ,'p1':300006 ,'p2':320006})
create_and_post(host, {'t':'line' ,'p1':320006 ,'p2':330006})
create_and_post(host, {'t':'line' ,'p1':330006 ,'p2':340006})
create_and_post(host, {'t':'line' ,'p1':300006 ,'p2':300008})
create_and_post(host, {'t':'line' ,'p1':300008 ,'p2':300009})
create_and_post(host, {'t':'line' ,'p1':300009 ,'p2':300010})
create_and_post(host, {'t':'line' ,'p1':300010 ,'p2':340010})
create_and_post(host, {'t':'line' ,'p1':340010 ,'p2':340006})
create_and_post(host, {'t':'line' ,'p1':340006 ,'p2':350007})
create_and_post(host, {'t':'line' ,'p1':350007 ,'p2':350008})
create_and_post(host, {'t':'line' ,'p1':350008 ,'p2':350009})
create_and_post(host, {'t':'line' ,'p1':350009 ,'p2':350011})
create_and_post(host, {'t':'line' ,'p1':350011 ,'p2':340010})
create_and_post(host, {'t':'line' ,'p1':350011 ,'p2':330011})
create_and_post(host, {'t':'line' ,'p1':330011 ,'p2':320011})
create_and_post(host, {'t':'line' ,'p1':320011 ,'p2':310011})
create_and_post(host, {'t':'line' ,'p1':310011 ,'p2':300010})
create_and_post(host, {'t':'line' ,'p1':230003 ,'p2':240003})
create_and_post(host, {'t':'line' ,'p1':210005 ,'p2':210006})
create_and_post(host, {'t':'line' ,'p1':260005 ,'p2':260006})
create_and_post(host, {'t':'line' ,'p1':230008 ,'p2':240008})
create_and_post(host, {'t':'line' ,'p1':410003 ,'p2':420003})
create_and_post(host, {'t':'line' ,'p1':390005 ,'p2':390006})
create_and_post(host, {'t':'line' ,'p1':440005 ,'p2':440006})
create_and_post(host, {'t':'line' ,'p1':410008 ,'p2':420008})
create_and_post(host, {'t':'line' ,'p1':360000 ,'p2':380000})
create_and_post(host, {'t':'line' ,'p1':380000 ,'p2':390000})
create_and_post(host, {'t':'line' ,'p1':390000 ,'p2':400000})
create_and_post(host, {'t':'line' ,'p1':360000 ,'p2':360002})
create_and_post(host, {'t':'line' ,'p1':360002 ,'p2':360003})
create_and_post(host, {'t':'line' ,'p1':360003 ,'p2':360004})
create_and_post(host, {'t':'line' ,'p1':360004 ,'p2':400004})
create_and_post(host, {'t':'line' ,'p1':400004 ,'p2':400000})
create_and_post(host, {'t':'line' ,'p1':400000 ,'p2':410001})
create_and_post(host, {'t':'line' ,'p1':410001 ,'p2':410002})
create_and_post(host, {'t':'line' ,'p1':410002 ,'p2':410003})
create_and_post(host, {'t':'line' ,'p1':410003 ,'p2':410005})
create_and_post(host, {'t':'line' ,'p1':410005 ,'p2':400004})
create_and_post(host, {'t':'line' ,'p1':410005 ,'p2':390005})
create_and_post(host, {'t':'line' ,'p1':390005 ,'p2':380005})
create_and_post(host, {'t':'line' ,'p1':380005 ,'p2':370005})
create_and_post(host, {'t':'line' ,'p1':370005 ,'p2':360004})
create_and_post(host, {'t':'line' ,'p1':420000 ,'p2':440000})
create_and_post(host, {'t':'line' ,'p1':440000 ,'p2':450000})
create_and_post(host, {'t':'line' ,'p1':450000 ,'p2':460000})
create_and_post(host, {'t':'line' ,'p1':420000 ,'p2':420002})
create_and_post(host, {'t':'line' ,'p1':420002 ,'p2':420003})
create_and_post(host, {'t':'line' ,'p1':420003 ,'p2':420004})
create_and_post(host, {'t':'line' ,'p1':420004 ,'p2':460004})
create_and_post(host, {'t':'line' ,'p1':460004 ,'p2':460000})
create_and_post(host, {'t':'line' ,'p1':460000 ,'p2':470001})
create_and_post(host, {'t':'line' ,'p1':470001 ,'p2':470002})
create_and_post(host, {'t':'line' ,'p1':470002 ,'p2':470003})
create_and_post(host, {'t':'line' ,'p1':470003 ,'p2':470005})
create_and_post(host, {'t':'line' ,'p1':470005 ,'p2':460004})
create_and_post(host, {'t':'line' ,'p1':470005 ,'p2':450005})
create_and_post(host, {'t':'line' ,'p1':450005 ,'p2':440005})
create_and_post(host, {'t':'line' ,'p1':440005 ,'p2':430005})
create_and_post(host, {'t':'line' ,'p1':430005 ,'p2':420004})
create_and_post(host, {'t':'line' ,'p1':360006 ,'p2':380006})
create_and_post(host, {'t':'line' ,'p1':380006 ,'p2':390006})
create_and_post(host, {'t':'line' ,'p1':390006 ,'p2':400006})
create_and_post(host, {'t':'line' ,'p1':360006 ,'p2':360008})
create_and_post(host, {'t':'line' ,'p1':360008 ,'p2':360009})
create_and_post(host, {'t':'line' ,'p1':360009 ,'p2':360010})
create_and_post(host, {'t':'line' ,'p1':360010 ,'p2':400010})
create_and_post(host, {'t':'line' ,'p1':400010 ,'p2':400006})
create_and_post(host, {'t':'line' ,'p1':400006 ,'p2':410007})
create_and_post(host, {'t':'line' ,'p1':410007 ,'p2':410008})
create_and_post(host, {'t':'line' ,'p1':410008 ,'p2':410009})
create_and_post(host, {'t':'line' ,'p1':410009 ,'p2':410011})
create_and_post(host, {'t':'line' ,'p1':410011 ,'p2':400010})
create_and_post(host, {'t':'line' ,'p1':410011 ,'p2':390011})
create_and_post(host, {'t':'line' ,'p1':390011 ,'p2':380011})
create_and_post(host, {'t':'line' ,'p1':380011 ,'p2':370011})
create_and_post(host, {'t':'line' ,'p1':370011 ,'p2':360010})
create_and_post(host, {'t':'line' ,'p1':420006 ,'p2':440006})
create_and_post(host, {'t':'line' ,'p1':440006 ,'p2':450006})
create_and_post(host, {'t':'line' ,'p1':450006 ,'p2':460006})
create_and_post(host, {'t':'line' ,'p1':420006 ,'p2':420008})
create_and_post(host, {'t':'line' ,'p1':420008 ,'p2':420009})
create_and_post(host, {'t':'line' ,'p1':420009 ,'p2':420010})
create_and_post(host, {'t':'line' ,'p1':420010 ,'p2':460010})
create_and_post(host, {'t':'line' ,'p1':460010 ,'p2':460006})
create_and_post(host, {'t':'line' ,'p1':460006 ,'p2':470007})
create_and_post(host, {'t':'line' ,'p1':470007 ,'p2':470008})
create_and_post(host, {'t':'line' ,'p1':470008 ,'p2':470009})
create_and_post(host, {'t':'line' ,'p1':470009 ,'p2':470011})
create_and_post(host, {'t':'line' ,'p1':470011 ,'p2':460010})
create_and_post(host, {'t':'line' ,'p1':470011 ,'p2':450011})
create_and_post(host, {'t':'line' ,'p1':450011 ,'p2':440011})
create_and_post(host, {'t':'line' ,'p1':440011 ,'p2':430011})
create_and_post(host, {'t':'line' ,'p1':430011 ,'p2':420010})
create_and_post(host, {'t':'line' ,'p1':350003 ,'p2':360003})
create_and_post(host, {'t':'line' ,'p1':330005 ,'p2':330006})
create_and_post(host, {'t':'line' ,'p1':380005 ,'p2':380006})
create_and_post(host, {'t':'line' ,'p1':350008 ,'p2':360008})
create_and_post(host, {'t':'line' ,'p1':530003 ,'p2':540003})
create_and_post(host, {'t':'line' ,'p1':510005 ,'p2':510006})
create_and_post(host, {'t':'line' ,'p1':560005 ,'p2':560006})
create_and_post(host, {'t':'line' ,'p1':530008 ,'p2':540008})
create_and_post(host, {'t':'line' ,'p1':480000 ,'p2':500000})
create_and_post(host, {'t':'line' ,'p1':500000 ,'p2':510000})
create_and_post(host, {'t':'line' ,'p1':510000 ,'p2':520000})
create_and_post(host, {'t':'line' ,'p1':480000 ,'p2':480002})
create_and_post(host, {'t':'line' ,'p1':480002 ,'p2':480003})
create_and_post(host, {'t':'line' ,'p1':480003 ,'p2':480004})
create_and_post(host, {'t':'line' ,'p1':480004 ,'p2':520004})
create_and_post(host, {'t':'line' ,'p1':520004 ,'p2':520000})
create_and_post(host, {'t':'line' ,'p1':520000 ,'p2':530001})
create_and_post(host, {'t':'line' ,'p1':530001 ,'p2':530002})
create_and_post(host, {'t':'line' ,'p1':530002 ,'p2':530003})
create_and_post(host, {'t':'line' ,'p1':530003 ,'p2':530005})
create_and_post(host, {'t':'line' ,'p1':530005 ,'p2':520004})
create_and_post(host, {'t':'line' ,'p1':530005 ,'p2':510005})
create_and_post(host, {'t':'line' ,'p1':510005 ,'p2':500005})
create_and_post(host, {'t':'line' ,'p1':500005 ,'p2':490005})
create_and_post(host, {'t':'line' ,'p1':490005 ,'p2':480004})
create_and_post(host, {'t':'line' ,'p1':540000 ,'p2':560000})
create_and_post(host, {'t':'line' ,'p1':560000 ,'p2':570000})
create_and_post(host, {'t':'line' ,'p1':570000 ,'p2':580000})
create_and_post(host, {'t':'line' ,'p1':540000 ,'p2':540002})
create_and_post(host, {'t':'line' ,'p1':540002 ,'p2':540003})
create_and_post(host, {'t':'line' ,'p1':540003 ,'p2':540004})
create_and_post(host, {'t':'line' ,'p1':540004 ,'p2':580004})
create_and_post(host, {'t':'line' ,'p1':580004 ,'p2':580000})
create_and_post(host, {'t':'line' ,'p1':580000 ,'p2':590001})
create_and_post(host, {'t':'line' ,'p1':590001 ,'p2':590002})
create_and_post(host, {'t':'line' ,'p1':590002 ,'p2':590003})
create_and_post(host, {'t':'line' ,'p1':590003 ,'p2':590005})
create_and_post(host, {'t':'line' ,'p1':590005 ,'p2':580004})
create_and_post(host, {'t':'line' ,'p1':590005 ,'p2':570005})
create_and_post(host, {'t':'line' ,'p1':570005 ,'p2':560005})
create_and_post(host, {'t':'line' ,'p1':560005 ,'p2':550005})
create_and_post(host, {'t':'line' ,'p1':550005 ,'p2':540004})
create_and_post(host, {'t':'line' ,'p1':480006 ,'p2':500006})
create_and_post(host, {'t':'line' ,'p1':500006 ,'p2':510006})
create_and_post(host, {'t':'line' ,'p1':510006 ,'p2':520006})
create_and_post(host, {'t':'line' ,'p1':480006 ,'p2':480008})
create_and_post(host, {'t':'line' ,'p1':480008 ,'p2':480009})
create_and_post(host, {'t':'line' ,'p1':480009 ,'p2':480010})
create_and_post(host, {'t':'line' ,'p1':480010 ,'p2':520010})
create_and_post(host, {'t':'line' ,'p1':520010 ,'p2':520006})
create_and_post(host, {'t':'line' ,'p1':520006 ,'p2':530007})
create_and_post(host, {'t':'line' ,'p1':530007 ,'p2':530008})
create_and_post(host, {'t':'line' ,'p1':530008 ,'p2':530009})
create_and_post(host, {'t':'line' ,'p1':530009 ,'p2':530011})
create_and_post(host, {'t':'line' ,'p1':530011 ,'p2':520010})
create_and_post(host, {'t':'line' ,'p1':530011 ,'p2':510011})
create_and_post(host, {'t':'line' ,'p1':510011 ,'p2':500011})
create_and_post(host, {'t':'line' ,'p1':500011 ,'p2':490011})
create_and_post(host, {'t':'line' ,'p1':490011 ,'p2':480010})
create_and_post(host, {'t':'line' ,'p1':540006 ,'p2':560006})
create_and_post(host, {'t':'line' ,'p1':560006 ,'p2':570006})
create_and_post(host, {'t':'line' ,'p1':570006 ,'p2':580006})
create_and_post(host, {'t':'line' ,'p1':540006 ,'p2':540008})
create_and_post(host, {'t':'line' ,'p1':540008 ,'p2':540009})
create_and_post(host, {'t':'line' ,'p1':540009 ,'p2':540010})
create_and_post(host, {'t':'line' ,'p1':540010 ,'p2':580010})
create_and_post(host, {'t':'line' ,'p1':580010 ,'p2':580006})
create_and_post(host, {'t':'line' ,'p1':580006 ,'p2':590007})
create_and_post(host, {'t':'line' ,'p1':590007 ,'p2':590008})
create_and_post(host, {'t':'line' ,'p1':590008 ,'p2':590009})
create_and_post(host, {'t':'line' ,'p1':590009 ,'p2':590011})
create_and_post(host, {'t':'line' ,'p1':590011 ,'p2':580010})
create_and_post(host, {'t':'line' ,'p1':590011 ,'p2':570011})
create_and_post(host, {'t':'line' ,'p1':570011 ,'p2':560011})
create_and_post(host, {'t':'line' ,'p1':560011 ,'p2':550011})
create_and_post(host, {'t':'line' ,'p1':550011 ,'p2':540010})
create_and_post(host, {'t':'line' ,'p1':470003 ,'p2':480003})
create_and_post(host, {'t':'line' ,'p1':450005 ,'p2':450006})
create_and_post(host, {'t':'line' ,'p1':500005 ,'p2':500006})
create_and_post(host, {'t':'line' ,'p1':470008 ,'p2':480008})
run_all(['/tmp/redis0.sock'])
| mit |
Leila20/django | tests/check_framework/test_security.py | 17 | 19560 | from django.conf import settings
from django.core.checks.security import base, csrf, sessions
from django.core.checks.utils import patch_middleware_message
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckSessionCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_secure
return check_session_cookie_secure
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[])
def test_session_cookie_secure_with_installed_app(self):
"""
Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is
in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=None,
MIDDLEWARE_CLASSES=[])
def test_session_cookie_secure_with_installed_app_middleware_classes(self):
self.assertEqual(self.func(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_with_middleware(self):
"""
Warn if SESSION_COOKIE_SECURE is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE.
"""
self.assertEqual(self.func(None), [sessions.W011])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE=None,
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_with_middleware_middleware_classes(self):
self.assertEqual(self.func(None), [patch_middleware_message(sessions.W011)])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_both(self):
"""
If SESSION_COOKIE_SECURE is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=None,
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_both_middleware_classes(self):
self.assertEqual(self.func(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_true(self):
"""
If SESSION_COOKIE_SECURE is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
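# A minimal settings sketch (an assumption for illustration, not part of this
# test module) that would make check_session_cookie_secure report no warnings:
#
#   # settings.py
#   INSTALLED_APPS = [..., "django.contrib.sessions"]
#   MIDDLEWARE = [..., "django.contrib.sessions.middleware.SessionMiddleware"]
#   SESSION_COOKIE_SECURE = True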
class CheckSessionCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_httponly
return check_session_cookie_httponly
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[])
def test_session_cookie_httponly_with_installed_app(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions"
is in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W013])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=[],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_with_middleware(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE.
"""
self.assertEqual(self.func(None), [sessions.W014])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_both(self):
"""
If SESSION_COOKIE_HTTPONLY is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W015])
@override_settings(
SESSION_COOKIE_HTTPONLY=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_true(self):
"""
If SESSION_COOKIE_HTTPONLY is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckCSRFMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_middleware
return check_csrf_middleware
@override_settings(MIDDLEWARE=[], MIDDLEWARE_CLASSES=[])
def test_no_csrf_middleware(self):
"""
Warn if CsrfViewMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [csrf.W003])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"])
def test_with_csrf_middleware(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_secure
return check_csrf_cookie_secure
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE but
CSRF_COOKIE_SECURE isn't True.
"""
self.assertEqual(self.func(None), [csrf.W016])
@override_settings(MIDDLEWARE=[], MIDDLEWARE_CLASSES=[], CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE, even if
CSRF_COOKIE_SECURE is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=True)
def test_with_csrf_cookie_secure_true(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_httponly
return check_csrf_cookie_httponly
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE but
CSRF_COOKIE_HTTPONLY isn't True.
"""
self.assertEqual(self.func(None), [csrf.W017])
@override_settings(MIDDLEWARE=[], MIDDLEWARE_CLASSES=[], CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE, even if
CSRF_COOKIE_HTTPONLY is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=True)
def test_with_csrf_cookie_httponly_true(self):
self.assertEqual(self.func(None), [])
class CheckSecurityMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_security_middleware
return check_security_middleware
@override_settings(MIDDLEWARE=[])
def test_no_security_middleware(self):
"""
Warn if SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [base.W001])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"])
def test_with_security_middleware(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecurityTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts
return check_sts
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=0)
def test_no_sts(self):
"""
Warn if SECURE_HSTS_SECONDS isn't > 0.
"""
self.assertEqual(self.func(None), [base.W004])
@override_settings(
MIDDLEWARE=[],
SECURE_HSTS_SECONDS=0)
def test_no_sts_no_middleware(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=3600)
def test_with_sts(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecuritySubdomainsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts_include_subdomains
return check_sts_include_subdomains
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains(self):
"""
Warn if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True.
"""
self.assertEqual(self.func(None), [base.W005])
@override_settings(
MIDDLEWARE=[],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains_no_middleware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None)
def test_no_sts_subdomains_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
SECURE_HSTS_SECONDS=3600)
def test_with_sts_subdomains(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecurityPreloadTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts_preload
return check_sts_preload
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_PRELOAD=False,
SECURE_HSTS_SECONDS=3600,
)
def test_no_sts_preload(self):
"""
Warn if SECURE_HSTS_PRELOAD isn't True.
"""
self.assertEqual(self.func(None), [base.W021])
@override_settings(MIDDLEWARE=[], SECURE_HSTS_PRELOAD=False, SECURE_HSTS_SECONDS=3600)
def test_no_sts_preload_no_middleware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None,
)
def test_no_sts_preload_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_PRELOAD=True,
SECURE_HSTS_SECONDS=3600,
)
def test_with_sts_preload(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_options_middleware
return check_xframe_options_middleware
@override_settings(MIDDLEWARE=[])
def test_middleware_not_installed(self):
"""
Warn if XFrameOptionsMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [base.W002])
@override_settings(MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"])
def test_middleware_installed(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsDenyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_deny
return check_xframe_deny
@override_settings(
MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='SAMEORIGIN',
)
def test_x_frame_options_not_deny(self):
"""
Warn if XFrameOptionsMiddleware is in MIDDLEWARE but
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [base.W019])
@override_settings(MIDDLEWARE=[], X_FRAME_OPTIONS='SAMEORIGIN')
def test_middleware_not_installed(self):
"""
No error if XFrameOptionsMiddleware isn't in MIDDLEWARE even if
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='DENY',
)
def test_xframe_deny(self):
self.assertEqual(self.func(None), [])
class CheckContentTypeNosniffTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_content_type_nosniff
return check_content_type_nosniff
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff(self):
"""
Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True.
"""
self.assertEqual(self.func(None), [base.W006])
@override_settings(
MIDDLEWARE=[],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff_no_middleware(self):
"""
Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and
SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_with_content_type_nosniff(self):
self.assertEqual(self.func(None), [])
class CheckXssFilterTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xss_filter
return check_xss_filter
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter(self):
"""
Warn if SECURE_BROWSER_XSS_FILTER isn't True.
"""
self.assertEqual(self.func(None), [base.W007])
@override_settings(
MIDDLEWARE=[],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter_no_middleware(self):
"""
Don't warn if SECURE_BROWSER_XSS_FILTER isn't True and
SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=True)
def test_with_xss_filter(self):
self.assertEqual(self.func(None), [])
class CheckSSLRedirectTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_ssl_redirect
return check_ssl_redirect
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect(self):
"""
Warn if SECURE_SSL_REDIRECT isn't True.
"""
self.assertEqual(self.func(None), [base.W008])
@override_settings(
MIDDLEWARE=[],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect_no_middleware(self):
"""
Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=True)
def test_with_ssl_redirect(self):
self.assertEqual(self.func(None), [])
class CheckSecretKeyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_secret_key
return check_secret_key
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'ab')
def test_okay_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertGreater(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(self.func(None), [])
@override_settings(SECRET_KEY='')
def test_empty_secret_key(self):
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_missing_secret_key(self):
del settings.SECRET_KEY
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_none_secret_key(self):
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'a')
def test_low_length_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH - 1)
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY='abcd' * 20)
def test_low_entropy_secret_key(self):
self.assertGreater(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertLess(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(self.func(None), [base.W009])
class CheckDebugTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_debug
return check_debug
@override_settings(DEBUG=True)
def test_debug_true(self):
"""
Warn if DEBUG is True.
"""
self.assertEqual(self.func(None), [base.W018])
@override_settings(DEBUG=False)
def test_debug_false(self):
self.assertEqual(self.func(None), [])
class CheckAllowedHostsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_allowed_hosts
return check_allowed_hosts
@override_settings(ALLOWED_HOSTS=[])
def test_allowed_hosts_empty(self):
self.assertEqual(self.func(None), [base.W020])
@override_settings(ALLOWED_HOSTS=['.example.com', ])
def test_allowed_hosts_set(self):
self.assertEqual(self.func(None), [])
| bsd-3-clause |
kittiu/sale-workflow | sale_payment_term_interest/model/account_payment_term.py | 31 | 4714 | # -*- coding: utf-8 -*-
#
#
# Authors: Guewen Baconnier
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from __future__ import division
from datetime import datetime
from dateutil.relativedelta import relativedelta
import openerp.addons.decimal_precision as dp
from openerp import models, fields, api
from openerp.tools.float_utils import float_round as round, float_compare
class AccountPaymentTerm(models.Model):
_inherit = 'account.payment.term'
interest_min = fields.Float(
string='Minimum Interest Amount',
digits=dp.get_precision('Account'),
help="The minimum amount of interest added to a sales "
"order.")
@api.multi
def compute_total_interest(self, value):
self.ensure_one()
values = self.compute_interest(value)
interest = sum(interest for __, __, interest in values)
precision_model = self.env['decimal.precision']
precision = precision_model.precision_get('Account')
compare = float_compare(interest,
self.interest_min,
precision_digits=precision)
if compare == -1: # interest < interest_min
return self.interest_min
else:
return interest
@api.multi
def compute_interest(self, value, date_ref=False):
if date_ref:
date_ref = fields.Date.from_string(date_ref)
else:
date_ref = datetime.today().date()
amount = value
result = []
lines_total = 0.0
precision_model = self.env['decimal.precision']
        # The computation of the amount for each term is exactly the same
        # as the one in 'account_payment_term.compute()'; this is
        # required to ensure that the interest fees are based on the
        # same amounts. This is why the 'account' precision is used:
        # it is the one used in 'account_payment_term.compute()'.
prec = precision_model.precision_get('Account')
for line in self.line_ids:
if line.value == 'fixed':
line_amount = round(line.value_amount, precision_digits=prec)
elif line.value == 'procent':
line_amount = round(value * line.value_amount,
precision_digits=prec)
elif line.value == 'balance':
line_amount = round(amount, prec)
if not line_amount:
continue
next_date = date_ref + relativedelta(days=line.days)
if line.days2 < 0:
# Getting 1st of next month
next_first_date = next_date + relativedelta(day=1,
months=1)
next_date = (next_first_date +
relativedelta(days=line.days2))
if line.days2 > 0:
next_date += relativedelta(day=line.days2, months=1)
interest = 0.0
if line.interest_rate:
days = (next_date - date_ref).days
rate = line.interest_rate / 100 / (12 * 30) # %/(months*days)
interest = line_amount * rate * days
result.append((fields.Date.to_string(next_date),
line_amount,
interest))
amount -= line_amount
lines_total += line_amount
dist = round(value - lines_total, precision_digits=prec)
if dist:
result.append((fields.Date.today(), dist, 0.0))
return result
class AccountPaymentTermLine(models.Model):
_inherit = 'account.payment.term.line'
interest_rate = fields.Float(
string='Interest Rate',
digits=dp.get_precision('Payment Term'),
help="The annual interest rate applied on a sales order. "
"Value between 0 and 100.\n"
"The interest is computed as: "
"'Amount * (Interest Rate / 100 / "
" (12 months * 30 days)) * Term Days'")
| agpl-3.0 |
jmehnle/ansible | lib/ansible/modules/cloud/ovirt/ovirt_networks.py | 15 | 9604 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_networks
short_description: Module to manage logical networks in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage logical networks in oVirt/RHV"
options:
name:
description:
- "Name of the network to manage."
required: true
state:
description:
- "Should the network be present or absent"
choices: ['present', 'absent']
default: present
data_center:
description:
- "Datacenter name where network reside."
description:
description:
- "Description of the network."
comment:
description:
- "Comment of the network."
vlan_tag:
description:
- "Specify VLAN tag."
vm_network:
description:
- "If I(True) network will be marked as network for VM."
- "VM network carries traffic relevant to the virtual machine."
mtu:
description:
- "Maximum transmission unit (MTU) of the network."
clusters:
description:
- "List of dictionaries describing how the network is managed in specific cluster."
- "C(name) - Cluster name."
- "C(assigned) - I(true) if the network should be assigned to cluster. Default is I(true)."
- "C(required) - I(true) if the network must remain operational for all hosts associated with this network."
- "C(display) - I(true) if the network should marked as display network."
- "C(migration) - I(true) if the network should marked as migration network."
- "C(gluster) - I(true) if the network should marked as gluster network."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create network
- ovirt_networks:
data_center: mydatacenter
name: mynetwork
vlan_tag: 1
vm_network: true
# Remove network
- ovirt_networks:
state: absent
name: mynetwork
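# Assign the network to a cluster (illustrative sketch; the cluster name
# "mycluster" is an assumption, option keys follow the documentation above)
- ovirt_networks:
    data_center: mydatacenter
    name: mynetwork
    clusters:
      - name: mycluster
        assigned: true
        required: true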
'''
RETURN = '''
id:
description: "ID of the managed network"
returned: "On success if network is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
network:
description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt/RHV instance
                 at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
returned: "On success if network is found."
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class NetworksModule(BaseModule):
def build_entity(self):
return otypes.Network(
name=self._module.params['name'],
comment=self._module.params['comment'],
description=self._module.params['description'],
data_center=otypes.DataCenter(
name=self._module.params['data_center'],
) if self._module.params['data_center'] else None,
vlan=otypes.Vlan(
self._module.params['vlan_tag'],
) if self._module.params['vlan_tag'] else None,
usages=[
otypes.NetworkUsage.VM if self._module.params['vm_network'] else None
] if self._module.params['vm_network'] is not None else None,
mtu=self._module.params['mtu'],
)
def update_check(self, entity):
return (
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('vlan_tag'), getattr(entity.vlan, 'id', None)) and
equal(self._module.params.get('vm_network'), True if entity.usages else False) and
equal(self._module.params.get('mtu'), entity.mtu)
)
class ClusterNetworksModule(BaseModule):
def __init__(self, network_id, cluster_network, *args, **kwargs):
super(ClusterNetworksModule, self).__init__(*args, **kwargs)
self._network_id = network_id
self._cluster_network = cluster_network
def build_entity(self):
return otypes.Network(
id=self._network_id,
name=self._module.params['name'],
required=self._cluster_network.get('required'),
display=self._cluster_network.get('display'),
usages=[
otypes.NetworkUsage(usage)
for usage in ['display', 'gluster', 'migration']
if self._cluster_network.get(usage, False)
] if (
self._cluster_network.get('display') is not None or
self._cluster_network.get('gluster') is not None or
self._cluster_network.get('migration') is not None
) else None,
)
def update_check(self, entity):
return (
equal(self._cluster_network.get('required'), entity.required) and
equal(self._cluster_network.get('display'), entity.display) and
equal(
sorted([
usage
for usage in ['display', 'gluster', 'migration']
if self._cluster_network.get(usage, False)
]),
sorted([
str(usage)
for usage in getattr(entity, 'usages', [])
# VM + MANAGEMENT is part of root network
if usage != otypes.NetworkUsage.VM and usage != otypes.NetworkUsage.MANAGEMENT
]),
)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
data_center=dict(default=None, required=True),
name=dict(default=None, required=True),
description=dict(default=None),
comment=dict(default=None),
vlan_tag=dict(default=None, type='int'),
vm_network=dict(default=None, type='bool'),
mtu=dict(default=None, type='int'),
clusters=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
clusters_service = connection.system_service().clusters_service()
networks_service = connection.system_service().networks_service()
networks_module = NetworksModule(
connection=connection,
module=module,
service=networks_service,
)
state = module.params['state']
network = networks_module.search_entity(
search_params={
'name': module.params['name'],
'datacenter': module.params['data_center'],
},
)
if state == 'present':
ret = networks_module.create(entity=network)
# Update clusters networks:
if module.params.get('clusters') is not None:
for param_cluster in module.params.get('clusters'):
cluster = search_by_name(clusters_service, param_cluster.get('name'))
if cluster is None:
raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
cluster_networks_service = clusters_service.service(cluster.id).networks_service()
cluster_networks_module = ClusterNetworksModule(
network_id=ret['id'],
cluster_network=param_cluster,
connection=connection,
module=module,
service=cluster_networks_service,
)
if param_cluster.get('assigned', True):
ret = cluster_networks_module.create()
else:
ret = cluster_networks_module.remove()
elif state == 'absent':
ret = networks_module.remove(entity=network)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
rjpower/spark | python/pyspark/files.py | 5 | 1885 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
class SparkFiles(object):
"""
Resolves paths to files added through
L{SparkContext.addFile()<pyspark.context.SparkContext.addFile>}.
SparkFiles contains only classmethods; users should not create SparkFiles
instances.
"""
_root_directory = None
_is_running_on_worker = False
_sc = None
def __init__(self):
raise NotImplementedError("Do not construct SparkFiles objects")
@classmethod
def get(cls, filename):
"""
Get the absolute path of a file added through C{SparkContext.addFile()}.
"""
path = os.path.join(SparkFiles.getRootDirectory(), filename)
return os.path.abspath(path)
@classmethod
def getRootDirectory(cls):
"""
Get the root directory that contains files added through
C{SparkContext.addFile()}.
"""
if cls._is_running_on_worker:
return cls._root_directory
else:
# This will have to change if we support multiple SparkContexts:
return cls._sc._jvm.spark.SparkFiles.getRootDirectory()
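# Illustrative usage (a sketch, not part of this file; assumes a running
# SparkContext `sc` and that "lookup.txt" exists on the driver):
#
#   sc.addFile("/path/on/driver/lookup.txt")
#
#   def uses_lookup(_):
#       # On the worker, resolve the local copy of the distributed file.
#       with open(SparkFiles.get("lookup.txt")) as f:
#           return len(f.read())
#
#   sc.parallelize([1, 2, 3]).map(uses_lookup).collect()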
| apache-2.0 |
srsman/odoo | addons/account_payment/account_move_line.py | 241 | 4455 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from operator import itemgetter
class account_move_line(osv.osv):
_inherit = "account.move.line"
# delegate to parent, used for local fields.function redefinition
def _amount_to_pay(self, cr, uid, ids, field_names, args, context=None):
return {
id: value['amount_residual']
for id, value in self._amount_residual(cr, uid, ids, field_names, args,
context=context).items()
}
def _to_pay_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
line_obj = self.pool.get('account.move.line')
query = line_obj._query_get(cr, uid, context={})
where = ' and '.join(map(lambda x: '''(SELECT
CASE WHEN l.amount_currency < 0
THEN - l.amount_currency
ELSE l.credit
END - coalesce(sum(pl.amount_currency), 0)
FROM payment_line pl
INNER JOIN payment_order po ON (pl.order_id = po.id)
WHERE move_line_id = l.id
AND po.state != 'cancel'
) %(operator)s %%s ''' % {'operator': x[1]}, args))
sql_args = tuple(map(itemgetter(2), args))
cr.execute(('''SELECT id
FROM account_move_line l
WHERE account_id IN (select id
FROM account_account
WHERE type=%s AND active)
AND reconcile_id IS null
AND credit > 0
AND ''' + where + ' and ' + query), ('payable',)+sql_args )
res = cr.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', map(lambda x:x[0], res))]
def line2bank(self, cr, uid, ids, payment_type=None, context=None):
"""
        Try to return, for each Ledger Posting line, a corresponding bank
        account according to the payment type. This works using one of
        the banks of the partner defined on the invoice possibly
        associated with the line.
Return the first suitable bank for the corresponding partner.
"""
payment_mode_obj = self.pool.get('payment.mode')
line2bank = {}
if not ids:
return {}
bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type,
context=context)
for line in self.browse(cr, uid, ids, context=context):
line2bank[line.id] = False
if line.invoice and line.invoice.partner_bank_id:
line2bank[line.id] = line.invoice.partner_bank_id.id
elif line.partner_id:
if not line.partner_id.bank_ids:
line2bank[line.id] = False
else:
for bank in line.partner_id.bank_ids:
if bank.state in bank_type:
line2bank[line.id] = bank.id
break
if not line2bank.get(line.id) and line.partner_id.bank_ids:
line2bank[line.id] = line.partner_id.bank_ids[0].id
else:
raise osv.except_osv(_('Error!'), _('There is no partner defined on the entry line.'))
return line2bank
_columns = {
'amount_to_pay': fields.function(_amount_to_pay,
type='float', string='Amount to pay', fnct_search=_to_pay_search),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Andrei-Stepanov/avocado-vt | virttest/libvirt_xml/devices/interface.py | 12 | 9271 | """
interface device support class(es)
http://libvirt.org/formatdomain.html#elementsNICS
http://libvirt.org/formatnwfilter.html#nwfconceptsvars
"""
from virttest.libvirt_xml import accessors, xcepts
from virttest.libvirt_xml.devices import base, librarian
class Interface(base.TypedDeviceBase):
__slots__ = ('source', 'mac_address', 'bandwidth',
'model', 'link_state', 'target',
'driver', 'address', 'boot_order',
'filterref', 'backend', 'virtualport_type')
def __init__(self, type_name, virsh_instance=base.base.virsh):
super(Interface, self).__init__(device_tag='interface',
type_name=type_name,
virsh_instance=virsh_instance)
accessors.XMLElementDict(property_name="source",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='source')
accessors.XMLElementDict(property_name="target",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='target')
accessors.XMLElementDict(property_name="backend",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='backend')
accessors.XMLAttribute(property_name="mac_address",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='mac',
attribute='address')
accessors.XMLAttribute(property_name="link_state",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='link',
attribute='state')
accessors.XMLAttribute(property_name="boot_order",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='boot',
attribute='order')
accessors.XMLElementNest("bandwidth", self,
parent_xpath='/',
tag_name='bandwidth',
subclass=self.Bandwidth,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementNest("driver", self,
parent_xpath='/',
tag_name='driver',
subclass=self.Driver,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementNest("filterref", self,
parent_xpath='/',
tag_name='filterref',
subclass=self.Filterref,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLAttribute(property_name="model",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='model',
attribute='type')
accessors.XMLElementNest('address', self, parent_xpath='/',
tag_name='address', subclass=self.Address,
subclass_dargs={'type_name': 'pci',
'virsh_instance': virsh_instance})
accessors.XMLAttribute('virtualport_type', self, parent_xpath='/',
tag_name='virtualport', attribute='type')
# For convenience
Address = librarian.get('address')
def new_bandwidth(self, **dargs):
"""
        Return a new interface bandwidth instance from dargs
"""
new_one = self.Bandwidth(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def new_driver(self, **dargs):
"""
        Return a new interface driver instance from dargs
"""
new_one = self.Driver(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def new_iface_address(self, **dargs):
"""
Return a new interface Address instance and set properties from dargs
"""
new_one = self.Address("pci", virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def new_filterref(self, **dargs):
"""
        Return a new interface filterref instance from dargs
"""
new_one = self.Filterref(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
class Bandwidth(base.base.LibvirtXMLBase):
"""
Interface bandwidth xml class.
Properties:
inbound:
dict. Keys: average, peak, floor, burst
outbound:
dict. Keys: average, peak, floor, burst
"""
__slots__ = ("inbound", "outbound")
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLElementDict("inbound", self, parent_xpath="/",
tag_name="inbound")
accessors.XMLElementDict("outbound", self, parent_xpath="/",
tag_name="outbound")
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<bandwidth/>'
class Driver(base.base.LibvirtXMLBase):
"""
Interface Driver xml class.
Properties:
            driver_attr:
                dict. Attributes of the driver element itself
            driver_host:
                dict. Keys: csum, gso, tso4, tso6, ecn, ufo
            driver_guest:
                dict. Keys: csum, gso, tso4, tso6, ecn, ufo
"""
__slots__ = ("driver_attr", "driver_host", "driver_guest")
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLElementDict("driver_attr", self, parent_xpath="/",
tag_name="driver")
accessors.XMLElementDict("driver_host", self, parent_xpath="/",
tag_name="host")
accessors.XMLElementDict("driver_guest", self, parent_xpath="/",
tag_name="guest")
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<driver/>'
class Filterref(base.base.LibvirtXMLBase):
"""
Interface filterref xml class.
Properties:
name:
string. filter name
parameters:
list. parameters element dict list
"""
__slots__ = ("name", "parameters")
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLAttribute(property_name="name",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='filterref',
attribute='filter')
accessors.XMLElementList(property_name='parameters',
libvirtxml=self,
parent_xpath='/',
marshal_from=self.marshal_from_parameter,
marshal_to=self.marshal_to_parameter)
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<filterref/>'
@staticmethod
def marshal_from_parameter(item, index, libvirtxml):
"""Convert a dictionary into a tag + attributes"""
del index # not used
del libvirtxml # not used
if not isinstance(item, dict):
raise xcepts.LibvirtXMLError("Expected a dictionary of parameter "
"attributes, not a %s"
% str(item))
# return copy of dict, not reference
return ('parameter', dict(item))
@staticmethod
def marshal_to_parameter(tag, attr_dict, index, libvirtxml):
"""Convert a tag + attributes into a dictionary"""
del index # not used
del libvirtxml # not used
if tag != 'parameter':
return None # skip this one
return dict(attr_dict) # return copy of dict, not reference
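# Illustrative usage sketch (not part of the original module; device values
# such as "virbr0" and the MAC address are assumptions):
#
#   iface = Interface(type_name='bridge')
#   iface.source = {'bridge': 'virbr0'}
#   iface.mac_address = '52:54:00:aa:bb:cc'
#   iface.model = 'virtio'
#   iface.bandwidth = iface.new_bandwidth(inbound={'average': '1000'},
#                                         outbound={'average': '1000'})
#   # The resulting XML can then be attached to a guest's devices list.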
| gpl-2.0 |
Natim/sentry | src/sentry/migrations/0051_auto__del_pendingprojectmember__del_unique_pendingprojectmember_projec.py | 36 | 21164 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'PendingProjectMember', fields ['project', 'email']
db.delete_unique('sentry_pendingprojectmember', ['project_id', 'email'])
# Deleting model 'PendingProjectMember'
db.delete_table('sentry_pendingprojectmember')
# Adding model 'PendingTeamMember'
db.create_table('sentry_pendingteammember', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('team', self.gf('sentry.db.models.fields.FlexibleForeignKey')(related_name='pending_member_set', to=orm['sentry.Team'])),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('type', self.gf('django.db.models.fields.IntegerField')(default=0)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['PendingTeamMember'])
# Adding unique constraint on 'PendingTeamMember', fields ['team', 'email']
db.create_unique('sentry_pendingteammember', ['team_id', 'email'])
def backwards(self, orm):
# Removing unique constraint on 'PendingTeamMember', fields ['team', 'email']
db.delete_unique('sentry_pendingteammember', ['team_id', 'email'])
# Adding model 'PendingProjectMember'
db.create_table('sentry_pendingprojectmember', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(related_name='pending_member_set', to=orm['sentry.Project'])),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('type', self.gf('django.db.models.fields.IntegerField')(default=0)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
))
db.send_create_signal('sentry', ['PendingProjectMember'])
# Adding unique constraint on 'PendingProjectMember', fields ['project', 'email']
db.create_unique('sentry_pendingprojectmember', ['project_id', 'email'])
# Deleting model 'PendingTeamMember'
db.delete_table('sentry_pendingteammember')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 3, 29, 45, 137609)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 3, 29, 45, 137481)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
haarcuba/testix | test/test_argumentexpectations.py | 1 | 3090 | import hypothesis
import hypothesis.strategies as strategies
import pytest
from testix import fake
from testix import scenario
from testix import testixexception
from testix import argumentexpectations
class TestArgumentExpectations:
@hypothesis.given(A=strategies.integers(),B=strategies.integers())
def test_argument_equals_raises_when_called_with_wrong_arguments(self, A, B):
hypothesis.assume( A != B )
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( A ) >> 'first'
s.some_object( B ) >> 'second'
assert fakeObject( A ) == 'first'
with pytest.raises( testixexception.ExpectationException ):
fakeObject( A )
def test_argument_is_fake_object_with_path( self ):
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( argumentexpectations.ArgumentIsFakeObjectWithPath( 'another_fake_object' ) ) >> 'the result'
s.some_object( argumentexpectations.ArgumentIsFakeObjectWithPath( 'yet_another' ) ) >> 'another result'
assert fakeObject(fake.Fake('another_fake_object')) == 'the result'
assert fakeObject(fake.Fake('yet_another')) == 'another result'
def test_FakeObjectExpectation( self ):
fakeObject = fake.Fake('some_object')
fakeArgument = fake.Fake('fake_argument')
with scenario.Scenario() as s:
s.some_object(fake.Fake('fake_argument'))
fakeObject( fakeArgument )
def test_IgnoreArgument( self ):
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( 10 ) >> 'first'
s.some_object( argumentexpectations.IgnoreArgument() ) >> 'second'
assert fakeObject( 10 ) == 'first'
assert fakeObject( "this doens't matter" ) == 'second'
def test_IgnoreCallDetails(self):
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( 10 ) >> 'first'
s.some_object( argumentexpectations.IgnoreCallDetails() ) >> 'second'
s.another_object(argumentexpectations.IgnoreCallDetails())
assert fakeObject( 10 ) == 'first'
assert fakeObject( "this doens't matter", "this doens'nt either", this='does not matter also', that='neither' ) == 'second'
with pytest.raises( testixexception.ExpectationException ):
fakeObject("this is an unexpected call: verify that IgnoreCallDetails() still leaves the Fake object's path verification intact")
def test_KeywordArguments( self ):
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( 10, name = 'Lancelot' ).returns( 'first' )
s.some_object( 11, name = 'Galahad' ).returns( 'second' )
assert fakeObject( 10, name = 'Lancelot' ) == 'first'
with pytest.raises( testixexception.ExpectationException ):
fakeObject( 11, name = 'not Galahad' )
| mit |
kirti3192/spoken-website | cms/admin.py | 2 | 2857 | from django.template.defaultfilters import slugify
from django.contrib import admin
from cms.models import *
from django.conf import settings
from PIL import Image
import glob, os
from cms.forms import *
class SubNavInline(admin.TabularInline):
model = SubNav
extra = 0
class NavAdmin(admin.ModelAdmin):
list_display = ('nav_title', 'permalink', 'position', 'target_new', 'visible', 'created')
inlines = [SubNavInline]
class BlockAdmin(admin.ModelAdmin):
form = AdminBodyForm
list_display = ('title', 'block_location', 'position', 'visible', 'created')
class PageAdmin(admin.ModelAdmin):
form = CmsPageForm
list_display = ('title', 'permalink', 'target_new', 'visible', 'created')
class EventAdmin(admin.ModelAdmin):
form = AdminBodyForm
exclude = ('user',)
list_display = ('user', 'title', 'body', 'event_date', 'source_link', 'created')
def save_model(self, request, obj, form, change):
obj.user = request.user
obj.save()
class NotificationAdmin(admin.ModelAdmin):
exclude = ('user',)
list_display = ('user', 'body', 'start_date', 'expiry_date', 'updated')
def save_model(self, request, obj, form, change):
obj.user = request.user
obj.save()
class NewsTypeAdmin(admin.ModelAdmin):
exclude = ('slug',)
list_display = ('name',)
def save_model(self, request, obj, form, change):
obj.slug = slugify(request.POST['name'])
obj.save()
class NewsAdmin(admin.ModelAdmin):
#form = AdminBodyForm
form = NewsAdditionaFieldAdmin
exclude = ('created_by', 'slug')
list_display = ('title', 'weight','state','picture', 'body', 'url', 'url_title', 'created_by', 'created')
list_filter = ('news_type','state')
def save_model(self, request, obj, form, change):
obj.created_by = request.user
obj.picture = None
obj.slug = slugify(request.POST['title'])
obj.save()
if 'picture' in request.FILES and request.FILES['picture']:
obj.picture = request.FILES['picture']
obj.save()
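            # The block below generates a 128x128 thumbnail for the uploaded
            # picture (skipped for PDFs and extension-less files) and stores it
            # under MEDIA_ROOT/news/<id>/<id>-thumb.<ext>.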
size = 128, 128
filename = str(obj.picture)
file, ext = os.path.splitext(filename)
if ext != '.pdf' and ext != '':
im = Image.open(obj.picture)
im.thumbnail(size, Image.ANTIALIAS)
ext = ext[1:]
mimeType = ext.upper()
if mimeType == 'JPG':
mimeType = 'JPEG'
im.save(settings.MEDIA_ROOT + "news/" + str(obj.id) + "/" + str(obj.id) + "-thumb." + ext, mimeType)
admin.site.register(Block, BlockAdmin)
admin.site.register(Nav, NavAdmin)
admin.site.register(Page, PageAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Notification, NotificationAdmin)
admin.site.register(NewsType, NewsTypeAdmin)
admin.site.register(News, NewsAdmin)
| gpl-3.0 |
izelnakri/moses | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/buildbot/buildbot_run.py | 270 | 8338 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import filecmp
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
_ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng'
def PrepareAndroidTree():
"""Prepare an Android tree to run 'android' format tests."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber Android checkout@@@'
shutil.rmtree(ANDROID_DIR)
# (Re)create the directory so that the following steps will succeed.
if not os.path.isdir(ANDROID_DIR):
os.mkdir(ANDROID_DIR)
# We use a manifest from the gyp project listing pinned revisions of AOSP to
# use, to ensure that we test against a stable target. This needs to be
# updated to pick up new build system changes sometimes, so we must test if
# it has changed.
manifest_filename = 'aosp_manifest.xml'
gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename)
android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests',
manifest_filename)
manifest_is_current = (os.path.isfile(android_manifest) and
filecmp.cmp(gyp_manifest, android_manifest))
if not manifest_is_current:
# It's safe to repeat these steps, so just do them again to make sure we are
# in a good state.
print '@@@BUILD_STEP Initialize Android checkout@@@'
CallSubProcess(
['repo', 'init',
'-u', 'https://android.googlesource.com/platform/manifest',
'-b', 'master',
'-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
cwd=ANDROID_DIR)
shutil.copy(gyp_manifest, android_manifest)
print '@@@BUILD_STEP Sync Android@@@'
CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename],
cwd=ANDROID_DIR)
# If we already built the system image successfully and didn't sync to a new
# version of the source, skip running the build again as it's expensive even
# when there's nothing to do.
system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic',
'system.img')
if manifest_is_current and os.path.isfile(system_img):
return
print '@@@BUILD_STEP Build Android@@@'
CallSubProcess(
['/bin/bash',
'-c', '%s && make -j4' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StartAndroidEmulator():
"""Start an android emulator from the built android tree."""
print '@@@BUILD_STEP Start Android emulator@@@'
CallSubProcess(['/bin/bash', '-c',
'%s && adb kill-server ' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
# If taskset is available, use it to force adbd to run only on one core, as,
# sadly, it improves its reliability (see crbug.com/268450).
adbd_wrapper = ''
with open(os.devnull, 'w') as devnull_fd:
if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0:
adbd_wrapper = 'taskset -c 0'
CallSubProcess(['/bin/bash', '-c',
'%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)],
cwd=ANDROID_DIR)
subprocess.Popen(
['/bin/bash', '-c',
'%s && emulator -no-window' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
CallSubProcess(
['/bin/bash', '-c',
'%s && adb wait-for-device' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StopAndroidEmulator():
"""Stop all android emulators."""
print '@@@BUILD_STEP Stop Android emulator@@@'
# If this fails, it's because there is no emulator running.
subprocess.call(['pkill', 'emulator.*'])
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
    0 for success, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
# The Android gyp bot runs on linux so this must be tested first.
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
PrepareAndroidTree()
StartAndroidEmulator()
try:
retcode += GypTestFormat('android')
finally:
StopAndroidEmulator()
elif sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| mit |
somic/paasta | tests/cli/test_cmds_get_latest_deployment.py | 1 | 1863 | # Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import MagicMock
from mock import patch
from paasta_tools.cli.cmds import get_latest_deployment
def test_get_latest_deployment(capfd):
mock_args = MagicMock(
service='',
deploy_group='',
soa_dir='',
)
with patch(
'paasta_tools.cli.cmds.get_latest_deployment.get_currently_deployed_sha',
return_value="FAKE_SHA", autospec=True,
), patch(
'paasta_tools.cli.cmds.get_latest_deployment.validate_service_name', autospec=True,
):
assert get_latest_deployment.paasta_get_latest_deployment(mock_args) == 0
assert "FAKE_SHA" in capfd.readouterr()[0]
def test_get_latest_deployment_no_deployment_tag(capfd):
mock_args = MagicMock(
service='fake_service',
deploy_group='fake_deploy_group',
soa_dir='',
)
with patch(
'paasta_tools.cli.cmds.get_latest_deployment.get_currently_deployed_sha',
return_value=None, autospec=True,
), patch(
'paasta_tools.cli.cmds.get_latest_deployment.validate_service_name', autospec=True,
):
assert get_latest_deployment.paasta_get_latest_deployment(mock_args) == 1
assert "A deployment could not be found for fake_deploy_group in fake_service" in \
capfd.readouterr()[1]
| apache-2.0 |
sunyihuan326/DeltaLab | shuwei_fengge/practice_one/model/tt.py | 1 | 3958 | # coding:utf-8
'''
Created on 2017/12/8.
@author: chk01
'''
import scipy.io as scio
# data = scio.loadmat(file)
# from sklearn.model_selection import train_test_split
#
# print(data['X'].shape)
# print(data['Y'].shape)
# X_train, X_test, Y_train, Y_test = train_test_split(data['X'], data['Y'], test_size=0.2)
# print(X_train.shape)
# print(Y_train.shape)
# print(X_test.shape)
# print(Y_test.shape)
import numpy as np
import scipy.io as scio
import tensorflow as tf
from practice_one.model.utils import *
from tensorflow.contrib.factorization import KMeans
from sklearn.ensemble import AdaBoostClassifier
# print(np.e)
# print(-np.log(np.e / (np.e + 8)))
# ZL = tf.Variable([[0, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)
# print(ZL.shape)
# Y = tf.constant([[0, 0, 0, 0, 0, 0, 1, 0, 0]], dtype=tf.float32)
# Y = tf.get_variable(dtype=tf.float32, shape=(1, 2), name='tt',initializer=tf.contrib.layers.xavier_initializer())
# cor_op = tf.argmax(Y, 1)
# pre_op = tf.argmax(ZL, 1)
# cost1 = tf.square(tf.cast(cor_op - pre_op, dtype=tf.float32))
# lost = tf.reduce_mean(
# cost1 + tf.nn.softmax_cross_entropy_with_logits(logits=ZL,
# labels=Y))
# # loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))
# train_op = tf.train.GradientDescentOptimizer(0.1).minimize(lost)
# init = tf.global_variables_initializer()
# with tf.Session() as sess:
# sess.run(init)
# for i in range(30):
# sess.run(train_op)
# print(sess.run(lost))
# print(sess.run(tf.reduce_mean(cost1)))
# print(sess.run(tf.argmax(ZL, 1)))
# 1.37195
# 2.37195
# parameters = scio.loadmat('kmeans_parameters.mat')
# X_train, X_test, Y_train, Y_test = load_data("face_1_channel_sense.mat")
# print(X_test.shape)
# num_features = 28
# num_classes = 3
#
# X = tf.placeholder(tf.float32, shape=[None, num_features])
# Y = tf.placeholder(tf.float32, shape=[None, num_classes])
#
# kmeans = KMeans(inputs=X, num_clusters=300,
# distance_metric='cosine',
# use_mini_batch=True)
#
# (all_scores, cluster_idx, scores, cluster_centers_initialized, cluster_centers_var, init_op,
# train_op) = kmeans.training_graph()
# cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple
#
# # Initialize the variables (i.e. assign their default value)
# init_vars = tf.global_variables_initializer()
#
# # Start TensorFlow session
# sess = tf.Session()
# sess.run(init_vars, feed_dict={X: X_test})
# sess.run(init_op, feed_dict={X: X_test})
# cl = sess.run(cluster_idx, feed_dict={X: X_train})
# print("cl",cl)
# print(len(cl))
# parameters = scio.loadmat('kmeans_parameters.mat')
# print("parameters",parameters['labels_map'][0])
# labels_map = tf.convert_to_tensor(parameters['labels_map'][0])
#
# # Evaluation ops
# # Lookup: centroid_id -> label
# cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
#
# # Test Model
# test_x, test_y = X_test, Y_test
# with sess.as_default():
# cluster_label = cluster_label.eval(feed_dict={X: X_test})
#
# c = 0
# for i in range(len(cluster_label)):
# if abs(cluster_label[i] - np.argmax(Y_train, 1)[i]) > 1:
# c += 1. / len(cluster_label)
# print(c)
# tt = scio.loadmat("tt_cluster_label.mat")
# sense = scio.loadmat("sense_cluster.mat")
# tt = tt["tt"][0]
# se = sense["sense"][0]
# for i in range(len(tt)):
# if tt[i] != se[i]:
# print(i, tt[i], se[i])
# # print('correct_prediction', correct_prediction)
# index = [1, 2, 0, 2, 1, 2]
# indice = [[0, 2, 1, 1, 1], [0, 1, 1, 2, 1]]
# a = tf.one_hot(index, 3, axis=0)
# b = tf.one_hot(indice, 3, axis=1)
# with tf.Session() as sess:
# print(sess.run(a))
# print("b", sess.run(b))
file = "face_1_channel_sense"
X_train, X_test, Y_train, Y_test = load_data(file)
clf = AdaBoostClassifier(n_estimators=100)
Y_train = np.argmax(Y_train, 1)
c = clf.fit(X_train, Y_train)
print(c)
| mit |
huanchenz/STX-h-store | third_party/python/boto/ec2/autoscale/policy.py | 24 | 5549 | # Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2011 Jann Kleen
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.elb.listelement import ListElement
class Alarm(object):
def __init__(self, connection=None):
self.connection = connection
self.name = None
self.alarm_arn = None
def __repr__(self):
return 'Alarm:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'AlarmName':
self.name = value
elif name == 'AlarmARN':
self.alarm_arn = value
else:
setattr(self, name, value)
class AdjustmentType(object):
def __init__(self, connection=None):
self.connection = connection
self.adjustment_types = ListElement([])
def __repr__(self):
return 'AdjustmentType:%s' % self.adjustment_types
def startElement(self, name, attrs, connection):
if name == 'AdjustmentType':
return self.adjustment_types
def endElement(self, name, value, connection):
return
class MetricCollectionTypes(object):
class BaseType(object):
arg = ''
def __init__(self, connection):
self.connection = connection
self.val = None
def __repr__(self):
return '%s:%s' % (self.arg, self.val)
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == self.arg:
self.val = value
class Metric(BaseType):
arg = 'Metric'
class Granularity(BaseType):
arg = 'Granularity'
def __init__(self, connection=None):
self.connection = connection
self.metrics = []
self.granularities = []
def __repr__(self):
return 'MetricCollectionTypes:<%s, %s>' % (self.metrics, self.granularities)
def startElement(self, name, attrs, connection):
if name == 'Granularities':
self.granularities = ResultSet([('member', self.Granularity)])
return self.granularities
elif name == 'Metrics':
self.metrics = ResultSet([('member', self.Metric)])
return self.metrics
def endElement(self, name, value, connection):
return
class ScalingPolicy(object):
def __init__(self, connection=None, **kwargs):
"""
Scaling Policy
:type name: str
:param name: Name of scaling policy.
:type adjustment_type: str
:param adjustment_type: Specifies the type of adjustment. Valid values are `ChangeInCapacity`, `ExactCapacity` and `PercentChangeInCapacity`.
:type as_name: str or int
:param as_name: Name or ARN of the Auto Scaling Group.
:type scaling_adjustment: int
:param scaling_adjustment: Value of adjustment (type specified in `adjustment_type`).
:type cooldown: int
:param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends.
"""
self.name = kwargs.get('name', None)
self.adjustment_type = kwargs.get('adjustment_type', None)
self.as_name = kwargs.get('as_name', None)
self.scaling_adjustment = kwargs.get('scaling_adjustment', None)
self.cooldown = kwargs.get('cooldown', None)
self.connection = connection
def __repr__(self):
return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name,
self.as_name,
self.adjustment_type)
def startElement(self, name, attrs, connection):
if name == 'Alarms':
self.alarms = ResultSet([('member', Alarm)])
return self.alarms
def endElement(self, name, value, connection):
if name == 'PolicyName':
self.name = value
elif name == 'AutoScalingGroupName':
self.as_name = value
elif name == 'PolicyARN':
self.policy_arn = value
elif name == 'ScalingAdjustment':
self.scaling_adjustment = int(value)
elif name == 'Cooldown':
self.cooldown = int(value)
elif name == 'AdjustmentType':
self.adjustment_type = value
def delete(self):
return self.connection.delete_policy(self.name, self.as_name)
| gpl-3.0 |
ebar0n/django | django/contrib/postgres/validators.py | 87 | 2675 | from django.core.exceptions import ValidationError
from django.core.validators import (
MaxLengthValidator, MaxValueValidator, MinLengthValidator,
MinValueValidator,
)
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _, ngettext_lazy
class ArrayMaxLengthValidator(MaxLengthValidator):
message = ngettext_lazy(
'List contains %(show_value)d item, it should contain no more than %(limit_value)d.',
'List contains %(show_value)d items, it should contain no more than %(limit_value)d.',
'limit_value')
class ArrayMinLengthValidator(MinLengthValidator):
message = ngettext_lazy(
'List contains %(show_value)d item, it should contain no fewer than %(limit_value)d.',
'List contains %(show_value)d items, it should contain no fewer than %(limit_value)d.',
'limit_value')
@deconstructible
class KeysValidator:
"""A validator designed for HStore to require/restrict keys."""
messages = {
'missing_keys': _('Some keys were missing: %(keys)s'),
'extra_keys': _('Some unknown keys were provided: %(keys)s'),
}
strict = False
def __init__(self, keys, strict=False, messages=None):
self.keys = set(keys)
self.strict = strict
if messages is not None:
self.messages = {**self.messages, **messages}
def __call__(self, value):
keys = set(value)
missing_keys = self.keys - keys
if missing_keys:
raise ValidationError(
self.messages['missing_keys'],
code='missing_keys',
params={'keys': ', '.join(missing_keys)},
)
if self.strict:
extra_keys = keys - self.keys
if extra_keys:
raise ValidationError(
self.messages['extra_keys'],
code='extra_keys',
params={'keys': ', '.join(extra_keys)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.keys == other.keys and
self.messages == other.messages and
self.strict == other.strict
)
class RangeMaxValueValidator(MaxValueValidator):
def compare(self, a, b):
return a.upper is None or a.upper > b
message = _('Ensure that this range is completely less than or equal to %(limit_value)s.')
class RangeMinValueValidator(MinValueValidator):
def compare(self, a, b):
return a.lower is None or a.lower < b
message = _('Ensure that this range is completely greater than or equal to %(limit_value)s.')
| bsd-3-clause |
Hasimir/brython | www/src/Lib/test/test_sched.py | 23 | 6660 | #!/usr/bin/env python
import queue
import sched
import time
import unittest
from test import support
try:
import threading
except ImportError:
threading = None
TIMEOUT = 10
class Timer:
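    # Deterministic fake clock: time() only advances when the test calls
    # advance(), so sleep() in the scheduler thread blocks until the test
    # allows it to proceed.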
def __init__(self):
self._cond = threading.Condition()
self._time = 0
self._stop = 0
def time(self):
with self._cond:
return self._time
# increase the time but not beyond the established limit
def sleep(self, t):
assert t >= 0
with self._cond:
t += self._time
while self._stop < t:
self._time = self._stop
self._cond.wait()
self._time = t
# advance time limit for user code
def advance(self, t):
assert t >= 0
with self._cond:
self._stop += t
self._cond.notify_all()
class TestCase(unittest.TestCase):
def test_enter(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.5, 0.4, 0.3, 0.2, 0.1]:
z = scheduler.enter(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.1, 0.2, 0.3, 0.4, 0.5])
def test_enterabs(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_enter_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
scheduler.enter(1, 1, fun, (1,))
scheduler.enter(3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
for x in [4, 5, 2]:
z = scheduler.enter(x - 1, 1, fun, (x,))
timer.advance(2)
self.assertEqual(q.get(timeout=TIMEOUT), 2)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 5)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 5)
def test_priority(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for priority in [1, 2, 3, 4, 5]:
z = scheduler.enterabs(0.01, priority, fun, (priority,))
scheduler.run()
self.assertEqual(l, [1, 2, 3, 4, 5])
def test_cancel(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
event1 = scheduler.enterabs(now + 0.01, 1, fun, (0.01,))
event2 = scheduler.enterabs(now + 0.02, 1, fun, (0.02,))
event3 = scheduler.enterabs(now + 0.03, 1, fun, (0.03,))
event4 = scheduler.enterabs(now + 0.04, 1, fun, (0.04,))
event5 = scheduler.enterabs(now + 0.05, 1, fun, (0.05,))
scheduler.cancel(event1)
scheduler.cancel(event5)
scheduler.run()
self.assertEqual(l, [0.02, 0.03, 0.04])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_cancel_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
now = timer.time()
event1 = scheduler.enterabs(now + 1, 1, fun, (1,))
event2 = scheduler.enterabs(now + 2, 1, fun, (2,))
event4 = scheduler.enterabs(now + 4, 1, fun, (4,))
event5 = scheduler.enterabs(now + 5, 1, fun, (5,))
event3 = scheduler.enterabs(now + 3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
scheduler.cancel(event2)
scheduler.cancel(event5)
timer.advance(1)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 4)
def test_empty(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
self.assertTrue(scheduler.empty())
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
self.assertFalse(scheduler.empty())
scheduler.run()
self.assertTrue(scheduler.empty())
def test_queue(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
e5 = scheduler.enterabs(now + 0.05, 1, fun)
e1 = scheduler.enterabs(now + 0.01, 1, fun)
e2 = scheduler.enterabs(now + 0.02, 1, fun)
e4 = scheduler.enterabs(now + 0.04, 1, fun)
e3 = scheduler.enterabs(now + 0.03, 1, fun)
        # queue property is supposed to return an ordered list of
# upcoming events
self.assertEqual(scheduler.queue, [e1, e2, e3, e4, e5])
def test_args_kwargs(self):
flag = []
def fun(*a, **b):
flag.append(None)
self.assertEqual(a, (1,2,3))
self.assertEqual(b, {"foo":1})
scheduler = sched.scheduler(time.time, time.sleep)
z = scheduler.enterabs(0.01, 1, fun, argument=(1,2,3), kwargs={"foo":1})
scheduler.run()
self.assertEqual(flag, [None])
def test_run_non_blocking(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [10, 9, 8, 7, 6]:
scheduler.enter(x, 1, fun, (x,))
scheduler.run(blocking=False)
self.assertEqual(l, [])
def test_main():
support.run_unittest(TestCase)
if __name__ == "__main__":
test_main()
| bsd-3-clause |
t794104/ansible | test/units/modules/network/junos/test_junos_command.py | 68 | 6199 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from lxml.etree import fromstring
except ImportError:
from xml.etree.ElementTree import fromstring
from units.compat.mock import patch
from ansible.modules.network.junos import junos_command
from units.modules.utils import set_module_args
from .junos_module import TestJunosModule, load_fixture
RPC_CLI_MAP = {
'get-software-information': 'show version'
}
class TestJunosCommandModule(TestJunosModule):
module = junos_command
def setUp(self):
super(TestJunosCommandModule, self).setUp()
self.mock_conn = patch('ansible.module_utils.network.junos.junos.Connection')
self.conn = self.mock_conn.start()
self.mock_netconf = patch('ansible.module_utils.network.junos.junos.NetconfConnection')
self.netconf_conn = self.mock_netconf.start()
self.mock_exec_rpc = patch('ansible.modules.network.junos.junos_command.exec_rpc')
self.exec_rpc = self.mock_exec_rpc.start()
self.mock_netconf_rpc = patch('ansible.module_utils.network.common.netconf.NetconfConnection')
self.netconf_rpc = self.mock_netconf_rpc.start()
self.mock_get_connection = patch('ansible.modules.network.junos.junos_command.get_connection')
self.get_connection = self.mock_get_connection.start()
self.mock_get_capabilities = patch('ansible.modules.network.junos.junos_command.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'network_api': 'netconf'}
def tearDown(self):
super(TestJunosCommandModule, self).tearDown()
self.mock_conn.stop()
self.mock_netconf.stop()
self.mock_get_capabilities.stop()
self.mock_netconf_rpc.stop()
self.mock_exec_rpc.stop()
self.mock_get_connection.stop()
def load_fixtures(self, commands=None, format='text', changed=False):
def load_from_file(*args, **kwargs):
element = fromstring(args[1])
if element.text:
path = str(element.text)
else:
path = RPC_CLI_MAP[str(element.tag)]
filename = path.replace(' ', '_')
filename = '%s_%s.txt' % (filename, format)
return load_fixture(filename)
self.exec_rpc.side_effect = load_from_file
def test_junos_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Hostname:'))
def test_junos_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Hostname:'))
def test_junos_command_wait_for(self):
wait_for = 'result[0] contains "Junos:"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_junos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.exec_rpc.call_count, 10)
def test_junos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.exec_rpc.call_count, 2)
def test_junos_command_match_any(self):
wait_for = ['result[0] contains "Junos:"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_junos_command_match_all(self):
wait_for = ['result[0] contains "Junos:"',
'result[0] contains "JUNOS Software Release"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_junos_command_match_all_failure(self):
wait_for = ['result[0] contains "Junos:"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
def test_junos_command_simple_json(self):
set_module_args(dict(commands=['show version'], display='json'))
result = self.execute_module(format='json')
self.assertEqual(len(result['stdout']), 1)
self.assertTrue("software-information" in result['stdout'][0])
def test_junos_command_simple_rpc_text(self):
set_module_args(dict(rpcs=['get-software-information'], display='text'))
result = self.execute_module(format='text')
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Hostname:'))
def test_junos_command_simple_rpc_json(self):
set_module_args(dict(rpcs=['get-software-information'], display='json'))
result = self.execute_module(format='json')
self.assertEqual(len(result['stdout']), 1)
self.assertTrue("software-information" in result['stdout'][0])
| gpl-3.0 |
laayis/yowsup | yowsup/demos/echoclient/layer.py | 60 | 1646 | from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
class EchoLayer(YowInterfaceLayer):
@ProtocolEntityCallback("message")
def onMessage(self, messageProtocolEntity):
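        # Print info about the incoming message, echo it back to its sender,
        # then send delivery and read acknowledgements.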
if messageProtocolEntity.getType() == 'text':
self.onTextMessage(messageProtocolEntity)
elif messageProtocolEntity.getType() == 'media':
self.onMediaMessage(messageProtocolEntity)
self.toLower(messageProtocolEntity.forward(messageProtocolEntity.getFrom()))
self.toLower(messageProtocolEntity.ack())
self.toLower(messageProtocolEntity.ack(True))
@ProtocolEntityCallback("receipt")
def onReceipt(self, entity):
self.toLower(entity.ack())
def onTextMessage(self,messageProtocolEntity):
# just print info
print("Echoing %s to %s" % (messageProtocolEntity.getBody(), messageProtocolEntity.getFrom(False)))
def onMediaMessage(self, messageProtocolEntity):
# just print info
if messageProtocolEntity.getMediaType() == "image":
print("Echoing image %s to %s" % (messageProtocolEntity.url, messageProtocolEntity.getFrom(False)))
elif messageProtocolEntity.getMediaType() == "location":
print("Echoing location (%s, %s) to %s" % (messageProtocolEntity.getLatitude(), messageProtocolEntity.getLongitude(), messageProtocolEntity.getFrom(False)))
elif messageProtocolEntity.getMediaType() == "vcard":
print("Echoing vcard (%s, %s) to %s" % (messageProtocolEntity.getName(), messageProtocolEntity.getCardData(), messageProtocolEntity.getFrom(False)))
| gpl-3.0 |
dgarros/ansible | lib/ansible/modules/cloud/cloudstack/cs_portforward.py | 51 | 14301 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
      - Name of the virtual machine the port forwarding rule is created for.
- Required if C(state=present).
required: false
default: null
state:
description:
- State of the port forwarding rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
protocol:
description:
- Protocol of the port forwarding rule.
required: false
default: 'tcp'
choices: [ 'tcp', 'udp' ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
- If not specified equal C(public_port).
required: false
default: null
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
- If not specified equal C(private_port).
required: false
default: null
open_firewall:
description:
- Whether the firewall rule for public port should be created, while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
required: false
default: false
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
required: false
default: false
network:
description:
- Name of the network.
required: false
default: null
version_added: "2.3"
vpc:
description:
- Name of the VPC.
required: false
default: null
version_added: "2.3"
domain:
description:
- Domain the C(vm) is related to.
required: false
default: null
account:
description:
- Account the C(vm) is related to.
required: false
default: null
project:
description:
- Name of the project the C(vm) is located in.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
aliases: [ 'tag' ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# 1.2.3.4:80 -> web01:8080
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
# forward SSH and open firewall
- local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
# forward DNS traffic, but do not open firewall
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
# remove ssh port forwarding
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: string
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
vpc:
description: Name of the VPC.
returned: success
type: string
sample: my_vpc
network:
description: Name of the network.
returned: success
type: string
sample: dmz
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
        # these values will be cast to int
self.returns_to_int = {
'publicport': 'public_port',
'publicendport': 'public_end_port',
'privateport': 'private_port',
'privateendport': 'private_end_port',
}
self.portforwarding_rule = None
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
public_end_port = self.get_or_fallback('public_end_port', 'public_port')
private_port = self.module.params.get('private_port')
private_end_port = self.get_or_fallback('private_end_port', 'private_port')
args = {}
args['ipaddressid'] = self.get_ip_address(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
portforwarding_rules = self.cs.listPortForwardingRules(**args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule')
            self.portforwarding_rule = portforwarding_rule
return portforwarding_rule
def create_portforwarding_rule(self):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['openfirewall'] = self.module.params.get('open_firewall')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['networkid'] = self.get_network(key='id')
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
args['networkid'] = self.get_network(key='id')
if self.has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
                # API broken in 4.2.1? Workaround: remove and re-create the rule instead of updating it
# portforwarding_rule = self.cs.updatePortForwardingRule(**args)
self.absent_portforwarding_rule()
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {}
args['id'] = portforwarding_rule['id']
if not self.module.check_mode:
res = self.cs.deletePortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
network_name = self.get_network(key='name')
if network_name:
self.result['network'] = network_name
vpc_name = self.get_vpc(key='name')
if vpc_name:
self.result['vpc'] = vpc_name
if portforwarding_rule:
for search_key, return_key in self.returns_to_int.items():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
protocol= dict(choices=['tcp', 'udp'], default='tcp'),
public_port = dict(type='int', required=True),
public_end_port = dict(type='int', default=None),
private_port = dict(type='int', required=True),
private_end_port = dict(type='int', default=None),
state = dict(choices=['present', 'absent'], default='present'),
open_firewall = dict(type='bool', default=False),
vm_guest_ip = dict(default=None),
vm = dict(default=None),
vpc = dict(default=None),
network = dict(default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag'], default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Sarah-Alsinan/muypicky | lib/python3.6/site-packages/pip/_vendor/pkg_resources/__init__.py | 320 | 103230 | # coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
import itertools
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
if (3, 0) < sys.version_info < (3, 3):
msg = (
"Support for Python 3.0-3.2 has been dropped. Future versions "
"will fail here."
)
warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*' + part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
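# Behaviour sketch (illustrative, not in the original file):
#   parse_version('1.10') > parse_version('1.9')   # True under PEP 440 ordering
#   parse_version('foo-bar')                        # falls back to legacy parsing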
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
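# Illustrative sketch (not part of the original module): registering a provider
# factory for a custom loader type. ``MyLoader`` and ``MyProvider`` are
# hypothetical names used only for the example.
#
#   class MyLoader: ...
#   class MyProvider(NullProvider): ...
#   register_loader_type(MyLoader, MyProvider)
#   # get_provider() will then build a MyProvider for any module whose
#   # __loader__ is a MyLoader instance.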
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
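# Illustrative examples (not part of the original module) of how
# compatible_platforms() answers the "can this egg run here?" question.
# Platform strings are assumed to be distutils/sysconfig-style values.
#
#   compatible_platforms(None, 'linux-x86_64')                 -> True  (pure-Python code)
#   compatible_platforms('linux-x86_64', 'linux-x86_64')       -> True  (exact match)
#   compatible_platforms('macosx-10.3-ppc', 'macosx-10.9-ppc') -> True  (older minor release OK)
#   compatible_platforms('win32', 'linux-x86_64')              -> False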
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
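# Usage sketch (illustrative; assumes the named projects are installed and
# advertise these entry points):
#
#   import pkg_resources
#   dist = pkg_resources.get_distribution('setuptools')   # Distribution object
#   print(dist.project_name, dist.version, dist.location)
#   # Load a console-script entry point by group and name:
#   main = pkg_resources.load_entry_point('pip', 'console_scripts', 'pip')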
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions.  If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
If `existing=True` (default),
call on all existing ones, as well.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
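# Usage sketch for the global working set (illustrative; the module-level
# ``working_set`` instance is created during pkg_resources initialization,
# outside this excerpt):
#
#   import pkg_resources
#   # Activate the distributions needed by a requirement string:
#   pkg_resources.working_set.require('setuptools')
#   # Enumerate advertised plugins across all active distributions:
#   for ep in pkg_resources.working_set.iter_entry_points('console_scripts'):
#       print(ep.name, '->', ep.module_name)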
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (None,)
)
return not req.marker or any(extra_evals)
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version == self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
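# Illustrative sketch of Environment as a searchable snapshot of distributions
# ('plugins' is a hypothetical directory name):
#
#   env = Environment(['plugins'])          # scan a plugin directory
#   for project in env:                     # unique lower-cased project keys
#       newest = env[project][0]            # distributions are sorted newest first
#       print(project, newest.version)
#   dists, errors = working_set.find_plugins(env)   # see WorkingSet.find_plugins above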
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
        except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
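# Usage sketch for the resource-access API (illustrative; the module-level
# helpers resource_string()/resource_filename()/resource_isdir() are bound to a
# shared ResourceManager elsewhere in this module, and 'mypkg'/'data/config.json'
# are hypothetical names):
#
#   import pkg_resources
#   raw = pkg_resources.resource_string('mypkg', 'data/config.json')
#   path = pkg_resources.resource_filename('mypkg', 'data/config.json')
#   if pkg_resources.resource_isdir('mypkg', 'data'):
#       print(pkg_resources.resource_listdir('mypkg', 'data'))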
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
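# Illustrative examples of the name/version normalization helpers defined above:
#
#   safe_name('My Project?!')   -> 'My-Project-'   (non-alphanumeric runs -> '-')
#   safe_version('1.0beta2')    -> '1.0b2'         (PEP 440 normalization via packaging)
#   safe_extra('Feature-X')     -> 'feature-x'
#   to_filename('My-Project')   -> 'My_Project'    ('-' escaped for use in filenames)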
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
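# Illustrative sketch of PEP 508 environment-marker handling:
#
#   evaluate_marker('python_version >= "2.7"')  -> True/False for the running interpreter
#   invalid_marker('python_version >= "2.7"')   -> False            (marker is valid)
#   invalid_marker('this is not a marker')      -> SyntaxError instance (returned, not raised)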
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
def get_metadata(self, name):
if not self.egg_info:
return ""
value = self._get(self._fn(self.egg_info, name))
return value.decode('utf-8') if six.PY3 else value
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_unpacked_egg(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_cls = getattr(importlib_machinery, 'SourceFileLoader',
type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.6 and 3.2 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
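# Usage sketch (illustrative): discover distributions on a single sys.path entry.
# 'site-packages' stands in for any real directory path.
#
#   for dist in find_distributions('site-packages'):
#       print(dist.project_name, dist.version, dist.precedence)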
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if _is_unpacked_egg(subitem):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(os.listdir(path_item))
for entry in path_item_entries:
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
if len(os.listdir(fullpath)) == 0:
# Empty egg directory, skip.
continue
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and _is_unpacked_egg(entry):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
orig_path.sort(key=position_in_sys_path)
module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
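# Illustrative examples: yield_lines() flattens strings and nested sequences
# into stripped, non-blank, non-comment lines.
#
#   list(yield_lines("foo\n# a comment\n\n  bar "))  -> ['foo', 'bar']
#   list(yield_lines(["a\nb", ["c"]]))               -> ['a', 'b', 'c']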
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
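# Illustrative example: EGG_NAME splits an egg basename into its components.
#
#   m = EGG_NAME('FooBar-1.2-py2.7-linux-x86_64')
#   m.group('name', 'ver', 'pyver', 'plat')
#   # -> ('FooBar', '1.2', '2.7', 'linux-x86_64')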
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
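# Illustrative sketch of entry-point parsing (the module and attribute names
# are hypothetical):
#
#   ep = EntryPoint.parse('mytool = mypkg.cli:main [extra1]')
#   ep.name, ep.module_name, ep.attrs, ep.extras
#   # -> ('mytool', 'mypkg.cli', ('main',), ('extra1',))
#   # ep.resolve() would import mypkg.cli and return its ``main`` attribute.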
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
is_version_line = lambda line: line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = _version_from_file(self._get_metadata(self.PKG_INFO))
if version is None:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
return version
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs = []
elif not evaluate_marker(marker):
reqs = []
extra = safe_extra(extra) or None
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
Packages installed by distutils (e.g. numpy or scipy),
which uses an old safe_version, and so
their version numbers can get mangled when
converted to filenames (e.g., 1.11.0.dev0+2329eae to
1.11.0.dev0_2329eae). These distributions will not be
parsed properly
downstream by Distribution and safe_version, so
take an extra step and try to get the version number from
the metadata file itself instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
line += next(lines)
yield Requirement(line)
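# Illustrative sketch, not part of the original module: parse_requirements()
# accepts a string (or an iterable of lines) and yields Requirement objects.
# The package names below are hypothetical:
#
#   reqs = list(parse_requirements("foo>=1.0\nbar[extra]==2.0"))
#   # equivalent to [Requirement.parse('foo>=1.0'),
#   #                Requirement.parse('bar[extra]==2.0')]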
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
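# Illustrative sketch, not part of the original module; the project name is
# hypothetical and the order of ``specs`` entries may vary:
#
#   r = Requirement.parse('requests[security]>=2.0,<3.0')
#   r.project_name -> 'requests'
#   r.extras       -> ('security',)
#   r.specs        -> [('>=', '2.0'), ('<', '3.0')] (in some order)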
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object):
pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
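# Illustrative sketch, not part of the original module: given the lines
#   ['alpha', '[web]', 'beta', 'gamma']
# split_sections() yields (None, ['alpha']) followed by ('web', ['beta', 'gamma']).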
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
dist = None # ensure dist is defined for del dist below
for dist in working_set:
dist.activate(replace=False)
del dist
add_activation_listener(lambda dist: dist.activate(replace=True), existing=False)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
| mit |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/Django-1.6.10/tests/one_to_one_regress/tests.py | 107 | 8452 | from __future__ import absolute_import
from django.test import TestCase
from .models import Place, Restaurant, Bar, Favorites, Target, UndergroundBar
class OneToOneRegressionTests(TestCase):
def setUp(self):
self.p1 = Place(name='Demon Dogs', address='944 W. Fullerton')
self.p1.save()
self.r1 = Restaurant(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.r1.save()
self.b1 = Bar(place=self.p1, serves_cocktails=False)
self.b1.save()
def test_reverse_relationship_cache_cascade(self):
"""
Regression test for #9023: accessing the reverse relationship shouldn't
result in a cascading delete().
"""
bar = UndergroundBar.objects.create(place=self.p1, serves_cocktails=False)
# The bug in #9023: if you access the one-to-one relation *before*
# setting to None and deleting, the cascade happens anyway.
self.p1.undergroundbar
bar.place.name='foo'
bar.place = None
bar.save()
self.p1.delete()
self.assertEqual(Place.objects.all().count(), 0)
self.assertEqual(UndergroundBar.objects.all().count(), 1)
def test_create_models_m2m(self):
"""
Regression test for #1064 and #1506
Check that we create models via the m2m relation if the remote model
has a OneToOneField.
"""
f = Favorites(name = 'Fred')
f.save()
f.restaurants = [self.r1]
self.assertQuerysetEqual(
f.restaurants.all(),
['<Restaurant: Demon Dogs the restaurant>']
)
def test_reverse_object_cache(self):
"""
Regression test for #7173
Check that the name of the cache for the reverse object is correct.
"""
self.assertEqual(self.p1.restaurant, self.r1)
self.assertEqual(self.p1.bar, self.b1)
def test_related_object_cache(self):
""" Regression test for #6886 (the related-object cache) """
# Look up the objects again so that we get "fresh" objects
p = Place.objects.get(name="Demon Dogs")
r = p.restaurant
        # Accessing the related object again returns the exact same object
self.assertTrue(p.restaurant is r)
# But if we kill the cache, we get a new object
del p._restaurant_cache
self.assertFalse(p.restaurant is r)
# Reassigning the Restaurant object results in an immediate cache update
# We can't use a new Restaurant because that'll violate one-to-one, but
# with a new *instance* the is test below will fail if #6886 regresses.
r2 = Restaurant.objects.get(pk=r.pk)
p.restaurant = r2
self.assertTrue(p.restaurant is r2)
# Assigning None succeeds if field is null=True.
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
ug_bar.place = None
self.assertTrue(ug_bar.place is None)
# Assigning None fails: Place.restaurant is null=False
self.assertRaises(ValueError, setattr, p, 'restaurant', None)
# You also can't assign an object of the wrong type here
self.assertRaises(ValueError, setattr, p, 'restaurant', p)
# Creation using keyword argument should cache the related object.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place=p)
self.assertTrue(r.place is p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Place()
r = Restaurant(place=p)
self.assertTrue(r.place is p)
# Creation using attname keyword argument and an id will cause the related
# object to be fetched.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place_id=p.id)
self.assertFalse(r.place is p)
self.assertEqual(r.place, p)
def test_filter_one_to_one_relations(self):
"""
Regression test for #9968
filtering reverse one-to-one relations with primary_key=True was
misbehaving. We test both (primary_key=True & False) cases here to
prevent any reappearance of the problem.
"""
t = Target.objects.create()
self.assertQuerysetEqual(
Target.objects.filter(pointer=None),
['<Target: Target object>']
)
self.assertQuerysetEqual(
Target.objects.exclude(pointer=None),
[]
)
self.assertQuerysetEqual(
Target.objects.filter(pointer2=None),
['<Target: Target object>']
)
self.assertQuerysetEqual(
Target.objects.exclude(pointer2=None),
[]
)
def test_reverse_object_does_not_exist_cache(self):
"""
Regression for #13839 and #17439.
DoesNotExist on a reverse one-to-one relation is cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
with self.assertNumQueries(1):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
with self.assertNumQueries(0):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
def test_reverse_object_cached_when_related_is_accessed(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is cached
when the origin is accessed through the reverse relation.
"""
# Use a fresh object without caches
r = Restaurant.objects.get(pk=self.r1.pk)
p = r.place
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, r)
def test_related_object_cached_when_reverse_is_accessed(self):
"""
Regression for #13839 and #17439.
The origin of a one-to-one relation is cached
when the target is accessed through the reverse relation.
"""
# Use a fresh object without caches
p = Place.objects.get(pk=self.p1.pk)
r = p.restaurant
with self.assertNumQueries(0):
self.assertEqual(r.place, p)
def test_reverse_object_cached_when_related_is_set(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
self.r1.place = p
self.r1.save()
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, self.r1)
def test_reverse_object_cached_when_related_is_unset(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
b = UndergroundBar(place=self.p1, serves_cocktails=True)
b.save()
with self.assertNumQueries(0):
self.assertEqual(self.p1.undergroundbar, b)
b.place = None
b.save()
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
self.p1.undergroundbar
def test_get_reverse_on_unsaved_object(self):
"""
Regression for #18153 and #19089.
Accessing the reverse relation on an unsaved object
always raises an exception.
"""
p = Place()
# When there's no instance of the origin of the one-to-one
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
UndergroundBar.objects.create()
# When there's one instance of the origin
# (p.undergroundbar used to return that instance)
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
UndergroundBar.objects.create()
# When there are several instances of the origin
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
def test_set_reverse_on_unsaved_object(self):
"""
Writing to the reverse relation on an unsaved object
is impossible too.
"""
p = Place()
b = UndergroundBar.objects.create()
with self.assertNumQueries(0):
with self.assertRaises(ValueError):
p.undergroundbar = b
| apache-2.0 |
fedral/ITK | Wrapping/Generators/Python/Tests/BinaryErodeImageFilter.py | 19 | 1673 | #!/usr/bin/env python
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Test BinaryErodeImageFilter
#
import sys
import itk
itk.auto_progress(2)
inputImage = sys.argv[1]
outputImage = sys.argv[2]
radiusValue = int(sys.argv[3])
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New()
reader.SetFileName(inputImage)
StructuringElementType = itk.FlatStructuringElement[Dimension]
structuringElement = StructuringElementType.Ball(radiusValue)
ErodeFilterType = itk.BinaryErodeImageFilter[
ImageType, ImageType, StructuringElementType]
erodeFilter = ErodeFilterType.New()
erodeFilter.SetInput(reader.GetOutput())
erodeFilter.SetKernel(structuringElement)
erodeFilter.SetErodeValue(200)
WriterType = itk.ImageFileWriter[ImageType]
writer = WriterType.New()
writer.SetFileName(outputImage)
writer.SetInput(erodeFilter.GetOutput())
writer.Update()
| apache-2.0 |
2014c2g12/c2g12 | wsgi/exts/w2/static/Brython2.0.0-20140209-164925/Lib/reprlib.py | 923 | 5110 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
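# Illustrative sketch, not part of the original module: with the defaults above,
# the module-level ``repr`` truncates long containers and strings, e.g.
#   repr(list(range(100))) -> '[0, 1, 2, 3, 4, 5, ...]'
# and a very long string is shortened around a '...' placed near its middle.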
| gpl-2.0 |
timcera/mettoolbox | mettoolbox/pet.py | 1 | 10467 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import warnings
from typing import Optional, Union
import numpy as np
import pandas as pd
import typic
from solarpy import declination
from tstoolbox import tsutils
from . import meteolib, utils
warnings.filterwarnings("ignore")
def _columns(tsd, req_column_list=[], optional_column_list=[]):
if None in req_column_list:
raise ValueError(
tsutils.error_wrapper(
"""
You need to supply the column (name or number, data column numbering
starts at 1) for {0} time-series.
Instead you gave {1}""".format(
len(req_column_list), req_column_list
)
)
)
collect = []
for loopvar in req_column_list + optional_column_list:
try:
nloopvar = int(loopvar) - 1
except TypeError:
nloopvar = loopvar
if nloopvar is None:
collect.append(None)
else:
collect.append(tsd.ix[:, nloopvar])
return collect
def _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
):
columns, column_names = utils._check_temperature_cols(
temp_min_col=temp_min_col,
temp_max_col=temp_max_col,
temp_mean_col=temp_mean_col,
temp_min_required=temp_min_required,
temp_max_required=temp_max_required,
)
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
)
if source_units is None:
# If "source_units" keyword is None must have source_units in column name.
source_units = []
for units in tsd.columns:
words = units.split(":")
if len(words) >= 2:
source_units.append(words[1])
else:
raise ValueError(
tsutils.error_wrapper(
"""
If "source_units" are not supplied as the second ":" delimited field in the column name
they must be supplied with the "source_units" keyword. """
)
)
else:
source_units = tsutils.make_list(source_units)
if len(source_units) != len(tsd.columns):
raise ValueError(
tsutils.error_wrapper(
"""
The number of "source_units" terms must match the number of temperature columns.
"""
)
)
interim_target_units = ["degC"] * len(tsd.columns)
tsd = tsutils.common_kwds(
tsd,
source_units=source_units,
target_units=interim_target_units,
)
tsd.columns = column_names
tsd = utils._validate_temperatures(tsd, temp_min_col, temp_max_col)
return tsd
def et0_pm(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
source_units=None,
target_units=None,
print_input=False,
tablefmt="csv",
avp=None,
avp_from_tdew=None,
avp_from_twet_tdry=None,
avp_from_rhmin_rh_max=None,
avp_from_rhmax=None,
avp_from_rhmean=None,
avp_from_tmin=None,
lat=None,
):
"""Penman-Monteith evaporation."""
tsd = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts, skiprows=skiprows, names=names, index_type=index_type
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
return tsd
@typic.constrained(ge=-90, le=90)
class FloatLatitude(float):
"""-90 <= float <= 90"""
@typic.al
def hamon(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
k: float = 1,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""hamon"""
temp_min_required = True
temp_max_required = True
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
decl = [declination(i) for i in tsd.index.to_pydatetime()]
w = np.arccos(-np.tan(decl) * np.tan(lat))
es = meteolib.es_calc(tsd.tmean)
N = 24 * w / np.pi
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_hamon:mm"])
pe["pet_hamon:mm"] = k * 29.8 * N * es / (273.3 + tsd.tmean)
pe.loc[tsd.tmean <= 0, "pet_hamon:mm"] = 0.0
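    # Illustrative note, not part of the original source: the expression above
    # follows the Hamon form PET = k * 29.8 * N * es / (T + 273.3), where N is
    # the day length in hours (from the sunset hour angle w), es is the
    # saturation vapour pressure at the daily mean temperature T (degC) as
    # returned by meteolib.es_calc, and k is a calibration coefficient; PET is
    # set to zero whenever T <= 0.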
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def hargreaves(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units="mm",
print_input=False,
):
"""hargreaves"""
temp_min_required = True
temp_max_required = True
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
tsdiff = tsd.tmax - tsd.tmin
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_hargreaves:mm"])
pe["pet_hargreaves:mm"] = (
0.408
* 0.0023
* newra.ra.values
* np.abs(tsdiff.values) ** 0.5
* (tsd.tmean.values + 17.8)
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def oudin_form(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
k1=100,
k2=5,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""oudin form"""
temp_min_required = False
temp_max_required = False
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_oudin:mm"])
    gamma = 2.45 # latent heat of vaporization (MJ kg−1)
rho = 1000.0 # density of water (kg m-3)
pe.loc[tsd.tmean > k2, "pet_oudin:mm"] = (
newra.ra / (gamma * rho) * (tsd.tmean + k2) / k1 * 1000
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def allen(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Allen"""
temp_min_required = False
temp_max_required = False
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_allen:mm"])
pe["pet_allen:mm"] = (
0.408 * 0.0029 * newra.ra * (tsd.tmax - tsd.tmin) ** 0.4 * (tsd.tmean + 20)
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
def reference():
"""reference penman-monteith"""
print("reference")
def potential():
"""potential"""
print("potential")
| bsd-3-clause |
Teamxrtc/webrtc-streaming-node | third_party/depot_tools/third_party/logilab/common/optparser.py | 92 | 3386 | # -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extend OptionParser with commands.
Example:
>>> parser = OptionParser()
>>> parser.usage = '%prog COMMAND [options] <arg> ...'
>>> parser.add_command('build', 'mymod.build')
>>> parser.add_command('clean', run_clean, add_opt_clean)
>>> run, options, args = parser.parse_command(sys.argv[1:])
>>> return run(options, args[1:])
With mymod.build that defines two functions run and add_options
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
from warnings import warn
warn('lgc.optparser module is deprecated, use lgc.clcommands instead', DeprecationWarning,
stacklevel=2)
import sys
import optparse
class OptionParser(optparse.OptionParser):
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
self._commands = {}
self.min_args, self.max_args = 0, 1
def add_command(self, name, mod_or_funcs, help=''):
"""name of the command, name of module or tuple of functions
(run, add_options)
"""
assert isinstance(mod_or_funcs, str) or isinstance(mod_or_funcs, tuple), \
"mod_or_funcs has to be a module name or a tuple of functions"
self._commands[name] = (mod_or_funcs, help)
def print_main_help(self):
optparse.OptionParser.print_help(self)
print('\ncommands:')
for cmdname, (_, help) in self._commands.items():
print('% 10s - %s' % (cmdname, help))
def parse_command(self, args):
if len(args) == 0:
self.print_main_help()
sys.exit(1)
cmd = args[0]
args = args[1:]
if cmd not in self._commands:
if cmd in ('-h', '--help'):
self.print_main_help()
sys.exit(0)
elif self.version is not None and cmd == "--version":
self.print_version()
sys.exit(0)
self.error('unknown command')
self.prog = '%s %s' % (self.prog, cmd)
mod_or_f, help = self._commands[cmd]
# optparse inserts self.description between usage and options help
self.description = help
if isinstance(mod_or_f, str):
exec('from %s import run, add_options' % mod_or_f)
else:
run, add_options = mod_or_f
add_options(self)
(options, args) = self.parse_args(args)
if not (self.min_args <= len(args) <= self.max_args):
self.error('incorrect number of arguments')
return run, options, args
| mit |
bob-the-hamster/commandergenius | project/jni/python/src/Lib/bsddb/dbrecio.py | 203 | 5308 |
"""
File-like objects that read from or write to a bsddb record.
This implements (nearly) all stdio methods.
f = DBRecIO(db, key, txn=None)
f.close() # explicitly release resources held
flag = f.isatty() # always false
pos = f.tell() # get current position
f.seek(pos) # set current position
f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read() # read until EOF
buf = f.read(n) # read up to n bytes
f.truncate([size]) # truncate file at to at most size (default: current pos)
f.write(buf) # write at current position
f.writelines(list) # for line in list: f.write(line)
Notes:
- fileno() is left unimplemented so that code which uses it triggers
an exception early.
- There's a simple test set (see end of this file) - not yet updated
for DBRecIO.
- readline() is not implemented yet.
From:
Itamar Shtull-Trauring <[email protected]>
"""
import errno
import string
class DBRecIO:
def __init__(self, db, key, txn=None):
self.db = db
self.key = key
self.txn = txn
self.len = None
self.pos = 0
self.closed = 0
self.softspace = 0
def close(self):
if not self.closed:
self.closed = 1
del self.db, self.txn
def isatty(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return 0
def seek(self, pos, mode = 0):
if self.closed:
raise ValueError, "I/O operation on closed file"
if mode == 1:
pos = pos + self.pos
elif mode == 2:
pos = pos + self.len
self.pos = max(0, pos)
def tell(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return self.pos
def read(self, n = -1):
if self.closed:
raise ValueError, "I/O operation on closed file"
if n < 0:
newpos = self.len
else:
newpos = min(self.pos+n, self.len)
dlen = newpos - self.pos
r = self.db.get(self.key, txn=self.txn, dlen=dlen, doff=self.pos)
self.pos = newpos
return r
__fixme = """
def readline(self, length=None):
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.buflist:
self.buf = self.buf + string.joinfields(self.buflist, '')
self.buflist = []
i = string.find(self.buf, '\n', self.pos)
if i < 0:
newpos = self.len
else:
newpos = i+1
if length is not None:
if self.pos + length < newpos:
newpos = self.pos + length
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readlines(self, sizehint = 0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
"""
def truncate(self, size=None):
if self.closed:
raise ValueError, "I/O operation on closed file"
if size is None:
size = self.pos
elif size < 0:
raise IOError(errno.EINVAL,
"Negative size not allowed")
elif size < self.pos:
self.pos = size
self.db.put(self.key, "", txn=self.txn, dlen=self.len-size, doff=size)
def write(self, s):
if self.closed:
raise ValueError, "I/O operation on closed file"
if not s: return
if self.pos > self.len:
self.buflist.append('\0'*(self.pos - self.len))
self.len = self.pos
newpos = self.pos + len(s)
self.db.put(self.key, s, txn=self.txn, dlen=len(s), doff=self.pos)
self.pos = newpos
def writelines(self, list):
self.write(string.joinfields(list, ''))
def flush(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
"""
# A little test suite
def _test():
import sys
if sys.argv[1:]:
file = sys.argv[1]
else:
file = '/etc/passwd'
lines = open(file, 'r').readlines()
text = open(file, 'r').read()
f = StringIO()
for line in lines[:-2]:
f.write(line)
f.writelines(lines[-2:])
if f.getvalue() != text:
raise RuntimeError, 'write failed'
length = f.tell()
print 'File length =', length
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print 'First line =', repr(f.readline())
here = f.tell()
line = f.readline()
print 'Second line =', repr(line)
f.seek(-len(line), 1)
line2 = f.read(len(line))
if line != line2:
raise RuntimeError, 'bad result after seek back'
f.seek(len(line2), 1)
list = f.readlines()
line = list[-1]
f.seek(f.tell() - len(line))
line2 = f.read()
if line != line2:
raise RuntimeError, 'bad result after seek back from EOF'
print 'Read', len(list), 'more lines'
print 'File length =', f.tell()
if f.tell() != length:
raise RuntimeError, 'bad length'
f.close()
if __name__ == '__main__':
_test()
"""
| lgpl-2.1 |
eckucukoglu/arm-linux-gnueabihf | arm-linux-gnueabihf/libc/usr/lib/python2.7/unittest/test/test_break.py | 105 | 9641 | import gc
import os
import sys
import signal
import weakref
from cStringIO import StringIO
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
int_handler = None
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
if self.int_handler is not None:
signal.signal(signal.SIGINT, self.int_handler)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
# Can't use skipIf decorator because the signal handler may have
# been changed after defining this method.
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
self.skipTest("test requires SIGINT to not be ignored")
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# Can't use skipIf decorator because the signal handler may have
# been changed after defining this method.
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
self.skipTest("test requires SIGINT to not be ignored")
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
gc.collect();gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreakDefaultIntHandler(TestBreak):
int_handler = signal.default_int_handler
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreakSignalIgnored(TestBreak):
int_handler = signal.SIG_IGN
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreakSignalDefault(TestBreak):
int_handler = signal.SIG_DFL
| gpl-2.0 |
GaryBrittain/DB2S3 | process.py | 1 | 3210 | import dropbox
import sys
from sqlsync import *
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import os
from pushover import message
import json
if check_lock() == 1:
print 'Database is locked or unreachable, quitting...'
sys.exit()
conn = S3Connection('', '')
pb = conn.get_bucket('')
access_token = 'YOUR DROPBOX APP'
client = dropbox.client.DropboxClient(access_token)
curr_cursor_file = open("cursor.txt", "r")
curr_cursor = curr_cursor_file.read()
curr_cursor_file.close()
next_cursor = client.delta(curr_cursor, '/Camera Uploads')
curr_cursor_file = open("cursor.txt", "w")
curr_cursor_file.write(next_cursor['cursor'])
curr_cursor_file.close()
new_files = 0
if len(next_cursor['entries']) > 0:
for entry in next_cursor['entries']:
if entry[1] != None:
cur_path = entry[0]
cur_file = cur_path.rsplit("/",1)[1]
print 'processing file ['+str(new_files+1)+']: ' + cur_file
post_file(cur_path, cur_file)
new_files += 1
else:
print entry[0] + " has been removed."
with open("errors.txt", "a") as err:
err.write('File removed from dropbox: '+str(entry[0])+"\n")
err.close()
else:
print "No files have changed."
uploaded = 0
failed = 0
path = next_file_to_process()
workload = len(path)
processed = 0
for i in path:
if check_lock() == 1:
print 'Process locked by database, terminating...'
message('Process locked by database, terminating...')
break
processed += 1
cPath = i["PATH"]
cFile = i["FILENAME"]
print ' '
print 'Processing ' + str("{:,}".format(processed)) + ' of ' + str("{:,}".format(workload)) + ': ' + cPath
meta = client.metadata(cPath)
bytes = meta['bytes']
print meta['size']
#100MB chunks
chunk_size = 104857600
chunk_loops = int(bytes / chunk_size) + 1
current = 0
chunk_loop = 1
out = open(cFile, 'wb')
try:
while (bytes > current):
print 'Downloading chunk %s of %s' % (chunk_loop, chunk_loops)
chunk_loop += 1
f = client.get_file(cPath, rev=None, start=current, length=chunk_size)
out.write(f.read())
current += chunk_size
except:
failed += 1
print 'Error downloading ' + cPath
with open("errors.txt", "a") as err:
err.write('Could not download from dropbox file: '+str(cPath)+"\n")
err.close()
continue
print 'Downloaded'
out.close()
filesize = os.path.getsize(cFile)
if bytes != filesize:
print 'Downloaded file corrupted'
failed += 1
continue
k = Key(pb)
k.name = cPath
try:
k.set_contents_from_filename(cFile, encrypt_key=True)
os.remove(cFile)
print 'Uploaded'
s3_uploaded_confirm(cPath, meta['size'], meta['bytes'], meta['rev'], meta['revision'], meta['mime_type'], meta['modified'], meta['client_mtime'])
uploaded = uploaded + 1
except:
print 'Error uploading ' + cPath
try:
k.name='/db2s3/cursor.txt'
k.set_contents_from_filename('cursor.txt', encrypt_key=True)
except:
print 'could not copy cursor key to S3'
print '******************************'
summary = """%s new files found
%s files uploaded
%s failures - check errors.txt for info"""%(new_files,uploaded,failed)
message(summary)
print 'Finished!'
print summary
| mit |
yongtang/tensorflow | tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver.py | 14 | 6847 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for TF_CONFIG Environment Variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.util.tf_export import tf_export
_TF_CONFIG_ENV = 'TF_CONFIG'
_SESSION_MASTER_KEY = 'session_master'
_RPC_LAYER_KEY = 'rpc_layer'
_TASK_KEY = 'task'
def format_master_url(master, rpc_layer=None):
if rpc_layer:
return '%s://%s' % (rpc_layer, master)
else:
return master
def _load_tf_config():
return json.loads(os.environ.get(_TF_CONFIG_ENV, '{}'))
def _get_value_in_tfconfig(key, default=None):
tf_config = _load_tf_config()
return tf_config[key] if key in tf_config else default
@tf_export('distribute.cluster_resolver.TFConfigClusterResolver')
class TFConfigClusterResolver(ClusterResolver):
"""Implementation of a ClusterResolver which reads the TF_CONFIG EnvVar.
This is an implementation of cluster resolvers when using TF_CONFIG to set
information about the cluster. The cluster spec returned will be
initialized from the TF_CONFIG environment variable.
An example to set TF_CONFIG is:
```Python
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ["localhost:12345", "localhost:23456"]
},
'task': {'type': 'worker', 'index': 0}
})
```
However, sometimes the container orchestration framework will set TF_CONFIG
for you. In this case, you can just create an instance without passing in any
  arguments. You can find an example here to let Kubernetes set TF_CONFIG for
you: https://github.com/tensorflow/ecosystem/tree/master/kubernetes. Then you
can use it with `tf.distribute.Strategy` as:
```Python
# `TFConfigClusterResolver` is already the default one in the following
# strategy.
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=TFConfigClusterResolver())
```
"""
def __init__(self,
task_type=None,
task_id=None,
rpc_layer=None,
environment=None):
"""Creates a new TFConfigClusterResolver.
Args:
task_type: (String, optional) Overrides the task type specified in the
TF_CONFIG environment variable.
task_id: (Integer, optional) Overrides the task index specified in the
TF_CONFIG environment variable.
rpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses.
environment: (String, optional) Overrides the environment TensorFlow
operates in.
"""
self._task_type = task_type
self._task_id = task_id
self._rpc_layer = rpc_layer
self._environment = environment
@property
def task_type(self):
if self._task_type is None:
task_info = _get_value_in_tfconfig(_TASK_KEY, {})
return str(task_info['type']) if 'type' in task_info else None
else:
return str(self._task_type)
@property
def task_id(self):
if self._task_id is None:
task_info = _get_value_in_tfconfig(_TASK_KEY, {})
return int(task_info['index']) if 'index' in task_info else None
else:
return int(self._task_id)
@task_type.setter
def task_type(self, task_type):
self._task_type = task_type
@task_id.setter
def task_id(self, task_id):
self._task_id = task_id
@property
def environment(self):
return self._environment
@property
def rpc_layer(self):
if self._rpc_layer is None:
return _get_value_in_tfconfig(_RPC_LAYER_KEY)
else:
return self._rpc_layer
@rpc_layer.setter
def rpc_layer(self, rpc_layer):
self._rpc_layer = rpc_layer
def num_accelerators(self,
task_type=None,
task_id=None,
config_proto=None):
task_type = self.task_type if task_type is None else task_type
task_id = self.task_id if task_id is None else task_id
return super(TFConfigClusterResolver, self).num_accelerators(
task_type, task_id, config_proto)
def cluster_spec(self):
"""Returns a ClusterSpec based on the TF_CONFIG environment variable.
Returns:
A ClusterSpec with information from the TF_CONFIG environment variable.
"""
tf_config = _load_tf_config()
if 'cluster' not in tf_config:
return ClusterSpec({})
return ClusterSpec(tf_config['cluster'])
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Returns the master address to use when creating a TensorFlow session.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (String, optional) Overrides and sets the task_type of the
master.
task_id: (Integer, optional) Overrides and sets the task id of the
master.
rpc_layer: (String, optional) Overrides and sets the protocol over which
TensorFlow nodes communicate with each other.
Returns:
The address of the master.
Raises:
RuntimeError: If the task_type or task_id is not specified and the
`TF_CONFIG` environment variable does not contain a task section.
"""
# If `session_master` is set, just use that.
session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY)
if session_master is not None:
return session_master
# Return an empty string if we are the only job in the ClusterSpec.
cluster_spec = self.cluster_spec()
if (not cluster_spec.jobs or
(len(cluster_spec.jobs) == 1 and
len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1)):
return ''
    # We try to auto-detect the task type and id, but use the user-supplied ones
    # where available.
task_type = task_type if task_type is not None else self.task_type
task_id = task_id if task_id is not None else self.task_id
rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer
return format_master_url(cluster_spec.task_address(task_type, task_id),
rpc_layer)
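# Example: with the TF_CONFIG value shown in the class docstring,
# TFConfigClusterResolver().master(rpc_layer='grpc') resolves to
# 'grpc://localhost:12345' for worker 0.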
| apache-2.0 |
saydulk/horizon | openstack_dashboard/usage/views.py | 32 | 4722 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.usage import base
class UsageView(tables.DataTableView):
usage_class = None
show_terminated = True
csv_template_name = None
page_title = _("Overview")
def __init__(self, *args, **kwargs):
super(UsageView, self).__init__(*args, **kwargs)
if not issubclass(self.usage_class, base.BaseUsage):
raise AttributeError("You must specify a usage_class attribute "
"which is a subclass of BaseUsage.")
def get_template_names(self):
if self.request.GET.get('format', 'html') == 'csv':
return (self.csv_template_name or
".".join((self.template_name.rsplit('.', 1)[0], 'csv')))
return self.template_name
def get_content_type(self):
if self.request.GET.get('format', 'html') == 'csv':
return "text/csv"
return "text/html"
def get_data(self):
try:
project_id = self.kwargs.get('project_id',
self.request.user.tenant_id)
self.usage = self.usage_class(self.request, project_id)
self.usage.summarize(*self.usage.get_date_range())
self.usage.get_limits()
self.kwargs['usage'] = self.usage
return self.usage.usage_list
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve usage information.'))
return []
def get_context_data(self, **kwargs):
context = super(UsageView, self).get_context_data(**kwargs)
context['table'].kwargs['usage'] = self.usage
context['form'] = self.usage.form
context['usage'] = self.usage
context['charts'] = []
        # (Used key, Max key, Human Readable Name, text to display when
        # describing the quota; by default it is 'Used')
types = [("totalInstancesUsed", "maxTotalInstances", _("Instances")),
("totalCoresUsed", "maxTotalCores", _("VCPUs")),
("totalRAMUsed", "maxTotalRAMSize", _("RAM")),
("totalFloatingIpsUsed", "maxTotalFloatingIps",
"Floating IPs", _("Allocated")),
("totalSecurityGroupsUsed", "maxSecurityGroups",
_("Security Groups"))]
# Check for volume usage
if 'totalVolumesUsed' in self.usage.limits and self.usage.limits[
'totalVolumesUsed'] >= 0:
types.append(("totalVolumesUsed", "maxTotalVolumes",
_("Volumes")))
types.append(("totalGigabytesUsed", "maxTotalVolumeGigabytes",
_("Volume Storage")))
for t in types:
if t[0] in self.usage.limits and t[1] in self.usage.limits:
text = False
if len(t) > 3:
text = t[3]
context['charts'].append({
'name': t[2],
'used': self.usage.limits[t[0]],
'max': self.usage.limits[t[1]],
'text': text
})
try:
context['simple_tenant_usage_enabled'] = \
api.nova.extension_supported('SimpleTenantUsage', self.request)
except Exception:
context['simple_tenant_usage_enabled'] = True
return context
def render_to_response(self, context, **response_kwargs):
if self.request.GET.get('format', 'html') == 'csv':
render_class = self.csv_response_class
response_kwargs.setdefault("filename", "usage.csv")
else:
render_class = self.response_class
context = self.render_context_with_title(context)
resp = render_class(request=self.request,
template=self.get_template_names(),
context=context,
content_type=self.get_content_type(),
**response_kwargs)
return resp
| apache-2.0 |
krahman/BuildingMachineLearningSystemsWithPython | ch04/build_lda.py | 1 | 2472 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
try:
import nltk.corpus
except ImportError:
print("nltk not found")
print("please install it")
raise
from scipy.spatial import distance
import numpy as np
import string
from gensim import corpora, models, similarities
import sklearn.datasets
import nltk.stem
from collections import defaultdict
english_stemmer = nltk.stem.SnowballStemmer('english')
stopwords = set(nltk.corpus.stopwords.words('english'))
stopwords.update(['from:', 'subject:', 'writes:', 'writes'])
class DirectText(corpora.textcorpus.TextCorpus):
def get_texts(self):
return self.input
def __len__(self):
return len(self.input)
try:
dataset = sklearn.datasets.load_mlcomp("20news-18828", "train",
mlcomp_root='./data')
except:
print("Newsgroup data not found.")
print("Please download from http://mlcomp.org/datasets/379")
print("And expand the zip into the subdirectory data/")
print()
print()
raise
otexts = dataset.data
texts = dataset.data
texts = [t.decode('utf-8', 'ignore') for t in texts]
texts = [t.split() for t in texts]
texts = [map(lambda w: w.lower(), t) for t in texts]
texts = [filter(lambda s: not len(set("+-.?!()>@012345689") & set(s)), t)
for t in texts]
texts = [filter(lambda s: (len(s) > 3) and (s not in stopwords), t)
for t in texts]
texts = [map(english_stemmer.stem, t) for t in texts]
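# The pipeline above decodes, tokenizes and lowercases each post, drops tokens
# containing digits or selected punctuation, removes stopwords and short words,
# and finally stems; map/filter return lists here because this is Python 2 code.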
usage = defaultdict(int)
for t in texts:
for w in set(t):
usage[w] += 1
limit = len(texts) / 10
too_common = [w for w in usage if usage[w] > limit]
too_common = set(too_common)
texts = [filter(lambda s: s not in too_common, t) for t in texts]
corpus = DirectText(texts)
dictionary = corpus.dictionary
try:
dictionary['computer']
except:
pass
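# The throwaway lookup above is a known gensim trick: indexing the Dictionary
# forces its lazy id2token mapping to be built, which LdaModel needs below.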
model = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=dictionary.id2token)
thetas = np.zeros((len(texts), 100))
for i, c in enumerate(corpus):
for ti, v in model[c]:
thetas[i, ti] += v
distances = distance.squareform(distance.pdist(thetas))
large = distances.max() + 1
for i in xrange(len(distances)):
distances[i, i] = large
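# With the diagonal masked out by `large`, distances[1].argmin() below picks the
# most similar *other* document to document 1 under the topic-distribution metric.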
print(otexts[1])
print()
print()
print()
print(otexts[distances[1].argmin()])
| mit |
point97/hapifis | server/apps/survey/migrations/0069_auto__add_field_response_answer_number.py | 1 | 15989 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Response.answer_number'
db.add_column(u'survey_response', 'answer_number',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Response.answer_number'
db.delete_column(u'survey_response', 'answer_number')
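    # South runs forwards() on migrate and backwards() on rollback; the frozen
    # `models` dict below only rebuilds the ORM state available to this migration.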
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'survey.block': {
'Meta': {'object_name': 'Block'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'})
},
u'survey.gridanswer': {
'Meta': {'object_name': 'GridAnswer'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'col_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'col_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"}),
'row_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'row_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'lng': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"})
},
u'survey.locationanswer': {
'Meta': {'object_name': 'LocationAnswer'},
'answer': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Location']"})
},
u'survey.multianswer': {
'Meta': {'object_name': 'MultiAnswer'},
'answer_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'answer_text': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"})
},
u'survey.option': {
'Meta': {'object_name': 'Option'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'max': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'min': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'integer'", 'max_length': '20'})
},
u'survey.page': {
'Meta': {'ordering': "['survey', 'question__order']", 'object_name': 'Page'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']", 'null': 'True', 'blank': 'True'})
},
u'survey.question': {
'Meta': {'ordering': "['order']", 'object_name': 'Question'},
'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Block']", 'null': 'True', 'blank': 'True'}),
'cols': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'filterBy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_questions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'filter_questions_rel_+'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'foreach_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'foreach'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'grid_cols': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'grid_cols'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Option']"}),
'hoist_answers': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'hoisted'", 'null': 'True', 'to': u"orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'integer_max': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'integer_min': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'lat': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'lng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'min_zoom': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True', 'blank': 'True'}),
'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modal_question'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}),
'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'options_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'report_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'term_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}),
'visualize': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.respondant': {
'Meta': {'object_name': 'Respondant'},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '254', 'null': 'True', 'blank': 'True'}),
'last_question': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'locations': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'responses': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'responses'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Response']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}),
'surveyor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 12, 0, 0)'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'ddec2809-7f56-44c8-adf6-d609312f8e15'", 'max_length': '36', 'primary_key': 'True'})
},
u'survey.response': {
'Meta': {'object_name': 'Response'},
'answer': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'answer_number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'answer_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 12, 0, 0)'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'anon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}),
'states': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey'] | gpl-3.0 |
nuagenetworks/vspk-python | vspk/v6/nuzfbrequest.py | 1 | 35457 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUJobsFetcher
from bambou import NURESTObject
class NUZFBRequest(NURESTObject):
""" Represents a ZFBRequest in the VSD
Notes:
            Pending requests reflect Network Services Gateways that have initiated a request for bootstrapping. Requests can be assigned, or matched, to continue the bootstrapping process. If a request is rejected, the NSG will terminate the auto-bootstrapping attempts.
"""
__rest_name__ = "zfbrequest"
__resource_name__ = "zfbrequests"
## Constants
CONST_ZFB_APPROVAL_STATUS_DENIED = "DENIED"
CONST_REQUEST_TYPE_SELF_REBOOTSTRAP = "SELF_REBOOTSTRAP"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_REQUEST_TYPE_ZFB = "ZFB"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_ZFB_APPROVAL_STATUS_UNASSIGNED = "UNASSIGNED"
CONST_ZFB_APPROVAL_STATUS_APPROVED = "APPROVED"
CONST_ZFB_APPROVAL_STATUS_ASSIGNED = "ASSIGNED"
CONST_ASSOCIATED_ENTITY_TYPE_GATEWAY = "GATEWAY"
CONST_ASSOCIATED_ENTITY_TYPE_NSGATEWAY = "NSGATEWAY"
def __init__(self, **kwargs):
""" Initializes a ZFBRequest instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> zfbrequest = NUZFBRequest(id=u'xxxx-xxx-xxx-xxx', name=u'ZFBRequest')
>>> zfbrequest = NUZFBRequest(data=my_dict)
"""
super(NUZFBRequest, self).__init__()
# Read/Write Attributes
self._mac_address = None
self._zfb_approval_status = None
self._zfb_bootstrap_enabled = None
self._zfb_info = None
self._zfb_request_retry_timer = None
self._sku = None
self._ip_address = None
self._cpu_type = None
self._nsg_version = None
self._uuid = None
self._family = None
self._last_connected_time = None
self._last_updated_by = None
self._last_updated_date = None
self._registration_url = None
self._request_type = None
self._serial_number = None
self._embedded_metadata = None
self._entity_scope = None
self._hostname = None
self._creation_date = None
self._original_enterprise_name = None
self._original_gateway_datapath_id = None
self._original_gateway_name = None
self._original_uplink_connection_info = None
self._associated_enterprise_id = None
self._associated_enterprise_name = None
self._associated_entity_type = None
self._associated_gateway_id = None
self._associated_gateway_name = None
self._associated_ns_gateway_id = None
self._associated_ns_gateway_name = None
self._status_string = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="mac_address", remote_name="MACAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_approval_status", remote_name="ZFBApprovalStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'APPROVED', u'ASSIGNED', u'DENIED', u'UNASSIGNED'])
self.expose_attribute(local_name="zfb_bootstrap_enabled", remote_name="ZFBBootstrapEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_info", remote_name="ZFBInfo", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_request_retry_timer", remote_name="ZFBRequestRetryTimer", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="sku", remote_name="SKU", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ip_address", remote_name="IPAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cpu_type", remote_name="CPUType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="nsg_version", remote_name="NSGVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="family", remote_name="family", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_connected_time", remote_name="lastConnectedTime", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="registration_url", remote_name="registrationURL", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="request_type", remote_name="requestType", attribute_type=str, is_required=False, is_unique=False, choices=[u'SELF_REBOOTSTRAP', u'ZFB'])
self.expose_attribute(local_name="serial_number", remote_name="serialNumber", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="hostname", remote_name="hostname", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="original_enterprise_name", remote_name="originalEnterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="original_gateway_datapath_id", remote_name="originalGatewayDatapathID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="original_gateway_name", remote_name="originalGatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="original_uplink_connection_info", remote_name="originalUplinkConnectionInfo", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_enterprise_id", remote_name="associatedEnterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_enterprise_name", remote_name="associatedEnterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False, choices=[u'GATEWAY', u'NSGATEWAY'])
self.expose_attribute(local_name="associated_gateway_id", remote_name="associatedGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_gateway_name", remote_name="associatedGatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ns_gateway_id", remote_name="associatedNSGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ns_gateway_name", remote_name="associatedNSGatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="status_string", remote_name="statusString", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child")
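        # These fetchers expose the child collections of a ZFBRequest; for
        # example, zfbrequest.jobs.get() retrieves its Job children through the
        # bambou fetcher API.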
self._compute_args(**kwargs)
# Properties
@property
def mac_address(self):
""" Get mac_address value.
Notes:
                MAC Address of the NSG Port1 interface
This attribute is named `MACAddress` in VSD API.
"""
return self._mac_address
@mac_address.setter
def mac_address(self, value):
""" Set mac_address value.
Notes:
                MAC Address of the NSG Port1 interface
This attribute is named `MACAddress` in VSD API.
"""
self._mac_address = value
@property
def zfb_approval_status(self):
""" Get zfb_approval_status value.
Notes:
the status of the request
This attribute is named `ZFBApprovalStatus` in VSD API.
"""
return self._zfb_approval_status
@zfb_approval_status.setter
def zfb_approval_status(self, value):
""" Set zfb_approval_status value.
Notes:
the status of the request
This attribute is named `ZFBApprovalStatus` in VSD API.
"""
self._zfb_approval_status = value
@property
def zfb_bootstrap_enabled(self):
""" Get zfb_bootstrap_enabled value.
Notes:
whether the NSG should bootstrap, or just simulate bootstrap. Set from System Config
This attribute is named `ZFBBootstrapEnabled` in VSD API.
"""
return self._zfb_bootstrap_enabled
@zfb_bootstrap_enabled.setter
def zfb_bootstrap_enabled(self, value):
""" Set zfb_bootstrap_enabled value.
Notes:
whether the NSG should bootstrap, or just simulate bootstrap. Set from System Config
This attribute is named `ZFBBootstrapEnabled` in VSD API.
"""
self._zfb_bootstrap_enabled = value
@property
def zfb_info(self):
""" Get zfb_info value.
Notes:
The Base64 encoded JSON string of ZFB Attributes
This attribute is named `ZFBInfo` in VSD API.
"""
return self._zfb_info
@zfb_info.setter
def zfb_info(self, value):
""" Set zfb_info value.
Notes:
The Base64 encoded JSON string of ZFB Attributes
This attribute is named `ZFBInfo` in VSD API.
"""
self._zfb_info = value
@property
def zfb_request_retry_timer(self):
""" Get zfb_request_retry_timer value.
Notes:
ZFB Request retry timer on the gateway. Set on VSD's System Config panel.
This attribute is named `ZFBRequestRetryTimer` in VSD API.
"""
return self._zfb_request_retry_timer
@zfb_request_retry_timer.setter
def zfb_request_retry_timer(self, value):
""" Set zfb_request_retry_timer value.
Notes:
ZFB Request retry timer on the gateway. Set on VSD's System Config panel.
This attribute is named `ZFBRequestRetryTimer` in VSD API.
"""
self._zfb_request_retry_timer = value
@property
def sku(self):
""" Get sku value.
Notes:
The part number of the gateway being bootstrapped through ZFB.
This attribute is named `SKU` in VSD API.
"""
return self._sku
@sku.setter
def sku(self, value):
""" Set sku value.
Notes:
The part number of the gateway being bootstrapped through ZFB.
This attribute is named `SKU` in VSD API.
"""
self._sku = value
@property
def ip_address(self):
""" Get ip_address value.
Notes:
IP Address of the gateway being bootstrapped using ZFB.
This attribute is named `IPAddress` in VSD API.
"""
return self._ip_address
@ip_address.setter
def ip_address(self, value):
""" Set ip_address value.
Notes:
IP Address of the gateway being bootstrapped using ZFB.
This attribute is named `IPAddress` in VSD API.
"""
self._ip_address = value
@property
def cpu_type(self):
""" Get cpu_type value.
Notes:
Processor Type
This attribute is named `CPUType` in VSD API.
"""
return self._cpu_type
@cpu_type.setter
def cpu_type(self, value):
""" Set cpu_type value.
Notes:
Processor Type
This attribute is named `CPUType` in VSD API.
"""
self._cpu_type = value
@property
def nsg_version(self):
""" Get nsg_version value.
Notes:
The Nuage NSG Version
This attribute is named `NSGVersion` in VSD API.
"""
return self._nsg_version
@nsg_version.setter
def nsg_version(self, value):
""" Set nsg_version value.
Notes:
The Nuage NSG Version
This attribute is named `NSGVersion` in VSD API.
"""
self._nsg_version = value
@property
def uuid(self):
""" Get uuid value.
Notes:
Redhat UUID
This attribute is named `UUID` in VSD API.
"""
return self._uuid
@uuid.setter
def uuid(self, value):
""" Set uuid value.
Notes:
Redhat UUID
This attribute is named `UUID` in VSD API.
"""
self._uuid = value
@property
def family(self):
""" Get family value.
Notes:
Gateway Type
"""
return self._family
@family.setter
def family(self, value):
""" Set family value.
Notes:
Gateway Type
"""
self._family = value
@property
def last_connected_time(self):
""" Get last_connected_time value.
Notes:
The time in which the last GET was made from the gateway.
This attribute is named `lastConnectedTime` in VSD API.
"""
return self._last_connected_time
@last_connected_time.setter
def last_connected_time(self, value):
""" Set last_connected_time value.
Notes:
The time in which the last GET was made from the gateway.
This attribute is named `lastConnectedTime` in VSD API.
"""
self._last_connected_time = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def registration_url(self):
""" Get registration_url value.
Notes:
Registration URL to be used for a gateway to be bootstrapped using ZFB.
This attribute is named `registrationURL` in VSD API.
"""
return self._registration_url
@registration_url.setter
def registration_url(self, value):
""" Set registration_url value.
Notes:
Registration URL to be used for a gateway to be bootstrapped using ZFB.
This attribute is named `registrationURL` in VSD API.
"""
self._registration_url = value
@property
def request_type(self):
""" Get request_type value.
Notes:
                Value indicating whether the Auto-Bootstrapping request is made in the context of a new NSG instance being bootstrapped or of an NSG going through a self-rebootstrapping phase following a revocation triggered by entering quarantine.
This attribute is named `requestType` in VSD API.
"""
return self._request_type
@request_type.setter
def request_type(self, value):
""" Set request_type value.
Notes:
                Value indicating whether the Auto-Bootstrapping request is made in the context of a new NSG instance being bootstrapped or of an NSG going through a self-rebootstrapping phase following a revocation triggered by entering quarantine.
This attribute is named `requestType` in VSD API.
"""
self._request_type = value
@property
def serial_number(self):
""" Get serial_number value.
Notes:
The gateway's Serial Number.
This attribute is named `serialNumber` in VSD API.
"""
return self._serial_number
@serial_number.setter
def serial_number(self, value):
""" Set serial_number value.
Notes:
The gateway's Serial Number.
This attribute is named `serialNumber` in VSD API.
"""
self._serial_number = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def hostname(self):
""" Get hostname value.
Notes:
Hostname of the gateway bootstrapped using ZFB.
"""
return self._hostname
@hostname.setter
def hostname(self, value):
""" Set hostname value.
Notes:
Hostname of the gateway bootstrapped using ZFB.
"""
self._hostname = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def original_enterprise_name(self):
""" Get original_enterprise_name value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name of the enterprise/organisation to which the NSG belonged.
This attribute is named `originalEnterpriseName` in VSD API.
"""
return self._original_enterprise_name
@original_enterprise_name.setter
def original_enterprise_name(self, value):
""" Set original_enterprise_name value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name of the enterprise/organisation to which the NSG belonged.
This attribute is named `originalEnterpriseName` in VSD API.
"""
self._original_enterprise_name = value
@property
def original_gateway_datapath_id(self):
""" Get original_gateway_datapath_id value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original datapath ID that it had before revoking.
This attribute is named `originalGatewayDatapathID` in VSD API.
"""
return self._original_gateway_datapath_id
@original_gateway_datapath_id.setter
def original_gateway_datapath_id(self, value):
""" Set original_gateway_datapath_id value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original datapath ID that it had before revoking.
This attribute is named `originalGatewayDatapathID` in VSD API.
"""
self._original_gateway_datapath_id = value
@property
def original_gateway_name(self):
""" Get original_gateway_name value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name the gateway had before revoking.
This attribute is named `originalGatewayName` in VSD API.
"""
return self._original_gateway_name
@original_gateway_name.setter
def original_gateway_name(self, value):
""" Set original_gateway_name value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name the gateway had before revoking.
This attribute is named `originalGatewayName` in VSD API.
"""
self._original_gateway_name = value
@property
def original_uplink_connection_info(self):
""" Get original_uplink_connection_info value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents an information blob of the original uplink connection information that applied to this NSG.
This attribute is named `originalUplinkConnectionInfo` in VSD API.
"""
return self._original_uplink_connection_info
@original_uplink_connection_info.setter
def original_uplink_connection_info(self, value):
""" Set original_uplink_connection_info value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents an information blob of the original uplink connection information that applied to this NSG.
This attribute is named `originalUplinkConnectionInfo` in VSD API.
"""
self._original_uplink_connection_info = value
@property
def associated_enterprise_id(self):
""" Get associated_enterprise_id value.
Notes:
                the ID of the associated enterprise
This attribute is named `associatedEnterpriseID` in VSD API.
"""
return self._associated_enterprise_id
@associated_enterprise_id.setter
def associated_enterprise_id(self, value):
""" Set associated_enterprise_id value.
Notes:
                the ID of the associated enterprise
This attribute is named `associatedEnterpriseID` in VSD API.
"""
self._associated_enterprise_id = value
@property
def associated_enterprise_name(self):
""" Get associated_enterprise_name value.
Notes:
Name of the associated enterprise
This attribute is named `associatedEnterpriseName` in VSD API.
"""
return self._associated_enterprise_name
@associated_enterprise_name.setter
def associated_enterprise_name(self, value):
""" Set associated_enterprise_name value.
Notes:
Name of the associated enterprise
This attribute is named `associatedEnterpriseName` in VSD API.
"""
self._associated_enterprise_name = value
@property
def associated_entity_type(self):
""" Get associated_entity_type value.
Notes:
Associated Entity Type: NSGATEWAY or GATEWAY
This attribute is named `associatedEntityType` in VSD API.
"""
return self._associated_entity_type
@associated_entity_type.setter
def associated_entity_type(self, value):
""" Set associated_entity_type value.
Notes:
Associated Entity Type: NSGATEWAY or GATEWAY
This attribute is named `associatedEntityType` in VSD API.
"""
self._associated_entity_type = value
@property
def associated_gateway_id(self):
""" Get associated_gateway_id value.
Notes:
ID of the assigned Gateway
This attribute is named `associatedGatewayID` in VSD API.
"""
return self._associated_gateway_id
@associated_gateway_id.setter
def associated_gateway_id(self, value):
""" Set associated_gateway_id value.
Notes:
ID of the assigned Gateway
This attribute is named `associatedGatewayID` in VSD API.
"""
self._associated_gateway_id = value
@property
def associated_gateway_name(self):
""" Get associated_gateway_name value.
Notes:
Name of the associated Gateway
This attribute is named `associatedGatewayName` in VSD API.
"""
return self._associated_gateway_name
@associated_gateway_name.setter
def associated_gateway_name(self, value):
""" Set associated_gateway_name value.
Notes:
Name of the associated Gateway
This attribute is named `associatedGatewayName` in VSD API.
"""
self._associated_gateway_name = value
@property
def associated_ns_gateway_id(self):
""" Get associated_ns_gateway_id value.
Notes:
ID of the assigned NSG
This attribute is named `associatedNSGatewayID` in VSD API.
"""
return self._associated_ns_gateway_id
@associated_ns_gateway_id.setter
def associated_ns_gateway_id(self, value):
""" Set associated_ns_gateway_id value.
Notes:
ID of the assigned NSG
This attribute is named `associatedNSGatewayID` in VSD API.
"""
self._associated_ns_gateway_id = value
@property
def associated_ns_gateway_name(self):
""" Get associated_ns_gateway_name value.
Notes:
Name of the associated NSG
This attribute is named `associatedNSGatewayName` in VSD API.
"""
return self._associated_ns_gateway_name
@associated_ns_gateway_name.setter
def associated_ns_gateway_name(self, value):
""" Set associated_ns_gateway_name value.
Notes:
Name of the associated NSG
This attribute is named `associatedNSGatewayName` in VSD API.
"""
self._associated_ns_gateway_name = value
@property
def status_string(self):
""" Get status_string value.
Notes:
Extra status info
This attribute is named `statusString` in VSD API.
"""
return self._status_string
@status_string.setter
def status_string(self, value):
""" Set status_string value.
Notes:
Extra status info
This attribute is named `statusString` in VSD API.
"""
self._status_string = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause |
os2webscanner/os2webscanner | scrapy-webscanner/scanners/rules/regexrule.py | 1 | 6057 | # The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import logging
import re
import regex
from .cpr import CPRRule
from .rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, pattern_strings, sensitivity, cpr_enabled=False, ignore_irrelevant=False,
do_modulus11=False, *args, **kwargs):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
# Convert QuerySet to list
super().__init__(*args, **kwargs)
self.regex_patterns = list(pattern_strings.all())
self.name = name
self.sensitivity = sensitivity
self.cpr_enabled = cpr_enabled
self.ignore_irrelevant = ignore_irrelevant
self.do_modulus11 = do_modulus11
self.regex_str = ''
if not self._is_cpr_only():
            logging.info('------- Regex patterns ---------')
            for _pattern in self.regex_patterns:
                logging.info(_pattern.pattern_string)
logging.info('-----------------------------\n')
self.regex_str = self.compund_rules()
self.regex = regex.compile(self.regex_str, regex.DOTALL)
# bind the 'do_modulus11' and 'ignore_irrelevant' variables to the cpr_enabled property so that they're always
# false if it is false
if not cpr_enabled:
self.do_modulus11 = cpr_enabled
self.ignore_irrelevant = cpr_enabled
def __str__(self):
"""
Returns a string object representation of this object
:return:
"""
return '{\n\tname: ' + self.name + \
',\n\tregex: ' + self.regex_str + \
',\n\tcpr_enabled: ' + str(self._is_cpr_only()) + \
',\n\tsensitivity: ' + str(self.sensitivity) + '\n}'
def compund_rules(self):
"""
This method compounds all the regex patterns in the rule set into one regex rule that is OR'ed
e.g. A ruleSet of {pattern1, pattern2, pattern3} becomes (pattern1 | pattern2 | pattern3)
        :return: the compound pattern string, or None if the rule has no patterns
"""
rule_set = set(self.regex_patterns)
if len(rule_set) == 1:
return rule_set.pop().pattern_string
if len(rule_set) > 1:
compound_rule = '('
for _ in self.regex_patterns:
compound_rule += rule_set.pop().pattern_string
if len(rule_set) <= 0:
compound_rule += ')'
else:
compound_rule += '|'
print('Returning< '+compound_rule+' >')
return compound_rule
if len(rule_set) < 1:
return None
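    # Example: a rule whose two pattern strings are r'\d{6}' and r'secret'
    # compounds to something like '(\d{6}|secret)', so execute() can scan the
    # text with a single regex.finditer() pass.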
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
if self._is_cpr_only():
cpr_rule = CPRRule(self.do_modulus11, self.ignore_irrelevant, whitelist=None)
temp_matches = cpr_rule.execute(text)
matches.update(temp_matches)
else:
re_matches = self.regex.finditer(text)
if self.cpr_enabled:
cpr_rule = CPRRule(self.do_modulus11, self.ignore_irrelevant, whitelist=None)
matches.update(cpr_rule.execute(text))
for match in re_matches:
matched_data = match.group(0)
if len(matched_data) > 1024:
# TODO: Get rid of magic number
matched_data = match.group(1)
matches.add(MatchItem(matched_data=matched_data,
sensitivity=self.sensitivity))
return matches
def is_all_match(self, matches):
"""
Checks if each rule is matched with the provided list of matches
:param matches: List of matches
        :return: {True | False}
"""
if not isinstance(matches, set):
return False
cpr_match = False
# If it turns out that we're only doing a cpr scan then scan for the first match and return true
if self._is_cpr_only():
for match in matches:
if re.match(self.cpr_pattern, match['original_matched_data']):
return True
else:
regex_patterns = set(self.regex_patterns)
# for rule in self.regex_patterns:
for pattern in self.regex_patterns:
for match in matches:
if re.match(pattern.pattern_string, match['matched_data']) and regex_patterns:
regex_patterns.pop()
continue
if self.cpr_enabled and not cpr_match and 'original_matched_data' in match:
if re.match(self.cpr_pattern, match['original_matched_data']):
cpr_match = True
if not regex_patterns:
break
if not self.cpr_enabled:
return not regex_patterns
else:
return not regex_patterns and cpr_match
def _is_cpr_only(self):
"""Just a method to decide if we are only doing a CPR scan."""
return self.cpr_enabled and len(self.regex_patterns) <= 0
| mpl-2.0 |
magcius/dolphin | Tools/find-includes-cycles.py | 157 | 2630 | #! /usr/bin/env python
'''
Run this script from Source/Core/ to find all the #include cycles.
'''
import subprocess
def get_local_includes_for(path):
lines = open(path).read().split('\n')
includes = [l.strip() for l in lines if l.strip().startswith('#include')]
return [i.split()[1][1:-1] for i in includes if '"' in i.split()[1]]
def find_all_files():
'''Could probably use os.walk, but meh.'''
f = subprocess.check_output(['find', '.', '-name', '*.h'],
universal_newlines=True).strip().split('\n')
return [p[2:] for p in f]
def make_include_graph():
return { f: get_local_includes_for(f) for f in find_all_files() }
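# The resulting graph maps each header to the headers it includes with quotes,
# e.g. {'Core/Foo.h': ['Core/Bar.h', ...]}; any strongly connected component with
# more than one node reported below is an #include cycle.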
def strongly_connected_components(graph):
"""
Tarjan's Algorithm (named for its discoverer, Robert Tarjan) is a graph theory algorithm
for finding the strongly connected components of a graph.
Based on: http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
"""
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors of `node`
try:
successors = graph[node]
except:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited; recurse on it
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
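# Minimal usage sketch (editor's addition; the toy graph below is hypothetical,
# not taken from the Dolphin source tree). A mutual include between a.h and
# b.h forms one multi-node component, which is what the __main__ block reports:
#
#     demo = {'a.h': ['b.h'], 'b.h': ['a.h'], 'c.h': ['a.h']}
#     strongly_connected_components(demo)
#     # -> e.g. [('b.h', 'a.h'), ('c.h',)]   (ordering may vary)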
if __name__ == '__main__':
comp = strongly_connected_components(make_include_graph())
for c in comp:
if len(c) != 1:
print(c)
| gpl-2.0 |
stevelle/glance | glance/registry/api/v2/__init__.py | 20 | 1125 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.common import wsgi
from glance.registry.api.v2 import rpc
def init(mapper):
rpc_resource = rpc.create_resource()
mapper.connect("/rpc", controller=rpc_resource,
conditions=dict(method=["POST"]),
action="__call__")
class API(wsgi.Router):
"""WSGI entry point for all Registry requests."""
def __init__(self, mapper):
mapper = mapper or wsgi.APIMapper()
init(mapper)
super(API, self).__init__(mapper)
| apache-2.0 |
bolkedebruin/airflow | airflow/providers/docker/example_dags/example_docker_swarm_operator.py | 1 | 1606 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
from datetime import timedelta
from airflow.utils.dates import days_ago
from airflow import DAG
from airflow.providers.docker.operators.docker_swarm import DockerSwarmOperator
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': days_ago(1),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False
}
dag = DAG(
'docker_swarm_sample',
default_args=default_args,
schedule_interval=timedelta(minutes=10),
catchup=False
)
with dag as dag:
t1 = DockerSwarmOperator(
api_version='auto',
docker_url='tcp://localhost:2375', # Set your docker URL
command='/bin/sleep 10',
image='centos:latest',
auto_remove=True,
task_id='sleep_with_swarm',
)
"""
| apache-2.0 |
mezz64/home-assistant | homeassistant/components/rpi_pfio/binary_sensor.py | 14 | 2527 | """Support for binary sensor using the PiFace Digital I/O module on a RPi."""
import voluptuous as vol
from homeassistant.components import rpi_pfio
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
CONF_INVERT_LOGIC = "invert_logic"
CONF_PORTS = "ports"
CONF_SETTLE_TIME = "settle_time"
DEFAULT_INVERT_LOGIC = False
DEFAULT_SETTLE_TIME = 20
PORT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_SETTLE_TIME, default=DEFAULT_SETTLE_TIME): cv.positive_int,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_PORTS, default={}): vol.Schema({cv.positive_int: PORT_SCHEMA})}
)
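# Illustrative YAML configuration sketch derived from the schema above
# (editor's addition; the port number and values are examples only):
#
#   binary_sensor:
#     - platform: rpi_pfio
#       ports:
#         0:
#           name: Door sensor
#           settle_time: 50
#           invert_logic: true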
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PiFace Digital Input devices."""
binary_sensors = []
ports = config.get(CONF_PORTS)
for port, port_entity in ports.items():
name = port_entity.get(CONF_NAME)
settle_time = port_entity[CONF_SETTLE_TIME] / 1000
invert_logic = port_entity[CONF_INVERT_LOGIC]
binary_sensors.append(
RPiPFIOBinarySensor(hass, port, name, settle_time, invert_logic)
)
add_entities(binary_sensors, True)
rpi_pfio.activate_listener(hass)
class RPiPFIOBinarySensor(BinarySensorEntity):
"""Represent a binary sensor that a PiFace Digital Input."""
def __init__(self, hass, port, name, settle_time, invert_logic):
"""Initialize the RPi binary sensor."""
self._port = port
self._name = name or DEVICE_DEFAULT_NAME
self._invert_logic = invert_logic
self._state = None
def read_pfio(port):
"""Read state from PFIO."""
self._state = rpi_pfio.read_input(self._port)
self.schedule_update_ha_state()
rpi_pfio.edge_detect(hass, self._port, read_pfio, settle_time)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
return self._state != self._invert_logic
def update(self):
"""Update the PFIO state."""
self._state = rpi_pfio.read_input(self._port)
| apache-2.0 |
geishatokyo-lightning/lightning | lightning_core/test/testvg.py | 1 | 5533 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Geisha Tokyo Entertainment, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
from lightning_core.vg.vg import *
from lightning_core.vg.parser import *
from lxml import etree
class TestLinearGrad(unittest.TestCase):
def test_constructor(self):
c1 = [256,256,256,256]
c2 = [ 0, 0, 0, 0]
sp1 = Stop(c1, '100')
sp2 = Stop(c2, '0')
gtf = {'scaleX':'0', 'scaleY':'0.101'}
lg = LinearGradient('100', gtf, (sp1,sp2))
self.assertEqual(lg.get('id'), '100')
self.assertEqual(lg.get('gradientUnits'), 'userSpaceOnUse')
self.assertEqual(lg.get('x1'), '-819')
self.assertEqual(lg.get('x2'), '819')
self.assertEqual(lg.get('gradientTransform'), 'matrix(0.00 0.00 0.00 0.10 0.0000 0.0000)')
self.assertEqual(len(lg), 2)
self.assertEqual(lg[0].get('stop-color'), '#ffffff')
self.assertEqual(lg[0].get('stop-opacity'), '1.0')
self.assertEqual(lg[0].get('offset'), str(100.0/255))
self.assertEqual(lg[1].get('stop-color'), '#000000')
self.assertEqual(lg[1].get('stop-opacity'), str(0.0/255))
self.assertEqual(lg[1].get('offset'), str(0.0/255))
class TestTransform(unittest.TestCase):
def setUp(self):
filename = './lightning_core/test/xmlsamples.xml'
f = open(filename,'r')
samplexml = f.read()
self.poxml = etree.XML(samplexml).xpath('.//PLACE_OBJECT2_HAS_COLORTRANS/PlaceObject2')[0]
self.transform = Transform()
self.parser = Parser()
self.po = self.parser._get_place_object(self.poxml)
def test_constructor(self):
transform = Transform()
self.assertEqual(transform.sx, 1.0)
self.assertEqual(transform.sy, 1.0)
self.assertEqual(transform.tx, 0.0)
self.assertEqual(transform.ty, 0.0)
self.assertEqual(transform.wx, 0.0)
self.assertEqual(transform.wy, 0.0)
self.assertEqual(transform.ctf, [])
self.assertEqual(transform.depth, 1)
self.assertEqual(transform.clipDepth, None)
self.assertEqual(transform.name, None)
self.assertEqual(transform.visible, True)
def test_set_items_and_get_matrix(self):
transform = Transform()
transform.set_items(self.po.items())
self.assertEqual(transform.get_matrix(), (1.001770019531250, 0.0, 0.0, 1.0, -25.7, -57.0))
class TestTree(unittest.TestCase):
def test_constructor(self):
tree = Tree()
self.assertAlmostEqual(tree.sx, 1.0)
self.assertAlmostEqual(tree.sy, 1.0)
self.assertAlmostEqual(tree.wx, 0.0)
self.assertAlmostEqual(tree.wy, 0.0)
self.assertAlmostEqual(tree.tx, 0.0)
self.assertAlmostEqual(tree.ty, 0.0)
self.assertEqual(len(tree.ctf), 0.0)
self.assertEqual(tree.depth, 1)
self.assertEqual(tree.name, None)
self.assertEqual(len(tree.children), 0)
self.assertEqual(tree.parent, None)
def test_update(self):
tree = Tree()
tree.set_items({'tx':2.0})
self.assertAlmostEqual(tree.tx, 2.0)
def test_str(self):
tree = Tree()
self.assertEqual(str(tree), 'key=None\n')
tree.key = 'hoge'
self.assertEqual(str(tree), 'key=hoge\n')
tree2 = Tree()
tree2.key = 'fuga'
tree.children.append(tree2)
self.assertEqual(str(tree), 'key=hoge\n\tkey=fuga')
class TestAnimation(unittest.TestCase):
def test_constructor(self):
anim = Animation()
self.assertEqual(anim.key, None)
self.assertEqual(len(anim.frames), 0)
def test_appendFrame(self):
anim = Animation()
index = 1
sx = 1.0
sy = 1.0
wx = 0.0
wy = 0.0
tx = 0.0
ty = 0.0
ctf = []
anim.key = 'hoge'
anim.appendFrame(index, sx, sy, wx, wy, tx, ty, ctf)
self.assertEqual(anim.key, 'hoge')
self.assertEqual(len(anim.frames), 1)
frame = anim.frames[0]
self.assertEqual(frame['index'], 1)
        self.assertAlmostEqual(frame['sx'], 1.0)
self.assertAlmostEqual(frame['sy'], 1.0)
self.assertAlmostEqual(frame['wx'], 0.0)
self.assertAlmostEqual(frame['wy'], 0.0)
self.assertAlmostEqual(frame['tx'], 0.0)
self.assertAlmostEqual(frame['ty'], 0.0)
self.assertEqual(frame['ctf'], [])
if __name__ == '__main__':
unittest.main()
| mit |
glorizen/nupic | tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py | 12 | 10082 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests OPF descriptionTemplate.py-based experiment/sub-experiment pair"""
import os
import pprint
import sys
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.frameworks.opf.opfhelpers import (
loadExperimentDescriptionScriptFromDir,
getExperimentDescriptionInterfaceFromModule
)
from nupic.support.unittesthelpers.testcasebase import (
TestCaseBase as HelperTestCaseBase)
# Our __main__ entry block sets this to an instance of MyTestEnvironment()
g_myEnv = None
g_debug = False
class MyTestEnvironment(object):
def __init__(self):
examplesDir = resource_filename("nupic", os.path.join("..", "examples"))
_debugOut("examplesDir=<%s>" % (examplesDir,))
assert os.path.exists(examplesDir), \
"%s is not present in filesystem" % examplesDir
# This is where we find OPF binaries (e.g., run_opf_experiment.py, etc.)
# In the autobuild, it is a read-only directory
self.__opfBinDir = resource_filename("nupic", os.path.join("..", "scripts"))
assert os.path.exists(self.__opfBinDir), \
"%s is not present in filesystem" % self.__opfBinDir
_debugOut("self.__opfBinDir=<%s>" % self.__opfBinDir)
# Where this script is running from (our autotest counterpart may have
# copied it from its original location)
self.__testRunDir = os.path.abspath(os.path.dirname(__file__))
_debugOut("self.__testRunDir=<%s>" % self.__testRunDir)
# Parent directory of our private OPF experiments
self.__opfExperimentsParentDir = os.path.join(self.__testRunDir,
"experiments")
assert os.path.exists(self.__opfExperimentsParentDir), \
"%s is not present in filesystem" % self.__opfExperimentsParentDir
_debugOut("self.__opfExperimentsParentDir=<%s>"
% self.__opfExperimentsParentDir)
def getOpfRunExperimentPyPath(self):
return os.path.join(self.__opfBinDir, "run_opf_experiment.py")
def getOpfExperimentPath(self, experimentName):
"""
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiment.
Returns: absolute path to the experiment directory
"""
path = os.path.join(self.__opfExperimentsParentDir, experimentName)
assert os.path.isdir(path), \
"Experiment path %s doesn't exist or is not a directory" % (path,)
return path
class MyTestCaseBase(HelperTestCaseBase):
def setUp(self):
""" Method called to prepare the test fixture. This is called immediately
before calling the test method; any exception raised by this method will be
considered an error rather than a test failure. The default implementation
does nothing.
"""
global g_myEnv
if not g_myEnv:
# Setup environment
g_myEnv = MyTestEnvironment()
def tearDown(self):
""" Method called immediately after the test method has been called and the
result recorded. This is called even if the test method raised an exception,
so the implementation in subclasses may need to be particularly careful
about checking internal state. Any exception raised by this method will be
considered an error rather than a test failure. This method will only be
called if the setUp() succeeds, regardless of the outcome of the test
method. The default implementation does nothing.
"""
# Reset our log items
self.resetExtraLogItems()
def shortDescription(self):
""" Override to force unittest framework to use test method names instead
of docstrings in the report.
"""
return None
def executePositiveOpfExperiment(self, experimentName, short=False):
""" Executes a positive OPF RunExperiment test as a subprocess and validates
its exit status.
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiment.
short: if True, attempt to run the experiment with --testMode
flag turned on, which causes all inference and training
iteration counts to be overridden with small counts.
Returns: result from _executeExternalCmdAndReapOutputs
"""
opfRunner = g_myEnv.getOpfRunExperimentPyPath()
opfExpDir = g_myEnv.getOpfExperimentPath(experimentName)
r = self.__executePositiveRunExperimentTest(runnerPath=opfRunner,
experimentDirPath=opfExpDir,
short=short)
return r
def __executePositiveRunExperimentTest(self,
runnerPath,
experimentDirPath,
customOptions=[],
short=False):
""" Executes a positive RunExperiment.py test and performs
basic validation
runnerPath: experiment running (LPF or OPF RunExperiment.py path)
experimentDirPath: directory containing the description.py file of interest
short: if True, attempt to run the experiment with --testMode
flag turned on, which causes all inference and training
iteration counts to be overridden with small counts.
NOTE: if the (possibly aggregated) dataset has fewer
rows than the count overrides, then an LPF experiment
will fail.
Returns: result from _executeExternalCmdAndReapOutputs
"""
#----------------------------------------
# Set up args
command = [
"python",
runnerPath,
experimentDirPath,
]
command.extend(customOptions)
if short:
command.append("--testMode")
self.addExtraLogItem({'command':command})
#----------------------------------------
# Execute RunExperiment.py as subprocess and collect results
r = _executeExternalCmdAndReapOutputs(command)
self.addExtraLogItem({'result':r})
_debugOut(("_executeExternalCmdAndReapOutputs(%s)=%s") % (command, r))
#----------------------------------------
# Check subprocess exit status
self.assertEqual(r['exitStatus'], 0,
("Expected status = 0 from %s; got: %s") % \
(runnerPath, r['exitStatus'],))
self.resetExtraLogItems()
return r
class PositiveTests(MyTestCaseBase):
#========================
def test_sub_experiment_override(self):
expDir = g_myEnv.getOpfExperimentPath("gym")
module = loadExperimentDescriptionScriptFromDir(expDir)
expIface = getExperimentDescriptionInterfaceFromModule(module)
modelDesc = expIface.getModelDescription()
tpActivationThreshold = modelDesc['modelParams'] \
['tpParams']['activationThreshold']
expectedValue = 12
self.assertEqual(tpActivationThreshold, expectedValue,
"Expected tp activationThreshold=%s, but got %s" % (
expectedValue, tpActivationThreshold))
def test_run_sub_experiment(self):
self.executePositiveOpfExperiment(experimentName="gym", short=True)
################################################################################
# Support functions
################################################################################
def _executeExternalCmdAndReapOutputs(args):
"""
args: Args list as defined for the args parameter in subprocess.Popen()
  Returns: result dictionary:
{
'exitStatus':<exit-status-of-external-command>,
'stdoutData':"string",
'stderrData':"string"
}
"""
import subprocess
_debugOut(("Starting...\n<%s>") % \
(args,))
p = subprocess.Popen(args,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_debugOut(("Process started for <%s>") % (args,))
(stdoutData, stderrData) = p.communicate()
_debugOut(("Process completed for <%s>: exit status=%s, " + \
"stdoutDataType=%s, stdoutData=<%s>, stderrData=<%s>") % \
(args, p.returncode, type(stdoutData), stdoutData, stderrData))
result = dict(
exitStatus = p.returncode,
stdoutData = stdoutData,
stderrData = stderrData,
)
_debugOut(("args: <%s>: result:\n%s") % \
(args, pprint.pformat(result, indent=4)))
return result
def _debugOut(msg):
if g_debug:
callerTraceback = whoisCallersCaller()
print "OPF TestDescriptionTemplate (f=%s;line=%s): %s" % \
(callerTraceback.function, callerTraceback.lineno, msg,)
sys.stdout.flush()
def whoisCallersCaller():
"""
Returns: Traceback namedtuple for our caller's caller
"""
import inspect
frameObj = inspect.stack()[2][0]
return inspect.getframeinfo(frameObj)
if __name__ == "__main__":
g_myEnv = MyTestEnvironment()
unittest.longMessage = True
unittest.main()
| agpl-3.0 |
fdzh/shadowsocks | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
global loaded, libcrypto, buf
libcrypto = util.find_library(('crypto', 'eay32'),
'EVP_get_cipherbyname',
'libcrypto')
if libcrypto is None:
raise Exception('libcrypto(OpenSSL) not found')
libcrypto.EVP_get_cipherbyname.restype = c_void_p
libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
c_char_p, c_char_p, c_int)
libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int)
libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
libcrypto.OpenSSL_add_all_ciphers()
buf = create_string_buffer(buf_size)
loaded = True
def load_cipher(cipher_name):
func_name = 'EVP_' + cipher_name.replace('-', '_')
if bytes != str:
func_name = str(func_name, 'utf-8')
cipher = getattr(libcrypto, func_name, None)
if cipher:
cipher.restype = c_void_p
return cipher()
return None
class OpenSSLCrypto(object):
def __init__(self, cipher_name, key, iv, op):
self._ctx = None
if not loaded:
load_openssl()
cipher_name = common.to_bytes(cipher_name)
cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
if not cipher:
cipher = load_cipher(cipher_name)
if not cipher:
raise Exception('cipher %s not found in libcrypto' % cipher_name)
key_ptr = c_char_p(key)
iv_ptr = c_char_p(iv)
self._ctx = libcrypto.EVP_CIPHER_CTX_new()
if not self._ctx:
raise Exception('can not create cipher context')
r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
key_ptr, iv_ptr, c_int(op))
if not r:
self.clean()
raise Exception('can not initialize cipher context')
def update(self, data):
global buf_size, buf
cipher_out_len = c_long(0)
l = len(data)
if buf_size < l:
buf_size = l * 2
buf = create_string_buffer(buf_size)
libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
byref(cipher_out_len), c_char_p(data), l)
# buf is copied to a str object when we access buf.raw
return buf.raw[:cipher_out_len.value]
def __del__(self):
self.clean()
def clean(self):
if self._ctx:
libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
libcrypto.EVP_CIPHER_CTX_free(self._ctx)
ciphers = {
'aes-128-cfb': (16, 16, OpenSSLCrypto),
'aes-192-cfb': (24, 16, OpenSSLCrypto),
'aes-256-cfb': (32, 16, OpenSSLCrypto),
'aes-128-ofb': (16, 16, OpenSSLCrypto),
'aes-192-ofb': (24, 16, OpenSSLCrypto),
'aes-256-ofb': (32, 16, OpenSSLCrypto),
'aes-128-ctr': (16, 16, OpenSSLCrypto),
'aes-192-ctr': (24, 16, OpenSSLCrypto),
'aes-256-ctr': (32, 16, OpenSSLCrypto),
'aes-128-cfb8': (16, 16, OpenSSLCrypto),
'aes-192-cfb8': (24, 16, OpenSSLCrypto),
'aes-256-cfb8': (32, 16, OpenSSLCrypto),
'aes-128-cfb1': (16, 16, OpenSSLCrypto),
'aes-192-cfb1': (24, 16, OpenSSLCrypto),
'aes-256-cfb1': (32, 16, OpenSSLCrypto),
'bf-cfb': (16, 8, OpenSSLCrypto),
'camellia-128-cfb': (16, 16, OpenSSLCrypto),
'camellia-192-cfb': (24, 16, OpenSSLCrypto),
'camellia-256-cfb': (32, 16, OpenSSLCrypto),
'cast5-cfb': (16, 8, OpenSSLCrypto),
'des-cfb': (8, 8, OpenSSLCrypto),
'idea-cfb': (16, 8, OpenSSLCrypto),
'rc2-cfb': (16, 8, OpenSSLCrypto),
'rc4': (16, 0, OpenSSLCrypto),
'seed-cfb': (16, 16, OpenSSLCrypto),
}
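# Each ciphers entry maps a method name to (key_size, iv_size, crypto_class).
# A minimal usage sketch (editor's addition; the key/iv bytes are dummy data,
# mirroring the run_method() helper defined below):
#
#     key_len, iv_len, cls = ciphers['aes-256-cfb']
#     encryptor = cls('aes-256-cfb', b'k' * key_len, b'i' * iv_len, 1)  # op=1 encrypt
#     decryptor = cls('aes-256-cfb', b'k' * key_len, b'i' * iv_len, 0)  # op=0 decrypt
#     assert decryptor.update(encryptor.update(b'hello')) == b'hello'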
def run_method(method):
cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
run_method('aes-128-cfb')
def test_aes_256_cfb():
run_method('aes-256-cfb')
def test_aes_128_cfb8():
run_method('aes-128-cfb8')
def test_aes_256_ofb():
run_method('aes-256-ofb')
def test_aes_256_ctr():
run_method('aes-256-ctr')
def test_bf_cfb():
run_method('bf-cfb')
def test_rc4():
run_method('rc4')
if __name__ == '__main__':
test_aes_128_cfb()
| apache-2.0 |
MattDevo/edk2 | AppPkg/Applications/Python/Python-2.7.2/Lib/colorsys.py | 75 | 3847 | """Conversion functions between RGB and other color systems.
This modules provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0]
(with the exception of I and Q, which covers a slightly larger range).
Inputs outside the valid range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: Luminance, Chrominance (used by composite video signals)
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0:
r = 0.0
if g < 0.0:
g = 0.0
if b < 0.0:
b = 0.0
if r > 1.0:
r = 1.0
if g > 1.0:
g = 1.0
if b > 1.0:
b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc:
return 0.0, l, 0.0
if l <= 0.5:
s = (maxc-minc) / (maxc+minc)
else:
s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0:
return l, l, l
if l <= 0.5:
m2 = l * (1.0+s)
else:
m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH:
return m1 + (m2-m1)*hue*6.0
if hue < 0.5:
return m2
if hue < TWO_THIRD:
return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc:
return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0:
return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
i = i%6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
# Cannot get here
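# Illustrative round trip (editor's addition; values chosen for the example):
#
#     >>> h, s, v = rgb_to_hsv(0.2, 0.4, 0.4)
#     >>> (h, s, v)
#     (0.5, 0.5, 0.4)
#     >>> hsv_to_rgb(h, s, v)
#     (0.2, 0.4, 0.4)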
| bsd-2-clause |
srault95/netcall | examples/threading/server_threading_prefork.py | 1 | 3725 | #!/usr/bin/env python
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0 fdm=indent
"""
A simple RPC server that shows how to:
* start several worker processes
* use zmq proxy device to load balance requests to the workers
    * make each worker serve multiple RPC services concurrently
      using Python threads
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE, distributed as part of this software.
#-----------------------------------------------------------------------------
from os import getpid
from time import sleep
from multiprocessing import Process, cpu_count
from zmq import ROUTER, DEALER
from zmq.devices import ThreadProxy
from netcall.threading import ThreadingRPCService, JSONSerializer
from netcall.utils import get_zmq_classes
class EchoService(ThreadingRPCService):
def echo(self, s):
print "<pid:%s> %r echo %r" % (getpid(), self.connected, s)
return s
def sleep(self, t):
print "<pid:%s> %r sleep %s" % (getpid(), self.connected, t)
sleep(t)
def error(self):
raise ValueError('raising ValueError for fun!')
class MathService(ThreadingRPCService):
def add(self, a, b):
print "<pid:%s> %r add %r %r" % (getpid(), self.connected, a, b)
return a+b
def subtract(self, a, b):
print "<pid:%s> %r subtract %r %r" % (getpid(), self.connected, a, b)
return a-b
def multiply(self, a, b):
print "<pid:%s> %r multiply %r %r" % (getpid(), self.connected, a, b)
return a*b
def divide(self, a, b):
print "<pid:%s> %r divide %r %r" % (getpid(), self.connected, a, b)
return a/b
class Worker(Process):
def run(self):
# Multiple RPCService instances can be run in a single process
# via Python Threads
Context, _ = get_zmq_classes()
context = Context()
# Custom serializer/deserializer functions can be passed in. The server
# side ones must match.
echo = EchoService(context=context, serializer=JSONSerializer())
echo.connect('ipc:///tmp/rpc-demo-echo.service')
# We create two Math services to simulate load balancing. A client can
# connect to both of these services and requests will be load balanced.
math1 = MathService(context=context)
math1.connect('ipc:///tmp/rpc-demo-math1.service')
math2 = MathService(context=context)
math2.connect('ipc:///tmp/rpc-demo-math2.service')
# Next we spawn service greenlets and wait for them to exit
echo .start()
math1 .start()
math2 .start()
echo .serve()
math1 .serve()
math2 .serve()
if __name__ == '__main__':
workers = [Worker() for _ in range(cpu_count())]
for w in workers:
w.start()
echo_proxy = ThreadProxy(ROUTER, DEALER)
math1_proxy = ThreadProxy(ROUTER, DEALER)
math2_proxy = ThreadProxy(ROUTER, DEALER)
echo_proxy .bind_in('tcp://127.0.0.1:5555')
math1_proxy .bind_in('tcp://127.0.0.1:5556')
math2_proxy .bind_in('tcp://127.0.0.1:5557')
echo_proxy .bind_out('ipc:///tmp/rpc-demo-echo.service')
math1_proxy .bind_out('ipc:///tmp/rpc-demo-math1.service')
math2_proxy .bind_out('ipc:///tmp/rpc-demo-math2.service')
echo_proxy .start()
math1_proxy .start()
math2_proxy .start()
while True:
echo_proxy .join(0.25)
math1_proxy .join(0.25)
math2_proxy .join(0.25)
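# Client-side sketch (editor's addition): a client talking to the echo proxy
# would need the matching JSON serializer. The client class and call style
# shown here are assumptions -- check the netcall client examples for the
# real API before relying on them:
#
#     from netcall.threading import ThreadingRPCClient
#     echo = ThreadingRPCClient(serializer=JSONSerializer())
#     echo.connect('tcp://127.0.0.1:5555')
#     print(echo.call('echo', args=['hello']))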
| bsd-3-clause |
flavoi/diventi | diventi/adventures/migrations/0017_auto_20200504_2229.py | 1 | 1287 | # Generated by Django 2.2.12 on 2020-05-04 20:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('adventures', '0016_auto_20200503_1924'),
]
operations = [
migrations.RemoveField(
model_name='resolution',
name='antagonist_goals',
),
migrations.RemoveField(
model_name='situation',
name='resolution',
),
migrations.AddField(
model_name='antagonistgoal',
name='situations',
field=models.ManyToManyField(through='adventures.Resolution', to='adventures.Situation', verbose_name='situations'),
),
migrations.AddField(
model_name='resolution',
name='antagonist_goal',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='adventures.AntagonistGoal', verbose_name='antagonist goal'),
),
migrations.AddField(
model_name='resolution',
name='situation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='adventures.Situation', verbose_name='situation'),
),
]
| apache-2.0 |
n3storm/django-dynamic-preferences | dynamic_preferences/models.py | 1 | 4395 | """
Preference models, queryset and managers that handle the logic for persisting preferences.
"""
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.utils.functional import cached_property
from dynamic_preferences import user_preferences_registry, global_preferences_registry
from dynamic_preferences.registries import preference_models
from .utils import update
class BasePreferenceModel(models.Model):
"""
A base model with common logic for all preferences models.
"""
#: The section under which the preference is declared
section = models.CharField(
max_length=255, db_index=True, blank=True, null=True, default=None)
#: a name for the preference
name = models.CharField(max_length=255, db_index=True)
#: a value, serialized to a string. This field should not be accessed directly, use :py:attr:`BasePreferenceModel.value` instead
raw_value = models.TextField(null=True, blank=True)
class Meta:
abstract = True
app_label = 'dynamic_preferences'
@cached_property
def preference(self):
return self.registry.get(section=self.section, name=self.name)
@property
def verbose_name(self):
return self.preference.get('verbose_name', self.preference.identifier)
@property
def help_text(self):
return self.preference.get('help_text', '')
def set_value(self, value):
"""
Save serialized self.value to self.raw_value
"""
self.raw_value = self.preference.serializer.serialize(value)
def get_value(self):
"""
Return deserialized self.raw_value
"""
return self.preference.serializer.deserialize(self.raw_value)
value = property(get_value, set_value)
def save(self, **kwargs):
if self.pk is None and not self.raw_value:
self.value = self.preference.default
super(BasePreferenceModel, self).save(**kwargs)
def __str__(self):
return self.__repr__()
def __repr__(self):
return '{0} - {1}/{2}'.format(self.__class__.__name__, self.section, self.name)
class GlobalPreferenceModel(BasePreferenceModel):
registry = global_preferences_registry
class Meta:
unique_together = ('section', 'name')
app_label = 'dynamic_preferences'
verbose_name = "global preference"
verbose_name_plural = "global preferences"
class PerInstancePreferenceModel(BasePreferenceModel):
"""For preferences that are tied to a specific model instance"""
#: the instance which is concerned by the preference
#: use a ForeignKey pointing to the model of your choice
instance = None
class Meta(BasePreferenceModel.Meta):
unique_together = ('instance', 'section', 'name')
abstract = True
@classmethod
def get_instance_model(cls):
return cls._meta.get_field('instance').rel.to
@property
def registry(self):
return preference_models.get_by_instance(self.instance)
class UserPreferenceModel(PerInstancePreferenceModel):
instance = models.ForeignKey(settings.AUTH_USER_MODEL)
class Meta(PerInstancePreferenceModel.Meta):
app_label = 'dynamic_preferences'
verbose_name = "user preference"
verbose_name_plural = "user preferences"
global_preferences_registry.preference_model = GlobalPreferenceModel
# Create default preferences for new instances
from django.db.models.signals import post_save
def create_default_per_instance_preferences(sender, created, instance, **kwargs):
"""Create default preferences for PerInstancePreferenceModel"""
if created:
try:
registry = preference_models.get_by_instance(instance)
registry.create_default_preferences(instance)
except AttributeError:
pass
def invalidate_cache(sender, created, instance, **kwargs):
if not isinstance(instance, BasePreferenceModel):
return
registry = preference_models.get_by_preference(instance)
linked_instance = getattr(instance, 'instance', None)
kwargs = {}
if linked_instance:
kwargs['instance'] = linked_instance
manager = registry.manager(**kwargs)
manager.to_cache(instance)
post_save.connect(create_default_per_instance_preferences)
post_save.connect(invalidate_cache)
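# Minimal usage sketch (editor's addition; the section/name pair is
# hypothetical and must be declared in the corresponding preferences
# registry so that `preference.serializer` can be resolved):
#
#     pref = GlobalPreferenceModel(section='general', name='site_title')
#     pref.value = 'My site'           # serialized into pref.raw_value
#     assert pref.value == 'My site'   # deserialized back on access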
| bsd-3-clause |
nekulin/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/Demos/cerapi.py | 17 | 7436 | # A demo of the Windows CE Remote API
#
# This connects to a CE device, and interacts with it.
import wincerapi
import win32event
import win32api
import win32con
import os
import sys
import getopt
from repr import repr
def DumpPythonRegistry():
try:
h = wincerapi.CeRegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, "Software\\Python\\PythonCore\\%s\\PythonPath" % sys.winver)
except win32api.error:
print "The remote device does not appear to have Python installed"
return 0
path, typ = wincerapi.CeRegQueryValueEx(h, None)
print "The remote PythonPath is '%s'" % (str(path), )
h.Close()
return 1
def DumpRegistry(root, level=0):
# A recursive dump of the remote registry to test most functions.
h = wincerapi.CeRegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, None)
level_prefix = " " * level
index = 0
# Enumerate values.
while 1:
try:
name, data, typ = wincerapi.CeRegEnumValue(root, index)
except win32api.error:
break
print "%s%s=%s" % (level_prefix, name, repr(str(data)))
index = index+1
# Now enumerate all keys.
index=0
while 1:
try:
name, klass = wincerapi.CeRegEnumKeyEx(root, index)
except win32api.error:
break
print "%s%s\\" % (level_prefix, name)
subkey = wincerapi.CeRegOpenKeyEx(root, name)
DumpRegistry(subkey, level+1)
index = index+1
def DemoCopyFile():
# Create a file on the device, and write a string.
cefile = wincerapi.CeCreateFile("TestPython", win32con.GENERIC_WRITE, 0, None, win32con.OPEN_ALWAYS, 0, None)
wincerapi.CeWriteFile(cefile, "Hello from Python")
cefile.Close()
# reopen the file and check the data.
cefile = wincerapi.CeCreateFile("TestPython", win32con.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
if wincerapi.CeReadFile(cefile, 100) != "Hello from Python":
print "Couldnt read the data from the device!"
cefile.Close()
# Delete the test file
wincerapi.CeDeleteFile("TestPython")
print "Created, wrote to, read from and deleted a test file!"
def DemoCreateProcess():
try:
hp, ht, pid, tid = wincerapi.CeCreateProcess("Windows\\Python.exe", "", None, None, 0, 0, None, "", None)
# Not necessary, except to see if handle closing raises an exception
# (if auto-closed, the error is suppressed)
hp.Close()
ht.Close()
print "Python is running on the remote device!"
except win32api.error, (hr, fn, msg):
print "Couldnt execute remote process -", msg
def DumpRemoteMachineStatus():
ACLineStatus, BatteryFlag, BatteryLifePercent, BatteryLifeTime, BatteryFullLifeTime, BackupBatteryFlag, BackupBatteryLifePercent, BackupBatteryLifeTime, BackupBatteryLifeTime = \
wincerapi.CeGetSystemPowerStatusEx()
if ACLineStatus:
power = "AC"
else:
power = "battery"
if BatteryLifePercent==255:
batPerc = "unknown"
else:
batPerc = BatteryLifePercent
print "The batteries are at %s%%, and is currently being powered by %s" % (batPerc, power)
memLoad, totalPhys, availPhys, totalPage, availPage, totalVirt, availVirt = \
wincerapi.CeGlobalMemoryStatus()
print "The memory is %d%% utilized." % (memLoad)
print "%-20s%-10s%-10s" % ("", "Total", "Avail")
print "%-20s%-10s%-10s" % ("Physical Memory", totalPhys, availPhys)
print "%-20s%-10s%-10s" % ("Virtual Memory", totalVirt, availVirt)
print "%-20s%-10s%-10s" % ("Paging file", totalPage, availPage)
storeSize, freeSize = wincerapi.CeGetStoreInformation()
print "%-20s%-10s%-10s" % ("File store", storeSize, freeSize)
print "The CE temp path is", wincerapi.CeGetTempPath()
print "The system info for the device is", wincerapi.CeGetSystemInfo()
def DumpRemoteFolders():
# Dump all special folders possible.
for name, val in wincerapi.__dict__.items():
if name[:6]=="CSIDL_":
try:
loc = str(wincerapi.CeGetSpecialFolderPath(val))
print "Folder %s is at %s" % (name, loc)
except win32api.error, details:
pass
# Get the shortcut targets for the "Start Menu"
print "Dumping start menu shortcuts..."
try:
startMenu = str(wincerapi.CeGetSpecialFolderPath(wincerapi.CSIDL_STARTMENU))
except win32api.error, details:
print "This device has no start menu!", details
startMenu = None
if startMenu:
for fileAttr in wincerapi.CeFindFiles(os.path.join(startMenu, "*")):
fileName = fileAttr[8]
fullPath = os.path.join(startMenu, str(fileName))
try:
resolved = wincerapi.CeSHGetShortcutTarget(fullPath)
except win32api.error, (rc, fn, msg):
resolved = "#Error - %s" % msg
print "%s->%s" % (fileName, resolved)
# print "The start menu is at",
# print wincerapi.CeSHGetShortcutTarget("\\Windows\\Start Menu\\Shortcut to Python.exe.lnk")
def usage():
print "Options:"
print "-a - Execute all demos"
print "-p - Execute Python process on remote device"
print "-r - Dump the remote registry"
print "-f - Dump all remote special folder locations"
print "-s - Dont dump machine status"
print "-y - Perform asynch init of CE connection"
def main():
async_init = bStartPython = bDumpRegistry = bDumpFolders = 0
bDumpStatus = 1
try:
        opts, args = getopt.getopt(sys.argv[1:], "aprsfy")
except getopt.error, why:
print "Invalid usage:", why
usage()
return
for o, v in opts:
if o=="-a":
            bStartPython = bDumpRegistry = bDumpStatus = bDumpFolders = async_init = 1
if o=="-p":
bStartPython=1
if o=="-r":
bDumpRegistry=1
if o=="-s":
bDumpStatus=0
if o=="-f":
bDumpFolders = 1
if o=="-y":
print "Doing asynch init of CE connection"
async_init = 1
if async_init:
event, rc = wincerapi.CeRapiInitEx()
while 1:
rc = win32event.WaitForSingleObject(event, 500)
if rc==win32event.WAIT_OBJECT_0:
# We connected.
break
else:
print "Waiting for Initialize to complete (picture a Cancel button here :)"
else:
wincerapi.CeRapiInit()
print "Connected to remote CE device."
try:
verinfo = wincerapi.CeGetVersionEx()
print "The device is running windows CE version %d.%d - %s" % (verinfo[0], verinfo[1], verinfo[4])
if bDumpStatus:
print "Dumping remote machine status"
DumpRemoteMachineStatus()
if bDumpRegistry:
print "Dumping remote registry..."
DumpRegistry(win32con.HKEY_LOCAL_MACHINE)
if bDumpFolders:
print "Dumping remote folder information"
DumpRemoteFolders()
DemoCopyFile()
if bStartPython:
print "Starting remote Python process"
if DumpPythonRegistry():
DemoCreateProcess()
else:
print "Not trying to start Python, as it's not installed"
finally:
wincerapi.CeRapiUninit()
print "Disconnected"
if __name__=='__main__':
main()
| apache-2.0 |
AnishShah/tensorflow | tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py | 2 | 22613 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
class EqualitySplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
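    # The hand-computed expectations below follow a regularized leaf
    # weight/gain pattern inferred from the numbers (editor's note, not an
    # official formula reference): with g = the per-partition gradient sum
    # shrunk toward zero by l1=0.1 and h = the matching hessian sum,
    #   weight = -g / (h + l2)   and   gain = g**2 / (h + l2),   l2=1.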
# Check the split on partition 0.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active feature here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testObliviousFeatureSplitGeneration(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 1 | 1 |
# i1 | (-0.5, 0.07) | 1 | 2 |
# i2 | (1.2, 0.2) | 1 | 1 |
# i3 | (4.0, 0.13) | 2 | 2 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [1, 1, 1, 2]
indices = [[0, 0], [1, 0], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 1, 2], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([1, 2], partitions)
# For partition 1.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight1 = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain1 = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight1 = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain1 = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain1 = 0.46043165467625885
split_info = split_info_pb2.ObliviousSplitInfo()
split_info.ParseFromString(splits[0])
# Children of partition 1.
left_child = split_info.children[0].vector
right_child = split_info.children[1].vector
split_node = split_info.split_node.oblivious_categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
self.assertAllClose([expected_left_weight1], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight1], right_child.value, 0.00001)
# For partition2.
expected_left_weight2 = 0
expected_left_gain2 = 0
# -(4 - 0.1) / (0.13 + 1)
expected_right_weight2 = -3.4513274336283186
# (4 - 0.1) ** 2 / (0.13 + 1)
expected_right_gain2 = 13.460176991150442
# (4 - 0.1) ** 2 / (0.13 + 1)
expected_bias_gain2 = 13.460176991150442
# Children of partition 2.
left_child = split_info.children[2].vector
right_child = split_info.children[3].vector
self.assertAllClose([expected_left_weight2], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight2], right_child.value, 0.00001)
self.assertAllClose(
expected_left_gain1 + expected_right_gain1 - expected_bias_gain1 +
expected_left_gain2 + expected_right_gain2 - expected_bias_gain2,
gains[0], 0.00001)
def testGenerateFeatureSplitCandidatesSumReduction(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0,
loss_uses_sum_reduction=True)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.4 + 2.4 - 0.1) / (0.24 + 0.4 + 1)
expected_left_weight = -1.6463414634146338
# (0.4 + 2.4 - 0.1) ** 2 / (0.24 + 0.4 + 1)
expected_left_gain = 4.445121951219511
# -(-1 + 0.1) / (0.14 + 1)
expected_right_weight = 0.789473684211
# (-1 + 0.1) ** 2 / (0.14 + 1)
expected_right_gain = 0.710526315789
# (0.4 + -1 + 2.4 - 0.1) ** 2 / (0.24 + 0.14 + 0.4 + 1)
expected_bias_gain = 1.6235955056179772
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-8 + 0.1) / (0.26 + 1)
expected_left_weight = -6.26984126984
# (-8 + 0.1) ** 2 / (0.26 + 1)
expected_left_gain = 49.5317460317
expected_right_weight = 0
expected_right_gain = 0
# (-8 + 0.1) ** 2 / (0.26 + 1)
expected_bias_gain = 49.5317460317
# Verify candidate for partition 1, there's only one active feature here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testGenerateFeatureSplitCandidatesMulticlass(self):
with self.cached_session() as sess:
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
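      # Check the split on partition 0. Only the shape of the leaf weight
      # vectors and the chosen feature ids are asserted; the exact multiclass
      # weights are not checked here.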
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(1, split_node.feature_id)
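      # Check the split on partition 1.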
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, len(right_child.value))
self.assertEqual(1, split_node.feature_id)
def testEmpty(self):
with self.cached_session() as sess:
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = array_ops.constant([], dtype=dtypes.int64, shape=[0, 2])
values = array_ops.constant([], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
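      # The sparse column had no entries, so no per-feature statistics were
      # accumulated and no candidate splits are produced.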
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testInactive(self):
with self.cached_session() as sess:
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, False]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
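      # The handler was marked inactive for this update, so the statistics were
      # ignored and no candidate splits are produced.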
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |